Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)

Merge branch 'master' into BLAKE3

commit 5b4fb4bf66

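The change repeated in every hunk below swaps the `$(...)`-based container cleanup for a pipe into `xargs` in each workflow's Cleanup step. A rough shell illustration of the difference (assuming Docker and GNU xargs on the CI runners; `||:` just ignores a non-zero exit status):

# Old form: the unquoted $(docker ps -q) is why each line carried a
# "shellcheck disable=SC2046" suppression, and "docker kill" errors out
# when no containers are running (the error is swallowed by "||:").
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:

# New form: --no-run-if-empty makes xargs run nothing when "docker ps"
# prints no IDs, so neither the suppression nor the spurious error occurs.
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
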
.github/workflows/backport_branches.yml (90 lines changed)

@@ -112,10 +112,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 #########################################################################################
 #################################### ORDINARY BUILDS ####################################
@@ -162,10 +160,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebAarch64:
     needs: [DockerHubPush]
@@ -209,10 +205,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebAsan:
     needs: [DockerHubPush]
@@ -254,10 +248,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebTsan:
     needs: [DockerHubPush]
@@ -299,10 +291,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebDebug:
     needs: [DockerHubPush]
@@ -344,10 +334,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinDarwin:
     needs: [DockerHubPush]
@@ -391,10 +379,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinDarwinAarch64:
     needs: [DockerHubPush]
@@ -438,10 +424,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
@@ -468,10 +452,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ############################################################################################
 ##################################### BUILD REPORTER #######################################
@@ -514,10 +496,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   BuilderSpecialReport:
     needs:
@@ -554,10 +534,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ########################### FUNCTIONAl STATELESS TESTS #######################################
@@ -594,10 +572,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ############################ FUNCTIONAl STATEFUL TESTS #######################################
@@ -634,10 +610,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ######################################### STRESS TESTS #######################################
@@ -677,10 +651,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
@@ -716,10 +688,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FinishCheck:
     needs:

.github/workflows/cherry_pick.yml (6 lines changed)

@@ -40,8 +40,6 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"

.github/workflows/docs_check.yml (12 lines changed)

@@ -125,10 +125,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   DocsCheck:
     needs: DockerHubPush
@@ -158,10 +156,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FinishCheck:
     needs:

.github/workflows/docs_release.yml (6 lines changed)

@@ -116,8 +116,6 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"

.github/workflows/jepsen.yml (6 lines changed)

@@ -36,8 +36,6 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"

.github/workflows/master.yml (501 lines changed)

@@ -112,10 +112,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   CompatibilityCheck:
     needs: [BuilderDebRelease]
@@ -146,10 +144,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   SharedBuildSmokeTest:
     needs: [BuilderDebShared]
@@ -180,10 +176,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 #########################################################################################
 #################################### ORDINARY BUILDS ####################################
@@ -230,10 +224,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   BuilderDebAarch64:
     needs: [DockerHubPush]
@@ -273,10 +265,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinRelease:
     needs: [DockerHubPush]
@@ -320,56 +310,9 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-  # BuilderBinGCC:
-  #   needs: [DockerHubPush]
-  #   runs-on: [self-hosted, builder]
-  #   steps:
-  #     - name: Set envs
-  #       run: |
-  #         cat >> "$GITHUB_ENV" << 'EOF'
-  #         TEMP_PATH=${{runner.temp}}/build_check
-  #         IMAGES_PATH=${{runner.temp}}/images_path
-  #         REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-  #         CACHES_PATH=${{runner.temp}}/../ccaches
-  #         BUILD_NAME=binary_gcc
-  #         EOF
-  #     - name: Download changed images
-  #       uses: actions/download-artifact@v2
-  #       with:
-  #         name: changed_images
-  #         path: ${{ env.IMAGES_PATH }}
-  #     - name: Clear repository
-  #       run: |
-  #         sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-  #     - name: Check out repository code
-  #       uses: actions/checkout@v2
-  #     - name: Build
-  #       run: |
-  #         git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-  #         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
-  #         sudo rm -fr "$TEMP_PATH"
-  #         mkdir -p "$TEMP_PATH"
-  #         cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-  #         cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-  #     - name: Upload build URLs to artifacts
-  #       if: ${{ success() || failure() }}
-  #       uses: actions/upload-artifact@v2
-  #       with:
-  #         name: ${{ env.BUILD_URLS }}
-  #         path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-  #     - name: Cleanup
-  #       if: always()
-  #       run: |
-  #         # shellcheck disable=SC2046
-  #         docker kill $(docker ps -q) ||:
-  #         # shellcheck disable=SC2046
-  #         docker rm -f $(docker ps -a -q) ||:
-  #         sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebAsan:
     needs: [DockerHubPush]
     runs-on: [self-hosted, builder]
@@ -410,10 +353,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebUBsan:
     needs: [DockerHubPush]
@@ -455,10 +396,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebTsan:
     needs: [DockerHubPush]
@@ -500,10 +439,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebMsan:
     needs: [DockerHubPush]
@@ -545,10 +482,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderDebDebug:
     needs: [DockerHubPush]
@@ -590,10 +525,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ##########################################################################################
 ##################################### SPECIAL BUILDS #####################################
@@ -638,10 +571,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinClangTidy:
     needs: [DockerHubPush]
@@ -683,10 +614,8 @@ jobs:
      - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinDarwin:
     needs: [DockerHubPush]
@@ -730,10 +659,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinAarch64:
     needs: [DockerHubPush]
@@ -777,10 +704,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinFreeBSD:
     needs: [DockerHubPush]
@@ -824,10 +749,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinDarwinAarch64:
     needs: [DockerHubPush]
@@ -871,10 +794,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinPPC64:
     needs: [DockerHubPush]
@@ -918,10 +839,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   BuilderBinAmd64SSE2:
     needs: [DockerHubPush]
@@ -965,10 +884,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
@@ -995,10 +912,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ############################################################################################
 ##################################### BUILD REPORTER #######################################
@@ -1045,10 +960,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   BuilderSpecialReport:
     needs:
@@ -1092,10 +1005,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ########################### FUNCTIONAl STATELESS TESTS #######################################
@@ -1132,10 +1043,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestReleaseDatabaseOrdinary:
     needs: [BuilderDebRelease]
@@ -1169,10 +1078,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestReleaseDatabaseReplicated0:
     needs: [BuilderDebRelease]
@@ -1208,10 +1115,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestReleaseDatabaseReplicated1:
     needs: [BuilderDebRelease]
@@ -1247,10 +1152,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestReleaseS3:
     needs: [BuilderDebRelease]
@@ -1284,10 +1187,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
@@ -1321,10 +1222,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAsan0:
     needs: [BuilderDebAsan]
@@ -1360,10 +1259,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAsan1:
     needs: [BuilderDebAsan]
@@ -1399,10 +1296,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestTsan0:
     needs: [BuilderDebTsan]
@@ -1438,10 +1333,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestTsan1:
     needs: [BuilderDebTsan]
@@ -1477,10 +1370,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestTsan2:
     needs: [BuilderDebTsan]
@@ -1516,10 +1407,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestUBsan:
     needs: [BuilderDebUBsan]
@@ -1553,10 +1442,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestMsan0:
     needs: [BuilderDebMsan]
@@ -1592,10 +1479,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestMsan1:
     needs: [BuilderDebMsan]
@@ -1631,10 +1516,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestMsan2:
     needs: [BuilderDebMsan]
@@ -1670,10 +1553,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestDebug0:
     needs: [BuilderDebDebug]
@@ -1709,10 +1590,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestDebug1:
     needs: [BuilderDebDebug]
@@ -1748,10 +1627,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestDebug2:
     needs: [BuilderDebDebug]
@@ -1787,10 +1664,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ############################ FUNCTIONAl STATEFUL TESTS #######################################
@@ -1827,10 +1702,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestAarch64:
     needs: [BuilderDebAarch64]
@@ -1864,10 +1737,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestAsan:
     needs: [BuilderDebAsan]
@@ -1901,10 +1772,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestTsan:
     needs: [BuilderDebTsan]
@@ -1938,10 +1807,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestMsan:
     needs: [BuilderDebMsan]
@@ -1975,10 +1842,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestUBsan:
     needs: [BuilderDebUBsan]
@@ -2012,10 +1877,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   FunctionalStatefulTestDebug:
     needs: [BuilderDebDebug]
@@ -2049,10 +1912,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ######################################### STRESS TESTS #######################################
@@ -2088,10 +1949,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   StressTestTsan:
     needs: [BuilderDebTsan]
@@ -2128,10 +1987,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   StressTestMsan:
     needs: [BuilderDebMsan]
@@ -2164,10 +2021,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   StressTestUBsan:
     needs: [BuilderDebUBsan]
@@ -2200,10 +2055,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   StressTestDebug:
     needs: [BuilderDebDebug]
@@ -2236,10 +2089,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
@@ -2277,10 +2128,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsAsan1:
     needs: [BuilderDebAsan]
@@ -2315,10 +2164,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsAsan2:
     needs: [BuilderDebAsan]
@@ -2353,10 +2200,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan0:
     needs: [BuilderDebTsan]
@@ -2391,10 +2236,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan1:
     needs: [BuilderDebTsan]
@@ -2429,10 +2272,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan2:
     needs: [BuilderDebTsan]
@@ -2467,10 +2308,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan3:
     needs: [BuilderDebTsan]
@@ -2505,10 +2344,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsRelease0:
     needs: [BuilderDebRelease]
@@ -2543,10 +2380,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   IntegrationTestsRelease1:
     needs: [BuilderDebRelease]
@@ -2581,10 +2416,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ##################################### AST FUZZERS ############################################
@@ -2620,10 +2453,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   ASTFuzzerTestTsan:
     needs: [BuilderDebTsan]
@@ -2656,10 +2487,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   ASTFuzzerTestUBSan:
     needs: [BuilderDebUBsan]
@@ -2692,10 +2521,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   ASTFuzzerTestMSan:
     needs: [BuilderDebMsan]
@@ -2728,10 +2555,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
   ASTFuzzerTestDebug:
     needs: [BuilderDebDebug]
@@ -2764,10 +2589,8 @@ jobs:
       - name: Cleanup
         if: always()
         run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
 #############################################################################################
 #################################### UNIT TESTS #############################################
|
||||||
@ -2803,10 +2626,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
UnitTestsReleaseClang:
|
UnitTestsReleaseClang:
|
||||||
needs: [BuilderBinRelease]
|
needs: [BuilderBinRelease]
|
||||||
@ -2839,10 +2660,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
# UnitTestsReleaseGCC:
|
# UnitTestsReleaseGCC:
|
||||||
# needs: [BuilderBinGCC]
|
# needs: [BuilderBinGCC]
|
||||||
@ -2875,10 +2694,8 @@ jobs:
|
|||||||
# - name: Cleanup
|
# - name: Cleanup
|
||||||
# if: always()
|
# if: always()
|
||||||
# run: |
|
# run: |
|
||||||
# # shellcheck disable=SC2046
|
# docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
# docker kill $(docker ps -q) ||:
|
# docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# # shellcheck disable=SC2046
|
|
||||||
# docker rm -f $(docker ps -a -q) ||:
|
|
||||||
# sudo rm -fr "$TEMP_PATH"
|
# sudo rm -fr "$TEMP_PATH"
|
||||||
UnitTestsTsan:
|
UnitTestsTsan:
|
||||||
needs: [BuilderDebTsan]
|
needs: [BuilderDebTsan]
|
||||||
@ -2911,10 +2728,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
UnitTestsMsan:
|
UnitTestsMsan:
|
||||||
needs: [BuilderDebMsan]
|
needs: [BuilderDebMsan]
|
||||||
@ -2947,10 +2762,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
UnitTestsUBsan:
|
UnitTestsUBsan:
|
||||||
needs: [BuilderDebUBsan]
|
needs: [BuilderDebUBsan]
|
||||||
@ -2983,10 +2796,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
#############################################################################################
|
#############################################################################################
|
||||||
#################################### PERFORMANCE TESTS ######################################
|
#################################### PERFORMANCE TESTS ######################################
|
||||||
@ -3024,10 +2835,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
PerformanceComparisonX86-1:
|
PerformanceComparisonX86-1:
|
||||||
needs: [BuilderDebRelease]
|
needs: [BuilderDebRelease]
|
||||||
@ -3062,10 +2871,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
PerformanceComparisonX86-2:
|
PerformanceComparisonX86-2:
|
||||||
needs: [BuilderDebRelease]
|
needs: [BuilderDebRelease]
|
||||||
@ -3100,10 +2907,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
PerformanceComparisonX86-3:
|
PerformanceComparisonX86-3:
|
||||||
needs: [BuilderDebRelease]
|
needs: [BuilderDebRelease]
|
||||||
@ -3138,10 +2943,8 @@ jobs:
|
|||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
# shellcheck disable=SC2046
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker kill $(docker ps -q) ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
# shellcheck disable=SC2046
|
|
||||||
docker rm -f $(docker ps -a -q) ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
FinishCheck:
|
FinishCheck:
|
||||||
needs:
|
needs:
|
||||||
|
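Every Cleanup hunk in these workflow diffs makes the same two-line substitution. The bash sketch below is an editorial illustration only (my reading of the motivation; the commit itself does not explain it): it contrasts the removed command-substitution form, which needed the `# shellcheck disable=SC2046` suppressions, with the added `xargs` form.

```bash
# Illustrative sketch only; not part of the commit.

# Removed form: SC2046 warns because the unquoted $(...) is word-split by the
# shell, and with no running containers `docker kill` is invoked without
# arguments and fails, which the trailing "||:" then has to swallow.
docker kill $(docker ps -q) ||:

# Added form: the container IDs are passed through xargs, and
# --no-run-if-empty skips the `docker kill` invocation entirely when the
# list is empty.
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
```

With `--no-run-if-empty`, `xargs` simply runs nothing when `docker ps` prints no IDs, so the `||:` guard no longer has to hide a failure caused by an empty argument list.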
6  .github/workflows/nightly.yml  vendored

@@ -119,8 +119,6 @@ jobs:
 - name: Cleanup
 if: always()
 run: |
-# shellcheck disable=SC2046
-docker kill $(docker ps -q) ||:
-# shellcheck disable=SC2046
-docker rm -f $(docker ps -a -q) ||:
+docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
522  .github/workflows/pull_request.yml  vendored
(File diff suppressed because it is too large.)

6  .github/workflows/release.yml  vendored
@@ -66,8 +66,6 @@ jobs:
 - name: Cleanup
 if: always()
 run: |
-# shellcheck disable=SC2046
-docker kill $(docker ps -q) ||:
-# shellcheck disable=SC2046
-docker rm -f $(docker ps -a -q) ||:
+docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
 sudo rm -fr "$TEMP_PATH"

288  .github/workflows/release_branches.yml  vendored
@@ -103,10 +103,8 @@ jobs:
 - name: Cleanup
 if: always()
 run: |
-# shellcheck disable=SC2046
-docker kill $(docker ps -q) ||:
-# shellcheck disable=SC2046
-docker rm -f $(docker ps -a -q) ||:
+docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
 sudo rm -fr "$TEMP_PATH"
 #########################################################################################
 #################################### ORDINARY BUILDS ####################################

The remaining hunks in release_branches.yml apply the identical replacement to each job's Cleanup step. Hunks marked with * end with `sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"`; all others end with `sudo rm -fr "$TEMP_PATH"`. The job or section banner that follows each hunk is given in parentheses:

- @@ -153,10 +151,8 @@ jobs: * (BuilderDebAarch64, needs: [DockerHubPush])
- @@ -196,10 +192,8 @@ jobs: * (BuilderDebAsan, needs: [DockerHubPush])
- @@ -241,10 +235,8 @@ jobs: * (BuilderDebUBsan, needs: [DockerHubPush])
- @@ -286,10 +278,8 @@ jobs: * (BuilderDebTsan, needs: [DockerHubPush])
- @@ -331,10 +321,8 @@ jobs: * (BuilderDebMsan, needs: [DockerHubPush])
- @@ -376,10 +364,8 @@ jobs: * (BuilderDebDebug, needs: [DockerHubPush])
- @@ -421,10 +407,8 @@ jobs: * (BuilderBinDarwin, needs: [DockerHubPush])
- @@ -468,10 +452,8 @@ jobs: * (BuilderBinDarwinAarch64, needs: [DockerHubPush])
- @@ -515,10 +497,8 @@ jobs: * (before the "Docker images" section banner)
- @@ -545,10 +525,8 @@ jobs: (before the "BUILD REPORTER" section banner)
- @@ -594,10 +572,8 @@ jobs: (BuilderSpecialReport)
- @@ -634,10 +610,8 @@ jobs: (before the "FUNCTIONAl STATELESS TESTS" section banner)
- @@ -674,10 +648,8 @@ jobs: (FunctionalStatelessTestAarch64, needs: [BuilderDebAarch64])
- @@ -711,10 +683,8 @@ jobs: (FunctionalStatelessTestAsan0, needs: [BuilderDebAsan])
- @@ -750,10 +720,8 @@ jobs: (FunctionalStatelessTestAsan1, needs: [BuilderDebAsan])
- @@ -789,10 +757,8 @@ jobs: (FunctionalStatelessTestTsan0, needs: [BuilderDebTsan])
- @@ -828,10 +794,8 @@ jobs: (FunctionalStatelessTestTsan1, needs: [BuilderDebTsan])
- @@ -867,10 +831,8 @@ jobs: (FunctionalStatelessTestTsan2, needs: [BuilderDebTsan])
- @@ -906,10 +868,8 @@ jobs: (FunctionalStatelessTestUBsan, needs: [BuilderDebUBsan])
- @@ -943,10 +903,8 @@ jobs: (FunctionalStatelessTestMsan0, needs: [BuilderDebMsan])
- @@ -982,10 +940,8 @@ jobs: (FunctionalStatelessTestMsan1, needs: [BuilderDebMsan])
- @@ -1021,10 +977,8 @@ jobs: (FunctionalStatelessTestMsan2, needs: [BuilderDebMsan])
- @@ -1060,10 +1014,8 @@ jobs: (FunctionalStatelessTestDebug0, needs: [BuilderDebDebug])
- @@ -1099,10 +1051,8 @@ jobs: (FunctionalStatelessTestDebug1, needs: [BuilderDebDebug])
- @@ -1138,10 +1088,8 @@ jobs: (FunctionalStatelessTestDebug2, needs: [BuilderDebDebug])
- @@ -1177,10 +1125,8 @@ jobs: (before the "FUNCTIONAl STATEFUL TESTS" section banner)
- @@ -1217,10 +1163,8 @@ jobs: (FunctionalStatefulTestAarch64, needs: [BuilderDebAarch64])
- @@ -1254,10 +1198,8 @@ jobs: (FunctionalStatefulTestAsan, needs: [BuilderDebAsan])
- @@ -1291,10 +1233,8 @@ jobs: (FunctionalStatefulTestTsan, needs: [BuilderDebTsan])
- @@ -1328,10 +1268,8 @@ jobs: (FunctionalStatefulTestMsan, needs: [BuilderDebMsan])
- @@ -1365,10 +1303,8 @@ jobs: (FunctionalStatefulTestUBsan, needs: [BuilderDebUBsan])
- @@ -1402,10 +1338,8 @@ jobs: (FunctionalStatefulTestDebug, needs: [BuilderDebDebug])
- @@ -1439,10 +1373,8 @@ jobs: (before the "STRESS TESTS" section banner)
- @@ -1478,10 +1410,8 @@ jobs: (StressTestTsan, needs: [BuilderDebTsan])
- @@ -1518,10 +1448,8 @@ jobs: (StressTestMsan, needs: [BuilderDebMsan])
- @@ -1554,10 +1482,8 @@ jobs: (StressTestUBsan, needs: [BuilderDebUBsan])
- @@ -1590,10 +1516,8 @@ jobs: (StressTestDebug, needs: [BuilderDebDebug])
- @@ -1626,10 +1550,8 @@ jobs: (before the "INTEGRATION TESTS" section banner)
- @@ -1667,10 +1589,8 @@ jobs: (IntegrationTestsAsan1, needs: [BuilderDebAsan])
- @@ -1705,10 +1625,8 @@ jobs: (IntegrationTestsAsan2, needs: [BuilderDebAsan])
- @@ -1743,10 +1661,8 @@ jobs: (IntegrationTestsTsan0, needs: [BuilderDebTsan])
- @@ -1781,10 +1697,8 @@ jobs: (IntegrationTestsTsan1, needs: [BuilderDebTsan])
- @@ -1819,10 +1733,8 @@ jobs: (IntegrationTestsTsan2, needs: [BuilderDebTsan])
- @@ -1857,10 +1769,8 @@ jobs: (IntegrationTestsTsan3, needs: [BuilderDebTsan])
- @@ -1895,10 +1805,8 @@ jobs: (IntegrationTestsRelease0, needs: [BuilderDebRelease])
- @@ -1933,10 +1841,8 @@ jobs: (IntegrationTestsRelease1, needs: [BuilderDebRelease])
- @@ -1971,10 +1877,8 @@ jobs: (FinishCheck)
6  .github/workflows/woboq.yml  vendored

@@ -37,8 +37,6 @@ jobs:
 - name: Cleanup
 if: always()
 run: |
-# shellcheck disable=SC2046
-docker kill $(docker ps -q) ||:
-# shellcheck disable=SC2046
-docker rm -f $(docker ps -a -q) ||:
+docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
 sudo rm -fr "$TEMP_PATH"
@@ -8,6 +8,7 @@
  * As a drawback, this only works if no dynamic object unloading happens after this point.
  * This function is thread-safe. You should call it to update cache after loading new shared libraries.
  * Otherwise exception handling from dlopened libraries won't work (will call std::terminate immediately).
+ * NOTE: dlopen is forbidden in our code.
  *
  * NOTE: It is disabled with Thread Sanitizer because TSan can only use original "dl_iterate_phdr" function.
  */
2  contrib/NuRaft  vendored

@@ -1 +1 @@
-Subproject commit e15858f8ad0ce8aba85cf74e3763874c76bf927c
+Subproject commit 1be805e7cb2494aa8170015493474379b0362dfc

2  contrib/replxx  vendored

@@ -1 +1 @@
-Subproject commit 3fd0e3c9364a589447453d9906d854ebd8d385c5
+Subproject commit 5d04501f93a4fb7f0bb8b73b8f614bc986f9e25b
@@ -2,8 +2,10 @@ set (SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/zlib-ng)

 add_definitions(-DZLIB_COMPAT)
 add_definitions(-DWITH_GZFILEOP)
+if(NOT ARCH_S390X)
 add_definitions(-DUNALIGNED_OK)
 add_definitions(-DUNALIGNED64_OK)
+endif()

 set (HAVE_UNISTD_H 1)
 add_definitions(-D_LARGEFILE64_SOURCE=1 -D__USE_LARGEFILE64)
@@ -28,10 +28,9 @@ done
 set -e

 # cleanup for retry run if volume is not recreated
-# shellcheck disable=SC2046
 {
-docker ps -aq | xargs -r docker kill || true
-docker ps -aq | xargs -r docker rm || true
+docker ps --all --quiet | xargs --no-run-if-empty docker kill || true
+docker ps --all --quiet | xargs --no-run-if-empty docker rm || true
 }

 echo "Start tests"
@@ -3145,6 +3145,17 @@ Result:
 └─────┴─────┴───────┘
 ```

+## enable_extended_results_for_datetime_functions {#enable-extended-results-for-datetime-functions}
+
+Enables or disables returning results of type `Date32` with extended range (compared to type `Date`) for functions [toStartOfYear](../../sql-reference/functions/date-time-functions.md#tostartofyear), [toStartOfISOYear](../../sql-reference/functions/date-time-functions.md#tostartofisoyear), [toStartOfQuarter](../../sql-reference/functions/date-time-functions.md#tostartofquarter), [toStartOfMonth](../../sql-reference/functions/date-time-functions.md#tostartofmonth), [toStartOfWeek](../../sql-reference/functions/date-time-functions.md#tostartofweek), [toMonday](../../sql-reference/functions/date-time-functions.md#tomonday) and [toLastDayOfMonth](../../sql-reference/functions/date-time-functions.md#tolastdayofmonth).
+
+Possible values:
+
+- 0 — Functions return `Date` for all types of arguments.
+- 1 — Functions return `Date32` for `Date32` or `DateTime64` arguments and `Date` otherwise.
+
+Default value: `0`.
+
 ## optimize_move_to_prewhere {#optimize_move_to_prewhere}

 Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries.
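A hypothetical usage sketch of the setting documented in the hunk above; it is not part of the commit and assumes a running ClickHouse server reachable with clickhouse-client.

```bash
# Hypothetical example; not part of the commit.

# Default (0): toStartOfMonth returns Date for every argument type.
clickhouse-client --query "SELECT toStartOfMonth(toDate32('1969-05-15'))
    SETTINGS enable_extended_results_for_datetime_functions = 0"

# Extended (1): the same call returns Date32, so the pre-1970 date is preserved.
clickhouse-client --query "SELECT toStartOfMonth(toDate32('1969-05-15'))
    SETTINGS enable_extended_results_for_datetime_functions = 1"
```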
@@ -268,13 +268,15 @@ Result:
 ```

 :::note
-The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday` functions described below is `Date` or `DateTime`.
-Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
-In case argument is out of normal range:
+The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default.
+
+Behavior for
+* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOf*`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. In case argument is out of normal range:
 * If the argument is smaller than 1970, the result will be calculated from the argument `1970-01-01 (00:00:00)` instead.
 * If the return type is `DateTime` and the argument is larger than `2106-02-07 08:28:15`, the result will be calculated from the argument `2106-02-07 08:28:15` instead.
 * If the return type is `Date` and the argument is larger than `2149-06-06`, the result will be calculated from the argument `2149-06-06` instead.
 * If `toLastDayOfMonth` is called with an argument greater then `2149-05-31`, the result will be calculated from the argument `2149-05-31` instead.
+* `enable_extended_results_for_datetime_functions = 1`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is a `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is a `Date32` or `DateTime64`.
 :::

 ## toStartOfYear
@@ -303,6 +305,8 @@ Returns the date.
 Rounds up a date or date with time to the last day of the month.
 Returns the date.

+If `toLastDayOfMonth` is called with an argument of type `Date` greater then 2149-05-31, the result will be calculated from the argument 2149-05-31 instead.
+
 ## toMonday

 Rounds down a date or date with time to the nearest Monday.
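A minimal illustration of the clamping rule added above (my sketch, not from the diff):

```bash
# Editorial sketch; not part of the commit. 2149-06-06 is beyond 2149-05-31,
# so per the added note the result should be computed from 2149-05-31.
clickhouse-client --query "SELECT toLastDayOfMonth(toDate('2149-06-06'))"
```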
@@ -213,9 +213,10 @@ If the `WITH TOTALS` modifier is specified, another row will be calculated. This

 This extra row is only produced in `JSON*`, `TabSeparated*`, and `Pretty*` formats, separately from the other rows:

-- In `JSON*` formats, this row is output as a separate ‘totals’ field.
-- In `TabSeparated*` formats, the row comes after the main result, preceded by an empty row (after the other data).
+- In `XML` and `JSON*` formats, this row is output as a separate ‘totals’ field.
+- In `TabSeparated*`, `CSV*` and `Vertical` formats, the row comes after the main result, preceded by an empty row (after the other data).
 - In `Pretty*` formats, the row is output as a separate table after the main result.
+- In `Template` format, the row is output according to specified template.
 - In the other formats it is not available.

 :::note
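An illustrative query (not part of the commit) that produces the extra 'totals' row discussed above; in the `TabSeparated*` family it appears after an empty line that follows the main result.

```bash
# Illustrative only; not part of the commit.
clickhouse-client --query "
    SELECT number % 2 AS k, count() AS c
    FROM numbers(10)
    GROUP BY k WITH TOTALS
    FORMAT TabSeparated"
```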
@@ -135,9 +135,9 @@ In all other cases, we do not recommend using the asterisk, since it only gives

 In addition to results, you can also get minimum and maximum values for the results columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output.

-An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, and `Pretty*` [formats](../../../interfaces/formats.md), separate from the other rows. They are not output for other formats.
+An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `XML`, `JSON*`, `TabSeparated*`, `CSV*`, `Vertical`, `Template` and `Pretty*` [formats](../../../interfaces/formats.md), separate from the other rows. They are not output for other formats.

-In `JSON*` formats, the extreme values are output in a separate ‘extremes’ field. In `TabSeparated*` formats, the row comes after the main result, and after ‘totals’ if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present.
+In `JSON*` and `XML` formats, the extreme values are output in a separate ‘extremes’ field. In `TabSeparated*`, `CSV*` and `Vertical` formats, the row comes after the main result, and after ‘totals’ if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present. In `Template` format the extreme values are output according to specified template.

 Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`.
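An illustrative query (not part of the commit) showing the two extra 'extremes' rows described above.

```bash
# Illustrative only; not part of the commit.
clickhouse-client --query "
    SELECT number
    FROM numbers(5)
    SETTINGS extremes = 1
    FORMAT Pretty"
```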
@ -3799,6 +3799,17 @@ Exception: Total regexp lengths too large.
Default value: `1`.

## enable_extended_results_for_datetime_functions {#enable-extended-results-for-datetime-functions}

Enables or disables returning results of type `Date32` with an extended range (compared to the type `Date`) for the functions [toStartOfYear](../../sql-reference/functions/date-time-functions.md#tostartofyear), [toStartOfISOYear](../../sql-reference/functions/date-time-functions.md#tostartofisoyear), [toStartOfQuarter](../../sql-reference/functions/date-time-functions.md#tostartofquarter), [toStartOfMonth](../../sql-reference/functions/date-time-functions.md#tostartofmonth), [toStartOfWeek](../../sql-reference/functions/date-time-functions.md#tostartofweek), [toMonday](../../sql-reference/functions/date-time-functions.md#tomonday) and [toLastDayOfMonth](../../sql-reference/functions/date-time-functions.md#tolastdayofmonth).

Possible values:

- 0 — Functions return `Date` for all argument types.
- 1 — Functions return `Date32` for `Date32` or `DateTime64` arguments and `Date` otherwise.

Default value: `0`.

**Example**

Query:
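A sketch of a pair of queries that show the effect on the result type (the date literal is arbitrary):

```sql
-- With the default value (0) the function returns Date even for a DateTime64 argument.
SELECT toTypeName(toStartOfMonth(toDateTime64('2023-04-21 10:20:30', 3)))
SETTINGS enable_extended_results_for_datetime_functions = 0;

-- With 1 it returns Date32 for Date32 or DateTime64 arguments.
SELECT toTypeName(toStartOfMonth(toDateTime64('2023-04-21 10:20:30', 3)))
SETTINGS enable_extended_results_for_datetime_functions = 1;
```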
|
@ -268,24 +268,18 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
```

:::note
The return type of the functions `toStartOf*`, `toMonday` described below is `Date` or `DateTime`.

The return type of the functions `toStartOf*`, `toLastDayOfMonth`, `toMonday` described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions), which is `0` by default.

Although these functions can accept values of type `Date32` or `DateTime64` as an argument, processing an argument outside the normal range of values (`1970` - `2148` for `Date` and `1970-01-01 00:00:00`-`2106-02-07 08:28:15` for `DateTime`) produces an incorrect result.

Return values for values outside the normal range:

Behavior for

* `enable_extended_results_for_datetime_functions = 0`: The functions `toStartOf*`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Although these functions can accept values of type `Date32` or `DateTime64` as an argument, processing an argument outside the normal range of values (`1970` - `2148` for `Date` and `1970-01-01 00:00:00`-`2106-02-07 08:28:15` for `DateTime`) produces an incorrect result.

If the argument value is outside the normal range:

* `1970-01-01 (00:00:00)` is returned for times before 1970,
* `2106-02-07 08:28:15` is taken as the argument if the given argument exceeds this value and the return type is `DateTime`,
* `2149-06-06` is taken as the argument if the given argument exceeds this value and the return type is `Date`,
* `2149-05-31` is the result of `toLastDayOfMonth` when processing an argument greater than `2149-05-31`.

* `enable_extended_results_for_datetime_functions = 1`: The functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is `Date32` or `DateTime64`.
:::

:::note
The return type of the functions `toStartOf*`, `toLastDayOfMonth`, `toMonday` described below is `Date` or `DateTime`.

Although these functions can accept values of type `Date32` or `DateTime64` as an argument, processing an argument outside the normal range of values (`1970` - `2148` for `Date` and `1970-01-01 00:00:00`-`2106-02-07 08:28:15` for `DateTime`) produces an incorrect result.

Return values for values outside the normal range:

* `1970-01-01 (00:00:00)` is returned for times before 1970,
* `2106-02-07 08:28:15` is taken as the argument if the given argument exceeds this value and the return type is `DateTime`,
* `2149-06-06` is taken as the argument if the given argument exceeds this value and the return type is `Date`.
:::

## toStartOfYear {#tostartofyear}

Rounds down a date or date-with-time to the first day of the year.
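For example (the date literal is arbitrary):

```sql
SELECT toStartOfYear(toDate('2023-04-21'));  -- 2023-01-01
```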
@ -324,6 +318,8 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;
Rounds a date or date-with-time to the last day of the month.

Returns a date.

If `toLastDayOfMonth` is called with a `Date` argument greater than 2149-05-31, the result is computed from the argument 2149-05-31.
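A couple of illustrative calls; the second one follows the clamping rule above:

```sql
SELECT toLastDayOfMonth(toDate('2023-02-15'));  -- 2023-02-28
SELECT toLastDayOfMonth(toDate('2149-06-01'));  -- computed from 2149-05-31, so 2149-05-31
```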
## toMonday {#tomonday}

Rounds down a date or date-with-time to the nearest Monday.
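For example (2023-04-21 is a Friday):

```sql
SELECT toMonday(toDate('2023-04-21'));  -- 2023-04-17
```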
|
@ -1,2 +1 @@
|
|||||||
extern int mainEntryClickHouseBenchmark(int argc, char ** argv);
|
extern int mainEntryClickHouseBenchmark(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseBenchmark(argc_, argv_); }
|
|
||||||
|
@ -150,7 +150,7 @@ std::vector<String> Client::loadWarningMessages()
|
|||||||
|
|
||||||
size_t rows = packet.block.rows();
|
size_t rows = packet.block.rows();
|
||||||
for (size_t i = 0; i < rows; ++i)
|
for (size_t i = 0; i < rows; ++i)
|
||||||
messages.emplace_back(column.getDataAt(i).toString());
|
messages.emplace_back(column[i].get<String>());
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseClient(int argc, char ** argv);
|
int mainEntryClickHouseClient(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseClient(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseCompressor(int argc, char ** argv);
|
int mainEntryClickHouseCompressor(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseCompressor(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseClusterCopier(int argc, char ** argv);
|
int mainEntryClickHouseClusterCopier(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseClusterCopier(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseDisks(int argc, char ** argv);
|
int mainEntryClickHouseDisks(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseDisks(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
|
int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseExtractFromConfig(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseFormat(int argc, char ** argv);
|
int mainEntryClickHouseFormat(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseFormat(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseGitImport(int argc, char ** argv);
|
int mainEntryClickHouseGitImport(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseGitImport(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseKeeperConverter(int argc, char ** argv);
|
int mainEntryClickHouseKeeperConverter(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseKeeperConverter(argc_, argv_); }
|
|
||||||
|
@ -9,6 +9,7 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
|
|||||||
LibraryBridge.cpp
|
LibraryBridge.cpp
|
||||||
LibraryBridgeHandlerFactory.cpp
|
LibraryBridgeHandlerFactory.cpp
|
||||||
LibraryBridgeHandlers.cpp
|
LibraryBridgeHandlers.cpp
|
||||||
|
SharedLibrary.cpp
|
||||||
library-bridge.cpp
|
library-bridge.cpp
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -98,7 +98,7 @@ void placeStringColumn(const ColumnString & column, const char ** buffer, size_t
|
|||||||
size_t size = column.size();
|
size_t size = column.size();
|
||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
{
|
{
|
||||||
*buffer = const_cast<char *>(column.getDataAtWithTerminatingZero(i).data);
|
*buffer = const_cast<char *>(column.getDataAt(i).data);
|
||||||
buffer += features_count;
|
buffer += features_count;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -7,12 +7,13 @@
|
|||||||
#include <Columns/ColumnVector.h>
|
#include <Columns/ColumnVector.h>
|
||||||
#include <Columns/ColumnsNumber.h>
|
#include <Columns/ColumnsNumber.h>
|
||||||
#include <Columns/IColumn.h>
|
#include <Columns/IColumn.h>
|
||||||
#include <Common/SharedLibrary.h>
|
|
||||||
#include <base/defines.h>
|
#include <base/defines.h>
|
||||||
|
#include "SharedLibrary.h"
|
||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Common/SharedLibrary.h>
|
#include "SharedLibrary.h"
|
||||||
#include <Common/logger_useful.h>
|
#include <Common/logger_useful.h>
|
||||||
#include "ExternalDictionaryLibraryUtils.h"
|
#include "ExternalDictionaryLibraryUtils.h"
|
||||||
|
|
||||||
|
@ -1,8 +1,7 @@
|
|||||||
#include "SharedLibrary.h"
|
#include "SharedLibrary.h"
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <boost/core/noncopyable.hpp>
|
|
||||||
#include <base/phdr_cache.h>
|
#include <base/phdr_cache.h>
|
||||||
#include "Exception.h"
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseLocal(int argc, char ** argv);
|
int mainEntryClickHouseLocal(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseLocal(argc_, argv_); }
|
|
||||||
|
@ -402,6 +402,36 @@ void checkHarmfulEnvironmentVariables(char ** argv)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Don't allow dlopen in the main ClickHouse binary, because it is harmful and insecure.
|
||||||
|
/// We don't use it. But it can be used by some libraries for implementation of "plugins".
|
||||||
|
/// We absolutely discourage the ancient technique of loading
|
||||||
|
/// 3rd-party uncontrolled dangerous libraries into the process address space,
|
||||||
|
/// because it is insane.
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
void * dlopen(const char *, int)
|
||||||
|
{
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void * dlmopen(long, const char *, int) // NOLINT
|
||||||
|
{
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
int dlclose(void *)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char * dlerror()
|
||||||
|
{
|
||||||
|
return "ClickHouse does not allow dynamic library loading";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/// This allows to implement assert to forbid initialization of a class in static constructors.
|
/// This allows to implement assert to forbid initialization of a class in static constructors.
|
||||||
/// Usage:
|
/// Usage:
|
||||||
///
|
///
|
||||||
@ -422,6 +452,7 @@ int main(int argc_, char ** argv_)
|
|||||||
/// PHDR cache is required for query profiler to work reliably
|
/// PHDR cache is required for query profiler to work reliably
|
||||||
/// It also speeds up exception handling, but exceptions from dynamically loaded libraries (dlopen)
|
/// It also speeds up exception handling, but exceptions from dynamically loaded libraries (dlopen)
|
||||||
/// will work only after additional call of this function.
|
/// will work only after additional call of this function.
|
||||||
|
/// Note: we forbid dlopen in our code.
|
||||||
updatePHDRCache();
|
updatePHDRCache();
|
||||||
|
|
||||||
#ifndef DISABLE_HARMFUL_ENV_VAR_CHECK
|
#ifndef DISABLE_HARMFUL_ENV_VAR_CHECK
|
||||||
|
@ -1,3 +1 @@
|
|||||||
int mainEntryClickHouseObfuscator(int argc, char ** argv);
|
int mainEntryClickHouseObfuscator(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseObfuscator(argc_, argv_); }
|
|
||||||
|
|
||||||
|
@ -1,24 +1 @@
|
|||||||
#include <new>
|
|
||||||
|
|
||||||
#include <base/phdr_cache.h>
|
|
||||||
|
|
||||||
|
|
||||||
int mainEntryClickHouseServer(int argc, char ** argv);
|
int mainEntryClickHouseServer(int argc, char ** argv);
|
||||||
|
|
||||||
/**
|
|
||||||
* This is the entry-point for the split build server. The initialization
|
|
||||||
* is copied from single-binary entry point in main.cpp.
|
|
||||||
*/
|
|
||||||
int main(int argc_, char ** argv_)
|
|
||||||
{
|
|
||||||
/// Reset new handler to default (that throws std::bad_alloc)
|
|
||||||
/// It is needed because LLVM library clobbers it.
|
|
||||||
std::set_new_handler(nullptr);
|
|
||||||
|
|
||||||
/// PHDR cache is required for query profiler to work reliably
|
|
||||||
/// It also speed up exception handling, but exceptions from dynamically loaded libraries (dlopen)
|
|
||||||
/// will work only after additional call of this function.
|
|
||||||
updatePHDRCache();
|
|
||||||
|
|
||||||
return mainEntryClickHouseServer(argc_, argv_);
|
|
||||||
}
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseStaticFilesDiskUploader(int argc, char ** argv);
|
int mainEntryClickHouseStaticFilesDiskUploader(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseStaticFilesDiskUploader(argc_, argv_); }
|
|
||||||
|
@ -1,2 +1 @@
|
|||||||
int mainEntryClickHouseSU(int argc, char ** argv);
|
int mainEntryClickHouseSU(int argc, char ** argv);
|
||||||
int main(int argc_, char ** argv_) { return mainEntryClickHouseSU(argc_, argv_); }
|
|
||||||
|
@ -1,12 +1,18 @@
|
|||||||
#include <AggregateFunctions/AggregateFunctionCategoricalInformationValue.h>
|
#include <AggregateFunctions/IAggregateFunction.h>
|
||||||
|
|
||||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||||
#include <AggregateFunctions/FactoryHelpers.h>
|
#include <AggregateFunctions/FactoryHelpers.h>
|
||||||
#include <AggregateFunctions/Helpers.h>
|
#include <AggregateFunctions/Helpers.h>
|
||||||
|
#include <Columns/ColumnArray.h>
|
||||||
|
#include <Columns/ColumnsNumber.h>
|
||||||
|
#include <DataTypes/DataTypeArray.h>
|
||||||
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
|
#include <IO/ReadHelpers.h>
|
||||||
|
#include <IO/WriteHelpers.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
struct Settings;
|
struct Settings;
|
||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
@ -15,6 +21,136 @@ namespace ErrorCodes
|
|||||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** The function takes arguments x1, x2, ... xn, y. All arguments are bool.
|
||||||
|
* x arguments represents the fact that some category is true.
|
||||||
|
*
|
||||||
|
* It calculates how many times y was true and how many times y was false when every n-th category was true
|
||||||
|
* and the total number of times y was true and false.
|
||||||
|
*
|
||||||
|
* So, the size of the state is (n + 1) * 2 cells.
|
||||||
|
*/
|
||||||
|
class AggregateFunctionCategoricalIV final : public IAggregateFunctionHelper<AggregateFunctionCategoricalIV>
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
using Counter = UInt64;
|
||||||
|
size_t category_count;
|
||||||
|
|
||||||
|
Counter & counter(AggregateDataPtr __restrict place, size_t i, bool what) const
|
||||||
|
{
|
||||||
|
return reinterpret_cast<Counter *>(place)[i * 2 + (what ? 1 : 0)];
|
||||||
|
}
|
||||||
|
|
||||||
|
const Counter & counter(ConstAggregateDataPtr __restrict place, size_t i, bool what) const
|
||||||
|
{
|
||||||
|
return reinterpret_cast<const Counter *>(place)[i * 2 + (what ? 1 : 0)];
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
AggregateFunctionCategoricalIV(const DataTypes & arguments_, const Array & params_) :
|
||||||
|
IAggregateFunctionHelper<AggregateFunctionCategoricalIV>{arguments_, params_},
|
||||||
|
category_count{arguments_.size() - 1}
|
||||||
|
{
|
||||||
|
// note: argument types have been checked before
|
||||||
|
}
|
||||||
|
|
||||||
|
String getName() const override
|
||||||
|
{
|
||||||
|
return "categoricalInformationValue";
|
||||||
|
}
|
||||||
|
|
||||||
|
bool allocatesMemoryInArena() const override { return false; }
|
||||||
|
|
||||||
|
void create(AggregateDataPtr __restrict place) const override
|
||||||
|
{
|
||||||
|
memset(place, 0, sizeOfData());
|
||||||
|
}
|
||||||
|
|
||||||
|
void destroy(AggregateDataPtr __restrict) const noexcept override
|
||||||
|
{
|
||||||
|
// nothing
|
||||||
|
}
|
||||||
|
|
||||||
|
bool hasTrivialDestructor() const override
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t sizeOfData() const override
|
||||||
|
{
|
||||||
|
return sizeof(Counter) * (category_count + 1) * 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t alignOfData() const override
|
||||||
|
{
|
||||||
|
return alignof(Counter);
|
||||||
|
}
|
||||||
|
|
||||||
|
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
||||||
|
{
|
||||||
|
const auto * y_col = static_cast<const ColumnUInt8 *>(columns[category_count]);
|
||||||
|
bool y = y_col->getData()[row_num];
|
||||||
|
|
||||||
|
for (size_t i = 0; i < category_count; ++i)
|
||||||
|
{
|
||||||
|
const auto * x_col = static_cast<const ColumnUInt8 *>(columns[i]);
|
||||||
|
bool x = x_col->getData()[row_num];
|
||||||
|
|
||||||
|
if (x)
|
||||||
|
++counter(place, i, y);
|
||||||
|
}
|
||||||
|
|
||||||
|
++counter(place, category_count, y);
|
||||||
|
}
|
||||||
|
|
||||||
|
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
|
||||||
|
{
|
||||||
|
for (size_t i = 0; i <= category_count; ++i)
|
||||||
|
{
|
||||||
|
counter(place, i, false) += counter(rhs, i, false);
|
||||||
|
counter(place, i, true) += counter(rhs, i, true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||||
|
{
|
||||||
|
buf.write(place, sizeOfData());
|
||||||
|
}
|
||||||
|
|
||||||
|
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
|
||||||
|
{
|
||||||
|
buf.read(place, sizeOfData());
|
||||||
|
}
|
||||||
|
|
||||||
|
DataTypePtr getReturnType() const override
|
||||||
|
{
|
||||||
|
return std::make_shared<DataTypeArray>(
|
||||||
|
std::make_shared<DataTypeNumber<Float64>>());
|
||||||
|
}
|
||||||
|
|
||||||
|
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override /// NOLINT
|
||||||
|
{
|
||||||
|
auto & col = static_cast<ColumnArray &>(to);
|
||||||
|
auto & data_col = static_cast<ColumnFloat64 &>(col.getData());
|
||||||
|
auto & offset_col = static_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
|
||||||
|
|
||||||
|
data_col.reserve(data_col.size() + category_count);
|
||||||
|
|
||||||
|
Float64 sum_no = static_cast<Float64>(counter(place, category_count, false));
|
||||||
|
Float64 sum_yes = static_cast<Float64>(counter(place, category_count, true));
|
||||||
|
|
||||||
|
for (size_t i = 0; i < category_count; ++i)
|
||||||
|
{
|
||||||
|
Float64 no = static_cast<Float64>(counter(place, i, false));
|
||||||
|
Float64 yes = static_cast<Float64>(counter(place, i, true));
|
||||||
|
|
||||||
|
data_col.insertValue((no / sum_no - yes / sum_yes) * (log((no / sum_no) / (yes / sum_yes))));
|
||||||
|
}
|
||||||
|
|
||||||
|
offset_col.insertValue(data_col.size());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -39,16 +175,15 @@ AggregateFunctionPtr createAggregateFunctionCategoricalIV(
|
|||||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<AggregateFunctionCategoricalIV<>>(arguments, params);
|
return std::make_shared<AggregateFunctionCategoricalIV>(arguments, params);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void registerAggregateFunctionCategoricalIV(
|
void registerAggregateFunctionCategoricalIV(AggregateFunctionFactory & factory)
|
||||||
AggregateFunctionFactory & factory
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
factory.registerFunction("categoricalInformationValue", createAggregateFunctionCategoricalIV);
|
AggregateFunctionProperties properties = { .returns_default_when_only_null = true };
|
||||||
|
factory.registerFunction("categoricalInformationValue", { createAggregateFunctionCategoricalIV, properties });
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1,135 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <AggregateFunctions/IAggregateFunction.h>
|
|
||||||
#include <Columns/ColumnArray.h>
|
|
||||||
#include <Columns/ColumnsNumber.h>
|
|
||||||
#include <DataTypes/DataTypeArray.h>
|
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
|
||||||
#include <IO/ReadHelpers.h>
|
|
||||||
#include <IO/WriteHelpers.h>
|
|
||||||
|
|
||||||
#include <base/range.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
struct Settings;
|
|
||||||
|
|
||||||
template <typename T = UInt64>
|
|
||||||
class AggregateFunctionCategoricalIV final : public IAggregateFunctionHelper<AggregateFunctionCategoricalIV<T>>
|
|
||||||
{
|
|
||||||
private:
|
|
||||||
size_t category_count;
|
|
||||||
|
|
||||||
public:
|
|
||||||
AggregateFunctionCategoricalIV(const DataTypes & arguments_, const Array & params_) :
|
|
||||||
IAggregateFunctionHelper<AggregateFunctionCategoricalIV<T>> {arguments_, params_},
|
|
||||||
category_count {arguments_.size() - 1}
|
|
||||||
{
|
|
||||||
// notice: argument types has been checked before
|
|
||||||
}
|
|
||||||
|
|
||||||
String getName() const override
|
|
||||||
{
|
|
||||||
return "categoricalInformationValue";
|
|
||||||
}
|
|
||||||
|
|
||||||
bool allocatesMemoryInArena() const override { return false; }
|
|
||||||
|
|
||||||
void create(AggregateDataPtr __restrict place) const override
|
|
||||||
{
|
|
||||||
memset(place, 0, sizeOfData());
|
|
||||||
}
|
|
||||||
|
|
||||||
void destroy(AggregateDataPtr __restrict) const noexcept override
|
|
||||||
{
|
|
||||||
// nothing
|
|
||||||
}
|
|
||||||
|
|
||||||
bool hasTrivialDestructor() const override
|
|
||||||
{
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t sizeOfData() const override
|
|
||||||
{
|
|
||||||
return sizeof(T) * (category_count + 1) * 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t alignOfData() const override
|
|
||||||
{
|
|
||||||
return alignof(T);
|
|
||||||
}
|
|
||||||
|
|
||||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
|
||||||
{
|
|
||||||
const auto * y_col = static_cast<const ColumnUInt8 *>(columns[category_count]);
|
|
||||||
bool y = y_col->getData()[row_num];
|
|
||||||
|
|
||||||
for (size_t i : collections::range(0, category_count))
|
|
||||||
{
|
|
||||||
const auto * x_col = static_cast<const ColumnUInt8 *>(columns[i]);
|
|
||||||
bool x = x_col->getData()[row_num];
|
|
||||||
|
|
||||||
if (x)
|
|
||||||
reinterpret_cast<T *>(place)[i * 2 + size_t(y)] += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
reinterpret_cast<T *>(place)[category_count * 2 + size_t(y)] += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
|
|
||||||
{
|
|
||||||
for (size_t i : collections::range(0, category_count + 1))
|
|
||||||
{
|
|
||||||
reinterpret_cast<T *>(place)[i * 2] += reinterpret_cast<const T *>(rhs)[i * 2];
|
|
||||||
reinterpret_cast<T *>(place)[i * 2 + 1] += reinterpret_cast<const T *>(rhs)[i * 2 + 1];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
|
||||||
{
|
|
||||||
buf.write(place, sizeOfData());
|
|
||||||
}
|
|
||||||
|
|
||||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
|
|
||||||
{
|
|
||||||
buf.read(place, sizeOfData());
|
|
||||||
}
|
|
||||||
|
|
||||||
DataTypePtr getReturnType() const override
|
|
||||||
{
|
|
||||||
return std::make_shared<DataTypeArray>(
|
|
||||||
std::make_shared<DataTypeNumber<Float64>>()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override /// NOLINT
|
|
||||||
{
|
|
||||||
auto & col = static_cast<ColumnArray &>(to);
|
|
||||||
auto & data_col = static_cast<ColumnFloat64 &>(col.getData());
|
|
||||||
auto & offset_col = static_cast<ColumnArray::ColumnOffsets &>(
|
|
||||||
col.getOffsetsColumn()
|
|
||||||
);
|
|
||||||
|
|
||||||
data_col.reserve(data_col.size() + category_count);
|
|
||||||
|
|
||||||
T sum_no = reinterpret_cast<const T *>(place)[category_count * 2];
|
|
||||||
T sum_yes = reinterpret_cast<const T *>(place)[category_count * 2 + 1];
|
|
||||||
|
|
||||||
Float64 rev_no = 1. / sum_no;
|
|
||||||
Float64 rev_yes = 1. / sum_yes;
|
|
||||||
|
|
||||||
for (size_t i : collections::range(0, category_count))
|
|
||||||
{
|
|
||||||
T no = reinterpret_cast<const T *>(place)[i * 2];
|
|
||||||
T yes = reinterpret_cast<const T *>(place)[i * 2 + 1];
|
|
||||||
|
|
||||||
data_col.insertValue((no * rev_no - yes * rev_yes) * (log(no * rev_no) - log(yes * rev_yes)));
|
|
||||||
}
|
|
||||||
|
|
||||||
offset_col.insertValue(data_col.size());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
}
|
|
@ -492,7 +492,7 @@ public:
|
|||||||
void insertResultInto(IColumn & to) const
|
void insertResultInto(IColumn & to) const
|
||||||
{
|
{
|
||||||
if (has())
|
if (has())
|
||||||
assert_cast<ColumnString &>(to).insertDataWithTerminatingZero(getData(), size);
|
assert_cast<ColumnString &>(to).insertData(getData(), size);
|
||||||
else
|
else
|
||||||
assert_cast<ColumnString &>(to).insertDefault();
|
assert_cast<ColumnString &>(to).insertDefault();
|
||||||
}
|
}
|
||||||
@ -569,7 +569,7 @@ public:
|
|||||||
|
|
||||||
void change(const IColumn & column, size_t row_num, Arena * arena)
|
void change(const IColumn & column, size_t row_num, Arena * arena)
|
||||||
{
|
{
|
||||||
changeImpl(assert_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num), arena);
|
changeImpl(assert_cast<const ColumnString &>(column).getDataAt(row_num), arena);
|
||||||
}
|
}
|
||||||
|
|
||||||
void change(const Self & to, Arena * arena)
|
void change(const Self & to, Arena * arena)
|
||||||
@ -618,7 +618,7 @@ public:
|
|||||||
|
|
||||||
bool changeIfLess(const IColumn & column, size_t row_num, Arena * arena)
|
bool changeIfLess(const IColumn & column, size_t row_num, Arena * arena)
|
||||||
{
|
{
|
||||||
if (!has() || assert_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num) < getStringRef())
|
if (!has() || assert_cast<const ColumnString &>(column).getDataAt(row_num) < getStringRef())
|
||||||
{
|
{
|
||||||
change(column, row_num, arena);
|
change(column, row_num, arena);
|
||||||
return true;
|
return true;
|
||||||
@ -640,7 +640,7 @@ public:
|
|||||||
|
|
||||||
bool changeIfGreater(const IColumn & column, size_t row_num, Arena * arena)
|
bool changeIfGreater(const IColumn & column, size_t row_num, Arena * arena)
|
||||||
{
|
{
|
||||||
if (!has() || assert_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num) > getStringRef())
|
if (!has() || assert_cast<const ColumnString &>(column).getDataAt(row_num) > getStringRef())
|
||||||
{
|
{
|
||||||
change(column, row_num, arena);
|
change(column, row_num, arena);
|
||||||
return true;
|
return true;
|
||||||
@ -667,7 +667,7 @@ public:
|
|||||||
|
|
||||||
bool isEqualTo(const IColumn & column, size_t row_num) const
|
bool isEqualTo(const IColumn & column, size_t row_num) const
|
||||||
{
|
{
|
||||||
return has() && assert_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num) == getStringRef();
|
return has() && assert_cast<const ColumnString &>(column).getDataAt(row_num) == getStringRef();
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool allocatesMemoryInArena()
|
static bool allocatesMemoryInArena()
|
||||||
|
@ -18,11 +18,6 @@
|
|||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
|
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Warray-bounds"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
struct Settings;
|
struct Settings;
|
||||||
@ -291,7 +286,3 @@ public:
|
|||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
@ -165,11 +165,6 @@ public:
|
|||||||
sorted = false;
|
sorted = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wclass-memaccess"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void write(DB::WriteBuffer & buf) const
|
void write(DB::WriteBuffer & buf) const
|
||||||
{
|
{
|
||||||
size_t size = samples.size();
|
size_t size = samples.size();
|
||||||
@ -193,10 +188,6 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/// We allocate some memory on the stack to avoid allocations when there are many objects with a small number of elements.
|
/// We allocate some memory on the stack to avoid allocations when there are many objects with a small number of elements.
|
||||||
using Element = std::pair<T, UInt32>;
|
using Element = std::pair<T, UInt32>;
|
||||||
|
@ -187,9 +187,8 @@ void Suggest::fillWordsFromBlock(const Block & block)
|
|||||||
Words new_words;
|
Words new_words;
|
||||||
new_words.reserve(rows);
|
new_words.reserve(rows);
|
||||||
for (size_t i = 0; i < rows; ++i)
|
for (size_t i = 0; i < rows; ++i)
|
||||||
{
|
new_words.emplace_back(column[i].get<String>());
|
||||||
new_words.emplace_back(column.getDataAt(i).toString());
|
|
||||||
}
|
|
||||||
addWords(std::move(new_words));
|
addWords(std::move(new_words));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,23 +151,24 @@ void ColumnArray::get(size_t n, Field & res) const
|
|||||||
|
|
||||||
StringRef ColumnArray::getDataAt(size_t n) const
|
StringRef ColumnArray::getDataAt(size_t n) const
|
||||||
{
|
{
|
||||||
|
assert(n < size());
|
||||||
|
|
||||||
/** Returns the range of memory that covers all elements of the array.
|
/** Returns the range of memory that covers all elements of the array.
|
||||||
* Works for arrays of fixed length values.
|
* Works for arrays of fixed length values.
|
||||||
* For arrays of strings and arrays of arrays, the resulting chunk of memory may not be one-to-one correspondence with the elements,
|
|
||||||
* since it contains only the data laid in succession, but not the offsets.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
size_t offset_of_first_elem = offsetAt(n);
|
/// We are using pointer arithmetic on the addresses of the array elements.
|
||||||
StringRef first = getData().getDataAtWithTerminatingZero(offset_of_first_elem);
|
if (!data->isFixedAndContiguous())
|
||||||
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDataAt is not supported for {}", getName());
|
||||||
|
|
||||||
size_t array_size = sizeAt(n);
|
size_t array_size = sizeAt(n);
|
||||||
if (array_size == 0)
|
if (array_size == 0)
|
||||||
return StringRef(first.data, 0);
|
return StringRef(nullptr, 0);
|
||||||
|
|
||||||
size_t offset_of_last_elem = getOffsets()[n] - 1;
|
size_t offset_of_first_elem = offsetAt(n);
|
||||||
StringRef last = getData().getDataAtWithTerminatingZero(offset_of_last_elem);
|
StringRef first = getData().getDataAt(offset_of_first_elem);
|
||||||
|
|
||||||
return StringRef(first.data, last.data + last.size - first.data);
|
return StringRef(first.data, first.size * array_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -183,7 +184,7 @@ void ColumnArray::insertData(const char * pos, size_t length)
|
|||||||
/** Similarly - only for arrays of fixed length values.
|
/** Similarly - only for arrays of fixed length values.
|
||||||
*/
|
*/
|
||||||
if (!data->isFixedAndContiguous())
|
if (!data->isFixedAndContiguous())
|
||||||
throw Exception("Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName());
|
||||||
|
|
||||||
size_t field_size = data->sizeOfValueIfFixed();
|
size_t field_size = data->sizeOfValueIfFixed();
|
||||||
|
|
||||||
|
@ -81,11 +81,6 @@ public:
|
|||||||
return data->getDataAt(0);
|
return data->getDataAt(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
StringRef getDataAtWithTerminatingZero(size_t) const override
|
|
||||||
{
|
|
||||||
return data->getDataAtWithTerminatingZero(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
UInt64 get64(size_t) const override
|
UInt64 get64(size_t) const override
|
||||||
{
|
{
|
||||||
return data->get64(0);
|
return data->get64(0);
|
||||||
|
@ -59,10 +59,6 @@ public:
|
|||||||
void get(size_t n, Field & res) const override { getDictionary().get(getIndexes().getUInt(n), res); }
|
void get(size_t n, Field & res) const override { getDictionary().get(getIndexes().getUInt(n), res); }
|
||||||
|
|
||||||
StringRef getDataAt(size_t n) const override { return getDictionary().getDataAt(getIndexes().getUInt(n)); }
|
StringRef getDataAt(size_t n) const override { return getDictionary().getDataAt(getIndexes().getUInt(n)); }
|
||||||
StringRef getDataAtWithTerminatingZero(size_t n) const override
|
|
||||||
{
|
|
||||||
return getDictionary().getDataAtWithTerminatingZero(getIndexes().getUInt(n));
|
|
||||||
}
|
|
||||||
|
|
||||||
bool isDefaultAt(size_t n) const override { return getDictionary().isDefaultAt(getIndexes().getUInt(n)); }
|
bool isDefaultAt(size_t n) const override { return getDictionary().isDefaultAt(getIndexes().getUInt(n)); }
|
||||||
UInt64 get64(size_t n) const override { return getDictionary().get64(getIndexes().getUInt(n)); }
|
UInt64 get64(size_t n) const override { return getDictionary().get64(getIndexes().getUInt(n)); }
|
||||||
|
@ -108,24 +108,12 @@ public:
|
|||||||
return StringRef(&chars[offsetAt(n)], sizeAt(n) - 1);
|
return StringRef(&chars[offsetAt(n)], sizeAt(n) - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
StringRef getDataAtWithTerminatingZero(size_t n) const override
|
|
||||||
{
|
|
||||||
assert(n < size());
|
|
||||||
return StringRef(&chars[offsetAt(n)], sizeAt(n));
|
|
||||||
}
|
|
||||||
|
|
||||||
bool isDefaultAt(size_t n) const override
|
bool isDefaultAt(size_t n) const override
|
||||||
{
|
{
|
||||||
assert(n < size());
|
assert(n < size());
|
||||||
return sizeAt(n) == 1;
|
return sizeAt(n) == 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Suppress gcc 7.3.1 warning: '*((void*)&<anonymous> +8)' may be used uninitialized in this function
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void insert(const Field & x) override
|
void insert(const Field & x) override
|
||||||
{
|
{
|
||||||
const String & s = x.get<const String &>();
|
const String & s = x.get<const String &>();
|
||||||
@ -138,10 +126,6 @@ public:
|
|||||||
offsets.push_back(new_size);
|
offsets.push_back(new_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void insertFrom(const IColumn & src_, size_t n) override
|
void insertFrom(const IColumn & src_, size_t n) override
|
||||||
{
|
{
|
||||||
const ColumnString & src = assert_cast<const ColumnString &>(src_);
|
const ColumnString & src = assert_cast<const ColumnString &>(src_);
|
||||||
@ -177,17 +161,6 @@ public:
|
|||||||
offsets.push_back(new_size);
|
offsets.push_back(new_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Like getData, but inserting data should be zero-ending (i.e. length is 1 byte greater than real string size).
|
|
||||||
void insertDataWithTerminatingZero(const char * pos, size_t length)
|
|
||||||
{
|
|
||||||
const size_t old_size = chars.size();
|
|
||||||
const size_t new_size = old_size + length;
|
|
||||||
|
|
||||||
chars.resize(new_size);
|
|
||||||
memcpy(chars.data() + old_size, pos, length);
|
|
||||||
offsets.push_back(new_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
void popBack(size_t n) override
|
void popBack(size_t n) override
|
||||||
{
|
{
|
||||||
size_t nested_n = offsets.back() - offsetAt(offsets.size() - n);
|
size_t nested_n = offsets.back() - offsetAt(offsets.size() - n);
|
||||||
|
@ -70,10 +70,6 @@ public:
|
|||||||
void get(size_t n, Field & res) const override { getNestedColumn()->get(n, res); }
|
void get(size_t n, Field & res) const override { getNestedColumn()->get(n, res); }
|
||||||
bool isDefaultAt(size_t n) const override { return n == 0; }
|
bool isDefaultAt(size_t n) const override { return n == 0; }
|
||||||
StringRef getDataAt(size_t n) const override { return getNestedColumn()->getDataAt(n); }
|
StringRef getDataAt(size_t n) const override { return getNestedColumn()->getDataAt(n); }
|
||||||
StringRef getDataAtWithTerminatingZero(size_t n) const override
|
|
||||||
{
|
|
||||||
return getNestedColumn()->getDataAtWithTerminatingZero(n);
|
|
||||||
}
|
|
||||||
UInt64 get64(size_t n) const override { return getNestedColumn()->get64(n); }
|
UInt64 get64(size_t n) const override { return getNestedColumn()->get64(n); }
|
||||||
UInt64 getUInt(size_t n) const override { return getNestedColumn()->getUInt(n); }
|
UInt64 getUInt(size_t n) const override { return getNestedColumn()->getUInt(n); }
|
||||||
Int64 getInt(size_t n) const override { return getNestedColumn()->getInt(n); }
|
Int64 getInt(size_t n) const override { return getNestedColumn()->getInt(n); }
|
||||||
|
@ -106,13 +106,6 @@ public:
|
|||||||
/// Is used to optimize some computations (in aggregation, for example).
|
/// Is used to optimize some computations (in aggregation, for example).
|
||||||
[[nodiscard]] virtual StringRef getDataAt(size_t n) const = 0;
|
[[nodiscard]] virtual StringRef getDataAt(size_t n) const = 0;
|
||||||
|
|
||||||
/// Like getData, but has special behavior for columns that contain variable-length strings.
|
|
||||||
/// Returns zero-ending memory chunk (i.e. its size is 1 byte longer).
|
|
||||||
[[nodiscard]] virtual StringRef getDataAtWithTerminatingZero(size_t n) const
|
|
||||||
{
|
|
||||||
return getDataAt(n);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// If column stores integers, it returns n-th element transformed to UInt64 using static_cast.
|
/// If column stores integers, it returns n-th element transformed to UInt64 using static_cast.
|
||||||
/// If column stores floating point numbers, bits of n-th elements are copied to lower bits of UInt64, the remaining bits are zeros.
|
/// If column stores floating point numbers, bits of n-th elements are copied to lower bits of UInt64, the remaining bits are zeros.
|
||||||
/// Is used to optimize some computations (in aggregation, for example).
|
/// Is used to optimize some computations (in aggregation, for example).
|
||||||
|
@ -1,8 +1,11 @@
|
|||||||
#include <Common/FieldVisitorsAccurateComparison.h>
|
#include <Common/FieldVisitorsAccurateComparison.h>
|
||||||
#include <DataTypes/getLeastSupertype.h>
|
#include <DataTypes/getLeastSupertype.h>
|
||||||
|
#include <DataTypes/ObjectUtils.h>
|
||||||
|
#include <DataTypes/DataTypeTuple.h>
|
||||||
#include <Interpreters/castColumn.h>
|
#include <Interpreters/castColumn.h>
|
||||||
#include <Interpreters/convertFieldToType.h>
|
#include <Interpreters/convertFieldToType.h>
|
||||||
#include <Columns/ColumnObject.h>
|
#include <Columns/ColumnObject.h>
|
||||||
|
#include <Columns/ColumnTuple.h>
|
||||||
#include <Common/FieldVisitorToString.h>
|
#include <Common/FieldVisitorToString.h>
|
||||||
|
|
||||||
#include <Common/randomSeed.h>
|
#include <Common/randomSeed.h>
|
||||||
@ -118,3 +121,36 @@ TEST(ColumnObject, InsertRangeFrom)
|
|||||||
checkFieldsAreEqual(subcolumn_dst, fields_dst);
|
checkFieldsAreEqual(subcolumn_dst, fields_dst);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(ColumnObject, Unflatten)
|
||||||
|
{
|
||||||
|
auto check_empty_tuple = [](const auto & type, const auto & column)
|
||||||
|
{
|
||||||
|
const auto & type_tuple = assert_cast<const DataTypeTuple &>(*type);
|
||||||
|
const auto & column_tuple = assert_cast<const ColumnTuple &>(*column);
|
||||||
|
|
||||||
|
ASSERT_EQ(type_tuple.getElements().size(), 1);
|
||||||
|
ASSERT_EQ(type_tuple.getElements()[0]->getName(), "UInt8");
|
||||||
|
ASSERT_EQ(type_tuple.getElementNames()[0], ColumnObject::COLUMN_NAME_DUMMY);
|
||||||
|
|
||||||
|
ASSERT_EQ(column_tuple.getColumns().size(), 1);
|
||||||
|
ASSERT_EQ(column_tuple.getColumns()[0]->getName(), "UInt8");
|
||||||
|
};
|
||||||
|
|
||||||
|
{
|
||||||
|
auto column_object = ColumnObject::create(false);
|
||||||
|
auto [column, type] = unflattenObjectToTuple(*column_object);
|
||||||
|
|
||||||
|
check_empty_tuple(type, column);
|
||||||
|
ASSERT_EQ(column->size(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
auto column_object = ColumnObject::create(false);
|
||||||
|
column_object->insertManyDefaults(5);
|
||||||
|
auto [column, type] = unflattenObjectToTuple(*column_object);
|
||||||
|
|
||||||
|
check_empty_tuple(type, column);
|
||||||
|
ASSERT_EQ(column->size(), 5);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -281,14 +281,6 @@ private:
|
|||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
/** When using AllocatorWithStackMemory, located on the stack,
|
|
||||||
* GCC 4.9 mistakenly assumes that we can call `free` from a pointer to the stack.
|
|
||||||
* In fact, the combination of conditions inside AllocatorWithStackMemory does not allow this.
|
|
||||||
*/
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/** Allocator with optimization to place small memory ranges in automatic memory.
|
/** Allocator with optimization to place small memory ranges in automatic memory.
|
||||||
*/
|
*/
|
||||||
@ -366,7 +358,3 @@ extern template class Allocator<false, false>;
|
|||||||
extern template class Allocator<true, false>;
|
extern template class Allocator<true, false>;
|
||||||
extern template class Allocator<false, true>;
|
extern template class Allocator<false, true>;
|
||||||
extern template class Allocator<true, true>;
|
extern template class Allocator<true, true>;
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
@ -29,13 +29,6 @@
|
|||||||
#define DATE_LUT_ADD ((1970 - DATE_LUT_MIN_YEAR) * 366L * 86400)
|
#define DATE_LUT_ADD ((1970 - DATE_LUT_MIN_YEAR) * 366L * 86400)
|
||||||
|
|
||||||
|
|
||||||
#if defined(__PPC__)
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
/// Flags for toYearWeek() function.
|
/// Flags for toYearWeek() function.
|
||||||
enum class WeekModeFlag : UInt8
|
enum class WeekModeFlag : UInt8
|
||||||
{
|
{
|
||||||
@ -1445,9 +1438,3 @@ public:
|
|||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#if defined(__PPC__)
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
@ -225,9 +225,7 @@ public:
|
|||||||
void clear() { c_end = c_start; }
|
void clear() { c_end = c_start; }
|
||||||
|
|
||||||
template <typename ... TAllocatorParams>
|
template <typename ... TAllocatorParams>
|
||||||
#if defined(__clang__)
|
|
||||||
ALWAYS_INLINE /// Better performance in clang build, worse performance in gcc build.
|
ALWAYS_INLINE /// Better performance in clang build, worse performance in gcc build.
|
||||||
#endif
|
|
||||||
void reserve(size_t n, TAllocatorParams &&... allocator_params)
|
void reserve(size_t n, TAllocatorParams &&... allocator_params)
|
||||||
{
|
{
|
||||||
if (n > capacity())
|
if (n > capacity())
|
||||||
|
@ -1,9 +1,3 @@
|
|||||||
/// Bug in GCC: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Warray-bounds"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <Common/CompactArray.h>
|
#include <Common/CompactArray.h>
|
||||||
#include <IO/WriteBufferFromFile.h>
|
#include <IO/WriteBufferFromFile.h>
|
||||||
#include <IO/ReadBufferFromFile.h>
|
#include <IO/ReadBufferFromFile.h>
|
||||||
@ -262,7 +256,3 @@ int main()
|
|||||||
runTests();
|
runTests();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
@ -69,11 +69,6 @@ static void aggregate1(Map & map, Source::const_iterator begin, Source::const_it
|
|||||||
++map[*it];
|
++map[*it];
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void aggregate12(Map & map, Source::const_iterator begin, Source::const_iterator end)
|
static void aggregate12(Map & map, Source::const_iterator begin, Source::const_iterator end)
|
||||||
{
|
{
|
||||||
Map::LookupResult found = nullptr;
|
Map::LookupResult found = nullptr;
|
||||||
@ -122,10 +117,6 @@ static void aggregate22(MapTwoLevel & map, Source::const_iterator begin, Source:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void merge2(MapTwoLevel * maps, size_t num_threads, size_t bucket)
|
static void merge2(MapTwoLevel * maps, size_t num_threads, size_t bucket)
|
||||||
{
|
{
|
||||||
for (size_t i = 1; i < num_threads; ++i)
|
for (size_t i = 1; i < num_threads; ++i)
|
||||||
|
@ -62,11 +62,6 @@ struct AggregateIndependent
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
template <typename Map>
|
template <typename Map>
|
||||||
struct AggregateIndependentWithSequentialKeysOptimization
|
struct AggregateIndependentWithSequentialKeysOptimization
|
||||||
{
|
{
|
||||||
@ -115,11 +110,6 @@ struct AggregateIndependentWithSequentialKeysOptimization
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
template <typename Map>
|
template <typename Map>
|
||||||
struct MergeSequential
|
struct MergeSequential
|
||||||
{
|
{
|
||||||
@ -265,20 +255,11 @@ struct Creator
|
|||||||
void operator()(Value &) const {}
|
void operator()(Value &) const {}
|
||||||
};
|
};
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic push
|
|
||||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct Updater
|
struct Updater
|
||||||
{
|
{
|
||||||
void operator()(Value & x) const { ++x; }
|
void operator()(Value & x) const { ++x; }
|
||||||
};
|
};
|
||||||
|
|
||||||
#if !defined(__clang__)
|
|
||||||
#pragma GCC diagnostic pop
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct Merger
|
struct Merger
|
||||||
{
|
{
|
||||||
void operator()(Value & dst, const Value & src) const { dst += src; }
|
void operator()(Value & dst, const Value & src) const { dst += src; }
|
||||||
|
@ -189,9 +189,6 @@ KeeperConfigurationAndSettings::loadFromConfig(const Poco::Util::AbstractConfigu
|
|||||||
|
|
||||||
ret->coordination_settings->loadFromConfig("keeper_server.coordination_settings", config);
|
ret->coordination_settings->loadFromConfig("keeper_server.coordination_settings", config);
|
||||||
|
|
||||||
if (ret->coordination_settings->quorum_reads)
|
|
||||||
LOG_WARNING(&Poco::Logger::get("KeeperConfigurationAndSettings"), "Setting 'quorum_reads' is deprecated. Please use 'read_mode'");
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -26,7 +26,6 @@ struct Settings;
|
|||||||
M(Milliseconds, heart_beat_interval_ms, 500, "Heartbeat interval between quorum nodes", 0) \
|
M(Milliseconds, heart_beat_interval_ms, 500, "Heartbeat interval between quorum nodes", 0) \
|
||||||
M(Milliseconds, election_timeout_lower_bound_ms, 1000, "Lower bound of election timer (avoid too often leader elections)", 0) \
|
M(Milliseconds, election_timeout_lower_bound_ms, 1000, "Lower bound of election timer (avoid too often leader elections)", 0) \
|
||||||
M(Milliseconds, election_timeout_upper_bound_ms, 2000, "Upper bound of election timer (avoid too often leader elections)", 0) \
|
M(Milliseconds, election_timeout_upper_bound_ms, 2000, "Upper bound of election timer (avoid too often leader elections)", 0) \
|
||||||
M(Milliseconds, leadership_expiry, 0, "How often the leader node checks whether it still has a majority. Set it lower than or equal to election_timeout_lower_bound_ms to have linearizable reads.", 0) \
|
|
||||||
M(UInt64, reserved_log_items, 100000, "How many log items to store (don't remove during compaction)", 0) \
|
M(UInt64, reserved_log_items, 100000, "How many log items to store (don't remove during compaction)", 0) \
|
||||||
M(UInt64, snapshot_distance, 100000, "How many log items we have to collect to write new snapshot", 0) \
|
M(UInt64, snapshot_distance, 100000, "How many log items we have to collect to write new snapshot", 0) \
|
||||||
M(Bool, auto_forwarding, true, "Allow to forward write requests from followers to leader", 0) \
|
M(Bool, auto_forwarding, true, "Allow to forward write requests from followers to leader", 0) \
|
||||||
@ -39,12 +38,11 @@ struct Settings;
|
|||||||
M(UInt64, stale_log_gap, 10000, "When node became stale and should receive snapshots from leader", 0) \
|
M(UInt64, stale_log_gap, 10000, "When node became stale and should receive snapshots from leader", 0) \
|
||||||
M(UInt64, fresh_log_gap, 200, "When node became fresh", 0) \
|
M(UInt64, fresh_log_gap, 200, "When node became fresh", 0) \
|
||||||
M(UInt64, max_requests_batch_size, 100, "Max size of batch in requests count before it will be sent to RAFT", 0) \
|
M(UInt64, max_requests_batch_size, 100, "Max size of batch in requests count before it will be sent to RAFT", 0) \
|
||||||
M(Bool, quorum_reads, false, "Deprecated - use read_mode. Execute read requests as writes through whole RAFT consesus with similar speed", 0) \
|
M(Bool, quorum_reads, false, "Execute read requests as writes through whole RAFT consesus with similar speed", 0) \
|
||||||
M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \
|
M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \
|
||||||
M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \
|
M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \
|
||||||
M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \
|
M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \
|
||||||
M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0) \
|
M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0)
|
||||||
M(String, read_mode, "nonlinear", "How should reads be processed. Valid values: 'nonlinear', 'fastlinear', 'quorum'. 'nonlinear' is the fastest option because there are no consistency requirements", 0)
|
|
||||||
|
|
||||||
DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS)
|
DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS)
|
||||||
|
|
||||||
|
@ -1,5 +1,4 @@
|
|||||||
#include <Coordination/KeeperDispatcher.h>
|
#include <Coordination/KeeperDispatcher.h>
|
||||||
#include <libnuraft/async.hxx>
|
|
||||||
#include <Common/setThreadName.h>
|
#include <Common/setThreadName.h>
|
||||||
#include <Common/ZooKeeper/KeeperException.h>
|
#include <Common/ZooKeeper/KeeperException.h>
|
||||||
#include <future>
|
#include <future>
|
||||||
@ -7,8 +6,6 @@
|
|||||||
#include <Poco/Path.h>
|
#include <Poco/Path.h>
|
||||||
#include <Common/hex.h>
|
#include <Common/hex.h>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <iterator>
|
|
||||||
#include <limits>
|
|
||||||
#include <Common/checkStackSize.h>
|
#include <Common/checkStackSize.h>
|
||||||
#include <Common/CurrentMetrics.h>
|
#include <Common/CurrentMetrics.h>
|
||||||
|
|
||||||
@ -33,83 +30,22 @@ namespace ErrorCodes
|
|||||||
|
|
||||||
KeeperDispatcher::KeeperDispatcher()
|
KeeperDispatcher::KeeperDispatcher()
|
||||||
: responses_queue(std::numeric_limits<size_t>::max())
|
: responses_queue(std::numeric_limits<size_t>::max())
|
||||||
, read_requests_queue(std::numeric_limits<size_t>::max())
|
|
||||||
, finalize_requests_queue(std::numeric_limits<size_t>::max())
|
|
||||||
, configuration_and_settings(std::make_shared<KeeperConfigurationAndSettings>())
|
, configuration_and_settings(std::make_shared<KeeperConfigurationAndSettings>())
|
||||||
, log(&Poco::Logger::get("KeeperDispatcher"))
|
, log(&Poco::Logger::get("KeeperDispatcher"))
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// ZooKeeper has 2 requirements:
|
|
||||||
/// - writes need to be linearizable
|
|
||||||
/// - all requests from single session need to be processed in the order of their arrival
|
|
||||||
///
|
|
||||||
/// Because of that, we cannot process read and write requests from SAME session at the same time.
|
|
||||||
/// To be able to process read and write requests in parallel we need to make sure that only 1 type
|
|
||||||
/// of request is being processed from a single session.
|
|
||||||
/// Multiple types from different sessions can be processed at the same time.
|
|
||||||
///
|
|
||||||
/// We do some in-session housekeeping to make sure that the multithreaded request processing is correct.
|
|
||||||
/// When a request is received from a client, we check if there are requests being processed from that same
|
|
||||||
/// session, and if yes, of what type. If the types are the same, and there are no requests of different
|
|
||||||
/// type in between, we can instantly add it to the active request queue. Otherwise, we need to wait until
|
|
||||||
/// all requests of the other type are processed.
|
|
||||||
///
|
|
||||||
/// There are multiple threads used for processing the request, each of them communicating with a queue.
|
|
||||||
/// Assumption: only one type of request is being processed from a same session at any point in time (read or write).
|
|
||||||
///
|
|
||||||
/// requestThread -> requests currently being processed
|
|
||||||
/// readRequestThread -> thread for processing read requests
|
|
||||||
/// finalizeRequestThread -> thread for finalizing requests:
|
|
||||||
/// - in-session housekeeping, add requests to the active request queue if there are any
|
|
||||||
///
|
|
||||||
/// If reads are linearizable without quorum, a request can possibly wait for a certain log to be committed.
|
|
||||||
/// In that case we add it to the waiting queue for that log.
|
|
||||||
/// When that log is committed, the committing thread will send that read request to readRequestThread so it can be processed.
|
|
||||||
///
|
|
||||||
void KeeperDispatcher::requestThread()
|
void KeeperDispatcher::requestThread()
|
||||||
{
|
{
|
||||||
setThreadName("KeeperReqT");
|
setThreadName("KeeperReqT");
|
||||||
|
|
||||||
/// Result of requests batch from previous iteration
|
/// Result of requests batch from previous iteration
|
||||||
RaftResult prev_result = nullptr;
|
RaftAppendResult prev_result = nullptr;
|
||||||
const auto previous_quorum_done = [&] { return !prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK; };
|
/// Requests from previous iteration. We store them to be able
|
||||||
|
/// to send errors to the client.
|
||||||
|
KeeperStorage::RequestsForSessions prev_batch;
|
||||||
|
|
||||||
const auto needs_quorum = [](const auto & coordination_settings, const auto & request)
|
|
||||||
{
|
|
||||||
return coordination_settings->quorum_reads || coordination_settings->read_mode.toString() == "quorum" || !request.request->isReadRequest();
|
|
||||||
};
|
|
||||||
|
|
||||||
KeeperStorage::RequestsForSessions quorum_requests;
|
|
||||||
KeeperStorage::RequestsForSessions read_requests;
|
|
||||||
|
|
||||||
auto process_quorum_requests = [&, this]() mutable
|
|
||||||
{
|
|
||||||
/// Forcefully process all previous pending requests
|
|
||||||
if (prev_result)
|
|
||||||
forceWaitAndProcessResult(prev_result);
|
|
||||||
|
|
||||||
prev_result = server->putRequestBatch(quorum_requests);
|
|
||||||
|
|
||||||
if (prev_result)
|
|
||||||
{
|
|
||||||
prev_result->when_ready([&, requests_for_sessions = std::move(quorum_requests)](nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & result, nuraft::ptr<std::exception> &) mutable
|
|
||||||
{
|
|
||||||
if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT)
|
|
||||||
addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT);
|
|
||||||
else if (result.get_result_code() != nuraft::cmd_result_code::OK)
|
|
||||||
addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
quorum_requests.clear();
|
|
||||||
};
|
|
||||||
|
|
||||||
/// ZooKeeper requires that the requests inside a single session are processed in a strict order
|
|
||||||
/// (we cannot process later requests before all the previous once are processed)
|
|
||||||
/// By making sure that at this point we can either have just read requests or just write requests
|
|
||||||
/// from a single session, we can process them independently
|
|
||||||
while (!shutdown_called)
|
while (!shutdown_called)
|
||||||
{
|
{
|
||||||
KeeperStorage::RequestForSession request;
|
KeeperStorage::RequestForSession request;
|
||||||
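The comment block removed above explains the invariant this code depended on: writes must be linearizable, a session's requests must be handled in arrival order, and therefore only one request type (read or write) may be in flight for a session at any moment. The sketch below is illustrative only and not part of the diff; all names are made up and plain standard-library types stand in for the Keeper classes.

    // Standalone sketch of the per-session gating rule described in the removed comment.
    #include <cstdio>
    #include <deque>
    #include <unordered_map>

    struct Request { long session_id; bool is_read; };

    struct SessionGate
    {
        size_t in_flight = 0;        // requests of the active type currently being processed
        bool active_is_read = false; // type currently being processed for this session
        std::deque<Request> waiting; // requests that must wait for the other type to drain
    };

    // Returns true if the request may enter the active queue now; otherwise it is parked.
    bool tryActivate(std::unordered_map<long, SessionGate> & gates, const Request & r)
    {
        auto & gate = gates[r.session_id];
        if (gate.in_flight == 0 || (gate.active_is_read == r.is_read && gate.waiting.empty()))
        {
            gate.active_is_read = r.is_read;
            ++gate.in_flight;
            return true;
        }
        gate.waiting.push_back(r); // a different type, or anything queued behind it, waits
        return false;
    }

    int main()
    {
        std::unordered_map<long, SessionGate> gates;
        std::printf("%d\n", tryActivate(gates, {1, true}));  // 1: first read starts
        std::printf("%d\n", tryActivate(gates, {1, false})); // 0: write waits for the reads
        std::printf("%d\n", tryActivate(gates, {1, true}));  // 0: read behind a waiting write waits too
    }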
@@ -118,67 +54,94 @@ void KeeperDispatcher::requestThread()
 uint64_t max_wait = coordination_settings->operation_timeout_ms.totalMilliseconds();
 uint64_t max_batch_size = coordination_settings->max_requests_batch_size;

+/// The code below do a very simple thing: batch all write (quorum) requests into vector until
+/// previous write batch is not finished or max_batch size achieved. The main complexity goes from
+/// the ability to process read requests without quorum (from local state). So when we are collecting
+/// requests into a batch we must check that the new request is not read request. Otherwise we have to
+/// process all already accumulated write requests, wait them synchronously and only after that process
+/// read request. So reads are some kind of "separator" for writes.
 try
 {
-if (active_requests_queue->tryPop(request, max_wait))
+if (requests_queue->tryPop(request, max_wait))
 {
 CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets);
 if (shutdown_called)
 break;

-if (needs_quorum(coordination_settings, request))
+KeeperStorage::RequestsForSessions current_batch;
-quorum_requests.emplace_back(request);
-else
+bool has_read_request = false;
-read_requests.emplace_back(request);
+/// If new request is not read request or we must to process it through quorum.
+/// Otherwise we will process it locally.
+if (coordination_settings->quorum_reads || !request.request->isReadRequest())
+{
+current_batch.emplace_back(request);

 /// Waiting until previous append will be successful, or batch is big enough
 /// has_result == false && get_result_code == OK means that our request still not processed.
 /// Sometimes NuRaft set errorcode without setting result, so we check both here.
-while (true)
+while (prev_result && (!prev_result->has_result() && prev_result->get_result_code() == nuraft::cmd_result_code::OK) && current_batch.size() <= max_batch_size)
 {
-if (quorum_requests.size() > max_batch_size)
-break;

-if (read_requests.size() > max_batch_size)
-{
-processReadRequests(coordination_settings, read_requests);

-if (previous_quorum_done())
-break;
-}

 /// Trying to get batch requests as fast as possible
-if (active_requests_queue->tryPop(request, 1))
+if (requests_queue->tryPop(request, 1))
 {
 CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets);
-if (needs_quorum(coordination_settings, request))
+/// Don't append read request into batch, we have to process them separately
-quorum_requests.emplace_back(request);
+if (!coordination_settings->quorum_reads && request.request->isReadRequest())
-else
+{
-read_requests.emplace_back(request);
+has_read_request = true;
+break;
 }
 else
 {
-/// batch of read requests can send at most one request
-/// so we don't care if the previous batch hasn't received response
-if (!read_requests.empty())
-processReadRequests(coordination_settings, read_requests);

-/// if we still didn't process previous batch we can
+current_batch.emplace_back(request);
-/// increase are current batch even more
+}
-if (previous_quorum_done())
-break;
 }

 if (shutdown_called)
 break;
 }
+}
+else
+has_read_request = true;

 if (shutdown_called)
 break;

-if (!quorum_requests.empty())
+/// Forcefully process all previous pending requests
-process_quorum_requests();
+if (prev_result)
+forceWaitAndProcessResult(prev_result, prev_batch);

+/// Process collected write requests batch
+if (!current_batch.empty())
+{
+auto result = server->putRequestBatch(current_batch);

+if (result)
+{
+if (has_read_request) /// If we will execute read request next, than we have to process result now
+forceWaitAndProcessResult(result, current_batch);
+}
+else
+{
+addErrorResponses(current_batch, Coordination::Error::ZCONNECTIONLOSS);
+current_batch.clear();
+}

+prev_batch = std::move(current_batch);
+prev_result = result;
+}

+/// Read request always goes after write batch (last request)
+if (has_read_request)
+{
+if (server->isLeaderAlive())
+server->putLocalReadRequest(request);
+else
+addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS);
+}
 }
 }
 catch (...)
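The new-side comment above describes the batching rule that replaces the removed quorum/read queues: write requests are accumulated while the previous Raft append is still pending, and a read request ends the batch because the batch must be committed before the read can be served locally. Below is an illustrative, self-contained sketch of that rule (hypothetical names, a plain queue instead of the Keeper request queue), not the actual implementation.

    // Standalone sketch: reads act as "separators" for write batches.
    #include <cstdio>
    #include <deque>
    #include <vector>

    struct Req { int id; bool is_read; };

    int main()
    {
        std::deque<Req> incoming{{1, false}, {2, false}, {3, true}, {4, false}};
        const size_t max_batch_size = 100;

        while (!incoming.empty())
        {
            std::vector<Req> write_batch;
            bool has_read = false;
            Req read_req{};

            // Collect writes until a read shows up or the batch is full.
            while (!incoming.empty() && write_batch.size() < max_batch_size)
            {
                Req r = incoming.front();
                incoming.pop_front();
                if (r.is_read) { has_read = true; read_req = r; break; }
                write_batch.push_back(r);
            }

            // "Commit" the write batch first, then serve the read that separated it.
            for (const auto & w : write_batch)
                std::printf("replicate write %d\n", w.id);
            if (has_read)
                std::printf("serve read %d locally after batch\n", read_req.id);
        }
    }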
@@ -188,72 +151,6 @@ void KeeperDispatcher::requestThread()
 }
 }

-void KeeperDispatcher::processReadRequests(const CoordinationSettingsPtr & coordination_settings, KeeperStorage::RequestsForSessions & read_requests)
-{
-if (coordination_settings->read_mode.toString() == "fastlinear")
-{
-// we just want to know what's the current latest committed log on Leader node
-auto leader_info_result = server->getLeaderInfo();
-if (leader_info_result)
-{
-leader_info_result->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & result, nuraft::ptr<std::exception> & exception) mutable
-{
-if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT)
-{
-addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT);
-return;
-}

-if (result.get_result_code() != nuraft::cmd_result_code::OK)
-{
-addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-return;
-}

-if (exception)
-{
-LOG_INFO(log, "Got exception while waiting for read results {}", exception->what());
-addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-return;
-}

-auto & leader_info_ctx = result.get();

-if (!leader_info_ctx)
-{
-addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-return;
-}

-KeeperServer::NodeInfo leader_info;
-leader_info.term = leader_info_ctx->get_ulong();
-leader_info.last_committed_index = leader_info_ctx->get_ulong();
-std::lock_guard lock(leader_waiter_mutex);
-auto node_info = server->getNodeInfo();

-/// we're behind, we need to wait
-if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
-{
-auto & leader_waiter = leader_waiters[leader_info];
-leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end());
-LOG_TRACE(log, "waiting for term {}, idx {}", leader_info.term, leader_info.last_committed_index);
-}
-/// process it in background thread
-else if (!read_requests_queue.push(std::move(requests_for_sessions)))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-});
-}
-}
-else
-{
-assert(coordination_settings->read_mode.toString() == "nonlinear");
-if (!read_requests_queue.push(std::move(read_requests)))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-}

-read_requests.clear();
-}

 void KeeperDispatcher::responseThread()
 {
 setThreadName("KeeperRspT");
@@ -303,65 +200,6 @@ void KeeperDispatcher::snapshotThread()
 }
 }

-/// Background thread for processing read requests
-void KeeperDispatcher::readRequestThread()
-{
-setThreadName("KeeperReadT");
-while (!shutdown_called)
-{
-KeeperStorage::RequestsForSessions requests;
-if (!read_requests_queue.pop(requests))
-break;

-if (shutdown_called)
-break;

-try
-{
-for (const auto & request_info : requests)
-{
-if (server->isLeaderAlive())
-server->putLocalReadRequest(request_info);
-else
-addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS);
-}

-if (!finalize_requests_queue.push(std::move(requests)))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-}
-catch (...)
-{
-tryLogCurrentException(__PRETTY_FUNCTION__);
-}
-}
-}

-/// We finalize requests every time we commit a single log with request
-/// or process a batch of read requests.
-/// Because it can get heavy, we do it in background thread.
-void KeeperDispatcher::finalizeRequestsThread()
-{
-setThreadName("KeeperFinalT");
-while (!shutdown_called)
-{
-KeeperStorage::RequestsForSessions requests;
-if (!finalize_requests_queue.pop(requests))
-break;

-if (shutdown_called)
-break;

-try
-{
-finalizeRequests(requests);
-}
-catch (...)
-{
-tryLogCurrentException(__PRETTY_FUNCTION__);
-}
-}
-}

 void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
 {
 std::lock_guard lock(session_to_response_callback_mutex);
@@ -417,30 +255,6 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
 request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
 request_info.session_id = session_id;

-{
-std::lock_guard lock{unprocessed_request_mutex};
-auto unprocessed_requests_it = unprocessed_requests_for_session.find(session_id);
-if (unprocessed_requests_it == unprocessed_requests_for_session.end())
-{
-auto & unprocessed_requests = unprocessed_requests_for_session[session_id];
-unprocessed_requests.unprocessed_num = 1;
-unprocessed_requests.is_read = request->isReadRequest();
-}
-else
-{
-auto & unprocessed_requests = unprocessed_requests_it->second;

-/// queue is not empty, or the request types don't match, put it in the waiting queue
-if (!unprocessed_requests.request_queue.empty() || unprocessed_requests.is_read != request->isReadRequest())
-{
-unprocessed_requests.request_queue.push_back(std::move(request_info));
-return true;
-}

-++unprocessed_requests.unprocessed_num;
-}
-}

 std::lock_guard lock(push_request_mutex);

 if (shutdown_called)
@@ -449,10 +263,10 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
 /// Put close requests without timeouts
 if (request->getOpNum() == Coordination::OpNum::Close)
 {
-if (!active_requests_queue->push(std::move(request_info)))
+if (!requests_queue->push(std::move(request_info)))
 throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR);
 }
-else if (!active_requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds()))
+else if (!requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds()))
 {
 throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
 }
@@ -465,23 +279,13 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
 LOG_DEBUG(log, "Initializing storage dispatcher");

 configuration_and_settings = KeeperConfigurationAndSettings::loadFromConfig(config, standalone_keeper);
-active_requests_queue = std::make_unique<RequestsQueue>(configuration_and_settings->coordination_settings->max_requests_batch_size);
+requests_queue = std::make_unique<RequestsQueue>(configuration_and_settings->coordination_settings->max_requests_batch_size);

 request_thread = ThreadFromGlobalPool([this] { requestThread(); });
 responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
 snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });
-read_request_thread = ThreadFromGlobalPool([this] { readRequestThread(); });
-finalize_requests_thread = ThreadFromGlobalPool([this] { finalizeRequestsThread(); });

-server = std::make_unique<KeeperServer>(
+server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue);
-configuration_and_settings,
-config,
-responses_queue,
-snapshots_queue,
-[this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx)
-{ onRequestCommit(request_for_session, log_term, log_idx); },
-[this](uint64_t term, uint64_t last_idx)
-{ onApplySnapshot(term, last_idx); });

 try
 {
@@ -529,9 +333,9 @@ void KeeperDispatcher::shutdown()
 if (session_cleaner_thread.joinable())
 session_cleaner_thread.join();

-if (active_requests_queue)
+if (requests_queue)
 {
-active_requests_queue->finish();
+requests_queue->finish();

 if (request_thread.joinable())
 request_thread.join();
@@ -545,14 +349,6 @@ void KeeperDispatcher::shutdown()
 if (snapshot_thread.joinable())
 snapshot_thread.join();

-read_requests_queue.finish();
-if (read_request_thread.joinable())
-read_request_thread.join();

-finalize_requests_queue.finish();
-if (finalize_requests_thread.joinable())
-finalize_requests_thread.join();

 update_configuration_queue.finish();
 if (update_configuration_thread.joinable())
 update_configuration_thread.join();
@@ -561,7 +357,7 @@ void KeeperDispatcher::shutdown()
 KeeperStorage::RequestForSession request_for_session;

 /// Set session expired for all pending requests
-while (active_requests_queue && active_requests_queue->tryPop(request_for_session))
+while (requests_queue && requests_queue->tryPop(request_for_session))
 {
 CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets);
 auto response = request_for_session.request->makeResponse();
@@ -678,7 +474,7 @@ void KeeperDispatcher::sessionCleanerTask()
 };
 {
 std::lock_guard lock(push_request_mutex);
-if (!active_requests_queue->push(std::move(request_info)))
+if (!requests_queue->push(std::move(request_info)))
 LOG_INFO(log, "Cannot push close request to queue while cleaning outdated sessions");
 CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
 }
@@ -728,12 +524,19 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession
 }
 }

-void KeeperDispatcher::forceWaitAndProcessResult(RaftResult & result)
+void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions)
 {
 if (!result->has_result())
 result->get();

+/// If we get some errors, than send them to clients
+if (!result->get_accepted() || result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
+addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT);
+else if (result->get_result_code() != nuraft::cmd_result_code::OK)
+addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);

 result = nullptr;
+requests_for_sessions.clear();
 }

 int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
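The reworked forceWaitAndProcessResult above blocks on the pending append and translates its outcome into client errors: a rejected or timed-out append becomes ZOPERATIONTIMEOUT, any other non-OK code becomes ZCONNECTIONLOSS. Below is a simplified stand-in for that mapping, with a std::future in place of the NuRaft result handle; the names and types here are assumptions, not the real API.

    // Standalone sketch of the wait-then-translate step.
    #include <cstdio>
    #include <future>
    #include <string>

    enum class AppendStatus { Ok, Timeout, NotAccepted, OtherError };

    std::string toClientError(AppendStatus s)
    {
        switch (s)
        {
            case AppendStatus::Ok:          return "none";
            case AppendStatus::Timeout:
            case AppendStatus::NotAccepted: return "ZOPERATIONTIMEOUT";
            default:                        return "ZCONNECTIONLOSS";
        }
    }

    int main()
    {
        std::promise<AppendStatus> p;
        auto pending = p.get_future();
        p.set_value(AppendStatus::Timeout);

        // Block until the append result is known, then translate it for the waiting sessions.
        std::printf("error for batch: %s\n", toClientError(pending.get()).c_str());
    }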
@@ -781,7 +584,7 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
 /// Push new session request to queue
 {
 std::lock_guard lock(push_request_mutex);
-if (!active_requests_queue->tryPush(std::move(request_info), session_timeout_ms))
+if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms))
 throw Exception("Cannot push session id request to queue within session timeout", ErrorCodes::TIMEOUT_EXCEEDED);
 CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
 }
@@ -854,122 +657,6 @@ void KeeperDispatcher::updateConfigurationThread()
 }
 }

-// Used to update the state for a session based on the requests
-// - update the number of current unprocessed requests for the session
-// - if the number of unprocessed requests is 0, we can start adding next type of requests
-// from unprocessed requests queue to the active queue
-void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions)
-{
-std::unordered_map<int64_t, size_t> counts_for_session;

-for (const auto & request_for_session : requests_for_sessions)
-{
-++counts_for_session[request_for_session.session_id];
-}

-std::lock_guard lock{unprocessed_request_mutex};
-for (const auto [session_id, count] : counts_for_session)
-{
-auto unprocessed_requests_it = unprocessed_requests_for_session.find(session_id);
-if (unprocessed_requests_it == unprocessed_requests_for_session.end())
-continue;

-auto & unprocessed_requests = unprocessed_requests_it->second;
-unprocessed_requests.unprocessed_num -= count;

-if (unprocessed_requests.unprocessed_num == 0)
-{
-if (!unprocessed_requests.request_queue.empty())
-{
-auto & unprocessed_requests_queue = unprocessed_requests.request_queue;
-unprocessed_requests.is_read = !unprocessed_requests.is_read;
-// start adding next type of requests
-while (!unprocessed_requests_queue.empty() && unprocessed_requests_queue.front().request->isReadRequest() == unprocessed_requests.is_read)
-{
-auto & front_request = unprocessed_requests_queue.front();

-/// Put close requests without timeouts
-if (front_request.request->getOpNum() == Coordination::OpNum::Close)
-{
-if (!active_requests_queue->push(std::move(front_request)))
-throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR);
-}
-else if (!active_requests_queue->tryPush(std::move(front_request), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds()))
-{
-throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
-}

-++unprocessed_requests.unprocessed_num;
-unprocessed_requests_queue.pop_front();
-}
-}
-else
-{
-unprocessed_requests_for_session.erase(unprocessed_requests_it);
-}
-}
-}
-}

-// Finalize request
-// Process read requests that were waiting for this commit
-void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx)
-{
-if (!finalize_requests_queue.push({request_for_session}))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");

-KeeperStorage::RequestsForSessions requests;
-{
-std::lock_guard lock(leader_waiter_mutex);
-auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx});
-if (request_queue_it != leader_waiters.end())
-{
-requests = std::move(request_queue_it->second);
-leader_waiters.erase(request_queue_it);
-}
-}

-if (requests.empty())
-return;

-if (!read_requests_queue.push(std::move(requests)))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-}

-/// Process all read request that are waiting for lower or currently last processed log index
-void KeeperDispatcher::onApplySnapshot(uint64_t term, uint64_t last_idx)
-{
-KeeperServer::NodeInfo current_node_info{term, last_idx};
-KeeperStorage::RequestsForSessions requests;
-{
-std::lock_guard lock(leader_waiter_mutex);
-for (auto leader_waiter_it = leader_waiters.begin(); leader_waiter_it != leader_waiters.end();)
-{
-auto waiting_node_info = leader_waiter_it->first;
-if (waiting_node_info.term <= current_node_info.term
-&& waiting_node_info.last_committed_index <= current_node_info.last_committed_index)
-{
-for (auto & request : leader_waiter_it->second)
-{
-requests.push_back(std::move(request));
-}

-leader_waiter_it = leader_waiters.erase(leader_waiter_it);
-}
-else
-{
-++leader_waiter_it;
-}
-}
-}

-if (requests.empty())
-return;

-if (!read_requests_queue.push(std::move(requests)))
-throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-}

 bool KeeperDispatcher::isServerActive() const
 {
 return checkInit() && hasLeader() && !server->isRecovering();
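The removed onRequestCommit/onApplySnapshot pair implemented the "leader waiter" idea: a read request records the (term, index) it must observe and is released once a log at least that recent is committed locally. The sketch below is illustrative only, using plain standard types and made-up names rather than KeeperServer::NodeInfo or the real queues.

    // Standalone sketch of releasing reads that were waiting for a commit point.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct LogPoint { uint64_t term; uint64_t index; };
    struct Waiter { LogPoint point; int request_id; };

    // Release every read request whose required log point is now covered.
    std::vector<int> onCommitted(std::vector<Waiter> & waiters, LogPoint committed)
    {
        std::vector<int> ready;
        for (auto it = waiters.begin(); it != waiters.end();)
        {
            if (it->point.term <= committed.term && it->point.index <= committed.index)
            {
                ready.push_back(it->request_id);
                it = waiters.erase(it);
            }
            else
                ++it;
        }
        return ready;
    }

    int main()
    {
        std::vector<Waiter> waiters{{{3, 120}, 42}, {{3, 121}, 43}};
        auto ready = onCommitted(waiters, {3, 125});
        for (int id : ready)
            std::printf("read %d can now be served locally\n", id);
    }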
@@ -1034,7 +721,7 @@ Keeper4LWInfo KeeperDispatcher::getKeeper4LWInfo() const
 Keeper4LWInfo result = server->getPartiallyFilled4LWInfo();
 {
 std::lock_guard lock(push_request_mutex);
-result.outstanding_requests_count = active_requests_queue->size();
+result.outstanding_requests_count = requests_queue->size();
 }
 {
 std::lock_guard lock(session_to_response_callback_mutex);

@@ -32,12 +32,9 @@ private:
 using UpdateConfigurationQueue = ConcurrentBoundedQueue<ConfigUpdateAction>;

 /// Size depends on coordination settings
-/// Request currently being processed
+std::unique_ptr<RequestsQueue> requests_queue;
-std::unique_ptr<RequestsQueue> active_requests_queue;
 ResponsesQueue responses_queue;
 SnapshotsQueue snapshots_queue{1};
-ConcurrentBoundedQueue<KeeperStorage::RequestsForSessions> read_requests_queue;
-ConcurrentBoundedQueue<KeeperStorage::RequestsForSessions> finalize_requests_queue;

 /// More than 1k updates is definitely misconfiguration.
 UpdateConfigurationQueue update_configuration_queue{1000};
@@ -67,8 +64,6 @@ private:
 ThreadFromGlobalPool snapshot_thread;
 /// Apply or wait for configuration changes
 ThreadFromGlobalPool update_configuration_thread;
-ThreadFromGlobalPool read_request_thread;
-ThreadFromGlobalPool finalize_requests_thread;

 /// RAFT wrapper.
 std::unique_ptr<KeeperServer> server;
@@ -82,34 +77,6 @@ private:
 /// Counter for new session_id requests.
 std::atomic<int64_t> internal_session_id_counter{0};

-/// A read request needs to have at least the log it was the last committed log on the leader
-/// at the time the request was being made.
-/// If the node is stale, we need to wait to commit that log before doing local read requests to achieve
-/// linearizability.
-std::unordered_map<KeeperServer::NodeInfo, KeeperStorage::RequestsForSessions> leader_waiters;
-std::mutex leader_waiter_mutex;

-/// We can be actively processing one type of requests (either read or write) from a single session.
-/// If we receive a request of a type that is not currently being processed, we put it in the waiting queue.
-/// Also, we want to process them in ariving order, so if we have a different type in the queue, we cannot process that request
-/// but wait for all the previous requests to finish.
-/// E.g. READ -> WRITE -> READ, the last READ will go to the waiting queue even though we are currently processing the first READ
-/// because we have WRITE request before it that needs to be processed.
-struct UnprocessedRequests
-{
-/// how many requests are currently in the active request queue
-size_t unprocessed_num{0};
-/// is_read currently being processed
-bool is_read{false};
-std::list<KeeperStorage::RequestForSession> request_queue;
-};

-// Called every time a batch of requests are processed.
-void finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions);

-std::unordered_map<int64_t, UnprocessedRequests> unprocessed_requests_for_session;
-std::mutex unprocessed_request_mutex;

 /// Thread put requests to raft
 void requestThread();
 /// Thread put responses for subscribed sessions
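The removed UnprocessedRequests bookkeeping alternates between read and write phases per session: once every request of the active type has finished, the type flips and the consecutive run of waiting requests of the new type is promoted. Below is a minimal sketch of that flip-and-drain step, with made-up names in place of the Keeper types; it is an illustration, not the removed implementation.

    // Standalone sketch of switching a session from one request type to the other.
    #include <cstdio>
    #include <deque>
    #include <vector>

    struct Pending { int id; bool is_read; };

    struct SessionState
    {
        size_t unprocessed_num = 0;  // requests of the active type still in flight
        bool active_is_read = false;
        std::deque<Pending> waiting; // requests parked because their type differs
    };

    // Called when the active batch for a session has fully finished.
    std::vector<Pending> drainNextType(SessionState & s)
    {
        std::vector<Pending> to_activate;
        if (s.unprocessed_num != 0 || s.waiting.empty())
            return to_activate;

        s.active_is_read = !s.active_is_read;  // switch to the other request type
        while (!s.waiting.empty() && s.waiting.front().is_read == s.active_is_read)
        {
            to_activate.push_back(s.waiting.front());
            s.waiting.pop_front();
            ++s.unprocessed_num;
        }
        return to_activate;
    }

    int main()
    {
        SessionState s;
        s.active_is_read = true;                            // a read batch just finished
        s.waiting = {{7, false}, {8, false}, {9, true}};
        for (const auto & p : drainNextType(s))
            std::printf("activate %s %d\n", p.is_read ? "read" : "write", p.id);
        // activates writes 7 and 8; read 9 stays parked behind them
    }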
@@ -121,12 +88,6 @@ private:
 /// Thread apply or wait configuration changes from leader
 void updateConfigurationThread();

-void readRequestThread();

-void finalizeRequestsThread();

-void processReadRequests(const CoordinationSettingsPtr & coordination_settings, KeeperStorage::RequestsForSessions & read_requests);

 void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);

 /// Add error responses for requests to responses queue.
@@ -135,7 +96,7 @@ private:

 /// Forcefully wait for result and sets errors if something when wrong.
 /// Clears both arguments
-static void forceWaitAndProcessResult(RaftResult & result);
+void forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions);

 public:
 /// Just allocate some objects, real initialization is done by `intialize method`
@@ -155,12 +116,6 @@ public:
 return server && server->checkInit();
 }

-/// Called when a single log with request is committed.
-void onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx);

-/// Called when a snapshot is applied
-void onApplySnapshot(uint64_t term, uint64_t last_idx);

 /// Is server accepting requests, i.e. connected to the cluster
 /// and achieved quorum
 bool isServerActive() const;

@@ -105,9 +105,7 @@ KeeperServer::KeeperServer(
 const KeeperConfigurationAndSettingsPtr & configuration_and_settings_,
 const Poco::Util::AbstractConfiguration & config,
 ResponsesQueue & responses_queue_,
-SnapshotsQueue & snapshots_queue_,
+SnapshotsQueue & snapshots_queue_)
-KeeperStateMachine::CommitCallback commit_callback,
-KeeperStateMachine::ApplySnapshotCallback apply_snapshot_callback)
 : server_id(configuration_and_settings_->server_id)
 , coordination_settings(configuration_and_settings_->coordination_settings)
 , log(&Poco::Logger::get("KeeperServer"))
@@ -115,7 +113,7 @@ KeeperServer::KeeperServer(
 , keeper_context{std::make_shared<KeeperContext>()}
 , create_snapshot_on_exit(config.getBool("keeper_server.create_snapshot_on_exit", true))
 {
-if (coordination_settings->quorum_reads || coordination_settings->read_mode.toString() == "quorum")
+if (coordination_settings->quorum_reads)
 LOG_WARNING(log, "Quorum reads enabled, Keeper will work slower.");

 keeper_context->digest_enabled = config.getBool("keeper_server.digest_enabled", false);
@@ -127,9 +125,7 @@ KeeperServer::KeeperServer(
 configuration_and_settings_->snapshot_storage_path,
 coordination_settings,
 keeper_context,
-checkAndGetSuperdigest(configuration_and_settings_->super_digest),
+checkAndGetSuperdigest(configuration_and_settings_->super_digest));
-std::move(commit_callback),
-std::move(apply_snapshot_callback));

 state_manager = nuraft::cs_new<KeeperStateManager>(
 server_id,
@@ -180,13 +176,6 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server
 reconfigure(new_config);
 }

-RaftResult getLeaderInfo()
-{
-nuraft::ptr<nuraft::req_msg> req
-= nuraft::cs_new<nuraft::req_msg>(0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull);
-return send_msg_to_leader(req);
-}

 void commit_in_bg() override
 {
 // For NuRaft, if any commit fails (uncaught exception) the whole server aborts as a safety
@@ -280,20 +269,6 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co
 coordination_settings->election_timeout_lower_bound_ms.totalMilliseconds(), "election_timeout_lower_bound_ms", log);
 params.election_timeout_upper_bound_ = getValueOrMaxInt32AndLogWarning(
 coordination_settings->election_timeout_upper_bound_ms.totalMilliseconds(), "election_timeout_upper_bound_ms", log);

-params.leadership_expiry_ = getValueOrMaxInt32AndLogWarning(coordination_settings->leadership_expiry.totalMilliseconds(), "leadership_expiry", log);

-if (coordination_settings->read_mode.toString() == "fastlinear")
-{
-if (params.leadership_expiry_ == 0)
-params.leadership_expiry_ = params.election_timeout_lower_bound_;
-else if (params.leadership_expiry_ > params.election_timeout_lower_bound_)
-{
-LOG_WARNING(log, "To use fast linearizable reads, leadership_expiry should be set to a value that is less or equal to the election_timeout_upper_bound_ms. "
-"Based on current settings, there are no guarantees for linearizability of reads.");
-}
-}

 params.reserved_log_items_ = getValueOrMaxInt32AndLogWarning(coordination_settings->reserved_log_items, "reserved_log_items", log);
 params.snapshot_distance_ = getValueOrMaxInt32AndLogWarning(coordination_settings->snapshot_distance, "snapshot_distance", log);

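The removed block above ties leadership_expiry to the election timeout when fast linearizable reads are requested: an unset expiry defaults to the lower election-timeout bound, and a larger value only earns a warning because linearizability can no longer be guaranteed. A reduced sketch of that adjustment follows (plain integers, hypothetical names; not the real NuRaft parameter struct).

    // Standalone sketch of the leadership_expiry adjustment for fast linearizable reads.
    #include <cstdio>

    struct RaftParams { int election_timeout_lower_bound_ms; int leadership_expiry_ms; };

    void tuneForFastLinearReads(RaftParams & p)
    {
        if (p.leadership_expiry_ms == 0)
            p.leadership_expiry_ms = p.election_timeout_lower_bound_ms;  // safe default
        else if (p.leadership_expiry_ms > p.election_timeout_lower_bound_ms)
            std::printf("warning: leadership_expiry above the election timeout; "
                        "linearizable reads are no longer guaranteed\n");
    }

    int main()
    {
        RaftParams p{1000, 0};
        tuneForFastLinearReads(p);
        std::printf("leadership_expiry_ms = %d\n", p.leadership_expiry_ms);  // 1000
    }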
@@ -512,7 +487,7 @@ void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession &
 state_machine->processReadRequest(request_for_session);
 }

-RaftResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions)
+RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions)
 {
 std::vector<nuraft::ptr<nuraft::buffer>> entries;
 for (const auto & request_for_session : requests_for_sessions)
@@ -738,20 +713,6 @@ std::vector<int64_t> KeeperServer::getDeadSessions()
 return state_machine->getDeadSessions();
 }

-RaftResult KeeperServer::getLeaderInfo()
-{
-std::lock_guard lock{server_write_mutex};
-if (is_recovering)
-return nullptr;

-return raft_instance->getLeaderInfo();
-}

-KeeperServer::NodeInfo KeeperServer::getNodeInfo()
-{
-return { .term = raft_instance->get_term(), .last_committed_index = state_machine->last_commit_index() };
-}

 ConfigUpdateActions KeeperServer::getConfigurationDiff(const Poco::Util::AbstractConfiguration & config)
 {
 auto diff = state_manager->getConfigurationDiff(config);

@@ -14,7 +14,7 @@
 namespace DB
 {

-using RaftResult = nuraft::ptr<nuraft::cmd_result<nuraft::ptr<nuraft::buffer>>>;
+using RaftAppendResult = nuraft::ptr<nuraft::cmd_result<nuraft::ptr<nuraft::buffer>>>;

 class KeeperServer
 {
@@ -71,9 +71,7 @@ public:
 const KeeperConfigurationAndSettingsPtr & settings_,
 const Poco::Util::AbstractConfiguration & config_,
 ResponsesQueue & responses_queue_,
-SnapshotsQueue & snapshots_queue_,
+SnapshotsQueue & snapshots_queue_);
-KeeperStateMachine::CommitCallback commit_callback,
-KeeperStateMachine::ApplySnapshotCallback apply_snapshot_callback);

 /// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings.
 void startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6 = true);
@@ -86,7 +84,7 @@ public:

 /// Put batch of requests into Raft and get result of put. Responses will be set separately into
 /// responses_queue.
-RaftResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests);
+RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests);

 /// Return set of the non-active sessions
 std::vector<int64_t> getDeadSessions();
@@ -121,17 +119,6 @@ public:

 int getServerID() const { return server_id; }

-struct NodeInfo
-{
-uint64_t term;
-uint64_t last_committed_index;

-bool operator==(const NodeInfo &) const = default;
-};

-RaftResult getLeaderInfo();
-NodeInfo getNodeInfo();

 /// Get configuration diff between current configuration in RAFT and in XML file
 ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config);

@@ -139,23 +126,10 @@ public:
 /// Synchronously check for update results with retries.
 void applyConfigurationUpdate(const ConfigUpdateAction & task);


 /// Wait configuration update for action. Used by followers.
 /// Return true if update was successfully received.
 bool waitConfigurationUpdate(const ConfigUpdateAction & task);
 };

 }
-namespace std
-{
-template <>
-struct hash<DB::KeeperServer::NodeInfo>
-{
-size_t operator()(const DB::KeeperServer::NodeInfo & info) const
-{
-SipHash hash_state;
-hash_state.update(info.term);
-hash_state.update(info.last_committed_index);
-return hash_state.get64();
-}
-};
-}
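The removed specialization above hashes NodeInfo with SipHash so it can key the leader_waiters map. Below is a generic illustration of the same pattern using std::hash instead of ClickHouse's SipHash; the mixing constant and names are assumptions, and C++20 is assumed for the defaulted operator==.

    // Standalone sketch: hashing a two-field key for use in an unordered_map.
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <unordered_map>

    struct NodeInfo
    {
        uint64_t term;
        uint64_t last_committed_index;
        bool operator==(const NodeInfo &) const = default;
    };

    template <>
    struct std::hash<NodeInfo>
    {
        size_t operator()(const NodeInfo & info) const noexcept
        {
            size_t h = std::hash<uint64_t>{}(info.term);
            // boost-style combine; any reasonable mixer works here
            return h ^ (std::hash<uint64_t>{}(info.last_committed_index)
                        + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2));
        }
    };

    int main()
    {
        std::unordered_map<NodeInfo, int> waiters;
        waiters[{3, 120}] = 42;
        std::printf("%d\n", waiters[{3, 120}]);
    }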
@@ -44,9 +44,7 @@ KeeperStateMachine::KeeperStateMachine(
 const std::string & snapshots_path_,
 const CoordinationSettingsPtr & coordination_settings_,
 const KeeperContextPtr & keeper_context_,
-const std::string & superdigest_,
+const std::string & superdigest_)
-CommitCallback commit_callback_,
-ApplySnapshotCallback apply_snapshot_callback_)
 : coordination_settings(coordination_settings_)
 , snapshot_manager(
 snapshots_path_,
@@ -60,8 +58,6 @@ KeeperStateMachine::KeeperStateMachine(
 , last_committed_idx(0)
 , log(&Poco::Logger::get("KeeperStateMachine"))
 , superdigest(superdigest_)
-, commit_callback(std::move(commit_callback_))
-, apply_snapshot_callback(std::move(apply_snapshot_callback_))
 , keeper_context(keeper_context_)
 {
 }
@@ -227,11 +223,11 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
 return true;
 }

-nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit_ext(const ext_op_params & params)
+nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
 {
-auto request_for_session = parseRequest(*params.data);
+auto request_for_session = parseRequest(data);
 if (!request_for_session.zxid)
-request_for_session.zxid = params.log_idx;
+request_for_session.zxid = log_idx;

 /// Special processing of session_id request
 if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
@@ -276,9 +272,8 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit_ext(const ext_op_params &
 assertDigest(*request_for_session.digest, storage->getNodesDigest(true), *request_for_session.request, true);
 }

-last_committed_idx = params.log_idx;
-commit_callback(request_for_session, params.log_term, params.log_idx);
 ProfileEvents::increment(ProfileEvents::KeeperCommits);
+last_committed_idx = log_idx;
 return nullptr;
 }

@@ -311,7 +306,6 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)

 ProfileEvents::increment(ProfileEvents::KeeperSnapshotApplys);
 last_committed_idx = s.get_last_log_idx();
-apply_snapshot_callback(s.get_last_log_term(), s.get_last_log_idx());
 return true;
 }

@@ -326,10 +320,6 @@ void KeeperStateMachine::commit_config(const uint64_t /* log_idx */, nuraft::ptr
 void KeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & data)
 {
 auto request_for_session = parseRequest(data);

-if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
-return;

 // If we received a log from an older node, use the log_idx as the zxid
 // log_idx will always be larger or equal to the zxid so we can safely do this
 // (log_idx is increased for all logs, while zxid is only increased for requests)

@@ -20,18 +20,13 @@ using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
 class KeeperStateMachine : public nuraft::state_machine
 {
 public:
-using CommitCallback = std::function<void(const KeeperStorage::RequestForSession &, uint64_t, uint64_t)>;
-using ApplySnapshotCallback = std::function<void(uint64_t, uint64_t)>;

 KeeperStateMachine(
 ResponsesQueue & responses_queue_,
 SnapshotsQueue & snapshots_queue_,
 const std::string & snapshots_path_,
 const CoordinationSettingsPtr & coordination_settings_,
 const KeeperContextPtr & keeper_context_,
-const std::string & superdigest_ = "",
+const std::string & superdigest_ = "");
-CommitCallback commit_callback_ = [](const KeeperStorage::RequestForSession &, uint64_t, uint64_t){},
-ApplySnapshotCallback apply_snapshot_callback_ = [](uint64_t, uint64_t){});

 /// Read state from the latest snapshot
 void init();
@@ -42,7 +37,7 @@ public:

 nuraft::ptr<nuraft::buffer> pre_commit(uint64_t log_idx, nuraft::buffer & data) override;

-nuraft::ptr<nuraft::buffer> commit_ext(const ext_op_params & params) override; /// NOLINT
+nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override; /// NOLINT

 /// Save new cluster config to our snapshot (copy of the config stored in StateManager)
 void commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf) override; /// NOLINT
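The interface moves back from commit_ext(params) to commit(log_idx, data): the extended form also carried the log term, which the removed commit_callback needed in order to key waiting read requests by (term, index). The sketch below is a plain C++ stand-in for that difference and does not use the real NuRaft types; the names here are assumptions.

    // Standalone sketch contrasting the two commit entry points.
    #include <cstdint>
    #include <cstdio>
    #include <string>

    struct ExtParams { uint64_t log_idx; uint64_t log_term; std::string data; };

    struct StateMachine
    {
        uint64_t last_committed_idx = 0;

        // old-style entry point: only the index and payload are available
        void commit(uint64_t log_idx, const std::string & data)
        {
            std::printf("apply idx=%llu payload=%s\n", (unsigned long long)log_idx, data.c_str());
            last_committed_idx = log_idx;
        }

        // extended entry point: the term is available too, so per-commit callbacks
        // keyed by (term, idx) become possible
        void commit_ext(const ExtParams & p)
        {
            commit(p.log_idx, p.data);
            std::printf("notify waiters of (term=%llu, idx=%llu)\n",
                        (unsigned long long)p.log_term, (unsigned long long)p.log_idx);
        }
    };

    int main()
    {
        StateMachine sm;
        sm.commit_ext({10, 3, "create /hello"});
    }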
@@ -150,11 +145,6 @@ private:
 /// Special part of ACL system -- superdigest specified in server config.
 const std::string superdigest;

-/// call when a request is committed
-const CommitCallback commit_callback;
-/// call when snapshot is applied
-const ApplySnapshotCallback apply_snapshot_callback;

 KeeperContextPtr keeper_context;
 };

@@ -6,6 +6,7 @@
 #include <Common/Exception.h>
 #include <Common/isLocalAddress.h>
 #include <IO/ReadHelpers.h>
+#include <Common/getMultipleKeysFromConfig.h>

 namespace DB
 {
@@ -94,6 +95,14 @@ KeeperStateManager::parseServersConfiguration(const Poco::Util::AbstractConfigur
 continue;

 std::string full_prefix = config_prefix + ".raft_configuration." + server_key;

+if (getMultipleValuesFromConfig(config, full_prefix, "id").size() > 1
+|| getMultipleValuesFromConfig(config, full_prefix, "hostname").size() > 1
+|| getMultipleValuesFromConfig(config, full_prefix, "port").size() > 1)
+{
+throw Exception(ErrorCodes::RAFT_ERROR, "Multiple <id> or <hostname> or <port> specified for a single <server>");
+}

 int new_server_id = config.getInt(full_prefix + ".id");
 std::string hostname = config.getString(full_prefix + ".hostname");
 int port = config.getInt(full_prefix + ".port");
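The added check above rejects a <server> entry that repeats <id>, <hostname> or <port>, since the configuration format otherwise allows duplicated tags to slip through silently. Below is an illustrative sketch of the same validation with a std::multimap standing in for the configuration object; the names are made up and this is not the Poco API.

    // Standalone sketch: reject duplicated child keys under one server entry.
    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    using Config = std::multimap<std::string, std::string>;  // key path -> value

    void checkSingle(const Config & cfg, const std::string & prefix, const std::string & key)
    {
        if (cfg.count(prefix + "." + key) > 1)
            throw std::runtime_error("Multiple <" + key + "> specified for a single <server>");
    }

    int main()
    {
        Config cfg{{"raft_configuration.server.id", "1"},
                   {"raft_configuration.server.id", "2"},   // duplicated on purpose
                   {"raft_configuration.server.hostname", "node1"},
                   {"raft_configuration.server.port", "9234"}};
        try
        {
            for (const char * key : {"id", "hostname", "port"})
                checkSingle(cfg, "raft_configuration.server", key);
        }
        catch (const std::exception & e)
        {
            std::printf("%s\n", e.what());
        }
    }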
@@ -1330,9 +1330,8 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
changelog.append(entry);
changelog.end_of_append_batch(0, 0);

-auto entry_buf = changelog.entry_at(i)->get_buf_ptr();
-state_machine->pre_commit(i, *entry_buf);
-state_machine->commit_ext(nuraft::state_machine::ext_op_params{i, entry_buf});
+state_machine->pre_commit(i, changelog.entry_at(i)->get_buf());
+state_machine->commit(i, changelog.entry_at(i)->get_buf());
bool snapshot_created = false;
if (i % settings->snapshot_distance == 0)
{
@@ -1376,9 +1375,8 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint

for (size_t i = restore_machine->last_commit_index() + 1; i < restore_changelog.next_slot(); ++i)
{
-auto entry = changelog.entry_at(i)->get_buf_ptr();
-restore_machine->pre_commit(i, *entry);
-restore_machine->commit_ext(nuraft::state_machine::ext_op_params{i, entry});
+restore_machine->pre_commit(i, changelog.entry_at(i)->get_buf());
+restore_machine->commit(i, changelog.entry_at(i)->get_buf());
}

auto & source_storage = state_machine->getStorage();
@@ -1479,18 +1477,18 @@ TEST_P(CoordinationTest, TestEphemeralNodeRemove)
std::shared_ptr<ZooKeeperCreateRequest> request_c = std::make_shared<ZooKeeperCreateRequest>();
request_c->path = "/hello";
request_c->is_ephemeral = true;
-auto entry_c = getLogEntryFromZKRequest(0, 1, state_machine->getNextZxid(), request_c)->get_buf_ptr();
-state_machine->pre_commit(1, *entry_c);
-state_machine->commit_ext(nuraft::state_machine::ext_op_params{1, entry_c});
+auto entry_c = getLogEntryFromZKRequest(0, 1, state_machine->getNextZxid(), request_c);
+state_machine->pre_commit(1, entry_c->get_buf());
+state_machine->commit(1, entry_c->get_buf());
const auto & storage = state_machine->getStorage();

EXPECT_EQ(storage.ephemerals.size(), 1);
std::shared_ptr<ZooKeeperRemoveRequest> request_d = std::make_shared<ZooKeeperRemoveRequest>();
request_d->path = "/hello";
/// Delete from other session
-auto entry_d = getLogEntryFromZKRequest(0, 2, state_machine->getNextZxid(), request_d)->get_buf_ptr();
-state_machine->pre_commit(2, *entry_d);
-state_machine->commit_ext(nuraft::state_machine::ext_op_params{2, entry_d});
+auto entry_d = getLogEntryFromZKRequest(0, 2, state_machine->getNextZxid(), request_d);
+state_machine->pre_commit(2, entry_d->get_buf());
+state_machine->commit(2, entry_d->get_buf());

EXPECT_EQ(storage.ephemerals.size(), 0);
}
@@ -105,10 +105,6 @@ template <typename T> bool decimalEqual(T x, T y, UInt32 x_scale, UInt32 y_scale
template <typename T> bool decimalLess(T x, T y, UInt32 x_scale, UInt32 y_scale);
template <typename T> bool decimalLessOrEqual(T x, T y, UInt32 x_scale, UInt32 y_scale);

-#if !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
template <typename T>
class DecimalField
{
@@ -168,9 +164,6 @@ private:
T dec;
UInt32 scale;
};
-#if !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif

template <typename T> constexpr bool is_decimal_field = false;
template <> constexpr inline bool is_decimal_field<DecimalField<Decimal32>> = true;
@@ -594,11 +587,6 @@ public:
switch (field.which)
{
case Types::Null: return f(field.template get<Null>());
-// gcc 8.2.1
-#if !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
case Types::UInt64: return f(field.template get<UInt64>());
case Types::UInt128: return f(field.template get<UInt128>());
case Types::UInt256: return f(field.template get<UInt256>());
@@ -622,9 +610,6 @@ public:
case Types::Decimal128: return f(field.template get<DecimalField<Decimal128>>());
case Types::Decimal256: return f(field.template get<DecimalField<Decimal256>>());
case Types::AggregateFunctionState: return f(field.template get<AggregateFunctionStateData>());
-#if !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
}

__builtin_unreachable();
@@ -136,6 +136,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
M(UInt64, aggregation_memory_efficient_merge_threads, 0, "Number of threads to use for merge intermediate aggregation results in memory efficient mode. When bigger, then more memory is consumed. 0 means - same as 'max_threads'.", 0) \
M(Bool, enable_positional_arguments, true, "Enable positional arguments in ORDER BY, GROUP BY and LIMIT BY", 0) \
+M(Bool, enable_extended_results_for_datetime_functions, false, "Enable date functions like toLastDayOfMonth return Date32 results (instead of Date results) for Date32/DateTime64 arguments.", 0) \
\
M(Bool, group_by_use_nulls, false, "Treat columns mentioned in ROLLUP, CUBE or GROUPING SETS as Nullable", 0) \
\
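Note: the new enable_extended_results_for_datetime_functions setting is the switch that the date/time function changes further down key on: when it is enabled and the argument is Date32 or DateTime64, the affected functions return Date32 instead of Date. A minimal sketch of that return-type selection, condensed from the FunctionDateOrDateTimeToDateOrDate32 header added later in this diff (only the dispatch is shown; the surrounding function class and the usual ClickHouse includes for WhichDataType, DataTypeDate and DataTypeDate32 are assumed):

    // Condensed from the new function headers in this diff: pick the result type
    // from the argument type and the setting.
    DataTypePtr chooseResultType(const IDataType & argument_type, bool enable_extended_results_for_datetime_functions)
    {
        WhichDataType which(argument_type);
        if ((which.isDate32() || which.isDateTime64()) && enable_extended_results_for_datetime_functions)
            return std::make_shared<DataTypeDate32>();   /// wide argument + setting on -> wide (Date32) result
        return std::make_shared<DataTypeDate>();         /// otherwise keep the old Date result
    }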
@@ -42,11 +42,6 @@ struct Null
}
};

-/// Ignore strange gcc warning https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55776
-#if !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#endif
/// @note Except explicitly described you should not assume on TypeIndex numbers and/or their orders in this enum.
enum class TypeIndex
{
@@ -89,9 +84,6 @@ enum class TypeIndex
Map,
Object,
};
-#if !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif


using UInt128 = ::UInt128;
@@ -453,15 +453,19 @@ using SubcolumnsTreeWithColumns = SubcolumnsTree<ColumnWithTypeAndDimensions>;
using Node = SubcolumnsTreeWithColumns::Node;

/// Creates data type and column from tree of subcolumns.
-ColumnWithTypeAndDimensions createTypeFromNode(const Node * node)
+ColumnWithTypeAndDimensions createTypeFromNode(const Node & node)
{
auto collect_tuple_elemets = [](const auto & children)
{
+if (children.empty())
+throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create type from empty Tuple or Nested node");
+
std::vector<std::tuple<String, ColumnWithTypeAndDimensions>> tuple_elements;
tuple_elements.reserve(children.size());
for (const auto & [name, child] : children)
{
-auto column = createTypeFromNode(child.get());
+assert(child);
+auto column = createTypeFromNode(*child);
tuple_elements.emplace_back(name, std::move(column));
}

@@ -475,13 +479,13 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node * node)
return std::make_tuple(std::move(tuple_names), std::move(tuple_columns));
};

-if (node->kind == Node::SCALAR)
+if (node.kind == Node::SCALAR)
{
-return node->data;
+return node.data;
}
-else if (node->kind == Node::NESTED)
+else if (node.kind == Node::NESTED)
{
-auto [tuple_names, tuple_columns] = collect_tuple_elemets(node->children);
+auto [tuple_names, tuple_columns] = collect_tuple_elemets(node.children);

Columns offsets_columns;
offsets_columns.reserve(tuple_columns[0].array_dimensions + 1);
@@ -492,7 +496,7 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node * node)
/// `k1 Array(Nested(k2 Int, k3 Int))` and k1 is marked as Nested
/// and `k2` and `k3` has anonymous_array_level = 1 in that case.

-const auto & current_array = assert_cast<const ColumnArray &>(*node->data.column);
+const auto & current_array = assert_cast<const ColumnArray &>(*node.data.column);
offsets_columns.push_back(current_array.getOffsetsPtr());

auto first_column = tuple_columns[0].column;
@@ -529,7 +533,7 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node * node)
}
else
{
-auto [tuple_names, tuple_columns] = collect_tuple_elemets(node->children);
+auto [tuple_names, tuple_columns] = collect_tuple_elemets(node.children);

size_t num_elements = tuple_columns.size();
Columns tuple_elements_columns(num_elements);
@@ -587,6 +591,15 @@ std::pair<ColumnPtr, DataTypePtr> unflattenObjectToTuple(const ColumnObject & co
{
const auto & subcolumns = column.getSubcolumns();

+if (subcolumns.empty())
+{
+auto type = std::make_shared<DataTypeTuple>(
+DataTypes{std::make_shared<DataTypeUInt8>()},
+Names{ColumnObject::COLUMN_NAME_DUMMY});
+
+return {type->createColumn()->cloneResized(column.size()), type};
+}
+
PathsInData paths;
DataTypes types;
Columns columns;
@@ -613,6 +626,9 @@ std::pair<ColumnPtr, DataTypePtr> unflattenTuple(
assert(paths.size() == tuple_types.size());
assert(paths.size() == tuple_columns.size());

+if (paths.empty())
+throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot unflatten empty Tuple");
+
/// We add all paths to the subcolumn tree and then create a type from it.
/// The tree stores column, type and number of array dimensions
/// for each intermediate node.
@@ -51,6 +51,8 @@ public:
using NodeKind = typename Node::Kind;
using NodePtr = std::shared_ptr<Node>;

+SubcolumnsTree() : root(std::make_shared<Node>(Node::TUPLE)) {}
+
/// Add a leaf without any data in other nodes.
bool add(const PathInData & path, const NodeData & leaf_data)
{
@@ -73,13 +75,9 @@ public:
bool add(const PathInData & path, const NodeCreator & node_creator)
{
const auto & parts = path.getParts();

if (parts.empty())
return false;

-if (!root)
-root = std::make_shared<Node>(Node::TUPLE);
-
Node * current_node = root.get();
for (size_t i = 0; i < parts.size() - 1; ++i)
{
@@ -166,13 +164,13 @@ public:
return node;
}

-bool empty() const { return root == nullptr; }
+bool empty() const { return root->children.empty(); }
size_t size() const { return leaves.size(); }

using Nodes = std::vector<NodePtr>;

const Nodes & getLeaves() const { return leaves; }
-const Node * getRoot() const { return root.get(); }
+const Node & getRoot() const { return *root; }

using iterator = typename Nodes::iterator;
using const_iterator = typename Nodes::const_iterator;
@@ -186,11 +184,11 @@ public:
private:
const Node * findImpl(const PathInData & path, bool find_exact) const
{
-if (!root)
+if (empty())
return nullptr;

const auto & parts = path.getParts();
-const Node * current_node = root.get();
+const auto * current_node = root.get();

for (const auto & part : parts)
{
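Note: the SubcolumnsTree change above replaces the lazily created root with one that is always allocated in the constructor, so getRoot() can return a reference and empty() is expressed through the root's children. A self-contained sketch of that invariant, using toy types rather than the ClickHouse classes:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    // Toy illustration of the "root is always allocated" invariant: the constructor
    // creates the root node, so callers never see a null root and emptiness is
    // defined by the absence of children.
    struct ToyNode
    {
        std::map<std::string, std::shared_ptr<ToyNode>> children;
    };

    class ToyTree
    {
    public:
        ToyTree() : root(std::make_shared<ToyNode>()) {}

        bool empty() const { return root->children.empty(); }

        /// Returning a reference is safe because root can never be null.
        const ToyNode & getRoot() const { return *root; }

        void add(const std::string & name) { root->children[name] = std::make_shared<ToyNode>(); }

    private:
        std::shared_ptr<ToyNode> root;
    };

    int main()
    {
        ToyTree tree;
        assert(tree.empty());
        tree.add("key");
        assert(!tree.empty());
        assert(!tree.getRoot().children.empty());
    }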
@@ -7,12 +7,6 @@
namespace fs = std::filesystem;

-
-#if !defined(__clang__)
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Wsuggest-override"
-#endif
-

template <typename T>
DB::DiskPtr createDisk();

@@ -82,6 +82,14 @@ struct ToStartOfWeekImpl
{
return time_zone.toFirstDayNumOfWeek(DayNum(d), week_mode);
}
+static inline Int64 execute_extended_result(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
+}
+static inline Int32 execute_extended_result(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d), week_mode);
+}

using FactorTransform = ZeroTransform;
};
@@ -115,7 +123,7 @@ struct ToWeekImpl
using FactorTransform = ToStartOfYearImpl;
};

-template <typename FromType, typename ToType, typename Transform>
+template <typename FromType, typename ToType, typename Transform, bool is_extended_result = false>
struct WeekTransformer
{
explicit WeekTransformer(Transform transform_)
@@ -130,6 +138,9 @@ struct WeekTransformer
vec_to.resize(size);

for (size_t i = 0; i < size; ++i)
+if constexpr (is_extended_result)
+vec_to[i] = transform.execute_extended_result(vec_from[i], week_mode, time_zone);
+else
vec_to[i] = transform.execute(vec_from[i], week_mode, time_zone);
}

@@ -138,13 +149,13 @@ private:
};


-template <typename FromDataType, typename ToDataType>
+template <typename FromDataType, typename ToDataType, bool is_extended_result = false>
struct CustomWeekTransformImpl
{
template <typename Transform>
static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/, Transform transform = {})
{
-const auto op = WeekTransformer<typename FromDataType::FieldType, typename ToDataType::FieldType, Transform>{std::move(transform)};
+const auto op = WeekTransformer<typename FromDataType::FieldType, typename ToDataType::FieldType, Transform, is_extended_result>{std::move(transform)};

UInt8 week_mode = DEFAULT_WEEK_MODE;
if (arguments.size() > 1)
@@ -161,7 +161,14 @@ struct ToMondayImpl
{
return time_zone.toFirstDayNumOfWeek(DayNum(d));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d));
+}
using FactorTransform = ZeroTransform;
};

@@ -185,6 +192,14 @@ struct ToStartOfMonthImpl
{
return time_zone.toFirstDayNumOfMonth(DayNum(d));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(d));
+}

using FactorTransform = ZeroTransform;
};
@@ -218,7 +233,14 @@ struct ToLastDayOfMonthImpl
/// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value.
return time_zone.toLastDayNumOfMonth(DayNum(std::min(d, UInt16(0xFFF9))));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toLastDayNumOfMonth(ExtendedDayNum(d));
+}
using FactorTransform = ZeroTransform;
};

@@ -242,7 +264,14 @@ struct ToStartOfQuarterImpl
{
return time_zone.toFirstDayNumOfQuarter(DayNum(d));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfQuarter(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(d));
+}
using FactorTransform = ZeroTransform;
};

@@ -266,6 +295,14 @@ struct ToStartOfYearImpl
{
return time_zone.toFirstDayNumOfYear(DayNum(d));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfYear(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfYear(ExtendedDayNum(d));
+}

using FactorTransform = ZeroTransform;
};
@@ -893,7 +930,7 @@ struct ToStartOfISOYearImpl

static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
{
-return time_zone.toFirstDayNumOfISOYear(time_zone.toDayNum(t));
+return t < 0 ? 0 : time_zone.toFirstDayNumOfISOYear(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM))));
}
static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone)
{
@@ -901,12 +938,20 @@ struct ToStartOfISOYearImpl
}
static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone)
{
-return time_zone.toFirstDayNumOfISOYear(ExtendedDayNum(d));
+return d < 0 ? 0 : time_zone.toFirstDayNumOfISOYear(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM))));
}
static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone)
{
return time_zone.toFirstDayNumOfISOYear(DayNum(d));
}
+static inline Int64 execute_extended_result(Int64 t, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfISOYear(time_zone.toDayNum(t));
+}
+static inline Int32 execute_extended_result(Int32 d, const DateLUTImpl & time_zone)
+{
+return time_zone.toFirstDayNumOfISOYear(ExtendedDayNum(d));
+}

using FactorTransform = ZeroTransform;
};
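Note: the reworked ToStartOfISOYearImpl::execute overloads above saturate instead of wrapping: a negative day number (a Date32/DateTime64 value before 1970) now yields 0, i.e. 1970-01-01, and values past the lookup table are clamped to DATE_LUT_MAX_DAY_NUM before the table access. A standalone sketch of that saturation pattern (the lookup itself is stubbed; the bound below is illustrative, not the real DATE_LUT_MAX_DAY_NUM):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Stand-in for the DateLUT lookup: the real table only covers [0, DATE_LUT_MAX_DAY_NUM].
    constexpr int32_t illustrative_max_day_num = 65535;

    int32_t firstDayOfISOYearStub(int32_t day_num)
    {
        return day_num - day_num % 365; // placeholder arithmetic, not real calendar logic
    }

    // Saturate-then-look-up, mirroring the expressions in the diff.
    uint16_t toStartOfISOYearSaturated(int32_t day_num)
    {
        if (day_num < 0)
            return 0; // pre-1970 input saturates to the epoch
        return static_cast<uint16_t>(firstDayOfISOYearStub(std::min(day_num, illustrative_max_day_num)));
    }

    int main()
    {
        std::cout << toStartOfISOYearSaturated(-100) << ' ' << toStartOfISOYearSaturated(20000) << '\n'; // prints "0 19710"
    }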
@@ -1201,7 +1246,7 @@ struct ToYYYYMMDDhhmmssImpl
};


-template <typename FromType, typename ToType, typename Transform>
+template <typename FromType, typename ToType, typename Transform, bool is_extended_result = false>
struct Transformer
{
template <typename FromTypeVector, typename ToTypeVector>
@@ -1211,18 +1256,21 @@ struct Transformer
vec_to.resize(size);

for (size_t i = 0; i < size; ++i)
+if constexpr (is_extended_result)
+vec_to[i] = transform.execute_extended_result(vec_from[i], time_zone);
+else
vec_to[i] = transform.execute(vec_from[i], time_zone);
}
};


-template <typename FromDataType, typename ToDataType, typename Transform>
+template <typename FromDataType, typename ToDataType, typename Transform, bool is_extended_result = false>
struct DateTimeTransformImpl
{
static ColumnPtr execute(
const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/, const Transform & transform = {})
{
-using Op = Transformer<typename FromDataType::FieldType, typename ToDataType::FieldType, Transform>;
+using Op = Transformer<typename FromDataType::FieldType, typename ToDataType::FieldType, Transform, is_extended_result>;

const ColumnPtr source_col = arguments[0].column;
if (const auto * sources = checkAndGetColumn<typename FromDataType::ColumnType>(source_col.get()))
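Note: the is_extended_result flag threaded through WeekTransformer, Transformer and DateTimeTransformImpl above is a compile-time switch: if constexpr picks execute_extended_result (wide Int32/Int64 day numbers) or the old execute path, so only one branch is instantiated per specialization. A self-contained sketch of that pattern with a toy transform, not the ClickHouse one:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Toy transform with a narrow and a wide variant, mirroring execute /
    // execute_extended_result in the diff.
    struct ToyTransform
    {
        static uint16_t execute(int32_t value) { return static_cast<uint16_t>(value < 0 ? 0 : value); }
        static int32_t execute_extended_result(int32_t value) { return value; } // keeps negative (pre-1970) values
    };

    // The bool template parameter selects the variant at compile time;
    // the discarded branch is never instantiated.
    template <typename Transform, typename ToType, bool is_extended_result = false>
    std::vector<ToType> transformAll(const std::vector<int32_t> & from)
    {
        std::vector<ToType> to(from.size());
        for (size_t i = 0; i < from.size(); ++i)
        {
            if constexpr (is_extended_result)
                to[i] = Transform::execute_extended_result(from[i]);
            else
                to[i] = Transform::execute(from[i]);
        }
        return to;
    }

    int main()
    {
        std::vector<int32_t> days{-10, 0, 42};
        auto narrow = transformAll<ToyTransform, uint16_t>(days);    // old behaviour: saturates at 0
        auto wide = transformAll<ToyTransform, int32_t, true>(days); // extended results keep -10
        std::cout << narrow[0] << ' ' << wide[0] << '\n';            // prints "0 -10"
    }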
src/Functions/FunctionCustomWeekToDateOrDate32.h (new file, 78 lines)
@@ -0,0 +1,78 @@
+#pragma once
+#include <Functions/IFunctionCustomWeek.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+}
+
+template <typename Transform>
+class FunctionCustomWeekToDateOrDate32 : public IFunctionCustomWeek<Transform>, WithContext
+{
+public:
+const bool enable_extended_results_for_datetime_functions = false;
+
+static FunctionPtr create(ContextPtr context_)
+{
+return std::make_shared<FunctionCustomWeekToDateOrDate32>(context_);
+}
+
+explicit FunctionCustomWeekToDateOrDate32(ContextPtr context_)
+: WithContext(context_)
+, enable_extended_results_for_datetime_functions(context_->getSettingsRef().enable_extended_results_for_datetime_functions)
+{
+}
+
+DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
+{
+this->checkArguments(arguments, /*is_result_type_date_or_date32*/ true);
+
+const IDataType * from_type = arguments[0].type.get();
+WhichDataType which(from_type);
+if ((which.isDate32() || which.isDateTime64()) && enable_extended_results_for_datetime_functions)
+return std::make_shared<DataTypeDate32>();
+else
+return std::make_shared<DataTypeDate>();
+}
+
+ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
+{
+const IDataType * from_type = arguments[0].type.get();
+WhichDataType which(from_type);
+
+if (which.isDate())
+return CustomWeekTransformImpl<DataTypeDate, DataTypeDate>::execute(
+arguments, result_type, input_rows_count, Transform{});
+else if (which.isDate32())
+if (enable_extended_results_for_datetime_functions)
+return CustomWeekTransformImpl<DataTypeDate32, DataTypeDate32, /*is_extended_result*/ true>::execute(
+arguments, result_type, input_rows_count, Transform{});
+else
+return CustomWeekTransformImpl<DataTypeDate32, DataTypeDate>::execute(
+arguments, result_type, input_rows_count, Transform{});
+else if (which.isDateTime())
+return CustomWeekTransformImpl<DataTypeDateTime, DataTypeDate>::execute(
+arguments, result_type, input_rows_count, Transform{});
+else if (which.isDateTime64())
+{
+if (enable_extended_results_for_datetime_functions)
+return CustomWeekTransformImpl<DataTypeDateTime64, DataTypeDate32, /*is_extended_result*/ true>::execute(
+arguments, result_type, input_rows_count,
+TransformDateTime64<Transform>{assert_cast<const DataTypeDateTime64 *>(from_type)->getScale()});
+else
+return CustomWeekTransformImpl<DataTypeDateTime64, DataTypeDate>::execute(
+arguments, result_type, input_rows_count,
+TransformDateTime64<Transform>{assert_cast<const DataTypeDateTime64 *>(from_type)->getScale()});
+}
+else
+throw Exception(
+"Illegal type " + arguments[0].type->getName() + " of argument of function " + this->getName(),
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+}
+
+};
+
+}
@@ -1,14 +1,5 @@
#pragma once
-#include <DataTypes/DataTypeDate.h>
-#include <DataTypes/DataTypeDate32.h>
-#include <DataTypes/DataTypeDateTime.h>
-#include <DataTypes/DataTypeDateTime64.h>
-#include <Functions/CustomWeekTransforms.h>
-#include <Functions/IFunction.h>
-#include <Functions/TransformDateTime64.h>
-#include <IO/WriteHelpers.h>
-#include <Interpreters/Context_fwd.h>
+#include <Functions/IFunctionCustomWeek.h>

namespace DB
{
@@ -16,82 +7,23 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
-extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

/// See CustomWeekTransforms.h
template <typename ToDataType, typename Transform>
-class FunctionCustomWeekToSomething : public IFunction
+class FunctionCustomWeekToSomething : public IFunctionCustomWeek<Transform>
{
public:
-static constexpr auto name = Transform::name;
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionCustomWeekToSomething>(); }

-String getName() const override { return name; }
-
-bool isVariadic() const override { return true; }
-bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
-size_t getNumberOfArguments() const override { return 0; }
-
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
-if (arguments.size() == 1)
-{
-if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
-throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
-+ ". Must be Date, Date32, DateTime or DateTime64.",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-}
-else if (arguments.size() == 2)
-{
-if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
-throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of 1st argument of function " + getName()
-+ ". Must be Date, Date32, DateTime or DateTime64.",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if (!isUInt8(arguments[1].type))
-throw Exception(
-"Illegal type of 2nd (optional) argument of function " + getName()
-+ ". Must be constant UInt8 (week mode).",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-}
-else if (arguments.size() == 3)
-{
-if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
-throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
-+ ". Must be Date, Date32, DateTime or DateTime64",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if (!isUInt8(arguments[1].type))
-throw Exception(
-"Illegal type of 2nd (optional) argument of function " + getName()
-+ ". Must be constant UInt8 (week mode).",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if (!isString(arguments[2].type))
-throw Exception(
-"Illegal type of 3rd (optional) argument of function " + getName()
-+ ". Must be constant string (timezone name).",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if ((isDate(arguments[0].type) || isDate32(arguments[0].type))
-&& (std::is_same_v<ToDataType, DataTypeDate> || std::is_same_v<ToDataType, DataTypeDate32>))
-throw Exception(
-"The timezone argument of function " + getName() + " is allowed only when the 1st argument is DateTime or DateTime64.",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-}
-else
-throw Exception(
-"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
-+ ", expected 1, 2 or 3.",
-ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-
+this->checkArguments(arguments);
return std::make_shared<ToDataType>();
}

-bool useDefaultImplementationForConstants() const override { return true; }
-ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }
-
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
const IDataType * from_type = arguments[0].type.get();
@@ -114,44 +46,10 @@ public:
}
else
throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName(),
+"Illegal type " + arguments[0].type->getName() + " of argument of function " + this->getName(),
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}

-bool hasInformationAboutMonotonicity() const override { return true; }
-
-Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
-{
-if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
-return { .is_monotonic = true, .is_always_monotonic = true };
-
-const IFunction::Monotonicity is_monotonic = { .is_monotonic = true };
-const IFunction::Monotonicity is_not_monotonic;
-
-/// This method is called only if the function has one argument. Therefore, we do not care about the non-local time zone.
-const DateLUTImpl & date_lut = DateLUT::instance();
-
-if (left.isNull() || right.isNull())
-return {};
-
-/// The function is monotonous on the [left, right] segment, if the factor transformation returns the same values for them.
-
-if (checkAndGetDataType<DataTypeDate>(&type))
-{
-return Transform::FactorTransform::execute(UInt16(left.get<UInt64>()), date_lut)
-== Transform::FactorTransform::execute(UInt16(right.get<UInt64>()), date_lut)
-? is_monotonic
-: is_not_monotonic;
-}
-else
-{
-return Transform::FactorTransform::execute(UInt32(left.get<UInt64>()), date_lut)
-== Transform::FactorTransform::execute(UInt32(right.get<UInt64>()), date_lut)
-? is_monotonic
-: is_not_monotonic;
-}
-}
};

}
src/Functions/FunctionDateOrDateTimeToDateOrDate32.h (new file, 81 lines)
@@ -0,0 +1,81 @@
+#pragma once
+#include <Functions/IFunctionDateOrDateTime.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+}
+
+template <typename Transform>
+class FunctionDateOrDateTimeToDateOrDate32 : public IFunctionDateOrDateTime<Transform>, WithContext
+{
+public:
+const bool enable_extended_results_for_datetime_functions = false;
+
+static FunctionPtr create(ContextPtr context_)
+{
+return std::make_shared<FunctionDateOrDateTimeToDateOrDate32>(context_);
+}
+
+explicit FunctionDateOrDateTimeToDateOrDate32(ContextPtr context_)
+: WithContext(context_)
+, enable_extended_results_for_datetime_functions(context_->getSettingsRef().enable_extended_results_for_datetime_functions)
+{
+}
+
+DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
+{
+this->checkArguments(arguments, /*is_result_type_date_or_date32*/ true);
+
+const IDataType * from_type = arguments[0].type.get();
+WhichDataType which(from_type);
+
+/// If the time zone is specified but empty, throw an exception.
+/// only validate the time_zone part if the number of arguments is 2.
+if ((which.isDateTime() || which.isDateTime64()) && arguments.size() == 2
+&& extractTimeZoneNameFromFunctionArguments(arguments, 1, 0).empty())
+throw Exception(
+"Function " + this->getName() + " supports a 2nd argument (optional) that must be non-empty and be a valid time zone",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+if ((which.isDate32() || which.isDateTime64()) && enable_extended_results_for_datetime_functions)
+return std::make_shared<DataTypeDate32>();
+else
+return std::make_shared<DataTypeDate>();
+}
+
+ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
+{
+const IDataType * from_type = arguments[0].type.get();
+WhichDataType which(from_type);
+
+if (which.isDate())
+return DateTimeTransformImpl<DataTypeDate, DataTypeDate, Transform>::execute(arguments, result_type, input_rows_count);
+else if (which.isDate32())
+if (enable_extended_results_for_datetime_functions)
+return DateTimeTransformImpl<DataTypeDate32, DataTypeDate32, Transform, /*is_extended_result*/ true>::execute(arguments, result_type, input_rows_count);
+else
+return DateTimeTransformImpl<DataTypeDate32, DataTypeDate, Transform>::execute(arguments, result_type, input_rows_count);
+else if (which.isDateTime())
+return DateTimeTransformImpl<DataTypeDateTime, DataTypeDate, Transform>::execute(arguments, result_type, input_rows_count);
+else if (which.isDateTime64())
+{
+const auto scale = static_cast<const DataTypeDateTime64 *>(from_type)->getScale();
+
+const TransformDateTime64<Transform> transformer(scale);
+if (enable_extended_results_for_datetime_functions)
+return DateTimeTransformImpl<DataTypeDateTime64, DataTypeDate32, decltype(transformer), /*is_extended_result*/ true>::execute(arguments, result_type, input_rows_count, transformer);
+else
+return DateTimeTransformImpl<DataTypeDateTime64, DataTypeDate, decltype(transformer)>::execute(arguments, result_type, input_rows_count, transformer);
+}
+else
+throw Exception("Illegal type " + arguments[0].type->getName() + " of argument of function " + this->getName(),
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+}
+
+};
+
+}
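Note: this class is meant to be instantiated once per transform; the concrete aliases and factory registrations live in the function implementation files, which are not part of this excerpt. A hypothetical instantiation, purely for illustration:

    // Hypothetical alias, for illustration only (the real registrations are outside this diff):
    using FunctionToStartOfYear = FunctionDateOrDateTimeToDateOrDate32<ToStartOfYearImpl>;
    // Registered through the usual factory pattern, e.g. factory.registerFunction<FunctionToStartOfYear>();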
@@ -1,14 +1,5 @@
#pragma once
-#include <DataTypes/DataTypeDate.h>
-#include <DataTypes/DataTypeDate32.h>
-#include <DataTypes/DataTypeDateTime.h>
-#include <Functions/IFunction.h>
-#include <DataTypes/DataTypeDateTime64.h>
-#include <Functions/extractTimeZoneFromFunctionArguments.h>
-#include <Functions/DateTimeTransforms.h>
-#include <Functions/TransformDateTime64.h>
-#include <IO/WriteHelpers.h>
+#include <Functions/IFunctionDateOrDateTime.h>

namespace DB
{
@@ -16,59 +7,18 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
-extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

/// See DateTimeTransforms.h
template <typename ToDataType, typename Transform>
-class FunctionDateOrDateTimeToSomething : public IFunction
+class FunctionDateOrDateTimeToSomething : public IFunctionDateOrDateTime<Transform>
{
public:
-static constexpr auto name = Transform::name;
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionDateOrDateTimeToSomething>(); }

-String getName() const override
-{
-return name;
-}
-
-bool isVariadic() const override { return true; }
-bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
-size_t getNumberOfArguments() const override { return 0; }
-
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
-if (arguments.size() == 1)
-{
-if (!isDateOrDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
-throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
-+ ". Should be a date or a date with time",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-}
-else if (arguments.size() == 2)
-{
-if (!isDateOrDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
-throw Exception(
-"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
-+ ". Should be a date or a date with time",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if (!isString(arguments[1].type))
-throw Exception(
-"Function " + getName() + " supports 1 or 2 arguments. The 1st argument "
-"must be of type Date or DateTime. The 2nd argument (optional) must be "
-"a constant string with timezone name",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-if ((isDate(arguments[0].type) || isDate32(arguments[0].type)) && (std::is_same_v<ToDataType, DataTypeDate> || std::is_same_v<ToDataType, DataTypeDate32>))
-throw Exception(
-"The timezone argument of function " + getName() + " is allowed only when the 1st argument has the type DateTime",
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-}
-else
-throw Exception(
-"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
-+ ", should be 1 or 2",
-ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-
+this->checkArguments(arguments, (std::is_same_v<ToDataType, DataTypeDate> || std::is_same_v<ToDataType, DataTypeDate32>));
/// For DateTime, if time zone is specified, attach it to type.
/// If the time zone is specified but empty, throw an exception.
@@ -79,7 +29,7 @@ public:
/// to accommodate functions like toStartOfDay(today()), toStartOfDay(yesterday()) etc.
if (arguments.size() == 2 && time_zone.empty())
throw Exception(
-"Function " + getName() + " supports a 2nd argument (optional) that must be non-empty and be a valid time zone",
+"Function " + this->getName() + " supports a 2nd argument (optional) that must be non-empty and be a valid time zone",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
return std::make_shared<ToDataType>(time_zone);
}
@@ -109,9 +59,6 @@ public:
return std::make_shared<ToDataType>();
}

-bool useDefaultImplementationForConstants() const override { return true; }
-ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
-
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
const IDataType * from_type = arguments[0].type.get();
@@ -131,51 +78,10 @@ public:
return DateTimeTransformImpl<DataTypeDateTime64, ToDataType, decltype(transformer)>::execute(arguments, result_type, input_rows_count, transformer);
}
else
-throw Exception("Illegal type " + arguments[0].type->getName() + " of argument of function " + getName(),
+throw Exception("Illegal type " + arguments[0].type->getName() + " of argument of function " + this->getName(),
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}

-bool hasInformationAboutMonotonicity() const override
-{
-return true;
-}
-
-Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
-{
-if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
-return { .is_monotonic = true, .is_always_monotonic = true };
-
-const IFunction::Monotonicity is_monotonic = { .is_monotonic = true };
-const IFunction::Monotonicity is_not_monotonic;
-
-const DateLUTImpl * date_lut = &DateLUT::instance();
-if (const auto * timezone = dynamic_cast<const TimezoneMixin *>(&type))
-date_lut = &timezone->getTimeZone();
-
-if (left.isNull() || right.isNull())
-return is_not_monotonic;
-
-/// The function is monotonous on the [left, right] segment, if the factor transformation returns the same values for them.
-
-if (checkAndGetDataType<DataTypeDate>(&type))
-{
-return Transform::FactorTransform::execute(UInt16(left.get<UInt64>()), *date_lut)
-== Transform::FactorTransform::execute(UInt16(right.get<UInt64>()), *date_lut)
-? is_monotonic : is_not_monotonic;
-}
-else if (checkAndGetDataType<DataTypeDate32>(&type))
-{
-return Transform::FactorTransform::execute(Int32(left.get<UInt64>()), *date_lut)
-== Transform::FactorTransform::execute(Int32(right.get<UInt64>()), *date_lut)
-? is_monotonic : is_not_monotonic;
-}
-else
-{
-return Transform::FactorTransform::execute(UInt32(left.get<UInt64>()), *date_lut)
-== Transform::FactorTransform::execute(UInt32(right.get<UInt64>()), *date_lut)
-? is_monotonic : is_not_monotonic;
-}
-}
};

}
@@ -2381,7 +2381,7 @@ using FunctionToDate = FunctionConvert<DataTypeDate, NameToDate, ToDateMonotonic
using FunctionToDate32 = FunctionConvert<DataTypeDate32, NameToDate32, ToDateMonotonicity>;
using FunctionToDateTime = FunctionConvert<DataTypeDateTime, NameToDateTime, ToDateTimeMonotonicity>;
using FunctionToDateTime32 = FunctionConvert<DataTypeDateTime, NameToDateTime32, ToDateTimeMonotonicity>;
-using FunctionToDateTime64 = FunctionConvert<DataTypeDateTime64, NameToDateTime64, UnknownMonotonicity>;
+using FunctionToDateTime64 = FunctionConvert<DataTypeDateTime64, NameToDateTime64, ToDateTimeMonotonicity>;
using FunctionToUUID = FunctionConvert<DataTypeUUID, NameToUUID, ToNumberMonotonicity<UInt128>>;
using FunctionToString = FunctionConvert<DataTypeString, NameToString, ToStringMonotonicity>;
using FunctionToUnixTimestamp = FunctionConvert<DataTypeUInt32, NameToUnixTimestamp, ToNumberMonotonicity<UInt32>>;
@@ -649,7 +649,7 @@ public:
for (unsigned int region_id : region_ids)
{
const StringRef & name_ref = dict.getRegionName(region_id, language);
-col_to->insertDataWithTerminatingZero(name_ref.data, name_ref.size + 1);
+col_to->insertData(name_ref.data, name_ref.size);
}

return col_to;
@@ -140,10 +140,6 @@ struct NumericArraySource : public ArraySourceImpl<NumericArraySource<T>>


/// The methods can be virtual or not depending on the template parameter. See IStringSource.
-#if !defined(__clang__)
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Wsuggest-override"
-#elif __clang_major__ >= 11
#pragma GCC diagnostic push
#ifdef HAS_SUGGEST_OVERRIDE
# pragma GCC diagnostic ignored "-Wsuggest-override"
@@ -151,7 +147,6 @@ struct NumericArraySource : public ArraySourceImpl<NumericArraySource<T>>
#ifdef HAS_SUGGEST_DESTRUCTOR_OVERRIDE
# pragma GCC diagnostic ignored "-Wsuggest-destructor-override"
#endif
-#endif

template <typename Base>
struct ConstSource : public Base
@@ -233,9 +228,7 @@ struct ConstSource : public Base
}
};

-#if !defined(__clang__) || __clang_major__ >= 11
#pragma GCC diagnostic pop
-#endif

struct StringSource
{
src/Functions/IFunctionCustomWeek.h (new file, 122 lines)
@@ -0,0 +1,122 @@
+#pragma once
+#include <DataTypes/DataTypeDate.h>
+#include <DataTypes/DataTypeDate32.h>
+#include <DataTypes/DataTypeDateTime.h>
+#include <DataTypes/DataTypeDateTime64.h>
+#include <Functions/CustomWeekTransforms.h>
+#include <Functions/IFunction.h>
+#include <Functions/TransformDateTime64.h>
+#include <IO/WriteHelpers.h>
+#include <Interpreters/Context.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+}
+
+template <typename Transform>
+class IFunctionCustomWeek : public IFunction
+{
+public:
+static constexpr auto name = Transform::name;
+String getName() const override { return name; }
+bool isVariadic() const override { return true; }
+bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+size_t getNumberOfArguments() const override { return 0; }
+bool useDefaultImplementationForConstants() const override { return true; }
+ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }
+
+bool hasInformationAboutMonotonicity() const override { return true; }
+
+Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
+{
+if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
+return {.is_monotonic = true, .is_always_monotonic = true};
+
+const IFunction::Monotonicity is_monotonic = {.is_monotonic = true};
+const IFunction::Monotonicity is_not_monotonic;
+
+/// This method is called only if the function has one argument. Therefore, we do not care about the non-local time zone.
+const DateLUTImpl & date_lut = DateLUT::instance();
+
+if (left.isNull() || right.isNull())
+return {};
+
+/// The function is monotonous on the [left, right] segment, if the factor transformation returns the same values for them.
+
+if (checkAndGetDataType<DataTypeDate>(&type))
+{
+return Transform::FactorTransform::execute(UInt16(left.get<UInt64>()), date_lut)
+== Transform::FactorTransform::execute(UInt16(right.get<UInt64>()), date_lut)
+? is_monotonic
+: is_not_monotonic;
+}
+else
+{
+return Transform::FactorTransform::execute(UInt32(left.get<UInt64>()), date_lut)
+== Transform::FactorTransform::execute(UInt32(right.get<UInt64>()), date_lut)
+? is_monotonic
+: is_not_monotonic;
+}
+}
+
+protected:
+void checkArguments(const ColumnsWithTypeAndName & arguments, bool is_result_type_date_or_date32 = false) const
+{
+if (arguments.size() == 1)
+{
+if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
+throw Exception(
+"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
++ ". Must be Date, Date32, DateTime or DateTime64.",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+}
+else if (arguments.size() == 2)
+{
+if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
+throw Exception(
+"Illegal type " + arguments[0].type->getName() + " of 1st argument of function " + getName()
++ ". Must be Date, Date32, DateTime or DateTime64.",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+if (!isUInt8(arguments[1].type))
+throw Exception(
+"Illegal type of 2nd (optional) argument of function " + getName()
++ ". Must be constant UInt8 (week mode).",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+}
+else if (arguments.size() == 3)
+{
+if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
+throw Exception(
+"Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
++ ". Must be Date, Date32, DateTime or DateTime64",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+if (!isUInt8(arguments[1].type))
+throw Exception(
+"Illegal type of 2nd (optional) argument of function " + getName()
++ ". Must be constant UInt8 (week mode).",
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+if (!isString(arguments[2].type))
+throw Exception(
+"Illegal type of 3rd (optional) argument of function " + getName()
|
||||||
|
+ ". Must be constant string (timezone name).",
|
||||||
|
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||||
|
if ((isDate(arguments[0].type) || isDate32(arguments[0].type)) && is_result_type_date_or_date32)
|
||||||
|
throw Exception(
|
||||||
|
"The timezone argument of function " + getName() + " is allowed only when the 1st argument is DateTime or DateTime64.",
|
||||||
|
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
throw Exception(
|
||||||
|
"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
|
||||||
|
+ ", expected 1, 2 or 3.",
|
||||||
|
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
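The monotonicity logic above boils down to one check: the week-level transform is monotonic on [left, right] when a coarser FactorTransform (such as the year) agrees at both endpoints. A self-contained sketch of that reasoning with hypothetical toy transforms, not the real ClickHouse classes:

#include <cstdint>
#include <iostream>

/// Toy stand-ins for Transform::FactorTransform. Days are counted from an arbitrary epoch.
struct ToYearish { static uint32_t execute(uint32_t day) { return day / 365; } };          /// coarse bucket
struct ToWeekish { static uint32_t execute(uint32_t day) { return (day % 365) / 7; } };    /// resets every "year"

/// The week number grows monotonically as long as both endpoints fall into the same year
/// bucket; that is exactly the comparison getMonotonicityForRange performs above.
template <typename FactorTransform>
bool isMonotonicOnRange(uint32_t left, uint32_t right)
{
    return FactorTransform::execute(left) == FactorTransform::execute(right);
}

int main()
{
    std::cout << isMonotonicOnRange<ToYearish>(10, 300) << '\n';    /// 1: same "year", monotonic
    std::cout << isMonotonicOnRange<ToYearish>(300, 400) << '\n';   /// 0: crosses a "year", week number wraps
}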
118  src/Functions/IFunctionDateOrDateTime.h  Normal file
@@ -0,0 +1,118 @@
+#pragma once
+#include <DataTypes/DataTypeDate.h>
+#include <DataTypes/DataTypeDate32.h>
+#include <DataTypes/DataTypeDateTime.h>
+#include <Functions/IFunction.h>
+#include <DataTypes/DataTypeDateTime64.h>
+#include <Functions/extractTimeZoneFromFunctionArguments.h>
+#include <Functions/DateTimeTransforms.h>
+#include <Functions/TransformDateTime64.h>
+#include <IO/WriteHelpers.h>
+#include <Interpreters/Context.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+}
+
+template <typename Transform>
+class IFunctionDateOrDateTime : public IFunction
+{
+public:
+    static constexpr auto name = Transform::name;
+    String getName() const override { return name; }
+
+    bool isVariadic() const override { return true; }
+
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
+
+    size_t getNumberOfArguments() const override { return 0; }
+
+    bool useDefaultImplementationForConstants() const override { return true; }
+
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
+
+    bool hasInformationAboutMonotonicity() const override
+    {
+        return true;
+    }
+
+    Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
+    {
+        if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
+            return { .is_monotonic = true, .is_always_monotonic = true };
+
+        const IFunction::Monotonicity is_monotonic = { .is_monotonic = true };
+        const IFunction::Monotonicity is_not_monotonic;
+
+        const DateLUTImpl * date_lut = &DateLUT::instance();
+        if (const auto * timezone = dynamic_cast<const TimezoneMixin *>(&type))
+            date_lut = &timezone->getTimeZone();
+
+        if (left.isNull() || right.isNull())
+            return is_not_monotonic;
+
+        /// The function is monotonous on the [left, right] segment, if the factor transformation returns the same values for them.
+
+        if (checkAndGetDataType<DataTypeDate>(&type))
+        {
+            return Transform::FactorTransform::execute(UInt16(left.get<UInt64>()), *date_lut)
+                == Transform::FactorTransform::execute(UInt16(right.get<UInt64>()), *date_lut)
+                ? is_monotonic : is_not_monotonic;
+        }
+        else if (checkAndGetDataType<DataTypeDate32>(&type))
+        {
+            return Transform::FactorTransform::execute(Int32(left.get<UInt64>()), *date_lut)
+                == Transform::FactorTransform::execute(Int32(right.get<UInt64>()), *date_lut)
+                ? is_monotonic : is_not_monotonic;
+        }
+        else
+        {
+            return Transform::FactorTransform::execute(UInt32(left.get<UInt64>()), *date_lut)
+                == Transform::FactorTransform::execute(UInt32(right.get<UInt64>()), *date_lut)
+                ? is_monotonic : is_not_monotonic;
+        }
+    }
+
+protected:
+    void checkArguments(const ColumnsWithTypeAndName & arguments, bool is_result_type_date_or_date32) const
+    {
+        if (arguments.size() == 1)
+        {
+            if (!isDateOrDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
+                throw Exception(
+                    "Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
+                        + ". Should be Date, Date32, DateTime or DateTime64",
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+        }
+        else if (arguments.size() == 2)
+        {
+            if (!isDateOrDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
+                throw Exception(
+                    "Illegal type " + arguments[0].type->getName() + " of argument of function " + getName()
+                        + ". Should be Date, Date32, DateTime or DateTime64",
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+            if (!isString(arguments[1].type))
+                throw Exception(
+                    "Function " + getName() + " supports 1 or 2 arguments. The optional 2nd argument must be "
+                    "a constant string with a timezone name",
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+            if ((isDate(arguments[0].type) || isDate32(arguments[0].type)) && is_result_type_date_or_date32)
+                throw Exception(
+                    "The timezone argument of function " + getName() + " is allowed only when the 1st argument has the type DateTime or DateTime64",
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+        }
+        else
+            throw Exception(
+                "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
+                    + ", should be 1 or 2",
+                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+    }
+};
+
+}
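checkArguments in both new headers is plain count-then-type validation. A standalone sketch of the same checks (flattened slightly), using hypothetical type tags instead of ClickHouse's IDataType machinery:

#include <stdexcept>
#include <string>
#include <vector>

enum class TypeTag { Date, Date32, DateTime, DateTime64, String, Other };

/// Same shape as IFunctionDateOrDateTime::checkArguments: a date-like 1st argument,
/// an optional 2nd argument that must be a (timezone) string, nothing else allowed.
void checkArguments(const std::vector<TypeTag> & args, const std::string & function_name)
{
    const auto is_date_like = [](TypeTag t)
    {
        return t == TypeTag::Date || t == TypeTag::Date32 || t == TypeTag::DateTime || t == TypeTag::DateTime64;
    };

    if (args.empty() || args.size() > 2)
        throw std::invalid_argument("Function " + function_name + " expects 1 or 2 arguments");
    if (!is_date_like(args[0]))
        throw std::invalid_argument("Illegal type of 1st argument of function " + function_name);
    if (args.size() == 2 && args[1] != TypeTag::String)
        throw std::invalid_argument("2nd argument of function " + function_name + " must be a constant timezone string");
}

int main()
{
    checkArguments({TypeTag::DateTime, TypeTag::String}, "toStartOfMonth");   /// passes
    try
    {
        checkArguments({TypeTag::Other}, "toStartOfMonth");                   /// throws: illegal 1st argument
    }
    catch (const std::invalid_argument &)
    {
    }
}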
@@ -12,11 +12,6 @@
 
 /// Warning in boost::geometry during template strategy substitution.
 #pragma GCC diagnostic push
-
-#if !defined(__clang__)
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-
 #pragma GCC diagnostic ignored "-Wunused-parameter"
 
 #include <boost/geometry.hpp>
@@ -286,16 +281,9 @@ void PointInPolygonWithGrid<CoordinateType>::calcGridAttributes(
     const Point & min_corner = box.min_corner();
     const Point & max_corner = box.max_corner();
 
-#pragma GCC diagnostic push
-#if !defined(__clang__)
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-
     cell_width = (max_corner.x() - min_corner.x()) / grid_size;
     cell_height = (max_corner.y() - min_corner.y()) / grid_size;
 
-#pragma GCC diagnostic pop
-
     if (cell_width == 0 || cell_height == 0)
     {
         has_empty_bound = true;
@@ -330,10 +318,6 @@ void PointInPolygonWithGrid<CoordinateType>::buildGrid()
 
     for (size_t row = 0; row < grid_size; ++row)
     {
-#pragma GCC diagnostic push
-#if !defined(__clang__)
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
         CoordinateType y_min = min_corner.y() + row * cell_height;
         CoordinateType y_max = min_corner.y() + (row + 1) * cell_height;
 
@@ -341,7 +325,6 @@ void PointInPolygonWithGrid<CoordinateType>::buildGrid()
         {
             CoordinateType x_min = min_corner.x() + col * cell_width;
             CoordinateType x_max = min_corner.x() + (col + 1) * cell_width;
-#pragma GCC diagnostic pop
             Box cell_box(Point(x_min, y_min), Point(x_max, y_max));
 
             MultiPolygon intersection;
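The grid attributes touched above follow directly from the bounding box: it is split into grid_size x grid_size cells, and a point is bucketed by division. A small self-contained sketch of that bucketing (hypothetical names, no boost::geometry):

#include <cstddef>
#include <iostream>
#include <optional>
#include <utility>

struct Grid
{
    double min_x;
    double min_y;
    double cell_width;
    double cell_height;
    size_t grid_size;

    /// Map a point to its (row, col) cell, the same bucketing buildGrid() iterates over.
    std::optional<std::pair<size_t, size_t>> cellOf(double x, double y) const
    {
        if (cell_width == 0 || cell_height == 0)
            return std::nullopt;    /// degenerate box, mirrors has_empty_bound above
        const double col = (x - min_x) / cell_width;
        const double row = (y - min_y) / cell_height;
        if (col < 0 || row < 0 || col >= grid_size || row >= grid_size)
            return std::nullopt;    /// outside the bounding box
        return std::pair<size_t, size_t>{static_cast<size_t>(row), static_cast<size_t>(col)};
    }
};

int main()
{
    /// Bounding box [0, 8) x [0, 8) and a 4x4 grid, so cell_width = cell_height = (8 - 0) / 4 = 2.
    const Grid grid{0.0, 0.0, (8.0 - 0.0) / 4, (8.0 - 0.0) / 4, 4};
    if (const auto cell = grid.cellOf(5.0, 1.5))
        std::cout << "row=" << cell->first << " col=" << cell->second << '\n';    /// row=0 col=2
}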
@@ -87,6 +87,46 @@ public:
         return wrapped_transform.execute(t, std::forward<Args>(args)...);
     }
 
+    template <typename ... Args>
+    inline auto NO_SANITIZE_UNDEFINED execute_extended_result(const DateTime64 & t, Args && ... args) const
+    {
+        /// Type conversion from float to integer may be required.
+        /// We are Ok with implementation specific result for out of range and denormals conversion.
+
+        if constexpr (TransformHasExecuteOverload_v<DateTime64, decltype(scale_multiplier), Args...>)
+        {
+            return wrapped_transform.execute_extended_result(t, scale_multiplier, std::forward<Args>(args)...);
+        }
+        else if constexpr (TransformHasExecuteOverload_v<DecimalUtils::DecimalComponents<DateTime64>, Args...>)
+        {
+            auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
+
+            const auto result = wrapped_transform.execute_extended_result(components, std::forward<Args>(args)...);
+            using ResultType = std::decay_t<decltype(result)>;
+
+            if constexpr (std::is_same_v<DecimalUtils::DecimalComponents<DateTime64>, ResultType>)
+            {
+                return DecimalUtils::decimalFromComponentsWithMultiplier<DateTime64>(result, scale_multiplier);
+            }
+            else
+            {
+                return result;
+            }
+        }
+        else
+        {
+            const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
+            return wrapped_transform.execute_extended_result(static_cast<Int64>(components.whole), std::forward<Args>(args)...);
+        }
+    }
+
+    template <typename T, typename ... Args, typename = std::enable_if_t<!std::is_same_v<T, DateTime64>>>
+    inline auto execute_extended_result(const T & t, Args && ... args) const
+    {
+        return wrapped_transform.execute_extended_result(t, std::forward<Args>(args)...);
+    }
+
 private:
     DateTime64::NativeType scale_multiplier = 1;
     Transform wrapped_transform = {};
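The fallback branch of execute_extended_result splits a DateTime64 tick count into whole seconds and a sub-second remainder using the scale multiplier. A standalone sketch of that arithmetic for non-negative values (the real DecimalUtils::splitWithScaleMultiplier also handles negative values, which are ignored here):

#include <cstdint>
#include <iostream>

struct DecimalComponents
{
    int64_t whole;        /// seconds since epoch
    int64_t fractional;   /// sub-second ticks, 0 <= fractional < scale_multiplier
};

/// DateTime64(scale) stores a single Int64 tick count; scale_multiplier = 10^scale.
DecimalComponents splitWithScaleMultiplier(int64_t value, int64_t scale_multiplier)
{
    /// Valid for value >= 0; the full implementation also keeps the fractional part
    /// non-negative when the value is negative.
    return {value / scale_multiplier, value % scale_multiplier};
}

int main()
{
    const int64_t scale_multiplier = 1000;      /// scale = 3 -> milliseconds
    const int64_t t = 1668124800123;            /// 2022-11-11 00:00:00.123 UTC in milliseconds
    const auto components = splitWithScaleMultiplier(t, scale_multiplier);
    std::cout << components.whole << "." << components.fractional << '\n';   /// 1668124800.123
}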
@@ -83,7 +83,7 @@ public:
         for (size_t i = 0; i < input_rows_count; ++i)
         {
             if (const auto * symbol = symbol_index.findSymbol(reinterpret_cast<const void *>(data[i])))
-                result_column->insertDataWithTerminatingZero(symbol->name, strlen(symbol->name) + 1);
+                result_column->insertData(symbol->name, strlen(symbol->name));
             else
                 result_column->insertDefault();
         }
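The function above resolves an instruction address to a name through ClickHouse's own SymbolIndex; a rough standalone analogue of the same lookup is POSIX dladdr (an approximation of the idea, not what the function uses internally):

// Build (Linux/glibc): g++ -rdynamic addr2sym.cpp -ldl
#include <dlfcn.h>
#include <iostream>

void exampleFunction() {}

int main()
{
    Dl_info info{};
    /// dladdr fills info.dli_sname with the nearest exported symbol, if any.
    if (dladdr(reinterpret_cast<void *>(&exampleFunction), &info) && info.dli_sname)
        std::cout << info.dli_sname << '\n';   /// typically "_Z15exampleFunctionv"
    else
        std::cout << "<symbol not found>\n";
}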
@@ -78,15 +78,15 @@ public:
 
         for (size_t i = 0; i < input_rows_count; ++i)
         {
-            StringRef source = column_concrete->getDataAtWithTerminatingZero(i);
+            StringRef source = column_concrete->getDataAt(i);
             auto demangled = tryDemangle(source.data);
             if (demangled)
             {
-                result_column->insertDataWithTerminatingZero(demangled.get(), strlen(demangled.get()) + 1);
+                result_column->insertData(demangled.get(), strlen(demangled.get()));
             }
             else
             {
-                result_column->insertDataWithTerminatingZero(source.data, source.size);
+                result_column->insertData(source.data, source.size);
             }
         }
 
@@ -102,4 +102,3 @@ REGISTER_FUNCTION(Demangle)
 }
 
 }
-
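tryDemangle in the loop above wraps the Itanium ABI demangler; the underlying primitive is abi::__cxa_demangle, shown here directly (a sketch; the real wrapper returns a smart pointer that frees the buffer):

#include <cstdlib>
#include <cxxabi.h>
#include <iostream>

int main()
{
    const char * mangled = "_Z15exampleFunctionv";

    int status = 0;
    /// Returns a malloc()-ed buffer on success (status == 0); the caller must free() it.
    char * demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);

    if (status == 0 && demangled)
    {
        std::cout << demangled << '\n';   /// exampleFunction()
        std::free(demangled);
    }
    else
    {
        /// Same fallback idea as the loop above: keep the original string on failure.
        std::cout << mangled << '\n';
    }
}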
@@ -1,6 +1,7 @@
 #include <DataTypes/DataTypesNumber.h>
 #include <Functions/CustomWeekTransforms.h>
 #include <Functions/FunctionCustomWeekToSomething.h>
+#include <Functions/FunctionCustomWeekToDateOrDate32.h>
 #include <Functions/FunctionFactory.h>
 #include <Functions/IFunction.h>
 
@@ -9,7 +10,7 @@ namespace DB
 {
 using FunctionToWeek = FunctionCustomWeekToSomething<DataTypeUInt8, ToWeekImpl>;
 using FunctionToYearWeek = FunctionCustomWeekToSomething<DataTypeUInt32, ToYearWeekImpl>;
-using FunctionToStartOfWeek = FunctionCustomWeekToSomething<DataTypeDate, ToStartOfWeekImpl>;
+using FunctionToStartOfWeek = FunctionCustomWeekToDateOrDate32<ToStartOfWeekImpl>;
 
 REGISTER_FUNCTION(ToCustomWeek)
 {
@@ -1,12 +1,12 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/DateTimeTransforms.h>
-#include <Functions/FunctionDateOrDateTimeToSomething.h>
+#include <Functions/FunctionDateOrDateTimeToDateOrDate32.h>
 
 
 namespace DB
 {
 
-using FunctionToLastDayOfMonth = FunctionDateOrDateTimeToSomething<DataTypeDate, ToLastDayOfMonthImpl>;
+using FunctionToLastDayOfMonth = FunctionDateOrDateTimeToDateOrDate32<ToLastDayOfMonthImpl>;
 
 REGISTER_FUNCTION(ToLastDayOfMonth)
 {
@@ -1,12 +1,12 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/DateTimeTransforms.h>
-#include <Functions/FunctionDateOrDateTimeToSomething.h>
+#include <Functions/FunctionDateOrDateTimeToDateOrDate32.h>
 
 
 namespace DB
 {
 
-using FunctionToMonday = FunctionDateOrDateTimeToSomething<DataTypeDate, ToMondayImpl>;
+using FunctionToMonday = FunctionDateOrDateTimeToDateOrDate32<ToMondayImpl>;
 
 REGISTER_FUNCTION(ToMonday)
 {
@@ -1,12 +1,12 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/DateTimeTransforms.h>
-#include <Functions/FunctionDateOrDateTimeToSomething.h>
+#include <Functions/FunctionDateOrDateTimeToDateOrDate32.h>
 
 
 namespace DB
 {
 
-using FunctionToStartOfISOYear = FunctionDateOrDateTimeToSomething<DataTypeDate, ToStartOfISOYearImpl>;
+using FunctionToStartOfISOYear = FunctionDateOrDateTimeToDateOrDate32<ToStartOfISOYearImpl>;
 
 REGISTER_FUNCTION(ToStartOfISOYear)
 {
@@ -1,12 +1,12 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/DateTimeTransforms.h>
-#include <Functions/FunctionDateOrDateTimeToSomething.h>
+#include <Functions/FunctionDateOrDateTimeToDateOrDate32.h>
 
 
 namespace DB
 {
 
-using FunctionToStartOfMonth = FunctionDateOrDateTimeToSomething<DataTypeDate, ToStartOfMonthImpl>;
+using FunctionToStartOfMonth = FunctionDateOrDateTimeToDateOrDate32<ToStartOfMonthImpl>;
 
 REGISTER_FUNCTION(ToStartOfMonth)
 {
@@ -1,12 +1,12 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/DateTimeTransforms.h>
-#include <Functions/FunctionDateOrDateTimeToSomething.h>
+#include <Functions/FunctionDateOrDateTimeToDateOrDate32.h>
 
 
 namespace DB
 {
 
-using FunctionToStartOfQuarter = FunctionDateOrDateTimeToSomething<DataTypeDate, ToStartOfQuarterImpl>;
+using FunctionToStartOfQuarter = FunctionDateOrDateTimeToDateOrDate32<ToStartOfQuarterImpl>;
 
 REGISTER_FUNCTION(ToStartOfQuarter)
 {
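All of the registrations above change only the wrapper template: the result type now follows the argument (Date in, Date out; Date32 or DateTime64 in, Date32 out) instead of being pinned to Date. A toy sketch of why the pinned result type loses pre-1970 dates, with hypothetical day types rather than the ClickHouse templates:

#include <cstdint>
#include <iostream>

using DayNum = uint16_t;          /// toy "Date": days since 1970-01-01, limited range
using ExtendedDayNum = int32_t;   /// toy "Date32": wider range, can go before 1970

/// Round a day number down to a multiple of 7 (floor, not truncation), a stand-in
/// for a real start-of-week calculation.
ExtendedDayNum startOfWeek(ExtendedDayNum d)
{
    return (d >= 0) ? d - d % 7 : d - ((d % 7 + 7) % 7);
}

/// Pinned result type (old behaviour): the result is squeezed into the narrow DayNum.
DayNum toStartOfWeekPinned(ExtendedDayNum day) { return static_cast<DayNum>(startOfWeek(day)); }

/// Result type follows the input (the idea behind FunctionDateOrDateTimeToDateOrDate32).
template <typename Day>
Day toStartOfWeekFollowInput(Day day) { return static_cast<Day>(startOfWeek(day)); }

int main()
{
    const ExtendedDayNum before_epoch = -3;   /// a Date32-style value before 1970-01-01
    std::cout << toStartOfWeekPinned(before_epoch) << '\n';        /// 65529: wrapped into the unsigned Date range
    std::cout << toStartOfWeekFollowInput(before_epoch) << '\n';   /// -7: stays representable in the wide type
}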
Some files were not shown because too many files have changed in this diff.