Merge remote-tracking branch 'rschu1ze/master' into punycode-is-cool

Robert Schulze 2023-12-19 06:27:23 +00:00
commit 00c407ecc1
337 changed files with 7223 additions and 4986 deletions


@@ -18,9 +18,6 @@ runs:
echo "Setup the common ENV variables"
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
IMAGES_PATH=${{runner.temp}}/images_path
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
@@ -30,6 +27,4 @@ runs:
shell: bash
run: |
# to remove any leftovers
sudo rm -fr "$TEMP_PATH"
mkdir -p "$REPO_COPY"
cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
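For reference, the quoted-heredoc append used above works like this (a minimal standalone sketch; the variable names and values here are illustrative, not taken from the workflow):

# Append variables to $GITHUB_ENV so that all later steps of the job see them.
# Quoting the delimiter ('EOF') stops the shell from expanding $-signs inside
# the block; the Actions runner has already substituted any ${{ ... }} parts.
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=/tmp/example-job
REPORTS_PATH=/tmp/example-reports
EOF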


@@ -10,27 +10,21 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
CheckLabels:
RunConfig:
runs-on: [self-hosted, style-checker]
# Run the first check always, even if the CI is cancelled
if: ${{ always() }}
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
PythonUnitTests:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -40,273 +34,235 @@ jobs:
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
if: ${{ !failure() && !cancelled() }}
needs:
- BuilderDebRelease
- RunConfig
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebDebug
- BuilderDebRelease
- BuilderDebTsan
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
if: ${{ !failure() && !cancelled() }}
needs:
- RunConfig
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
- FunctionalStatelessTestAsan
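
The PrepareRunConfig step above relies on GitHub's multiline-output syntax. A minimal sketch of that mechanism in isolation (the file path is illustrative):

# Expose a multiline JSON file as the step output CI_DATA.
# The CI_DATA<<EOF ... EOF framing is the Actions syntax for multiline
# outputs; downstream jobs read it as steps.<step_id>.outputs.CI_DATA.
{
  echo 'CI_DATA<<EOF'
  cat "$RUNNER_TEMP/ci_run_data.json"
  echo 'EOF'
} >> "$GITHUB_OUTPUT"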


@@ -1,138 +0,0 @@
name: DocsCheck
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request:
types:
- synchronize
- reopened
- opened
branches:
- master
paths:
- '**.md'
- 'docker/docs/**'
- 'docs/**'
- 'utils/check-style/aspell-ignore/**'
- 'tests/ci/docs_check.py'
- '.github/workflows/docs_check.yml'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
DockerHubPushAarch64:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
# We need the additional `&& ! cancelled()` to keep the job cancellable
if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
DocsCheck:
needs: DockerHubPush
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docs check
runner_type: func-tester-aarch64
additional_envs: |
run_command: |
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
FinishCheck:
needs:
- StyleCheck
- DockerHubPush
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved


@@ -11,16 +11,14 @@ on: # yamllint disable-line rule:truthy
workflow_call:
jobs:
KeeperJepsenRelease:
uses: ./.github/workflows/reusable_test.yml
uses: ./.github/workflows/reusable_simple_job.yml
with:
test_name: Jepsen keeper check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
# ServerJepsenRelease:
# runs-on: [self-hosted, style-checker]
# uses: ./.github/workflows/reusable_test.yml
# uses: ./.github/workflows/reusable_simple_job.yml
# with:
# test_name: Jepsen server check
# runner_type: style-checker


@@ -8,19 +8,26 @@ on: # yamllint disable-line rule:truthy
# schedule:
# - cron: '0 0 2 31 1' # never for now
workflow_call:
inputs:
data:
description: json ci data
type: string
required: true
jobs:
BuilderFuzzers:
uses: ./.github/workflows/reusable_build.yml
with:
build_name: fuzzers
data: ${{ inputs.data }}
libFuzzerTest:
needs: [BuilderFuzzers]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: libFuzzer tests
runner_type: func-tester
data: ${{ inputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"

File diff suppressed because it is too large


@@ -13,67 +13,38 @@ jobs:
Debug:
# This task preserves the ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --rebuild-all-docker --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true
SonarCloud:
runs-on: [self-hosted, builder]
env:

File diff suppressed because it is too large


@@ -13,171 +13,190 @@ on: # yamllint disable-line rule:truthy
- '2[1-9].[1-9]'
jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 run_check.py
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebUBsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_ubsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebMsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_msan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebUBsan
- BuilderDebMsan
- BuilderDebDebug
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ !failure() && !cancelled() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
@@ -186,33 +205,18 @@ jobs:
- BuilderDebMsan
- BuilderDebDebug
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
@@ -232,282 +236,224 @@ jobs:
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, analyzer)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport


@@ -22,6 +22,10 @@ name: Build ClickHouse
description: the label of runner to use
default: builder
type: string
data:
description: json ci data
type: string
required: true
additional_envs:
description: additional ENV variables to setup the job
type: string
@@ -29,6 +33,7 @@ name: Build ClickHouse
jobs:
Build:
name: Build-${{inputs.build_name}}
if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -37,6 +42,7 @@ jobs:
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
@@ -44,6 +50,9 @@
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
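The DOCKER_TAG<<DOCKER_JSON framing above is the $GITHUB_ENV syntax for a multiline value: it nests a custom delimiter inside the outer heredoc that writes the env file. A sketch in isolation (the JSON payload is illustrative):

# Store a JSON document in one env variable via a custom delimiter,
# so later steps can read it back as $DOCKER_TAG.
cat >> "$GITHUB_ENV" << 'EOF'
DOCKER_TAG<<DOCKER_JSON
{"clickhouse/binary-builder": "latest-amd64"}
DOCKER_JSON
EOF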
@@ -60,20 +69,18 @@
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
- name: Build
run: |
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
- name: Post
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
- name: Mark as done
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
- name: Clean
if: always()
uses: ./.github/actions/clean

.github/workflows/reusable_docker.yml (new file)

@@ -0,0 +1,68 @@
name: Build docker images
'on':
workflow_call:
inputs:
data:
description: json with ci data from todo job
required: true
type: string
set_latest:
description: set latest tag for resulting multiarch manifest
required: false
type: boolean
default: false
jobs:
DockerBuildAarch64:
runs-on: [self-hosted, style-checker-aarch64]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
DockerBuildAmd64:
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix amd64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
DockerMultiArchManifest:
needs: [DockerBuildAmd64, DockerBuildAarch64]
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
if [ "${{ inputs.set_latest }}" == "true" ]; then
echo "latest tag will be set for resulting manifests"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
--set-latest
else
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}'
fi
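
The `!= '[]'` guards above skip a build job when its missing-images list is empty. The same test, written as standalone shell for clarity (assumes jq is available; the field names follow the workflow):

# Only rebuild aarch64 images when the config step reported missing ones.
missing=$(jq -c '.docker_data.missing_aarch64' ci_run_data.json)
if [ "$missing" != "[]" ]; then
  echo "aarch64 images to rebuild: $missing"
fi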


@@ -0,0 +1,90 @@
### For the pure soul who wishes to move it to another place
# https://github.com/orgs/community/discussions/9050
name: Simple job
'on':
workflow_call:
inputs:
test_name:
description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
required: true
type: string
runner_type:
description: the label of runner to use
required: true
type: string
run_command:
description: the command to launch the check
default: ""
required: false
type: string
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
submodules:
description: if the submodules should be checked out
required: false
type: boolean
default: false
additional_envs:
description: additional ENV variables to setup the job
type: string
working-directory:
description: sets custom working directory
type: string
default: ""
git_ref:
description: commit to use, merge commit for pr or head
required: false
type: string
default: ${{ github.event.after }} # no merge commit
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
Test:
runs-on: [self-hosted, '${{inputs.runner_type}}']
name: ${{inputs.test_name}}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Run
run: |
if [ -n '${{ inputs.working-directory }}' ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
${{ inputs.run_command }}
- name: Clean
if: always()
uses: ./.github/actions/clean


@@ -14,13 +14,10 @@ name: Testing workflow
required: true
type: string
run_command:
description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
required: true
description: the command to launch the check
default: ""
required: false
type: string
batches:
description: how many batches for the test will be launched
default: 1
type: number
checkout_depth:
description: the value of the git shallow checkout
required: false
@@ -34,80 +31,89 @@ name: Testing workflow
additional_envs:
description: additional ENV variables to setup the job
type: string
data:
description: ci data
type: string
required: true
working-directory:
description: sets custom working directory
type: string
default: ""
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
PrepareStrategy:
# batches < 1 is misconfiguration,
# and we need this step only for batches > 1
if: ${{ inputs.batches > 1 }}
runs-on: [self-hosted, style-checker-aarch64]
outputs:
batches: ${{steps.batches.outputs.batches}}
steps:
- name: Calculate batches
id: batches
run: |
batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
Test:
# If PrepareStrategy is skipped for batches == 1,
# we still need to launch the test.
# `! failure()` is mandatory here to launch on skipped Job
# `&& !cancelled()` to allow the be cancelable
if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
# Do not add `-0` to the end if there's only one batch
name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
runs-on: [self-hosted, '${{inputs.runner_type}}']
needs: [PrepareStrategy]
if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
strategy:
fail-fast: false # we always wait for entire matrix
matrix:
# if PrepareStrategy does not have batches, we use 0
batch: ${{ needs.PrepareStrategy.outputs.batches
&& fromJson(needs.PrepareStrategy.outputs.batches)
|| fromJson('[0]')}}
batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Setup batch
if: ${{ inputs.batches > 1}}
if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
run: |
cat >> "$GITHUB_ENV" << 'EOF'
RUN_BY_HASH_NUM=${{matrix.batch}}
RUN_BY_HASH_TOTAL=${{inputs.batches}}
RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
EOF
- name: Run test
run: ${{inputs.run_command}}
- name: Pre run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
- name: Run
run: |
if [ -n "${{ inputs.working-directory }}" ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then
echo "Running command from workflow input"
${{ inputs.run_command }}
else
echo "Running command from job config"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}'
fi
- name: Post run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
- name: Mark as done
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
- name: Clean
if: always()
uses: ./.github/actions/clean
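For comparison, this is what the removed PrepareStrategy job computed, and how one matrix entry maps onto the RUN_BY_HASH_* variables (values illustrative; the batch list now arrives precomputed inside `inputs.data`):

# Old-style batch list: [0, 1, ..., num_batches-1]; each element became
# one matrix job. Now jobs_data.jobs_params[<job>].batches carries it.
num_batches=4
batches=$(python3 -c "import json; print(json.dumps(list(range($num_batches))))")
echo "batches=$batches"    # -> [0, 1, 2, 3]

# Inside a single matrix job (here, batch index 2 of 4):
export RUN_BY_HASH_NUM=2            # matrix.batch
export RUN_BY_HASH_TOTAL=$num_batches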


@@ -36,6 +36,7 @@ ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@@ -47,15 +48,27 @@ ARG PACKAGES="clickhouse-keeper"
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& cd /tmp && rm -f /tmp/*tgz && rm -f /tmp/*tgz.sha512 ||: \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
fi \
&& cat *.tgz.sha512 | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
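The RUN block's fallback branch boils down to this download-and-verify sequence (a standalone sketch with an illustrative package name; the real loop iterates over $PACKAGES):

cd /tmp
pkg="clickhouse-keeper-23.11.2.11-amd64.tgz"        # illustrative
wget -c -q "https://packages.clickhouse.com/tgz/stable/$pkg"
wget -c -q "https://packages.clickhouse.com/tgz/stable/$pkg.sha512"
# The .sha512 file references /output/; rewrite the path, then verify.
sed 's:/output/:/tmp/:' < "$pkg.sha512" | sha512sum -c
# Unpack into /, dropping the top-level directory from the archive.
tar xvzf "$pkg" --strip-components=1 -C /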


@@ -149,7 +149,7 @@ then
mkdir -p "$PERF_OUTPUT"
cp -r ../tests/performance "$PERF_OUTPUT"
cp -r ../tests/config/top_level_domains "$PERF_OUTPUT"
cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||:
cp -r ../tests/performance/scripts/config "$PERF_OUTPUT" ||:
for SRC in /output/clickhouse*; do
# Copy all clickhouse* files except packages and bridges
[[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \
@@ -160,7 +160,7 @@ then
ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper
fi
cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||:
cp -r ../tests/performance/scripts "$PERF_OUTPUT"/scripts ||:
prepare_combined_output "$PERF_OUTPUT"
# We have to know the revision that corresponds to this binary build.


@@ -34,6 +34,7 @@ ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@ -43,15 +44,26 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# The same uid / gid (101) is used both for alpine and ubuntu.
RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& cd /tmp \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
fi \
&& cat *.tgz.sha512 | sed 's:/output/:/tmp/:' | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \

View File

@ -37,6 +37,7 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
ARG deb_location_url=""
ARG DIRECT_DOWNLOAD_URLS=""
# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
@ -44,6 +45,18 @@ ARG single_binary_location_url=""
ARG TARGETARCH
# install from direct URL
RUN if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from custom predefined urls with deb packages: ${DIRECT_DOWNLOAD_URLS}" \
&& rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
wget --progress=bar:force:noscroll "$url" -P /tmp/clickhouse_debs || exit 1 \
; done \
&& dpkg -i /tmp/clickhouse_debs/*.deb \
&& rm -rf /tmp/* ; \
fi
# install from a web location with deb packages
RUN arch="${TARGETARCH:-amd64}" \
&& if [ -n "${deb_location_url}" ]; then \

View File

@ -39,18 +39,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY * /
COPY run.sh /
# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
CMD ["bash", "/run.sh"]
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison

View File

@ -0,0 +1,18 @@
#!/bin/bash
entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
[ ! -e "$entry" ] && echo "ERROR: test scripts are not found" && exit 1
# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
numactl --cpunodebind=$node --membind=$node $entry

View File

@ -300,9 +300,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.zst ||:
# FIXME: remove once only github actions will be left
rm /var/log/clickhouse-server/clickhouse-server1.log
rm /var/log/clickhouse-server/clickhouse-server2.log
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:

View File

@ -23,6 +23,7 @@ echo "Check submodules" | ts
./check-submodules |& tee /test_output/submodules_output.txt
echo "Check shell scripts with shellcheck" | ts
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
/process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
echo "Check help for changelog generator works" | ts
cd ../changelog || exit 1

View File

@ -489,7 +489,7 @@ When using functions with response codes or `errno`, always check the result and
``` cpp
if (0 != close(fd))
throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```
You can use assert to check invariant in code.

View File

@ -520,7 +520,7 @@ Indexes of type `set` can be utilized by all functions. The other index types ar
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |

View File

@ -1578,9 +1578,15 @@ Default value: `default`.
## allow_experimental_parallel_reading_from_replicas
If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`) . It will work for any kind of MergeTree table.
Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table.
Default value: `false`.
Possible values:
- 0 - Disabled.
- 1 - Enabled, silently disabled in case of failure.
- 2 - Enabled, throws an exception in case of failure.
Default value: `0`.
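A minimal usage sketch (the table name `t` and the replica count are illustrative; depending on the server version, extra settings such as `cluster_for_parallel_replicas` may also be needed):
``` sql
-- opt in, and throw an exception if parallel reading cannot be coordinated
SET allow_experimental_parallel_reading_from_replicas = 2;
SET max_parallel_replicas = 3;
SELECT count() FROM t;  -- the SELECT is sent to up to 3 replicas of t
```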
## compile_expressions {#compile-expressions}

View File

@ -0,0 +1,14 @@
---
slug: /en/operations/system-tables/dropped_tables_parts
---
# dropped_tables_parts {#system_tables-dropped_tables_parts}
Contains information about parts of dropped [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables, i.e. tables listed in [system.dropped_tables](./dropped_tables.md).
The schema of this table is the same as [system.parts](./parts.md).
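Since the columns match [system.parts](./parts.md), a query of the following shape should work (illustrative):
``` sql
SELECT table, name, rows, bytes_on_disk
FROM system.dropped_tables_parts
LIMIT 5;
```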
**See Also**
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [system.parts](./parts.md)
- [system.dropped_tables](./dropped_tables.md)

View File

@ -1081,6 +1081,10 @@ Result:
└─────────────────────────────────────────────────────────────┘
```
**See also**
- [arrayFold](#arrayfold)
## arrayReduceInRanges
Applies an aggregate function to array elements in given ranges and returns an array containing the result corresponding to each range. The function will return the same result as multiple `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`.
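A small illustrative query (each range is a 1-based `(index, length)` pair):
``` sql
SELECT arrayReduceInRanges('sum', [(1, 3), (2, 2)], [10, 20, 30, 40]) AS res;
-- res = [60, 50]: sum(10, 20, 30) and sum(20, 30)
```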
@ -1123,6 +1127,56 @@ Result:
└─────────────────────────────┘
```
## arrayFold
Applies a lambda function to one or more equally-sized arrays and accumulates the result from left to right in an accumulator.
**Syntax**
``` sql
arrayFold(lambda_function, arr1, arr2, ..., accumulator)
```
**Example**
Query:
``` sql
SELECT arrayFold( acc,x -> acc + x*2, [1, 2, 3, 4], toInt64(3)) AS res;
```
Result:
``` text
┌─res─┐
│  23 │
└─────┘
```
The accumulator starts at `3`, and each element `x` contributes `x*2`: `3 + 2 + 4 + 6 + 8 = 23`.
**Example with the Fibonacci sequence**
```sql
SELECT arrayFold( acc,x -> (acc.2, acc.2 + acc.1), range(number), (1::Int64, 0::Int64)).1 AS fibonacci
FROM numbers(1,10);
┌─fibonacci─┐
│         0 │
│         1 │
│         1 │
│         2 │
│         3 │
│         5 │
│         8 │
│        13 │
│        21 │
│        34 │
└───────────┘
```
**See also**
- [arrayReduce](#arrayreduce)
## arrayReverse(arr)
Returns an array of the same size as the original array containing the elements in reverse order.

View File

@ -1809,6 +1809,8 @@ Alias: `dateTrunc`.
- `quarter`
- `year`
The `unit` argument is case-insensitive.
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../../sql-reference/data-types/string.md).
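A quick sketch of the two-argument form (values are illustrative):
``` sql
SELECT date_trunc('hour', toDateTime('2023-12-19 06:27:23', 'UTC')) AS res;
-- res = 2023-12-19 06:00:00
```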

View File

@ -533,8 +533,8 @@ Result:
```result
┌─concatWithSeparator('a', '1', '2', '3', '4')─┐
│ 1a2a3a4 │
└───────────────────────────────────┘
│ 1a2a3a4                                      │
└──────────────────────────────────────────────┘
```
## concatWithSeparatorAssumeInjective

View File

@ -493,7 +493,7 @@ catch (const DB::Exception & e)
``` cpp
if (0 != close(fd))
throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```
`assert` не используются.

View File

@ -369,6 +369,9 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ |
| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ |
| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
Функции с постоянным аргументом, который меньше, чем размер ngram, не могут использовать индекс `ngrambf_v1` для оптимизации запроса.

View File

@ -1,14 +1,14 @@
---
slug: /ru/getting-started/example-datasets/criteo
sidebar_position: 18
sidebar_label: "Терабайт логов кликов от Criteo"
sidebar_label: "Терабайтный журнал посещений сайта от Criteo"
---
# Терабайт логов кликов от Criteo {#terabait-logov-klikov-ot-criteo}
# Терабайтный журнал посещений сайта от Criteo {#terabaitnyi-zhurnal}
Скачайте данные с http://labs.criteo.com/downloads/download-terabyte-click-logs/
Создайте таблицу для импорта лога:
Создайте таблицу для импорта журнала:
``` sql
CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log
@ -69,7 +69,7 @@ CREATE TABLE criteo
) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192)
```
Преобразуем данные из сырого лога и положим во вторую таблицу:
Преобразуйте импортированные данные, разложив их по таблице сконвертированных данных:
``` sql
INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log;

View File

@ -485,7 +485,7 @@ catch (const DB::Exception & e)
``` cpp
if (0 != close(fd))
throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```
`不要使用断言`

View File

@ -364,6 +364,9 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达
| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ |
| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ |
| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
常量参数小于 ngram 大小的函数不能使用 `ngrambf_v1` 进行查询优化。

View File

@ -405,7 +405,7 @@ private:
|| sigaddset(&sig_set, SIGINT)
|| pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
{
throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL);
throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal");
}
while (true)

View File

@ -328,7 +328,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
fs::create_symlink(binary_self_canonical_path, main_bin_path);
if (0 != chmod(binary_self_canonical_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
throwFromErrno(fmt::format("Cannot chmod {}", binary_self_canonical_path.string()), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", binary_self_canonical_path.string());
}
}
else
@ -361,7 +361,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
if (already_installed)
{
if (0 != chmod(main_bin_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
throwFromErrno(fmt::format("Cannot chmod {}", main_bin_path.string()), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_path.string());
}
else
{
@ -395,7 +395,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
}
if (0 != chmod(destination.c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
throwFromErrno(fmt::format("Cannot chmod {}", main_bin_tmp_path.string()), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_tmp_path.string());
}
catch (const Exception & e)
{
@ -1122,7 +1122,7 @@ namespace
return 0;
}
else
throwFromErrno(fmt::format("Cannot obtain the status of pid {} with `kill`", pid), ErrorCodes::CANNOT_KILL);
throw ErrnoException(ErrorCodes::CANNOT_KILL, "Cannot obtain the status of pid {} with `kill`", pid);
}
if (!pid)
@ -1143,7 +1143,7 @@ namespace
if (0 == kill(pid, signal))
fmt::print("Sent {} signal to process with pid {}.\n", signal_name, pid);
else
throwFromErrno(fmt::format("Cannot send {} signal", signal_name), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot send {} signal", signal_name);
size_t try_num = 0;
for (; try_num < max_tries; ++try_num)

View File

@ -43,7 +43,7 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTInsertQuery.h>
#include <Common/ErrorHandlers.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
@ -757,7 +757,7 @@ void LocalServer::processConfig()
}
/// For ClickHouse local if path is not set the loader will be disabled.
global_context->getUserDefinedSQLObjectsLoader().loadObjects();
global_context->getUserDefinedSQLObjectsStorage().loadObjects();
LOG_DEBUG(log, "Loaded metadata.");
}

View File

@ -1307,7 +1307,7 @@ try
/// stdin must be seekable
auto res = lseek(file->getFD(), 0, SEEK_SET);
if (-1 == res)
throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)");
SingleReadBufferIterator read_buffer_iterator(std::move(file));
schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, false, context_const);
@ -1336,7 +1336,7 @@ try
/// stdin must be seekable
auto res = lseek(file_in.getFD(), 0, SEEK_SET);
if (-1 == res)
throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)");
}
Obfuscator obfuscator(header, seed, markov_model_params);

View File

@ -67,7 +67,7 @@
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>
@ -1756,7 +1756,7 @@ try
/// After loading validate that default database exists
database_catalog.assertDatabaseExists(default_database);
/// Load user-defined SQL functions.
global_context->getUserDefinedSQLObjectsLoader().loadObjects();
global_context->getUserDefinedSQLObjectsStorage().loadObjects();
}
catch (...)
{

View File

@ -721,7 +721,7 @@ function insertChart(i) {
query_editor_confirm.addEventListener('click', editConfirm);
/// Ctrl+Enter (or Cmd+Enter on Mac) will also confirm editing.
query_editor.addEventListener('keydown', e => {
query_editor.addEventListener('keydown', event => {
if ((event.metaKey || event.ctrlKey) && (event.keyCode == 13 || event.keyCode == 10)) {
editConfirm();
}

View File

@ -56,7 +56,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
group * result{};
if (0 != getgrnam_r(arg_gid.data(), &entry, buf.get(), buf_size, &result))
throwFromErrno(fmt::format("Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
@ -68,7 +68,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense");
if (0 != setgid(gid))
throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setgid' to user ({})", arg_gid);
}
if (!arg_uid.empty())
@ -81,7 +81,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
passwd * result{};
if (0 != getpwnam_r(arg_uid.data(), &entry, buf.get(), buf_size, &result))
throwFromErrno(fmt::format("Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
@ -93,7 +93,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense");
if (0 != setuid(uid))
throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setuid' to user ({})", arg_uid);
}
}
@ -136,7 +136,7 @@ try
execvp(new_argv.front(), new_argv.data());
throwFromErrno("Cannot execvp", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot execvp");
}
catch (...)
{

View File

@ -43,14 +43,6 @@ namespace Stage = BackupCoordinationStage;
namespace
{
/// Uppercases the first character of a passed string.
String toUpperFirst(const String & str)
{
String res = str;
res[0] = std::toupper(res[0]);
return res;
}
/// Outputs "table <name>" or "temporary table <name>"
String tableNameWithTypeToString(const String & database_name, const String & table_name, bool first_upper)
{
@ -164,7 +156,7 @@ BackupEntries BackupEntriesCollector::run()
Strings BackupEntriesCollector::setStage(const String & new_stage, const String & message)
{
LOG_TRACE(log, fmt::runtime(toUpperFirst(new_stage)));
LOG_TRACE(log, "Setting stage: {}", new_stage);
current_stage = new_stage;
backup_coordination->setStage(new_stage, message);

View File

@ -318,14 +318,14 @@ void ClientBase::setupSignalHandler()
sigemptyset(&new_act.sa_mask);
#else
if (sigemptyset(&new_act.sa_mask))
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
#endif
if (sigaction(SIGINT, &new_act, nullptr))
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
if (sigaction(SIGQUIT, &new_act, nullptr))
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
}
@ -543,16 +543,16 @@ try
if (!pager.empty())
{
if (SIG_ERR == signal(SIGPIPE, SIG_IGN))
throwFromErrno("Cannot set signal handler for SIGPIPE.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE");
/// We need to reset signals that had been installed in the
/// setupSignalHandler() since terminal will send signals to both
/// processes and so signals will be delivered to the
/// clickhouse-client/local as well, which will be terminated when
/// signal will be delivered second time.
if (SIG_ERR == signal(SIGINT, SIG_IGN))
throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT");
if (SIG_ERR == signal(SIGQUIT, SIG_IGN))
throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT");
ShellCommand::Config config(pager);
config.pipe_stdin_only = true;
@ -1306,11 +1306,11 @@ void ClientBase::resetOutput()
pager_cmd->wait();
if (SIG_ERR == signal(SIGPIPE, SIG_DFL))
throwFromErrno("Cannot set signal handler for SIIGPIEP.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE");
if (SIG_ERR == signal(SIGINT, SIG_DFL))
throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT");
if (SIG_ERR == signal(SIGQUIT, SIG_DFL))
throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT");
setupSignalHandler();
}

View File

@ -248,7 +248,7 @@ void ColumnFunction::appendArguments(const ColumnsWithTypeAndName & columns)
auto wanna_capture = columns.size();
if (were_captured + wanna_capture > args)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} columns because function {} has {} arguments{}.",
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} column(s) because function {} has {} arguments{}.",
wanna_capture, function->getName(), args,
(were_captured ? " and " + toString(were_captured) + " columns have already been captured" : ""));

View File

@ -18,9 +18,11 @@ void AlignedBuffer::alloc(size_t size, size_t alignment)
void * new_buf;
int res = ::posix_memalign(&new_buf, std::max(alignment, sizeof(void*)), size);
if (0 != res)
throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign), size: {}, alignment: {}.",
ReadableSize(size), ReadableSize(alignment)),
ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
throw ErrnoException(
ErrorCodes::CANNOT_ALLOCATE_MEMORY,
"Cannot allocate memory (posix_memalign), size: {}, alignment: {}.",
ReadableSize(size),
ReadableSize(alignment));
buf = new_buf;
}

View File

@ -118,8 +118,11 @@ public:
void * new_buf = ::realloc(buf, new_size);
if (nullptr == new_buf)
{
DB::throwFromErrno(
fmt::format("Allocator: Cannot realloc from {} to {}.", ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw DB::ErrnoException(
DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY,
"Allocator: Cannot realloc from {} to {}",
ReadableSize(old_size),
ReadableSize(new_size));
}
buf = new_buf;
@ -164,7 +167,7 @@ private:
buf = ::malloc(size);
if (nullptr == buf)
DB::throwFromErrno(fmt::format("Allocator: Cannot malloc {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot malloc {}.", ReadableSize(size));
}
else
{
@ -172,8 +175,8 @@ private:
int res = posix_memalign(&buf, alignment, size);
if (0 != res)
DB::throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign) {}.", ReadableSize(size)),
DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
throw DB::ErrnoException(
DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Cannot allocate memory (posix_memalign) {}.", ReadableSize(size));
if constexpr (clear_memory)
memset(buf, 0, size);

View File

@ -179,13 +179,13 @@ private:
{
ptr = mmap(address_hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (MAP_FAILED == ptr)
DB::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot mmap {}", ReadableSize(size));
}
~Chunk()
{
if (ptr && 0 != munmap(ptr, size))
DB::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_MUNMAP);
throw ErrnoException(DB::ErrorCodes::CANNOT_MUNMAP, "Allocator: Cannot munmap {}", ReadableSize(size));
}
Chunk(Chunk && other) noexcept : ptr(other.ptr), size(other.size)

View File

@ -797,7 +797,7 @@ void AsynchronousMetrics::update(TimePoint update_time)
int64_t hz = sysconf(_SC_CLK_TCK);
if (-1 == hz)
throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF);
throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ");
double multiplier = 1.0 / hz / (std::chrono::duration_cast<std::chrono::nanoseconds>(time_after_previous_update).count() / 1e9);
size_t num_cpus = 0;

View File

@ -69,13 +69,13 @@ public:
int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path);
try
{
int flock_ret = flock(fd, LOCK_EX);
if (-1 == flock_ret)
DB::throwFromErrnoWithPath("Cannot lock file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot lock file {}", path);
if (!file_doesnt_exists)
{
@ -145,7 +145,7 @@ public:
int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path);
try
{

View File

@ -19,7 +19,7 @@ Epoll::Epoll() : events_count(0)
{
epoll_fd = epoll_create1(0);
if (epoll_fd == -1)
throwFromErrno("Cannot open epoll descriptor", DB::ErrorCodes::EPOLL_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor");
}
Epoll::Epoll(Epoll && other) noexcept : epoll_fd(other.epoll_fd), events_count(other.events_count.load())
@ -47,7 +47,7 @@ void Epoll::add(int fd, void * ptr, uint32_t events)
++events_count;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1)
throwFromErrno("Cannot add new descriptor to epoll", DB::ErrorCodes::EPOLL_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll");
}
void Epoll::remove(int fd)
@ -55,7 +55,7 @@ void Epoll::remove(int fd)
--events_count;
if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr) == -1)
throwFromErrno("Cannot remove descriptor from epoll", DB::ErrorCodes::EPOLL_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll");
}
size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const
@ -82,7 +82,7 @@ size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout
continue;
}
else
throwFromErrno("Error in epoll_wait", DB::ErrorCodes::EPOLL_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Error in epoll_wait");
}
else
break;

View File

@ -21,7 +21,7 @@ EventFD::EventFD()
{
fd = eventfd(0 /* initval */, 0 /* flags */);
if (fd == -1)
throwFromErrno("Cannot create eventfd", ErrorCodes::CANNOT_PIPE);
throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create eventfd");
}
uint64_t EventFD::read() const
@ -33,7 +33,7 @@ uint64_t EventFD::read() const
break;
if (errno != EINTR)
throwFromErrno("Cannot read from eventfd", ErrorCodes::CANNOT_READ_FROM_SOCKET);
throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from eventfd");
}
return buf;
@ -47,7 +47,7 @@ bool EventFD::write(uint64_t increase) const
return false;
if (errno != EINTR)
throwFromErrno("Cannot write to eventfd", ErrorCodes::CANNOT_WRITE_TO_SOCKET);
throw ErrnoException(ErrorCodes::CANNOT_WRITE_TO_SOCKET, "Cannot write to eventfd");
}
return true;

View File

@ -1,25 +1,24 @@
#include "Exception.h"
#include <algorithm>
#include <cstring>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>
#include <Common/logger_useful.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <base/demangle.h>
#include <base/errnoToString.h>
#include <Common/formatReadable.h>
#include <Common/filesystemHelpers.h>
#include <Poco/String.h>
#include <Common/ErrorCodes.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <Common/MemorySanitizer.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <filesystem>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>
#include <Common/config_version.h>
@ -212,17 +211,6 @@ Exception::FramePointers Exception::getStackFramePointers() const
thread_local bool Exception::enable_job_stack_trace = false;
thread_local std::vector<StackTrace::FramePointers> Exception::thread_frame_pointers = {};
void throwFromErrno(const std::string & s, int code, int the_errno)
{
throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno);
}
void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, int the_errno)
{
throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno, path);
}
static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message)
{
try

View File

@ -7,9 +7,10 @@
#include <Poco/Exception.h>
#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/scope_guard.h>
#include <Common/StackTrace.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/StackTrace.h>
#include <fmt/format.h>
@ -173,12 +174,61 @@ std::string getExceptionStackTraceString(const std::exception & e);
std::string getExceptionStackTraceString(std::exception_ptr e);
/// Contains an additional member `saved_errno`. See the throwFromErrno function.
/// Contains an additional member `saved_errno`
class ErrnoException : public Exception
{
public:
ErrnoException(const std::string & msg, int code, int saved_errno_, const std::optional<std::string> & path_ = {})
: Exception(msg, code), saved_errno(saved_errno_), path(path_) {}
ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno)
{
capture_thread_frame_pointers = thread_frame_pointers;
addMessage(", {}", errnoToString(saved_errno));
}
/// Message must be a compile-time constant
template <typename T>
requires std::is_convertible_v<T, String>
ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno)
{
capture_thread_frame_pointers = thread_frame_pointers;
addMessage(", {}", errnoToString(saved_errno));
}
// Format message with fmt::format, like the logging functions.
template <typename... Args>
ErrnoException(int code, FormatStringHelper<Args...> fmt, Args &&... args)
: Exception(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code), saved_errno(errno)
{
capture_thread_frame_pointers = thread_frame_pointers;
message_format_string = fmt.message_format_string;
addMessage(", {}", errnoToString(saved_errno));
}
template <typename... Args>
[[noreturn]] static void throwWithErrno(int code, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
{
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
e.message_format_string = fmt.message_format_string;
throw e;
}
template <typename... Args>
[[noreturn]] static void throwFromPath(int code, const std::string & path, FormatStringHelper<Args...> fmt, Args &&... args)
{
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, errno);
e.message_format_string = fmt.message_format_string;
e.path = path;
throw e;
}
template <typename... Args>
[[noreturn]] static void
throwFromPathWithErrno(int code, const std::string & path, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
{
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
e.message_format_string = fmt.message_format_string;
e.path = path;
throw e;
}
ErrnoException * clone() const override { return new ErrnoException(*this); }
void rethrow() const override { throw *this; } // NOLINT
@ -188,7 +238,7 @@ public:
private:
int saved_errno;
std::optional<std::string> path;
std::optional<std::string> path{};
const char * name() const noexcept override { return "DB::ErrnoException"; }
const char * className() const noexcept override { return "DB::ErrnoException"; }
@ -233,13 +283,6 @@ private:
using Exceptions = std::vector<std::exception_ptr>;
[[noreturn]] void throwFromErrno(const std::string & s, int code, int the_errno = errno);
/// Useful to produce some extra information about available space and inodes on device
[[noreturn]] void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code,
int the_errno = errno);
/** Try to write an exception to the log (and forget about it).
* Can be used in destructors in the catch-all block.
*/

View File

@ -28,13 +28,14 @@ static struct InitFiu
/// We should define different types of failpoints here. There are four types of them:
/// - ONCE: the failpoint will only be triggered once.
/// - REGULAR: the failpoint will always be triggered util disableFailPoint is called.
/// - PAUSAEBLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, util disableFailPoint is called.
/// - PAUSAEBLE: the failpoint will be blocked every time when pauseFailPoint is called, util disableFailPoint is called.
/// - REGULAR: the failpoint will always be triggered until disableFailPoint is called.
/// - PAUSEABLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, until disableFailPoint is called.
/// - PAUSEABLE: the failpoint will be blocked each time pauseFailPoint is called, until disableFailPoint is called.
#define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \
ONCE(replicated_merge_tree_commit_zk_fail_after_op) \
ONCE(replicated_merge_tree_insert_quorum_fail_0) \
REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \
REGULAR(use_delayed_remote_source) \
REGULAR(cluster_discovery_faults) \
REGULAR(check_table_query_delay_for_part) \

View File

@ -46,14 +46,14 @@ public:
void * vp = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (MAP_FAILED == vp)
DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: Cannot mmap {}.", ReadableSize(num_bytes));
/// TODO: make reports on illegal guard page access more clear.
/// Currently we will see segfault and almost random stacktrace.
if (-1 == ::mprotect(vp, page_size, PROT_NONE))
{
::munmap(vp, num_bytes);
DB::throwFromErrno("FiberStack: cannot protect guard page", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: cannot protect guard page");
}
/// Do not count guard page in memory usage.

View File

@ -58,9 +58,8 @@ private:
public:
InterruptListener() : active(false)
{
if (sigemptyset(&sig_set)
|| sigaddset(&sig_set, SIGINT))
throwFromErrno("Cannot manipulate with signal set.", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
if (sigemptyset(&sig_set) || sigaddset(&sig_set, SIGINT))
throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Cannot manipulate with signal set");
block();
}
@ -82,7 +81,7 @@ public:
if (errno == EAGAIN)
return false;
else
throwFromErrno("Cannot poll signal (sigtimedwait).", ErrorCodes::CANNOT_WAIT_FOR_SIGNAL);
throw ErrnoException(ErrorCodes::CANNOT_WAIT_FOR_SIGNAL, "Cannot poll signal (sigtimedwait)");
}
return true;
@ -93,7 +92,7 @@ public:
if (!active)
{
if (pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL);
throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal");
active = true;
}
@ -105,7 +104,7 @@ public:
if (active)
{
if (pthread_sigmask(SIG_UNBLOCK, &sig_set, nullptr))
throwFromErrno("Cannot unblock signal.", ErrorCodes::CANNOT_UNBLOCK_SIGNAL);
throw ErrnoException(ErrorCodes::CANNOT_UNBLOCK_SIGNAL, "Cannot unblock signal");
active = false;
}

View File

@ -39,7 +39,8 @@ MemoryStatisticsOS::MemoryStatisticsOS()
fd = ::open(filename, O_RDONLY | O_CLOEXEC);
if (-1 == fd)
throwFromErrno("Cannot open file " + std::string(filename), errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename);
}
MemoryStatisticsOS::~MemoryStatisticsOS()
@ -48,9 +49,8 @@ MemoryStatisticsOS::~MemoryStatisticsOS()
{
try
{
throwFromErrno(
"File descriptor for \"" + std::string(filename) + "\" could not be closed. "
"Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
ErrnoException::throwFromPath(
ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for '{}' could not be closed", filename);
}
catch (const ErrnoException &)
{
@ -77,7 +77,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
if (errno == EINTR)
continue;
throwFromErrno("Cannot read from file " + std::string(filename), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename);
}
assert(res >= 0);
@ -136,7 +136,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
size_t len = sizeof(struct kinfo_proc);
if (-1 == ::sysctl(mib, 4, &kp, &len, nullptr, 0))
throwFromErrno("Cannot sysctl(kern.proc.pid." + std::to_string(self) + ")", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot sysctl(kern.proc.pid.{})", std::to_string(self));
if (sizeof(struct kinfo_proc) != len)
throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Kernel returns structure of {} bytes instead of expected {}",

View File

@ -117,7 +117,7 @@ struct NetlinkMessage
if (errno == EAGAIN)
continue;
else
throwFromErrno("Can't send a Netlink command", ErrorCodes::NETLINK_ERROR);
throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't send a Netlink command");
}
if (bytes_sent > request_size)
@ -255,7 +255,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
{
netlink_socket_fd = ::socket(PF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (netlink_socket_fd < 0)
throwFromErrno("Can't create PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't create PF_NETLINK socket");
try
{
@ -267,7 +267,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
tv.tv_usec = 50000;
if (0 != ::setsockopt(netlink_socket_fd, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast<const char *>(&tv), sizeof(tv)))
throwFromErrno("Can't set timeout on PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't set timeout on PF_NETLINK socket");
union
{
@ -277,7 +277,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
addr.nl_family = AF_NETLINK;
if (::bind(netlink_socket_fd, &sockaddr, sizeof(addr)) < 0)
throwFromErrno("Can't bind PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't bind PF_NETLINK socket");
taskstats_family_id = getFamilyId(netlink_socket_fd);
}

View File

@ -209,7 +209,7 @@ protected:
{
size_t length = right_rounded_down - left_rounded_up;
if (0 != mprotect(left_rounded_up, length, prot))
throwFromErrno("Cannot mprotect memory region", ErrorCodes::CANNOT_MPROTECT);
throw ErrnoException(ErrorCodes::CANNOT_MPROTECT, "Cannot mprotect memory region");
}
}

View File

@ -29,14 +29,14 @@ void LazyPipeFDs::open()
#ifndef OS_DARWIN
if (0 != pipe2(fds_rw, O_CLOEXEC))
throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE);
throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe");
#else
if (0 != pipe(fds_rw))
throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE);
throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe");
if (0 != fcntl(fds_rw[0], F_SETFD, FD_CLOEXEC))
throwFromErrno("Cannot setup auto-close on exec for read end of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for read end of pipe");
if (0 != fcntl(fds_rw[1], F_SETFD, FD_CLOEXEC))
throwFromErrno("Cannot setup auto-close on exec for write end of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for write end of pipe");
#endif
}
@ -47,7 +47,7 @@ void LazyPipeFDs::close()
if (fd < 0)
continue;
if (0 != ::close(fd))
throwFromErrno("Cannot close pipe", ErrorCodes::CANNOT_PIPE);
throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot close pipe");
fd = -1;
}
}
@ -74,18 +74,18 @@ void LazyPipeFDs::setNonBlockingWrite()
{
int flags = fcntl(fds_rw[1], F_GETFL, 0);
if (-1 == flags)
throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe");
if (-1 == fcntl(fds_rw[1], F_SETFL, flags | O_NONBLOCK))
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe");
}
void LazyPipeFDs::setNonBlockingRead()
{
int flags = fcntl(fds_rw[0], F_GETFL, 0);
if (-1 == flags)
throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe");
if (-1 == fcntl(fds_rw[0], F_SETFL, flags | O_NONBLOCK))
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe");
}
void LazyPipeFDs::setNonBlockingReadWrite()
@ -110,13 +110,13 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size)
/// It will work nevertheless.
}
else
throwFromErrno("Cannot get pipe capacity", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get pipe capacity");
}
else
{
for (errno = 0; errno != EPERM && pipe_size < desired_size; pipe_size *= 2)
if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM)
throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot increase pipe capacity to {}", pipe_size * 2);
LOG_TRACE(log, "Pipe capacity is {}", ReadableSize(std::min(pipe_size, desired_size)));
}

View File

@ -37,18 +37,15 @@ namespace
{
[[noreturn]] inline void throwWithFailedToOpenFile(const std::string & filename)
{
throwFromErrno(
"Cannot open file " + filename,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename);
}
inline void emitErrorMsgWithFailedToCloseFile(const std::string & filename)
{
try
{
throwFromErrno(
"File descriptor for \"" + filename + "\" could not be closed. "
"Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for {} could not be closed", filename);
}
catch (const ErrnoException &)
{
@ -69,9 +66,7 @@ ssize_t readFromFD(const int fd, const char * filename, char * buf, size_t buf_s
if (errno == EINTR)
continue;
throwFromErrno(
"Cannot read from file " + std::string(filename),
ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename);
}
assert(res >= 0);

View File

@ -451,6 +451,7 @@ The server successfully detected this situation and will download merged part fr
M(ThreadpoolReaderSubmitReadSynchronously, "How many times we haven't scheduled a task on the thread pool and read synchronously instead") \
M(ThreadpoolReaderSubmitReadSynchronouslyBytes, "How many bytes were read synchronously") \
M(ThreadpoolReaderSubmitReadSynchronouslyMicroseconds, "How much time we spent reading synchronously") \
M(ThreadpoolReaderSubmitLookupInCacheMicroseconds, "How much time we spent checking if content is cached") \
M(AsynchronousReaderIgnoredBytes, "Number of bytes ignored during asynchronous reading") \
\
M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \

View File

@ -141,7 +141,7 @@ void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal
/// Also, it cannot be created if the server has too many threads.
throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);
throw ErrnoException(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create thread timer");
}
timer_id.emplace(local_timer_id);
CurrentMetrics::add(CurrentMetrics::CreatedTimersInQueryProfiler);
@ -164,7 +164,7 @@ void Timer::set(UInt32 period)
struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset};
if (timer_settime(*timer_id, 0, &timer_spec, nullptr))
throwFromErrno("Failed to set thread timer period", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Failed to set thread timer period");
CurrentMetrics::add(CurrentMetrics::ActiveTimersInQueryProfiler);
}
@ -238,13 +238,13 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_t
sa.sa_flags = SA_SIGINFO | SA_RESTART;
if (sigemptyset(&sa.sa_mask))
throwFromErrno("Failed to clean signal mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to clean signal mask for query profiler");
if (sigaddset(&sa.sa_mask, pause_signal))
throwFromErrno("Failed to add signal to mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to add signal to mask for query profiler");
if (sigaction(pause_signal, &sa, nullptr))
throwFromErrno("Failed to setup signal handler for query profiler", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Failed to setup signal handler for query profiler");
try
{

View File

@ -145,7 +145,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
#endif
if (!real_vfork)
throwFromErrno("Cannot find symbol vfork in myself", ErrorCodes::CANNOT_DLSYM);
throw ErrnoException(ErrorCodes::CANNOT_DLSYM, "Cannot find symbol vfork in myself");
PipeFDs pipe_stdin;
PipeFDs pipe_stdout;
@ -163,7 +163,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
pid_t pid = reinterpret_cast<pid_t(*)()>(real_vfork)();
if (pid == -1)
throwFromErrno("Cannot vfork", ErrorCodes::CANNOT_FORK);
throw ErrnoException(ErrorCodes::CANNOT_FORK, "Cannot vfork");
if (0 == pid)
{
@ -305,7 +305,7 @@ int ShellCommand::tryWait()
while (waitpid(pid, &status, 0) < 0)
{
if (errno != EINTR)
throwFromErrno("Cannot waitpid", ErrorCodes::CANNOT_WAITPID);
throw ErrnoException(ErrorCodes::CANNOT_WAITPID, "Cannot waitpid");
}
LOG_TRACE(getLogger(), "Wait for shell command pid {} completed with status {}", pid, status);

View File

@ -64,7 +64,7 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_)
fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + path, path, ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path);
try
{
@ -74,14 +74,14 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_)
if (errno == EWOULDBLOCK)
throw Exception(ErrorCodes::CANNOT_OPEN_FILE, "Cannot lock file {}. Another server instance in same directory is already running.", path);
else
throwFromErrnoWithPath("Cannot lock file " + path, path, ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot lock file {}", path);
}
if (0 != ftruncate(fd, 0))
throwFromErrnoWithPath("Cannot ftruncate " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_TRUNCATE_FILE, path, "Cannot ftruncate file {}", path);
if (0 != lseek(fd, 0, SEEK_SET))
throwFromErrnoWithPath("Cannot lseek " + path, path, ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, path, "Cannot lseek file {}", path);
/// Write information about current server instance to the file.
WriteBufferFromFileDescriptor out(fd, 1024);

View File

@ -19,12 +19,12 @@ uint16_t getTerminalWidth()
if (isatty(STDIN_FILENO))
{
if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size))
DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)");
}
else if (isatty(STDERR_FILENO))
{
if (ioctl(STDERR_FILENO, TIOCGWINSZ, &terminal_size))
DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)");
}
/// Default - 0.
return terminal_size.ws_col;

View File

@ -258,10 +258,10 @@ void ThreadFuzzer::setup() const
#if defined(OS_LINUX)
if (sigemptyset(&sa.sa_mask))
throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to clean signal mask for thread fuzzer");
if (sigaddset(&sa.sa_mask, SIGPROF))
throwFromErrno("Failed to add signal to mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to add signal to mask for thread fuzzer");
#else
// the two following functions always return 0 under mac
sigemptyset(&sa.sa_mask);
@ -269,7 +269,7 @@ void ThreadFuzzer::setup() const
#endif
if (sigaction(SIGPROF, &sa, nullptr))
throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Failed to setup signal handler for thread fuzzer");
static constexpr UInt32 timer_precision = 1000000;
@ -280,7 +280,7 @@ void ThreadFuzzer::setup() const
struct itimerval timer = {.it_interval = interval, .it_value = interval};
if (0 != setitimer(ITIMER_PROF, &timer, nullptr))
throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER);
throw ErrnoException(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create profiling timer");
}

View File

@ -24,7 +24,7 @@ TimerDescriptor::TimerDescriptor(int clockid, int flags)
throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Cannot create timer_fd descriptor");
if (-1 == fcntl(timer_fd, F_SETFL, O_NONBLOCK))
throwFromErrno("Cannot set O_NONBLOCK for timer_fd", ErrorCodes::CANNOT_FCNTL);
throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set O_NONBLOCK for timer_fd");
}
TimerDescriptor::TimerDescriptor(TimerDescriptor && other) noexcept : timer_fd(other.timer_fd)
@ -57,7 +57,7 @@ void TimerDescriptor::reset() const
spec.it_value.tv_nsec = 0;
if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr))
throwFromErrno("Cannot reset timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Cannot reset timer_fd");
/// Drain socket.
/// It may be possible that alarm happened and socket is readable.
@ -78,7 +78,7 @@ void TimerDescriptor::drain() const
break;
if (errno != EINTR)
throwFromErrno("Cannot drain timer_fd", ErrorCodes::CANNOT_READ_FROM_SOCKET);
throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd");
}
}
}
@ -94,7 +94,7 @@ void TimerDescriptor::setRelative(uint64_t usec) const
spec.it_value.tv_nsec = (usec % TIMER_PRECISION) * 1'000;
if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr))
throwFromErrno("Cannot set time for timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Cannot set time for timer_fd");
}
void TimerDescriptor::setRelative(Poco::Timespan timespan) const
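For context, the timerfd calls wrapped above reduce to the following self-contained sketch (plain Linux API with simplified error handling, not the class itself):
#include <sys/timerfd.h>
#include <unistd.h>
#include <cstdint>
#include <stdexcept>

int makeRelativeTimerFD(uint64_t usec)
{
    int fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    if (-1 == fd)
        throw std::runtime_error("Cannot create timer_fd descriptor");

    struct itimerspec spec = {};
    spec.it_value.tv_sec = usec / 1'000'000;            /// TIMER_PRECISION == 1e6 in the code above
    spec.it_value.tv_nsec = (usec % 1'000'000) * 1'000;
    if (-1 == timerfd_settime(fd, 0 /* relative timer */, &spec, nullptr))
    {
        close(fd);
        throw std::runtime_error("Cannot set time for timer_fd");
    }
    return fd;
}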

View File

@ -869,7 +869,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
/// method is called.
do
{
/// Use getData insteand of exists to avoid watch leak.
/// Use getData instead of exists to avoid watch leak.
impl->get(path, callback, std::make_shared<Coordination::WatchCallback>(watch));
if (!state->event.tryWait(1000))
@ -888,7 +888,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
return false;
}
void ZooKeeper::handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value)
void ZooKeeper::deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value)
{
zkutil::EventPtr eph_node_disappeared = std::make_shared<Poco::Event>();
String content;
@ -1175,6 +1175,7 @@ std::future<Coordination::RemoveResponse> ZooKeeper::asyncRemove(const std::stri
return future;
}
/// Needs to match ZooKeeperWithFaultInjection::asyncTryRemove implementation
std::future<Coordination::RemoveResponse> ZooKeeper::asyncTryRemove(const std::string & path, int32_t version)
{
auto promise = std::make_shared<std::promise<Coordination::RemoveResponse>>();

View File

@ -33,7 +33,8 @@ namespace CurrentMetrics
namespace DB
{
class ZooKeeperLog;
class ZooKeeperLog;
class ZooKeeperWithFaultInjection;
namespace ErrorCodes
{
@ -194,6 +195,9 @@ private:
/// Methods with names not starting at try- raise KeeperException on any error.
class ZooKeeper
{
/// ZooKeeperWithFaultInjection wants access to `impl` pointer to reimplement some async functions with faults
friend class DB::ZooKeeperWithFaultInjection;
public:
using Ptr = std::shared_ptr<ZooKeeper>;
@ -470,7 +474,7 @@ public:
/// If the node exists and its value is equal to fast_delete_if_equal_value it will remove it
/// If the node exists and its value is different, it will wait for it to disappear. It will throw a LOGICAL_ERROR if the node doesn't
/// disappear automatically after 3x session_timeout.
void handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value);
void deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value);
Coordination::ReconfigResponse reconfig(
const std::string & joining,
@ -646,8 +650,6 @@ private:
ZooKeeperArgs args;
std::mutex mutex;
Poco::Logger * log = nullptr;
std::shared_ptr<DB::ZooKeeperLog> zk_log;

View File

@ -0,0 +1,632 @@
#include <base/defines.h>
#include <Common/ZooKeeper/ZooKeeperWithFaultInjection.h>
namespace DB
{
ZooKeeperWithFaultInjection::ZooKeeperWithFaultInjection(
zkutil::ZooKeeper::Ptr const & keeper_,
double fault_injection_probability,
UInt64 fault_injection_seed,
std::string name_,
Poco::Logger * logger_)
: keeper(keeper_)
, fault_policy(std::make_unique<RandomFaultInjection>(fault_injection_probability, fault_injection_seed))
, name(std::move(name_))
, logger(logger_)
, seed(fault_injection_seed)
{
}
void ZooKeeperWithFaultInjection::resetKeeper()
{
/// When an error is injected, we need to reset keeper for several reasons
/// a) Avoid processing further requests in this keeper (in async code)
/// b) Simulate a fault as ZooKeeperImpl does, forcing a new session (which drops ephemeral nodes)
///
/// Ideally we would call `keeper->finalize("Fault injection");` to force the session reload.
/// The problem with that is that many operations currently aren't able to cope with keeper faults correctly,
/// so they would fail. While this is what happens in production, it's not what we want in the CI.
///
/// Until all the code can handle keeper session resets, we need to simulate it so the code that relies on its
/// behaviour keeps working. An example of such code is insert block ids: If keeper dies between the block id being
/// reserved (via ephemeral node) and the metadata being pushed, the reserved block id will be deleted automatically
/// in keeper (connection drop == delete all ephemeral nodes attached to that connection). This way retrying and
/// getting a new block id is ok. But without a connection reset (because ZooKeeperWithFaultInjection doesn't
/// enforce it yet), the old ephemeral nodes associated with "committing_blocks" will still be there and operations
/// such as block merges, mutations, etc., will think they are alive and wait for them to be ready (which will never
/// happen)
/// Our poor man's session reload is to keep track of the ephemeral nodes created by this faulty keeper and delete
/// them manually when we force a fault. This is obviously limited, as it only applies to operations processed by
/// this instance, but the hope is that more and more code learns to handle session reloads, at which point the hack
/// can be eliminated. Until that time, the hack remains.
if (keeper)
{
for (const auto & path_created : session_ephemeral_nodes)
{
try
{
keeper->remove(path_created);
}
catch (const Coordination::Exception & e)
{
if (logger)
LOG_TRACE(logger, "Failed to delete ephemeral node ({}) during fault cleanup: {}", path_created, e.what());
}
}
}
session_ephemeral_nodes.clear();
keeper.reset();
}
void ZooKeeperWithFaultInjection::multiResponseSaveEphemeralNodePaths(
const Coordination::Requests & requests, const Coordination::Responses & responses)
{
if (responses.empty())
return;
chassert(requests.size() == responses.size());
for (size_t i = 0; i < requests.size(); i++)
{
const auto * create_req = dynamic_cast<const Coordination::CreateRequest *>(requests[i].get());
if (create_req && create_req->is_ephemeral)
{
const auto * create_resp = dynamic_cast<const Coordination::CreateResponse *>(responses.at(i).get());
chassert(create_resp);
session_ephemeral_nodes.emplace_back(create_resp->path_created);
}
}
}
void ZooKeeperWithFaultInjection::injectFailureBeforeOperationThrow(const char * func_name, const String & path)
{
if (unlikely(!keeper))
{
/// This is ok for async requests: several of them may be in flight and an earlier one introduced a fault.
/// When a fault is injected we reset the pointer to mark the connection as failed, and we keep injecting
/// failures into any subsequent async requests
if (logger)
LOG_TRACE(logger, "ZooKeeperWithFaultInjection called after fault: seed={}, func={} path={}", seed, func_name, path);
throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_session_expired);
}
if (unlikely(fault_policy) && fault_policy->beforeOperation())
{
if (logger)
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ",
seed,
func_name,
path,
RandomFaultInjection::error_before_op,
RandomFaultInjection::msg_before_op);
resetKeeper();
throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_before_op);
}
}
void ZooKeeperWithFaultInjection::injectFailureAfterOperationThrow(const char * func_name, const String & path)
{
if (unlikely(fault_policy) && fault_policy->afterOperation())
{
if (logger)
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ",
seed,
func_name,
path,
RandomFaultInjection::error_after_op,
RandomFaultInjection::msg_after_op);
resetKeeper();
throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_after_op, RandomFaultInjection::msg_after_op);
}
}
template <typename Operation>
std::invoke_result_t<Operation>
ZooKeeperWithFaultInjection::executeWithFaultSync(const char * func_name, const std::string & path, Operation operation)
{
injectFailureBeforeOperationThrow(func_name, path);
if constexpr (!std::is_same_v<std::invoke_result_t<Operation>, void>)
{
auto res = operation();
injectFailureAfterOperationThrow(func_name, path);
return res;
}
else
{
operation();
injectFailureAfterOperationThrow(func_name, path);
}
}
template <typename Promise>
bool ZooKeeperWithFaultInjection::injectFailureBeforeOperationPromise(const char * func_name, Promise & promise, const String & path)
{
if (unlikely(!keeper))
{
if (logger)
LOG_ERROR(logger, "ZooKeeperWithFaultInjection called after fault injection: seed={}, func={} path={}", seed, func_name, path);
promise->set_exception(std::make_exception_ptr(
zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_session_expired)));
return true; /// without this the caller would dereference the null keeper and set the promise twice
}
if (unlikely(fault_policy) && fault_policy->beforeOperation())
{
if (logger)
LOG_TRACE(
logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, func_name, path);
resetKeeper();
promise->set_exception(std::make_exception_ptr(
zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_before_op)));
return true;
}
return false;
}
template <typename Promise>
bool ZooKeeperWithFaultInjection::injectFailureAfterOperationPromise(const char * func_name, Promise & promise, const String & path)
{
if (unlikely(fault_policy) && fault_policy->afterOperation())
{
promise->set_exception(std::make_exception_ptr(
zkutil::KeeperException::fromMessage(RandomFaultInjection::error_after_op, RandomFaultInjection::msg_after_op)));
if (logger)
LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}", seed, func_name, path);
resetKeeper();
return true;
}
return false;
}
Strings ZooKeeperWithFaultInjection::getChildren(
const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch, Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildren(path, stat, watch, list_request_type); });
}
zkutil::ZooKeeper::MultiGetChildrenResponse
ZooKeeperWithFaultInjection::getChildren(const std::vector<std::string> & paths, Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(
__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->getChildren(paths, list_request_type); });
}
Coordination::Error ZooKeeperWithFaultInjection::tryGetChildren(
const std::string & path,
Strings & res,
Coordination::Stat * stat,
const zkutil::EventPtr & watch,
Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGetChildren(path, res, stat, watch, list_request_type); });
}
zkutil::ZooKeeper::MultiTryGetChildrenResponse
ZooKeeperWithFaultInjection::tryGetChildren(const std::vector<std::string> & paths, Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(
__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->tryGetChildren(paths, list_request_type); });
}
Coordination::Error ZooKeeperWithFaultInjection::tryGetChildrenWatch(
const std::string & path,
Strings & res,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(
__func__, path, [&]() { return keeper->tryGetChildrenWatch(path, res, stat, watch_callback, list_request_type); });
}
Strings ZooKeeperWithFaultInjection::getChildrenWatch(
const std::string & path,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildrenWatch(path, stat, watch_callback, list_request_type); });
}
Strings ZooKeeperWithFaultInjection::getChildrenWatch(
const std::string & path,
Coordination::Stat * stat,
Coordination::WatchCallbackPtr watch_callback,
Coordination::ListRequestType list_request_type)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildrenWatch(path, stat, watch_callback, list_request_type); });
}
bool ZooKeeperWithFaultInjection::tryGet(
const std::string & path, std::string & res, Coordination::Stat * stat, const zkutil::EventPtr & watch, Coordination::Error * code)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGet(path, res, stat, watch, code); });
}
bool ZooKeeperWithFaultInjection::tryGetWatch(
const std::string & path,
std::string & res,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::Error * code)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGetWatch(path, res, stat, watch_callback, code); });
}
std::string ZooKeeperWithFaultInjection::get(const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->get(path, stat, watch); });
}
zkutil::ZooKeeper::MultiGetResponse ZooKeeperWithFaultInjection::get(const std::vector<std::string> & paths)
{
return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->get(paths); });
}
zkutil::ZooKeeper::MultiTryGetResponse ZooKeeperWithFaultInjection::tryGet(const std::vector<std::string> & paths)
{
return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->tryGet(paths); });
}
void ZooKeeperWithFaultInjection::set(const String & path, const String & data, int32_t version, Coordination::Stat * stat)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->set(path, data, version, stat); });
}
void ZooKeeperWithFaultInjection::remove(const String & path, int32_t version)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->remove(path, version); });
}
bool ZooKeeperWithFaultInjection::exists(const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->exists(path, stat, watch); });
}
zkutil::ZooKeeper::MultiExistsResponse ZooKeeperWithFaultInjection::exists(const std::vector<std::string> & paths)
{
return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->exists(paths); });
}
std::string ZooKeeperWithFaultInjection::create(const std::string & path, const std::string & data, int32_t mode)
{
return executeWithFaultSync(
__func__,
path,
[&]()
{
auto path_created = keeper->create(path, data, mode);
if (unlikely(fault_policy) && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral))
session_ephemeral_nodes.emplace_back(path_created);
return path_created;
});
}
Coordination::Error
ZooKeeperWithFaultInjection::tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created)
{
return executeWithFaultSync(
__func__,
path,
[&]()
{
Coordination::Error code = keeper->tryCreate(path, data, mode, path_created);
if (unlikely(fault_policy) && code == Coordination::Error::ZOK
&& (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral))
session_ephemeral_nodes.emplace_back(path_created);
return code;
});
}
Coordination::Error ZooKeeperWithFaultInjection::tryCreate(const std::string & path, const std::string & data, int32_t mode)
{
std::string path_created;
return tryCreate(path, data, mode, path_created);
}
Coordination::Responses ZooKeeperWithFaultInjection::multi(const Coordination::Requests & requests)
{
return executeWithFaultSync(
__func__,
!requests.empty() ? requests.front()->getPath() : "",
[&]()
{
auto responses = keeper->multi(requests);
if (unlikely(fault_policy))
multiResponseSaveEphemeralNodePaths(requests, responses);
return responses;
});
}
void ZooKeeperWithFaultInjection::createIfNotExists(const std::string & path, const std::string & data)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->createIfNotExists(path, data); });
}
void ZooKeeperWithFaultInjection::createOrUpdate(const std::string & path, const std::string & data, int32_t mode)
{
chassert(mode != zkutil::CreateMode::EphemeralSequential && mode != zkutil::CreateMode::Ephemeral);
return executeWithFaultSync(__func__, path, [&]() { return keeper->createOrUpdate(path, data, mode); });
}
void ZooKeeperWithFaultInjection::createAncestors(const std::string & path)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->createAncestors(path); });
}
Coordination::Error ZooKeeperWithFaultInjection::tryRemove(const std::string & path, int32_t version)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemove(path, version); });
}
void ZooKeeperWithFaultInjection::removeRecursive(const std::string & path)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->removeRecursive(path); });
}
void ZooKeeperWithFaultInjection::tryRemoveRecursive(const std::string & path)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemoveRecursive(path); });
}
void ZooKeeperWithFaultInjection::removeChildren(const std::string & path)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->removeChildren(path); });
}
bool ZooKeeperWithFaultInjection::tryRemoveChildrenRecursive(
const std::string & path, bool probably_flat, zkutil::RemoveException keep_child)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemoveChildrenRecursive(path, probably_flat, keep_child); });
}
bool ZooKeeperWithFaultInjection::waitForDisappear(const std::string & path, const zkutil::ZooKeeper::WaitCondition & condition)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->waitForDisappear(path, condition); });
}
std::string ZooKeeperWithFaultInjection::sync(const std::string & path)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->sync(path); });
}
Coordination::Error
ZooKeeperWithFaultInjection::trySet(const std::string & path, const std::string & data, int32_t version, Coordination::Stat * stat)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->trySet(path, data, version, stat); });
}
void ZooKeeperWithFaultInjection::checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests)
{
return executeWithFaultSync(__func__, path, [&]() { return keeper->checkExistsAndGetCreateAncestorsOps(path, requests); });
}
void ZooKeeperWithFaultInjection::deleteEphemeralNodeIfContentMatches(
const std::string & path, const std::string & fast_delete_if_equal_value)
{
return executeWithFaultSync(
__func__, path, [&]() { return keeper->deleteEphemeralNodeIfContentMatches(path, fast_delete_if_equal_value); });
}
Coordination::Error ZooKeeperWithFaultInjection::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses)
{
return executeWithFaultSync(
__func__,
!requests.empty() ? requests.front()->getPath() : "",
[&]()
{
auto code = keeper->tryMulti(requests, responses);
if (unlikely(fault_policy) && code == Coordination::Error::ZOK)
multiResponseSaveEphemeralNodePaths(requests, responses);
return code;
});
}
Coordination::Error
ZooKeeperWithFaultInjection::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses)
{
try
{
return tryMulti(requests, responses);
}
catch (const Coordination::Exception & e)
{
return e.code;
}
}
zkutil::ZooKeeper::FutureExists ZooKeeperWithFaultInjection::asyncExists(std::string path, Coordination::WatchCallback watch_callback)
{
auto promise = std::make_shared<std::promise<Coordination::ExistsResponse>>();
auto future = promise->get_future();
if (injectFailureBeforeOperationPromise(__func__, promise, path))
return future;
const char * function_name = __func__;
auto callback = [&, promise](const Coordination::ExistsResponse & response) mutable
{
if (injectFailureAfterOperationPromise(function_name, promise, path))
return;
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE)
promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
keeper->impl->exists(
path,
std::move(callback),
watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
zkutil::ZooKeeper::FutureGet ZooKeeperWithFaultInjection::asyncTryGet(std::string path)
{
auto promise = std::make_shared<std::promise<Coordination::GetResponse>>();
auto future = promise->get_future();
if (injectFailureBeforeOperationPromise(__func__, promise, path))
return future;
const char * function_name = __func__;
auto callback = [&, promise](const Coordination::GetResponse & response) mutable
{
if (injectFailureAfterOperationPromise(function_name, promise, path))
return;
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE)
promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
keeper->impl->get(path, std::move(callback), {});
return future;
}
zkutil::ZooKeeper::FutureMulti ZooKeeperWithFaultInjection::asyncTryMultiNoThrow(const Coordination::Requests & ops)
{
#ifndef NDEBUG
/// asyncTryMultiNoThrow is not set up to handle faults with ephemeral nodes.
/// To do that we'd need to look at the ops and save the indexes BEFORE the callback, as the ops are not
/// guaranteed to live until then
for (const auto & op : ops)
{
const auto * create_req = dynamic_cast<const Coordination::CreateRequest *>(op.get());
if (create_req)
chassert(!create_req->is_ephemeral);
}
#endif
auto promise = std::make_shared<std::promise<Coordination::MultiResponse>>();
auto future = promise->get_future();
size_t request_size = ops.size();
String path = ops.empty() ? "" : ops.front()->getPath();
if (!keeper || (unlikely(fault_policy) && fault_policy->beforeOperation()))
{
if (logger)
LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, __func__, path);
resetKeeper();
Coordination::MultiResponse errors;
for (size_t i = 0; i < request_size; i++)
{
auto r = std::make_shared<Coordination::ZooKeeperErrorResponse>();
r->error = RandomFaultInjection::error_before_op;
errors.responses.emplace_back(std::move(r));
}
promise->set_value(errors);
return future;
}
const char * function_name = __func__;
auto callback = [&, promise](const Coordination::MultiResponse & response) mutable
{
if (unlikely(fault_policy) && fault_policy->afterOperation())
{
if (logger)
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}",
seed,
function_name,
path);
resetKeeper();
Coordination::MultiResponse errors;
for (size_t i = 0; i < request_size; i++)
{
auto r = std::make_shared<Coordination::ZooKeeperErrorResponse>();
r->error = RandomFaultInjection::error_after_op;
errors.responses.emplace_back(std::move(r));
}
promise->set_value(errors);
}
else
{
promise->set_value(response);
}
};
keeper->impl->multi(ops, std::move(callback));
return future;
}
/// Needs to match ZooKeeper::asyncTryRemove implementation
zkutil::ZooKeeper::FutureRemove ZooKeeperWithFaultInjection::asyncTryRemove(std::string path, int32_t version)
{
auto promise = std::make_shared<std::promise<Coordination::RemoveResponse>>();
auto future = promise->get_future();
if (injectFailureBeforeOperationPromise(__func__, promise, path))
return future;
const char * function_name = __func__;
auto callback = [&, promise](const Coordination::RemoveResponse & response) mutable
{
if (injectFailureAfterOperationPromise(function_name, promise, path))
return;
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE
&& response.error != Coordination::Error::ZBADVERSION && response.error != Coordination::Error::ZNOTEMPTY)
{
promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path)));
}
else
promise->set_value(response);
};
keeper->impl->remove(path, version, std::move(callback));
return future;
}
zkutil::ZooKeeper::FutureRemove ZooKeeperWithFaultInjection::asyncTryRemoveNoThrow(const std::string & path, int32_t version)
{
auto promise = std::make_shared<std::promise<Coordination::RemoveResponse>>();
auto future = promise->get_future();
if (!keeper || (unlikely(fault_policy) && fault_policy->beforeOperation()))
{
if (logger)
LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, __func__, path);
resetKeeper();
Coordination::RemoveResponse r;
r.error = RandomFaultInjection::error_before_op;
promise->set_value(r);
return future;
}
const char * function_name = __func__;
auto callback = [&, promise](const Coordination::RemoveResponse & response) mutable
{
if (unlikely(fault_policy) && fault_policy->afterOperation())
{
if (logger)
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}",
seed,
function_name,
path);
resetKeeper();
Coordination::RemoveResponse r;
r.error = RandomFaultInjection::error_after_op;
promise->set_value(r);
}
else
{
promise->set_value(response);
}
};
keeper->impl->remove(path, version, std::move(callback));
return future;
}
}
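All the synchronous wrappers in this file funnel through executeWithFaultSync; stripped of logging and ephemeral-node bookkeeping, the control flow is essentially this sketch (the maybeFail* helper names are invented for illustration):
template <typename Operation>
std::invoke_result_t<Operation> withFaults(Operation operation)
{
    maybeFailBefore();   /// hypothetical: may reset the session and throw ZSESSIONEXPIRED
    if constexpr (std::is_same_v<std::invoke_result_t<Operation>, void>)
    {
        operation();
        maybeFailAfter();   /// hypothetical: may reset the session and throw ZOPERATIONTIMEOUT
    }
    else
    {
        auto res = operation();
        maybeFailAfter();
        return res;
    }
}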

View File

@ -12,10 +12,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
class RandomFaultInjection
{
@ -23,23 +19,33 @@ public:
bool must_fail_after_op = false;
bool must_fail_before_op = false;
static constexpr auto msg_session_expired = "Called after fault injection";
static constexpr auto error_before_op = Coordination::Error::ZSESSIONEXPIRED;
static constexpr auto msg_before_op = "Fault injection before operation";
static constexpr auto error_after_op = Coordination::Error::ZOPERATIONTIMEOUT;
static constexpr auto msg_after_op = "Fault injection after operation";
RandomFaultInjection(double probability, UInt64 seed_) : rndgen(seed_), distribution(probability) { }
void beforeOperation()
bool beforeOperation()
{
if (distribution(rndgen) || must_fail_before_op)
if (must_fail_before_op || distribution(rndgen))
{
must_fail_before_op = false;
throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED, "Fault injection before operation");
return true;
}
return false;
}
void afterOperation()
bool afterOperation()
{
if (distribution(rndgen) || must_fail_after_op)
if (must_fail_after_op || distribution(rndgen))
{
must_fail_after_op = false;
throw zkutil::KeeperException::fromMessage(Coordination::Error::ZOPERATIONTIMEOUT, "Fault injection after operation");
return true;
}
return false;
}
private:
@ -52,563 +58,206 @@ private:
///
class ZooKeeperWithFaultInjection
{
template<bool async_insert>
friend class ReplicatedMergeTreeSinkImpl;
using zk = zkutil::ZooKeeper;
zk::Ptr keeper;
zk::Ptr keeper_prev;
zkutil::ZooKeeper::Ptr keeper;
std::unique_ptr<RandomFaultInjection> fault_policy;
std::string name;
Poco::Logger * logger = nullptr;
UInt64 calls_total = 0;
UInt64 calls_without_fault_injection = 0;
const UInt64 seed = 0;
std::vector<std::string> ephemeral_nodes;
std::vector<std::string> session_ephemeral_nodes;
ZooKeeperWithFaultInjection(
zk::Ptr const & keeper_,
double fault_injection_probability,
UInt64 fault_injection_seed,
std::string name_,
Poco::Logger * logger_)
: keeper(keeper_), name(std::move(name_)), logger(logger_), seed(fault_injection_seed)
{
fault_policy = std::make_unique<RandomFaultInjection>(fault_injection_probability, fault_injection_seed);
template <typename Operation>
std::invoke_result_t<Operation> executeWithFaultSync(const char * func_name, const std::string & path, Operation);
void injectFailureBeforeOperationThrow(const char * func_name, const String & path);
void injectFailureAfterOperationThrow(const char * func_name, const String & path);
template <typename Promise>
bool injectFailureBeforeOperationPromise(const char * func_name, Promise & promise, const String & path);
template <typename Promise>
bool injectFailureAfterOperationPromise(const char * func_name, Promise & promise, const String & path);
if (unlikely(logger))
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection created: name={} seed={} fault_probability={}",
name,
seed,
fault_injection_probability);
}
void resetKeeper();
void multiResponseSaveEphemeralNodePaths(const Coordination::Requests & requests, const Coordination::Responses & responses);
public:
using Ptr = std::shared_ptr<ZooKeeperWithFaultInjection>;
ZooKeeperWithFaultInjection(
zkutil::ZooKeeper::Ptr const & keeper_,
double fault_injection_probability,
UInt64 fault_injection_seed,
std::string name_,
Poco::Logger * logger_);
explicit ZooKeeperWithFaultInjection(zkutil::ZooKeeper::Ptr const & keeper_) : keeper(keeper_) { }
static ZooKeeperWithFaultInjection::Ptr createInstance(
double fault_injection_probability, UInt64 fault_injection_seed, const zk::Ptr & zookeeper, std::string name, Poco::Logger * logger)
double fault_injection_probability,
UInt64 fault_injection_seed,
zkutil::ZooKeeper::Ptr const & zookeeper,
std::string name,
Poco::Logger * logger)
{
/// validate all parameters here; the constructor just accepts everything
if (fault_injection_probability < 0.0)
fault_injection_probability = .0;
else if (fault_injection_probability > 1.0)
fault_injection_probability = 1.0;
if (0 == fault_injection_seed)
if (fault_injection_seed == 0)
fault_injection_seed = randomSeed();
if (fault_injection_probability > 0.0)
return std::shared_ptr<ZooKeeperWithFaultInjection>(
new ZooKeeperWithFaultInjection(zookeeper, fault_injection_probability, fault_injection_seed, std::move(name), logger));
return std::make_shared<ZooKeeperWithFaultInjection>(
zookeeper, fault_injection_probability, fault_injection_seed, std::move(name), logger);
/// if no fault injection is provided, create an instance that will not log anything
return std::make_shared<ZooKeeperWithFaultInjection>(zookeeper);
}
explicit ZooKeeperWithFaultInjection(zk::Ptr const & keeper_) : keeper(keeper_) { }
void setKeeper(zkutil::ZooKeeper::Ptr const & keeper_) { keeper = keeper_; }
zkutil::ZooKeeper::Ptr getKeeper() const { return keeper; }
bool isNull() const { return keeper.get() == nullptr; }
bool expired() const { return !keeper || keeper->expired(); }
bool isFeatureEnabled(KeeperFeatureFlag feature_flag) const { return keeper->isFeatureEnabled(feature_flag); }
~ZooKeeperWithFaultInjection()
void forceFailureBeforeOperation()
{
if (unlikely(logger))
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection report: name={} seed={} calls_total={} calls_succeeded={} calls_failed={} failure_rate={}",
name,
seed,
calls_total,
calls_without_fault_injection,
calls_total - calls_without_fault_injection,
float(calls_total - calls_without_fault_injection) / calls_total);
if (!fault_policy)
fault_policy = std::make_unique<RandomFaultInjection>(0, 0);
fault_policy->must_fail_before_op = true;
}
void setKeeper(zk::Ptr const & keeper_) { keeper = keeper_; }
bool isNull() const { return keeper.get() == nullptr; }
bool expired() { return keeper->expired(); }
void forceFailureAfterOperation()
{
if (!fault_policy)
fault_policy = std::make_unique<RandomFaultInjection>(0, 0);
fault_policy->must_fail_after_op = true;
}
///
/// mirror ZooKeeper interface
/// mirror ZooKeeper interface: Sync functions
///
Strings getChildren(
const std::string & path,
Coordination::Stat * stat = nullptr,
const zkutil::EventPtr & watch = nullptr,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL)
{
return access("getChildren", path, [&]() { return keeper->getChildren(path, stat, watch, list_request_type); });
}
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
zkutil::ZooKeeper::MultiGetChildrenResponse getChildren(
const std::vector<std::string> & paths, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
Coordination::Error tryGetChildren(
const std::string & path,
Strings & res,
Coordination::Stat * stat = nullptr,
const zkutil::EventPtr & watch = nullptr,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL)
{
return access("tryGetChildren", path, [&]() { return keeper->tryGetChildren(path, res, stat, watch, list_request_type); });
}
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
zk::FutureExists asyncExists(const std::string & path, Coordination::WatchCallback watch_callback = {})
{
return access("asyncExists", path, [&]() { return keeper->asyncExists(path, watch_callback); });
}
zkutil::ZooKeeper::MultiTryGetChildrenResponse tryGetChildren(
const std::vector<std::string> & paths, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
zk::FutureGet asyncTryGet(const std::string & path)
{
return access("asyncTryGet", path, [&]() { return keeper->asyncTryGet(path); });
}
Coordination::Error tryGetChildrenWatch(
const std::string & path,
Strings & res,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
Strings getChildrenWatch(
const std::string & path,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
Strings getChildrenWatch(
const std::string & path,
Coordination::Stat * stat,
Coordination::WatchCallbackPtr watch_callback,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
bool tryGet(
const std::string & path,
std::string & res,
Coordination::Stat * stat = nullptr,
const zkutil::EventPtr & watch = nullptr,
Coordination::Error * code = nullptr)
{
return access("tryGet", path, [&]() { return keeper->tryGet(path, res, stat, watch, code); });
}
Coordination::Error * code = nullptr);
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses)
{
constexpr auto method = "tryMulti";
auto error = access(
method,
!requests.empty() ? requests.front()->getPath() : "",
[&]() { return keeper->tryMulti(requests, responses); },
[&](const Coordination::Error & original_error)
{
if (original_error == Coordination::Error::ZOK)
faultInjectionPostAction(method, requests, responses);
},
[&]()
{
responses.clear();
for (size_t i = 0; i < requests.size(); ++i)
responses.emplace_back(std::make_shared<Coordination::ZooKeeperErrorResponse>());
});
/// collect ephemeral nodes when no fault was injected (to clean up on demand)
if (unlikely(fault_policy) && Coordination::Error::ZOK == error)
{
doForEachCreatedEphemeralNode(
method, requests, responses, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); });
}
return error;
}
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses)
{
constexpr auto method = "tryMultiNoThrow";
constexpr auto no_throw = true;
constexpr auto inject_failure_before_op = false;
auto error = access<no_throw, inject_failure_before_op>(
method,
!requests.empty() ? requests.front()->getPath() : "",
[&]() { return keeper->tryMultiNoThrow(requests, responses); },
[&](const Coordination::Error & original_error)
{
if (original_error == Coordination::Error::ZOK)
faultInjectionPostAction(method, requests, responses);
},
[&]()
{
responses.clear();
for (size_t i = 0; i < requests.size(); ++i)
responses.emplace_back(std::make_shared<Coordination::ZooKeeperErrorResponse>());
});
/// collect ephemeral nodes when no fault was injected (to clean up later)
if (unlikely(fault_policy) && Coordination::Error::ZOK == error)
{
doForEachCreatedEphemeralNode(
method, requests, responses, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); });
}
return error;
}
std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr)
{
return access("get", path, [&]() { return keeper->get(path, stat, watch); });
}
zkutil::ZooKeeper::MultiGetResponse get(const std::vector<std::string> & paths)
{
return access("get", !paths.empty() ? paths.front() : "", [&]() { return keeper->get(paths); });
}
zkutil::ZooKeeper::MultiTryGetResponse tryGet(const std::vector<std::string> & paths)
{
return access("tryGet", !paths.empty() ? paths.front() : "", [&]() { return keeper->tryGet(paths); });
}
bool exists(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr)
{
return access("exists", path, [&]() { return keeper->exists(path, stat, watch); });
}
bool existsNoFailureInjection(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr)
{
return access<false, false, false>("exists", path, [&]() { return keeper->exists(path, stat, watch); });
}
zkutil::ZooKeeper::MultiExistsResponse exists(const std::vector<std::string> & paths)
{
return access("exists", !paths.empty() ? paths.front() : "", [&]() { return keeper->exists(paths); });
}
std::string create(const std::string & path, const std::string & data, int32_t mode)
{
std::string path_created;
auto code = tryCreate(path, data, mode, path_created);
if (code != Coordination::Error::ZOK)
throw zkutil::KeeperException::fromPath(code, path);
return path_created;
}
Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created)
{
path_created.clear();
auto error = access(
"tryCreate",
path,
[&]() { return keeper->tryCreate(path, data, mode, path_created); },
[&](Coordination::Error & code)
{
try
{
if (!path_created.empty() && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral))
{
keeper->remove(path_created);
if (unlikely(logger))
LOG_TRACE(logger, "ZooKeeperWithFaultInjection cleanup: seed={} func={} path={} path_created={} code={}",
seed, "tryCreate", path, path_created, code);
}
}
catch (const zkutil::KeeperException & e)
{
if (unlikely(logger))
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection cleanup FAILED: seed={} func={} path={} path_created={} code={} message={} ",
seed,
"tryCreate",
path,
path_created,
e.code,
e.message());
}
});
/// collect ephemeral nodes when no fault was injected (to clean up later)
if (unlikely(fault_policy))
{
if (!path_created.empty() && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral))
ephemeral_nodes.push_back(path_created);
}
return error;
}
Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode)
{
String path_created;
return tryCreate(path, data, mode, path_created);
}
void createIfNotExists(const std::string & path, const std::string & data)
{
std::string path_created;
auto code = tryCreate(path, data, zkutil::CreateMode::Persistent, path_created);
if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS)
return;
throw zkutil::KeeperException::fromPath(code, path);
}
Coordination::Responses multi(const Coordination::Requests & requests)
{
constexpr auto method = "multi";
auto result = access(
method,
!requests.empty() ? requests.front()->getPath() : "",
[&]() { return keeper->multi(requests); },
[&](Coordination::Responses & responses) { faultInjectionPostAction(method, requests, responses); });
/// collect ephemeral nodes to clean up
if (unlikely(fault_policy))
{
doForEachCreatedEphemeralNode(
method, requests, result, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); });
}
return result;
}
void createAncestors(const std::string & path)
{
access("createAncestors", path, [&]() { return keeper->createAncestors(path); });
}
Coordination::Error tryRemove(const std::string & path, int32_t version = -1)
{
return access("tryRemove", path, [&]() { return keeper->tryRemove(path, version); });
}
void removeRecursive(const std::string & path)
{
return access("removeRecursive", path, [&]() { return keeper->removeRecursive(path); });
}
std::string sync(const std::string & path)
{
return access("sync", path, [&]() { return keeper->sync(path); });
}
Coordination::Error trySet(const std::string & path, const std::string & data, int32_t version = -1, Coordination::Stat * stat = nullptr)
{
return access("trySet", path, [&]() { return keeper->trySet(path, data, version, stat); });
}
void checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests)
{
return access("checkExistsAndGetCreateAncestorsOps", path, [&]() { return keeper->checkExistsAndGetCreateAncestorsOps(path, requests); });
}
void handleEphemeralNodeExistenceNoFailureInjection(const std::string & path, const std::string & fast_delete_if_equal_value)
{
return access<false, false, false>("handleEphemeralNodeExistence", path, [&]() { return keeper->handleEphemeralNodeExistence(path, fast_delete_if_equal_value); });
}
void cleanupEphemeralNodes()
{
for (const auto & path : ephemeral_nodes)
{
try
{
if (keeper_prev)
keeper_prev->tryRemove(path);
}
catch (...)
{
if (unlikely(logger))
tryLogCurrentException(logger, "Exception during ephemeral nodes clean up");
}
}
ephemeral_nodes.clear();
}
bool isFeatureEnabled(KeeperFeatureFlag feature_flag) const
{
return keeper->isFeatureEnabled(feature_flag);
}
private:
void faultInjectionBefore(std::function<void()> fault_cleanup)
{
try
{
if (unlikely(fault_policy))
fault_policy->beforeOperation();
}
catch (const zkutil::KeeperException &)
{
fault_cleanup();
throw;
}
}
void faultInjectionAfter(std::function<void()> fault_cleanup)
{
try
{
if (unlikely(fault_policy))
fault_policy->afterOperation();
}
catch (const zkutil::KeeperException &)
{
fault_cleanup();
throw;
}
}
void doForEachCreatedEphemeralNode(
const char * method, const Coordination::Requests & requests, const Coordination::Responses & responses, auto && action)
{
if (responses.empty())
return;
if (responses.size() != requests.size())
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Number of responses doesn't match number of requests: method={} requests={} responses={}",
method,
requests.size(),
responses.size());
/// find create request with ephemeral flag
std::vector<std::pair<size_t, const Coordination::CreateRequest *>> create_requests;
for (size_t i = 0; i < requests.size(); ++i)
{
const auto * create_req = dynamic_cast<const Coordination::CreateRequest *>(requests[i].get());
if (create_req && create_req->is_ephemeral)
create_requests.emplace_back(i, create_req);
}
for (auto && [i, req] : create_requests)
{
const auto * create_resp = dynamic_cast<const Coordination::CreateResponse *>(responses.at(i).get());
if (!create_resp)
throw Exception(
ErrorCodes::LOGICAL_ERROR, "Response should be CreateResponse: method={} index={} path={}", method, i, req->path);
action(create_resp->path_created);
}
}
void faultInjectionPostAction(const char * method, const Coordination::Requests & requests, Coordination::Responses & responses)
{
doForEachCreatedEphemeralNode(method, requests, responses, [&](const String & path_created) { keeper->remove(path_created); });
}
template <typename T>
struct FaultCleanupTypeImpl
{
using Type = std::function<void(T &)>;
};
template <>
struct FaultCleanupTypeImpl<void>
{
using Type = std::function<void()>;
};
template <typename T>
using FaultCleanupType = typename FaultCleanupTypeImpl<T>::Type;
template <
bool no_throw_access = false,
bool inject_failure_before_op = true,
int inject_failure_after_op = true,
typename Operation,
typename Result = std::invoke_result_t<Operation>>
Result access(
const char * func_name,
bool tryGetWatch(
const std::string & path,
Operation operation,
FaultCleanupType<Result> fault_after_op_cleanup = {},
FaultCleanupType<void> fault_before_op_cleanup = {})
{
try
{
++calls_total;
std::string & res,
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::Error * code = nullptr);
if (!keeper)
throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED,
"Session is considered to be expired due to fault injection");
std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr);
if constexpr (inject_failure_before_op)
{
faultInjectionBefore(
[&]
{
if (fault_before_op_cleanup)
fault_before_op_cleanup();
});
}
zkutil::ZooKeeper::MultiGetResponse get(const std::vector<std::string> & paths);
if constexpr (!std::is_same_v<Result, void>)
{
Result res = operation();
zkutil::ZooKeeper::MultiTryGetResponse tryGet(const std::vector<std::string> & paths);
/// if connectivity error occurred w/o fault injection -> just return it
if constexpr (std::is_same_v<Coordination::Error, Result>)
{
if (Coordination::isHardwareError(res))
return res;
}
void set(const String & path, const String & data, int32_t version = -1, Coordination::Stat * stat = nullptr);
if constexpr (inject_failure_after_op)
{
faultInjectionAfter(
[&]
{
if (fault_after_op_cleanup)
fault_after_op_cleanup(res);
});
}
void remove(const String & path, int32_t version = -1);
++calls_without_fault_injection;
bool exists(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr);
if (unlikely(logger))
LOG_TRACE(logger, "ZooKeeperWithFaultInjection call SUCCEEDED: seed={} func={} path={}", seed, func_name, path);
zkutil::ZooKeeper::MultiExistsResponse exists(const std::vector<std::string> & paths);
return res;
}
else
{
operation();
std::string create(const std::string & path, const std::string & data, int32_t mode);
if constexpr (inject_failure_after_op)
{
faultInjectionAfter(
[&fault_after_op_cleanup]
{
if (fault_after_op_cleanup)
fault_after_op_cleanup();
});
}
Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created);
++calls_without_fault_injection;
Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode);
if (unlikely(logger))
LOG_TRACE(logger, "ZooKeeperWithFaultInjection call SUCCEEDED: seed={} func={} path={}", seed, func_name, path);
}
}
catch (const zkutil::KeeperException & e)
{
if (unlikely(logger))
LOG_TRACE(
logger,
"ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ",
seed,
func_name,
path,
e.code,
e.message());
Coordination::Responses multi(const Coordination::Requests & requests);
/// save valid pointer to clean up ephemeral nodes later if necessary
if (keeper)
keeper_prev = keeper;
keeper.reset();
void createIfNotExists(const std::string & path, const std::string & data);
/// for try*NoThrow() methods
if constexpr (no_throw_access)
return e.code;
void createOrUpdate(const std::string & path, const std::string & data, int32_t mode);
if constexpr (std::is_same_v<Coordination::Error, Result>)
{
/// try*() methods throws at least on hardware error and return only on user errors
/// todo: the methods return only on subset of user errors, and throw on another errors
/// to mimic the methods exactly - we need to specify errors on which to return for each such method
if (Coordination::isHardwareError(e.code))
throw;
void createAncestors(const std::string & path);
return e.code;
}
Coordination::Error tryRemove(const std::string & path, int32_t version = -1);
throw;
}
}
void removeRecursive(const std::string & path);
void tryRemoveRecursive(const std::string & path);
void removeChildren(const std::string & path);
bool tryRemoveChildrenRecursive(
const std::string & path, bool probably_flat = false, zkutil::RemoveException keep_child = zkutil::RemoveException{});
bool waitForDisappear(const std::string & path, const zkutil::ZooKeeper::WaitCondition & condition = {});
std::string sync(const std::string & path);
Coordination::Error
trySet(const std::string & path, const std::string & data, int32_t version = -1, Coordination::Stat * stat = nullptr);
void checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests);
void deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value);
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses);
///
/// mirror ZooKeeper interface: Async functions
/// Note that there are no guarantees that the parameters will live until the internal callback is called,
/// so we might need to copy them
///
zkutil::ZooKeeper::FutureExists asyncExists(std::string path, Coordination::WatchCallback watch_callback = {});
zkutil::ZooKeeper::FutureGet asyncTryGet(std::string path);
zkutil::ZooKeeper::FutureMulti asyncTryMultiNoThrow(const Coordination::Requests & ops);
zkutil::ZooKeeper::FutureRemove asyncTryRemove(std::string path, int32_t version = -1);
zkutil::ZooKeeper::FutureRemove asyncTryRemoveNoThrow(const std::string & path, int32_t version = -1);
};
using ZooKeeperWithFaultInjectionPtr = ZooKeeperWithFaultInjection::Ptr;
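A hedged usage sketch, based only on the interface above (the established session is assumed to come from elsewhere):
void exerciseFaults(zkutil::ZooKeeper::Ptr zookeeper)
{
    auto faulty = ZooKeeperWithFaultInjection::createInstance(
        /* fault_injection_probability */ 0.25,
        /* fault_injection_seed */ 0,   /// 0 means a random seed is picked
        zookeeper, "example", &Poco::Logger::get("FaultInjection"));

    faulty->forceFailureBeforeOperation();   /// deterministic fault on the next call
    try
    {
        faulty->create("/example/node", "data", zkutil::CreateMode::Ephemeral);
    }
    catch (const zkutil::KeeperException &)
    {
        /// After an injected fault the wrapper resets its keeper pointer and
        /// removes the ephemeral nodes it created, simulating session expiration.
    }
}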

View File

@ -31,7 +31,8 @@ namespace
const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result);
if (error)
throwFromErrno("Failed to find user name for " + std::to_string(user_id), ErrorCodes::FAILED_TO_GETPWUID, error);
ErrnoException::throwWithErrno(
ErrorCodes::FAILED_TO_GETPWUID, error, "Failed to find user name for {}", std::to_string(user_id));
else if (result)
return result->pw_name;
return std::to_string(user_id);

View File

@ -87,10 +87,12 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
return false;
if (errno == EEXIST)
throwFromErrno(fmt::format("Cannot rename {} to {} because the second path already exists", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL);
throw ErrnoException(
ErrorCodes::ATOMIC_RENAME_FAIL, "Cannot rename {} to {} because the second path already exists", old_path, new_path);
if (errno == ENOENT)
throwFromErrno(fmt::format("Paths cannot be exchanged because {} or {} does not exist", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL);
throwFromErrnoWithPath(fmt::format("Cannot rename {} to {}", old_path, new_path), new_path, ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(
ErrorCodes::ATOMIC_RENAME_FAIL, "Paths cannot be exchanged because {} or {} does not exist", old_path, new_path);
ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path);
}
bool supportsAtomicRename()
@ -139,11 +141,12 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
if (errnum == ENOTSUP || errnum == EINVAL)
return false;
if (errnum == EEXIST)
throwFromErrno(fmt::format("Cannot rename {} to {} because the second path already exists", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL);
throw ErrnoException(
ErrorCodes::ATOMIC_RENAME_FAIL, "Cannot rename {} to {} because the second path already exists", old_path, new_path);
if (errnum == ENOENT)
throwFromErrno(fmt::format("Paths cannot be exchanged because {} or {} does not exist", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL);
throwFromErrnoWithPath(
fmt::format("Cannot rename {} to {}: {}", old_path, new_path, strerror(errnum)), new_path, ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(
ErrorCodes::ATOMIC_RENAME_FAIL, "Paths cannot be exchanged because {} or {} does not exist", old_path, new_path);
ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path);
}
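The Linux branch above goes through the renameat2 syscall; a reduced standalone sketch of that pattern (simplified error handling, not the ClickHouse function):
#include <fcntl.h>
#include <sys/syscall.h>
#include <linux/fs.h>   /// RENAME_NOREPLACE
#include <unistd.h>
#include <cerrno>
#include <string>
#include <stdexcept>

/// Rename atomically, failing instead of clobbering an existing destination.
/// Returns false when the kernel or filesystem does not support renameat2.
bool renameNoReplace(const std::string & old_path, const std::string & new_path)
{
    if (0 == syscall(SYS_renameat2, AT_FDCWD, old_path.c_str(), AT_FDCWD, new_path.c_str(), RENAME_NOREPLACE))
        return true;
    if (errno == ENOSYS || errno == EINVAL || errno == ENOTSUP)
        return false;   /// caller falls back to a non-atomic exists-check + rename
    if (errno == EEXIST)
        throw std::runtime_error("Cannot rename " + old_path + " to " + new_path + ": destination already exists");
    throw std::runtime_error("Cannot rename " + old_path + " to " + new_path);
}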

View File

@ -54,7 +54,7 @@ static size_t getStackSize(void ** out_address)
# if defined(OS_FREEBSD) || defined(OS_SUNOS)
pthread_attr_init(&attr);
if (0 != pthread_attr_get_np(pthread_self(), &attr))
throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_attr_get_np");
# else
if (0 != pthread_getattr_np(pthread_self(), &attr))
{
@ -64,14 +64,14 @@ static size_t getStackSize(void ** out_address)
return 0;
}
else
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_getattr_np");
}
# endif
SCOPE_EXIT({ pthread_attr_destroy(&attr); });
if (0 != pthread_attr_getstack(&attr, &address, &size))
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_attr_getstack");
#ifdef USE_MUSL
/// Adjust stack size for the main thread under musl.

View File

@ -26,19 +26,21 @@ void createHardLink(const String & source_path, const String & destination_path)
struct stat destination_descr;
if (0 != lstat(source_path.c_str(), &source_descr))
throwFromErrnoWithPath("Cannot stat " + source_path, source_path, ErrorCodes::CANNOT_STAT);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_STAT, source_path, "Cannot stat {}", source_path);
if (0 != lstat(destination_path.c_str(), &destination_descr))
throwFromErrnoWithPath("Cannot stat " + destination_path, destination_path, ErrorCodes::CANNOT_STAT);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_STAT, destination_path, "Cannot stat {}", destination_path);
if (source_descr.st_ino != destination_descr.st_ino)
throwFromErrnoWithPath(
"Destination file " + destination_path + " is already exist and have different inode.",
destination_path, ErrorCodes::CANNOT_LINK, link_errno);
ErrnoException::throwFromPathWithErrno(
ErrorCodes::CANNOT_LINK,
destination_path,
link_errno,
"Destination file {} already exists and has a different inode",
destination_path);
}
else
throwFromErrnoWithPath("Cannot link " + source_path + " to " + destination_path, destination_path,
ErrorCodes::CANNOT_LINK);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_LINK, destination_path, "Cannot link {} to {}", source_path, destination_path);
}
}

View File

@ -248,7 +248,7 @@ int main(int argc, char ** argv)
rusage resource_usage;
if (0 != getrusage(RUSAGE_SELF, &resource_usage))
throwFromErrno("Cannot getrusage", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot getrusage");
size_t allocated_bytes = resource_usage.ru_maxrss * 1024;
std::cerr << "Current memory usage: " << allocated_bytes << " bytes.\n";

View File

@ -82,9 +82,9 @@ int main(int argc, char ** argv)
{
pthread_t thread;
if (pthread_create(&thread, nullptr, g, nullptr))
DB::throwFromErrno("Cannot create thread.", DB::ErrorCodes::PTHREAD_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot create thread");
if (pthread_join(thread, nullptr))
DB::throwFromErrno("Cannot join thread.", DB::ErrorCodes::PTHREAD_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot join thread");
});
test(n, "Create and destroy std::thread each iteration", []

View File

@ -49,7 +49,7 @@ struct statvfs getStatVFS(const String & path)
{
if (errno == EINTR)
continue;
throwFromErrnoWithPath("Could not calculate available disk space (statvfs)", path, ErrorCodes::CANNOT_STATVFS);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STATVFS, path, "Could not calculate available disk space (statvfs)");
}
return fs;
}
@ -79,7 +79,7 @@ String getBlockDeviceId([[maybe_unused]] const String & path)
#if defined(OS_LINUX)
struct stat sb;
if (lstat(path.c_str(), &sb))
throwFromErrnoWithPath("Cannot lstat " + path, path, ErrorCodes::CANNOT_STAT);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot lstat {}", path);
WriteBufferFromOwnString ss;
ss << major(sb.st_dev) << ":" << minor(sb.st_dev);
return ss.str();
@ -164,7 +164,7 @@ std::filesystem::path getMountPoint(std::filesystem::path absolute_path)
{
struct stat st;
if (stat(p.c_str(), &st)) /// NOTE: man stat does not list EINTR as possible error
throwFromErrnoWithPath("Cannot stat " + p.string(), p.string(), ErrorCodes::SYSTEM_ERROR);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::SYSTEM_ERROR, p.string(), "Cannot stat {}", p.string());
return st.st_dev;
};
@ -250,10 +250,8 @@ size_t getSizeFromFileDescriptor(int fd, const String & file_name)
int res = fstat(fd, &buf);
if (-1 == res)
{
throwFromErrnoWithPath(
"Cannot execute fstat" + (file_name.empty() ? "" : " file: " + file_name),
file_name,
ErrorCodes::CANNOT_FSTAT);
DB::ErrnoException::throwFromPath(
DB::ErrorCodes::CANNOT_FSTAT, file_name, "Cannot execute fstat{}", file_name.empty() ? "" : " file: " + file_name);
}
return buf.st_size;
}
@ -263,10 +261,7 @@ Int64 getINodeNumberFromPath(const String & path)
struct stat file_stat;
if (stat(path.data(), &file_stat))
{
throwFromErrnoWithPath(
"Cannot execute stat for file " + path,
path,
ErrorCodes::CANNOT_STAT);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot execute stat for file {}", path);
}
return file_stat.st_ino;
}
@ -302,7 +297,7 @@ bool createFile(const std::string & path)
close(n);
return true;
}
DB::throwFromErrnoWithPath("Cannot create file: " + path, path, DB::ErrorCodes::CANNOT_CREATE_FILE);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_CREATE_FILE, path, "Cannot create file: {}", path);
}
bool exists(const std::string & path)
@ -317,7 +312,7 @@ bool canRead(const std::string & path)
return true;
if (errno == EACCES)
return false;
DB::throwFromErrnoWithPath("Cannot check read access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check read access to file: {}", path);
}
bool canWrite(const std::string & path)
@ -327,7 +322,7 @@ bool canWrite(const std::string & path)
return true;
if (errno == EACCES)
return false;
DB::throwFromErrnoWithPath("Cannot check write access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check write access to file: {}", path);
}
bool canExecute(const std::string & path)
@ -337,7 +332,7 @@ bool canExecute(const std::string & path)
return true;
if (errno == EACCES)
return false;
DB::throwFromErrnoWithPath("Cannot check write access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check execute access to file: {}", path);
}
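Note that the last hunk also corrects a copy-pasted "write access" message in canExecute. The probe call itself is elided by the diff context; presumably these helpers are built on access(2) with R_OK/W_OK/X_OK, along these lines (checkAccess and the *Sketch names are invented for illustration):

#include <cerrno>
#include <stdexcept>
#include <string>
#include <unistd.h>

/// EACCES means a definite "no"; any other failure is an error worth throwing.
static bool checkAccess(const std::string & path, int mode)
{
    if (access(path.c_str(), mode) == 0)
        return true;
    if (errno == EACCES)
        return false;
    throw std::runtime_error("Cannot check access to file: " + path);
}

bool canReadSketch(const std::string & path) { return checkAccess(path, R_OK); }
bool canWriteSketch(const std::string & path) { return checkAccess(path, W_OK); }
bool canExecuteSketch(const std::string & path) { return checkAccess(path, X_OK); }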
time_t getModificationTime(const std::string & path)
@ -369,7 +364,7 @@ void setModificationTime(const std::string & path, time_t time)
tb.actime = time;
tb.modtime = time;
if (utime(path.c_str(), &tb) != 0)
DB::throwFromErrnoWithPath("Cannot set modification time for file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot set modification time to file: {}", path);
}
bool isSymlink(const fs::path & path)

View File

@ -27,7 +27,7 @@ static __user_cap_data_struct getCapabilities()
/// Avoid dependency on 'libcap'.
if (0 != syscall(SYS_capget, &request, &response))
throwFromErrno("Cannot do 'capget' syscall", ErrorCodes::NETLINK_ERROR);
throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Cannot do 'capget' syscall");
return response;
}

View File

@ -28,9 +28,7 @@ struct NetworkInterfaces : public boost::noncopyable
NetworkInterfaces()
{
if (getifaddrs(&ifaddr) == -1)
{
throwFromErrno("Cannot getifaddrs", ErrorCodes::SYSTEM_ERROR);
}
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot getifaddrs");
}
bool hasAddress(const Poco::Net::IPAddress & address) const

View File

@ -6,7 +6,7 @@
#include <mysql/mysql.h>
#endif
#include <Poco/Logger.h>
#include <Common/logger_useful.h>
#include <mysqlxx/Connection.h>
#include <mysqlxx/Query.h>
@ -52,8 +52,7 @@ void Query::executeImpl()
{
MYSQL* mysql_driver = conn->getDriver();
auto & logger = Poco::Logger::get("mysqlxx::Query");
logger.trace("Running MySQL query using connection %lu", mysql_thread_id(mysql_driver));
LOG_TRACE(&Poco::Logger::get("mysqlxx::Query"), "Running MySQL query using connection {}", mysql_thread_id(mysql_driver));
if (mysql_real_query(mysql_driver, query.data(), query.size()))
{
const auto err_no = mysql_errno(mysql_driver);
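Besides switching the placeholder style (Poco's %lu to fmt's {}), the move from logger.trace(...) to the LOG_TRACE macro matters because a macro can skip formatting entirely when the level is disabled — which is why the hunk also adds the Common/logger_useful.h include. A toy illustration of that property, assuming C++20 <format>; ToyLogger and TOY_LOG_TRACE are invented names, not the real macro:

#include <format>
#include <iostream>
#include <string>

struct ToyLogger
{
    bool trace_enabled = true;
    void write(const std::string & s) { std::cerr << s << '\n'; }
};

/// Arguments are only formatted when the level is actually enabled.
#define TOY_LOG_TRACE(logger, ...) \
    do \
    { \
        if ((logger).trace_enabled) \
            (logger).write(std::format(__VA_ARGS__)); \
    } while (false)

int main()
{
    ToyLogger logger;
    unsigned long thread_id = 42; /// stands in for mysql_thread_id(...)
    TOY_LOG_TRACE(logger, "Running MySQL query using connection {}", thread_id);
}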

View File

@ -24,7 +24,7 @@ DB::UInt64 randomSeed()
{
struct timespec times;
if (clock_gettime(CLOCK_MONOTONIC, &times))
DB::throwFromErrno("Cannot clock_gettime.", DB::ErrorCodes::CANNOT_CLOCK_GETTIME);
throw DB::ErrnoException(DB::ErrorCodes::CANNOT_CLOCK_GETTIME, "Cannot clock_gettime");
/// Not cryptographically secure as time, pid and stack address can be predictable.

View File

@ -120,7 +120,7 @@ __attribute__((__noinline__)) void remapToHugeStep1(void * begin, size_t size)
void * scratch = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (MAP_FAILED == scratch)
throwFromErrno(fmt::format("Cannot mmap {} bytes", size), ErrorCodes::CANNOT_ALLOCATE_MEMORY);
throw ErrnoException(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Cannot mmap {} bytes", size);
memcpy(scratch, begin, size);

View File

@ -44,7 +44,7 @@ void setThreadName(const char * name)
if (0 != prctl(PR_SET_NAME, name, 0, 0, 0))
#endif
if (errno != ENOSYS && errno != EPERM) /// It's ok if the syscall is unsupported or not allowed in some environments.
DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot set thread name with prctl(PR_SET_NAME, ...)");
memcpy(thread_name, name, std::min<size_t>(1 + strlen(name), THREAD_NAME_SIZE - 1));
}
@ -64,7 +64,7 @@ const char * getThreadName()
#else
if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0))
if (errno != ENOSYS && errno != EPERM) /// It's ok if the syscall is unsupported or not allowed in some environments.
DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR);
throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot get thread name with prctl(PR_GET_NAME)");
#endif
return thread_name;
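A standalone sketch of the prctl-based helper above. The ENOSYS/EPERM tolerance is kept because some environments forbid the syscall, and Linux silently truncates thread names to 15 visible characters:

#include <cerrno>
#include <stdexcept>
#if defined(__linux__)
#include <sys/prctl.h>
#endif

void setThreadNameSketch(const char * name)
{
#if defined(__linux__)
    /// Tolerate unsupported or forbidden syscalls; fail loudly on anything else.
    if (prctl(PR_SET_NAME, name, 0, 0, 0) != 0 && errno != ENOSYS && errno != EPERM)
        throw std::runtime_error("Cannot set thread name with prctl(PR_SET_NAME, ...)");
#else
    (void)name; /// other platforms use pthread_setname_np variants
#endif
}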

View File

@ -186,7 +186,7 @@ class IColumn;
M(Bool, parallel_replicas_for_non_replicated_merge_tree, false, "If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables", 0) \
M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \
\
M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \
M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards. Shard is marked as unavailable when: 1) The shard cannot be reached due to a connection failure. 2) Shard is unresolvable through DNS. 3) Table does not exist on the shard.", 0) \
\
M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard; if set to 1 - SELECT is executed on each shard; if set to 2 - SELECT and INSERT are executed on each shard", 0) \
M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed queries (shards will process query up to the Complete stage, initiator just proxies the data from the shards). If 2 the initiator will apply ORDER BY and LIMIT stages (it is not in case when shard process query up to the Complete stage)", 0) \

View File

@ -92,10 +92,10 @@ PipeFDs signal_pipe;
static void call_default_signal_handler(int sig)
{
if (SIG_ERR == signal(sig, SIG_DFL))
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
if (0 != raise(sig))
throwFromErrno("Cannot send signal.", ErrorCodes::CANNOT_SEND_SIGNAL);
throw ErrnoException(ErrorCodes::CANNOT_SEND_SIGNAL, "Cannot send signal");
}
static const size_t signal_pipe_buf_size =
@ -659,7 +659,17 @@ BaseDaemon::~BaseDaemon()
/// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after.
for (int sig : handled_signals)
if (SIG_ERR == signal(sig, SIG_DFL))
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
{
try
{
throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
}
catch (ErrnoException &)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
signal_pipe.close();
}
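The destructor change above is worth a second look: ~BaseDaemon is implicitly noexcept, so letting the new ErrnoException escape would call std::terminate. Throwing and immediately catching converts the failure into a log entry via tryLogCurrentException. A stripped-down illustration:

#include <iostream>
#include <stdexcept>

struct Guard
{
    ~Guard()
    {
        try
        {
            throw std::runtime_error("Cannot set signal handler"); /// stand-in for the ErrnoException above
        }
        catch (const std::exception & e)
        {
            std::cerr << "ignored in destructor: " << e.what() << '\n'; /// stand-in for tryLogCurrentException
        }
    }
};

int main()
{
    Guard g; /// without the try/catch, the throw would reach std::terminate here
}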
@ -1129,7 +1139,7 @@ void BaseDaemon::setupWatchdog()
pid = fork();
if (-1 == pid)
throwFromErrno("Cannot fork", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot fork");
if (0 == pid)
{
@ -1225,7 +1235,7 @@ void BaseDaemon::setupWatchdog()
if (SIG_ERR == signal(sig, SIG_IGN))
{
char * signal_description = strsignal(sig); // NOLINT(concurrency-mt-unsafe)
throwFromErrno(fmt::format("Cannot ignore {}", signal_description), ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot ignore {}", signal_description);
}
}
}
@ -1315,7 +1325,7 @@ void systemdNotify(const std::string_view & command)
int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (s == -1)
throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Can't create UNIX socket for systemd notify");
SCOPE_EXIT({ close(s); });
@ -1351,7 +1361,7 @@ void systemdNotify(const std::string_view & command)
if (errno == EINTR)
continue;
else
throwFromErrno("Failed to notify systemd, sendto returned error.", ErrorCodes::SYSTEM_ERROR);
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Failed to notify systemd, sendto returned error");
}
else
sent_bytes_total += sent_bytes;
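The systemdNotify hunks keep the usual datagram-send loop: retry on EINTR and accumulate partial sends until the whole payload is written. In isolation, simplified to send instead of sendto and std::runtime_error instead of ErrnoException:

#include <cerrno>
#include <cstddef>
#include <stdexcept>
#include <sys/socket.h>
#include <sys/types.h>

void sendAll(int fd, const char * data, size_t size)
{
    size_t sent_bytes_total = 0;
    while (sent_bytes_total < size)
    {
        ssize_t sent_bytes = ::send(fd, data + sent_bytes_total, size - sent_bytes_total, 0);
        if (sent_bytes == -1)
        {
            if (errno == EINTR)
                continue; /// interrupted: retry with the same offset
            throw std::runtime_error("sendto returned error");
        }
        sent_bytes_total += static_cast<size_t>(sent_bytes);
    }
}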

View File

@ -675,8 +675,11 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
if (errno == ENOENT && !throw_on_error)
return nullptr;
throwFromErrnoWithPath("Cannot open file " + metadata_file_path, metadata_file_path,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE,
metadata_file_path,
"Cannot open file {}",
metadata_file_path);
}
ReadBufferFromFile in(metadata_file_fd, metadata_file_path, METADATA_FILE_BUFFER_SIZE);

View File

@ -74,7 +74,7 @@ void DatabaseReplicatedDDLWorker::initializeReplication()
/// Create "active" node (remove previous one if necessary)
String active_path = fs::path(database->replica_path) / "active";
String active_id = toString(ServerUUID::get());
zookeeper->handleEphemeralNodeExistence(active_path, active_id);
zookeeper->deleteEphemeralNodeIfContentMatches(active_path, active_id);
zookeeper->create(active_path, active_id, zkutil::CreateMode::Ephemeral);
active_node_holder.reset();
active_node_holder_zookeeper = zookeeper;
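The rename from handleEphemeralNodeExistence to deleteEphemeralNodeIfContentMatches makes the contract explicit: a leftover ephemeral node is removed only if its content matches this replica's id, so a node owned by a live peer is never touched. A toy model with a std::map in place of ZooKeeper — the real method must also deal with sessions and version checks, which this sketch omits:

#include <map>
#include <optional>
#include <string>

struct ToyZooKeeper
{
    std::map<std::string, std::string> nodes;

    std::optional<std::string> tryGet(const std::string & path) const
    {
        auto it = nodes.find(path);
        if (it == nodes.end())
            return std::nullopt;
        return it->second;
    }

    void deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & expected_content)
    {
        auto content = tryGet(path);
        if (content && *content == expected_content)
            nodes.erase(path); /// stale node left by a previous incarnation of this replica
    }
};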

View File

@ -82,8 +82,6 @@ private:
void buildDependencyGraph();
void removeUnresolvableDependencies();
void loadTablesInTopologicalOrder();
void startLoadingTables(ContextMutablePtr load_context, const std::vector<StorageID> & tables_to_load, size_t level);
};
}

View File

@ -481,7 +481,7 @@ public:
if (file.fd == -1)
{
auto error_code = (errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE;
throwFromErrnoWithPath("Cannot open file " + file_path, file_path, error_code);
ErrnoException::throwFromPath(error_code, file_path, "Cannot open file {}", file_path);
}
allocateSizeForNextPartition();
@ -490,7 +490,8 @@ public:
void allocateSizeForNextPartition()
{
if (preallocateDiskSpace(file.fd, current_blocks_size * block_size, block_size * file_blocks_size) < 0)
throwFromErrnoWithPath("Cannot preallocate space for the file " + file_path, file_path, ErrorCodes::CANNOT_ALLOCATE_MEMORY);
ErrnoException::throwFromPath(
ErrorCodes::CANNOT_ALLOCATE_MEMORY, file_path, "Cannot preallocate space for the file {}", file_path);
current_blocks_size += file_blocks_size;
}
@ -552,11 +553,11 @@ public:
Stopwatch watch;
#if defined(OS_DARWIN)
if (::fsync(file.fd) < 0)
throwFromErrnoWithPath("Cannot fsync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
#else
ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSYNC, file_path, "Cannot fsync {}", file_path);
# else
if (::fdatasync(file.fd) < 0)
throwFromErrnoWithPath("Cannot fdatasync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
#endif
ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSYNC, file_path, "Cannot fdatasync {}", file_path);
# endif
ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds());
current_block_index += buffer_size_in_blocks;
@ -598,13 +599,13 @@ public:
while (io_submit(aio_context.ctx, 1, &request_ptr) != 1)
{
if (errno != EINTR)
throwFromErrno("io_submit: Failed to submit a request for asynchronous IO", ErrorCodes::CANNOT_IO_SUBMIT);
throw ErrnoException(ErrorCodes::CANNOT_IO_SUBMIT, "io_submit: Failed to submit a request for asynchronous IO");
}
while (io_getevents(aio_context.ctx, 1, 1, &event, nullptr) != 1)
{
if (errno != EINTR)
throwFromErrno("io_getevents: Failed to get an event for asynchronous IO", ErrorCodes::CANNOT_IO_GETEVENTS);
throw ErrnoException(ErrorCodes::CANNOT_IO_GETEVENTS, "io_getevents: Failed to get an event for asynchronous IO");
}
auto read_bytes = eventResult(event);
@ -692,7 +693,7 @@ public:
while (to_pop < to_push && (popped = io_getevents(aio_context.ctx, to_push - to_pop, to_push - to_pop, &events[to_pop], nullptr)) <= 0)
{
if (errno != EINTR)
throwFromErrno("io_getevents: Failed to get an event for asynchronous IO", ErrorCodes::CANNOT_IO_GETEVENTS);
throw ErrnoException(ErrorCodes::CANNOT_IO_GETEVENTS, "io_getevents: Failed to get an event for asynchronous IO");
}
for (size_t i = to_pop; i < to_pop + popped; ++i)
@ -743,7 +744,7 @@ public:
while (new_tasks_count > 0 && (pushed = io_submit(aio_context.ctx, new_tasks_count, &pointers[to_push])) <= 0)
{
if (errno != EINTR)
throwFromErrno("io_submit: Failed to submit a request for asynchronous IO", ErrorCodes::CANNOT_IO_SUBMIT);
throw ErrnoException(ErrorCodes::CANNOT_IO_SUBMIT, "io_submit: Failed to submit a request for asynchronous IO");
}
to_push += pushed;

View File

@ -359,21 +359,21 @@ void DiskLocal::removeFile(const String & path)
{
auto fs_path = fs::path(disk_path) / path;
if (0 != unlink(fs_path.c_str()))
throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, fs_path, "Cannot unlink file {}", fs_path);
}
void DiskLocal::removeFileIfExists(const String & path)
{
auto fs_path = fs::path(disk_path) / path;
if (0 != unlink(fs_path.c_str()) && errno != ENOENT)
throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, fs_path, "Cannot unlink file {}", fs_path);
}
void DiskLocal::removeDirectory(const String & path)
{
auto fs_path = fs::path(disk_path) / path;
if (0 != rmdir(fs_path.c_str()))
throwFromErrnoWithPath("Cannot rmdir " + fs_path.string(), fs_path, ErrorCodes::CANNOT_RMDIR);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_RMDIR, fs_path, "Cannot rmdir {}", fs_path);
}
void DiskLocal::removeRecursive(const String & path)
@ -412,7 +412,7 @@ void DiskLocal::truncateFile(const String & path, size_t size)
{
int res = truncate((fs::path(disk_path) / path).string().data(), size);
if (-1 == res)
throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_TRUNCATE_FILE, path, "Cannot truncate {}", path);
}
void DiskLocal::createFile(const String & path)
@ -709,7 +709,7 @@ struct stat DiskLocal::stat(const String & path) const
auto full_path = fs::path(disk_path) / path;
if (::stat(full_path.string().c_str(), &st) == 0)
return st;
DB::throwFromErrnoWithPath("Cannot stat file: " + path, path, DB::ErrorCodes::CANNOT_STAT);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot stat file: {}", path);
}
void DiskLocal::chmod(const String & path, mode_t mode)
@ -717,7 +717,7 @@ void DiskLocal::chmod(const String & path, mode_t mode)
auto full_path = fs::path(disk_path) / path;
if (::chmod(full_path.string().c_str(), mode) == 0)
return;
DB::throwFromErrnoWithPath("Cannot chmod file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot chmod file: {}", path);
}
void registerDiskLocal(DiskFactory & factory, bool global_skip_access_check)

View File

@ -177,22 +177,13 @@ CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segm
}
ReadSettings local_read_settings{settings};
local_read_settings.local_fs_prefetch = false;
if (local_read_settings.local_fs_method != LocalFSReadMethod::pread_threadpool)
local_read_settings.local_fs_method = LocalFSReadMethod::pread;
local_read_settings.local_fs_method = LocalFSReadMethod::pread;
if (use_external_buffer)
local_read_settings.local_fs_buffer_size = 0;
cache_file_reader = createReadBufferFromFileBase(
path,
local_read_settings,
std::nullopt,
std::nullopt,
file_segment.getFlagsForLocalRead(),
/*existing_memory=*/nullptr,
/*alignment=*/0,
/*use_external_buffer=*/true);
cache_file_reader
= createReadBufferFromFileBase(path, local_read_settings, std::nullopt, std::nullopt, file_segment.getFlagsForLocalRead());
if (getFileSizeFromReadBuffer(*cache_file_reader) == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path);

View File

@ -77,7 +77,7 @@ IOUringReader::IOUringReader(uint32_t entries_)
int ret = io_uring_queue_init_params(entries_, &ring, &params);
if (ret < 0)
throwFromErrno("Failed initializing io_uring", ErrorCodes::IO_URING_INIT_FAILED, -ret);
ErrnoException::throwWithErrno(ErrorCodes::IO_URING_INIT_FAILED, -ret, "Failed initializing io_uring");
cq_entries = params.cq_entries;
ring_completion_monitor = std::make_unique<ThreadFromGlobalPool>([this] { monitorRing(); });
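The extra -ret argument in the new throwWithErrno call reflects a liburing convention: io_uring_queue_init_params returns a negative errno value rather than setting the global errno. A minimal standalone check of the same convention (Linux only, link with -luring):

#include <cstring>
#include <liburing.h>
#include <stdexcept>
#include <string>

int main()
{
    io_uring ring;
    int ret = io_uring_queue_init(64, &ring, 0);
    if (ret < 0) /// returns -errno on failure; does not set the global errno
        throw std::runtime_error(std::string("Failed initializing io_uring: ") + std::strerror(-ret));
    io_uring_queue_exit(&ring);
}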

View File

@ -175,9 +175,8 @@ std::future<IAsynchronousReader::Result> ThreadPoolReader::submit(Request reques
else
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed);
promise.set_exception(std::make_exception_ptr(ErrnoException(
fmt::format("Cannot read from file {}, {}", fd, errnoToString()),
ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, errno)));
promise.set_exception(std::make_exception_ptr(
ErrnoException(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, "Cannot read from file {}", fd)));
return future;
}
}
@ -233,7 +232,7 @@ std::future<IAsynchronousReader::Result> ThreadPoolReader::submit(Request reques
if (-1 == res && errno != EINTR)
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed);
throwFromErrno(fmt::format("Cannot read from file {}", fd), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, "Cannot read from file {}", fd);
}
bytes_read += res;

View File

@ -26,6 +26,7 @@ namespace ProfileEvents
extern const Event ThreadpoolReaderSubmitReadSynchronously;
extern const Event ThreadpoolReaderSubmitReadSynchronouslyBytes;
extern const Event ThreadpoolReaderSubmitReadSynchronouslyMicroseconds;
extern const Event ThreadpoolReaderSubmitLookupInCacheMicroseconds;
extern const Event AsynchronousReaderIgnoredBytes;
}
@ -83,7 +84,13 @@ std::future<IAsynchronousReader::Result> ThreadPoolRemoteFSReader::submit(Reques
reader.seek(request.offset, SEEK_SET);
}
if (reader.isContentCached(request.offset, request.size))
bool is_content_cached = false;
{
ProfileEventTimeIncrement<Microseconds> elapsed(ProfileEvents::ThreadpoolReaderSubmitLookupInCacheMicroseconds);
is_content_cached = reader.isContentCached(request.offset, request.size);
}
if (is_content_cached)
{
std::promise<Result> promise;
std::future<Result> future = promise.get_future();

View File

@ -29,8 +29,7 @@ public:
off_t res = lseek(fd, 0, SEEK_SET);
if (-1 == res)
throwFromErrnoWithPath("Cannot reread temporary file " + file_name, file_name,
ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, file_name, "Cannot reread temporary file {}", file_name);
return std::make_unique<ReadBufferFromTemporaryWriteBuffer>(fd, file_name, std::move(origin->tmp_file));
}

View File

@ -31,8 +31,8 @@ LocalDirectorySyncGuard::LocalDirectorySyncGuard(const String & full_path)
: fd(::open(full_path.c_str(), O_DIRECTORY))
{
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + full_path, full_path,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
ErrnoException::throwFromPath(
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, full_path, "Cannot open file {}", full_path);
}
LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
@ -45,7 +45,7 @@ LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
#if defined(OS_DARWIN)
if (fcntl(fd, F_FULLFSYNC, 0))
throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC);
throw ErrnoException(ErrorCodes::CANNOT_FSYNC, "Cannot fcntl(F_FULLFSYNC)");
#else
if (-1 == ::fdatasync(fd))
throw Exception(ErrorCodes::CANNOT_FSYNC, "Cannot fdatasync");
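The Darwin branch above exists because on macOS fsync/fdatasync may not flush the drive's write cache; fcntl(F_FULLFSYNC) is the documented way to force data to stable storage. The platform split, in isolation:

#include <fcntl.h>
#include <stdexcept>
#include <unistd.h>

void durableSync(int fd)
{
#if defined(__APPLE__)
    /// fsync on macOS may leave data in the drive cache; F_FULLFSYNC forces it out.
    if (fcntl(fd, F_FULLFSYNC, 0) == -1)
        throw std::runtime_error("Cannot fcntl(F_FULLFSYNC)");
#else
    if (::fdatasync(fd) == -1)
        throw std::runtime_error("Cannot fdatasync");
#endif
}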

View File

@ -141,7 +141,7 @@ void LocalObjectStorage::removeObject(const StoredObject & object)
return;
if (0 != unlink(object.remote_path.data()))
throwFromErrnoWithPath("Cannot unlink file " + object.remote_path, object.remote_path, ErrorCodes::CANNOT_UNLINK);
ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, object.remote_path, "Cannot unlink file {}", object.remote_path);
}
void LocalObjectStorage::removeObjects(const StoredObjects & objects)

View File

@ -1,47 +0,0 @@
#pragma once
#include <base/types.h>
namespace DB
{
class IAST;
struct Settings;
enum class UserDefinedSQLObjectType;
/// Interface for a loader of user-defined SQL objects.
/// Implementations: UserDefinedSQLLoaderFromDisk, UserDefinedSQLLoaderFromZooKeeper
class IUserDefinedSQLObjectsLoader
{
public:
virtual ~IUserDefinedSQLObjectsLoader() = default;
/// Whether this loader can replicate SQL objects to another node.
virtual bool isReplicated() const { return false; }
virtual String getReplicationID() const { return ""; }
/// Loads all objects. Can be called once - if objects are already loaded the function does nothing.
virtual void loadObjects() = 0;
/// Stops watching.
virtual void stopWatching() {}
/// Immediately reloads all objects, throws an exception if failed.
virtual void reloadObjects() = 0;
/// Immediately reloads a specified object only.
virtual void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) = 0;
/// Stores an object (must be called only by UserDefinedSQLFunctionFactory::registerFunction).
virtual bool storeObject(
UserDefinedSQLObjectType object_type,
const String & object_name,
const IAST & create_object_query,
bool throw_if_exists,
bool replace_if_exists,
const Settings & settings) = 0;
/// Removes an object (must be called only by UserDefinedSQLFunctionFactory::unregisterFunction).
virtual bool removeObject(UserDefinedSQLObjectType object_type, const String & object_name, bool throw_if_not_exists) = 0;
};
}

View File

@ -0,0 +1,74 @@
#pragma once
#include <base/types.h>
#include <Interpreters/Context_fwd.h>
#include <Parsers/IAST_fwd.h>
namespace DB
{
class IAST;
struct Settings;
enum class UserDefinedSQLObjectType;
/// Interface for a storage of user-defined SQL objects.
/// Implementations: UserDefinedSQLObjectsDiskStorage, UserDefinedSQLObjectsZooKeeperStorage
class IUserDefinedSQLObjectsStorage
{
public:
virtual ~IUserDefinedSQLObjectsStorage() = default;
/// Whether this storage can replicate SQL objects to another node.
virtual bool isReplicated() const { return false; }
virtual String getReplicationID() const { return ""; }
/// Loads all objects. Can be called once - if objects are already loaded the function does nothing.
virtual void loadObjects() = 0;
/// Get object by name. If no object stored with object_name throws exception.
virtual ASTPtr get(const String & object_name) const = 0;
/// Get object by name. If no object stored with object_name return nullptr.
virtual ASTPtr tryGet(const String & object_name) const = 0;
/// Check if object with object_name is stored.
virtual bool has(const String & object_name) const = 0;
/// Get all user defined object names.
virtual std::vector<String> getAllObjectNames() const = 0;
/// Get all user defined objects.
virtual std::vector<std::pair<String, ASTPtr>> getAllObjects() const = 0;
/// Check whether any UDFs have been stored.
virtual bool empty() const = 0;
/// Stops watching.
virtual void stopWatching() {}
/// Immediately reloads all objects, throws an exception if failed.
virtual void reloadObjects() = 0;
/// Immediately reloads a specified object only.
virtual void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) = 0;
/// Stores an object (must be called only by UserDefinedSQLFunctionFactory::registerFunction).
virtual bool storeObject(
const ContextPtr & current_context,
UserDefinedSQLObjectType object_type,
const String & object_name,
ASTPtr create_object_query,
bool throw_if_exists,
bool replace_if_exists,
const Settings & settings) = 0;
/// Removes an object (must be called only by UserDefinedSQLFunctionFactory::unregisterFunction).
virtual bool removeObject(
const ContextPtr & current_context,
UserDefinedSQLObjectType object_type,
const String & object_name,
bool throw_if_not_exists) = 0;
};
}
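The loader-to-storage rename is visible in the interface itself: the old loader only loaded and stored, while the new storage also answers get/tryGet/has/getAllObjects directly. A toy in-memory model of that storage contract, with std::string standing in for ASTPtr and the context/type parameters dropped (all names here are illustrative, not ClickHouse's implementation):

#include <map>
#include <optional>
#include <stdexcept>
#include <string>

class InMemoryUDFStorage
{
public:
    std::string get(const std::string & name) const
    {
        auto query = tryGet(name);
        if (!query)
            throw std::runtime_error("User-defined object '" + name + "' does not exist");
        return *query;
    }

    std::optional<std::string> tryGet(const std::string & name) const
    {
        auto it = objects.find(name);
        if (it == objects.end())
            return std::nullopt;
        return it->second;
    }

    bool has(const std::string & name) const { return objects.contains(name); }
    bool empty() const { return objects.empty(); }

    /// Mirrors storeObject's throw_if_exists / replace_if_exists semantics.
    bool store(const std::string & name, std::string create_query, bool throw_if_exists, bool replace_if_exists)
    {
        if (objects.contains(name))
        {
            if (throw_if_exists)
                throw std::runtime_error("Object '" + name + "' already exists");
            if (!replace_if_exists)
                return false;
        }
        objects[name] = std::move(create_query);
        return true;
    }

    /// Mirrors removeObject's throw_if_not_exists semantics.
    bool remove(const std::string & name, bool throw_if_not_exists)
    {
        if (objects.erase(name) == 0)
        {
            if (throw_if_not_exists)
                throw std::runtime_error("Object '" + name + "' does not exist");
            return false;
        }
        return true;
    }

private:
    std::map<std::string, std::string> objects;
};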

Some files were not shown because too many files have changed in this diff