Merge branch 'master' of github.com:ClickHouse/ClickHouse into remove-the-limit-for-connections-per-endpoint
commit 6360b76792
.github/actions/common_setup/action.yml (7 changes)
@@ -18,9 +18,6 @@ runs:
         echo "Setup the common ENV variables"
         cat >> "$GITHUB_ENV" << 'EOF'
         TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
-        REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
-        IMAGES_PATH=${{runner.temp}}/images_path
-        REPORTS_PATH=${{runner.temp}}/reports_dir
         EOF
         if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
           echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
@@ -30,6 +27,4 @@ runs:
     shell: bash
     run: |
       # to remove every leftovers
-      sudo rm -fr "$TEMP_PATH"
-      mkdir -p "$REPO_COPY"
-      cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
+      sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
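Note: a minimal sketch of how a job would consume this slimmed-down composite action (the job name and final step are illustrative; job_type and the surviving TEMP_PATH export come from the hunks above):

jobs:
  ExampleCheck:                        # hypothetical job name
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test_check         # becomes part of TEMP_PATH
      - name: Run check                # illustrative payload step
        run: |
          # TEMP_PATH was just re-created empty by the action's cleanup step
          cd "$TEMP_PATH"
          echo "job-specific work goes here"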
.github/workflows/backport_branches.yml (242 changes)
@@ -10,27 +10,21 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
CheckLabels:
RunConfig:
runs-on: [self-hosted, style-checker]
# Run the first check always, even if the CI is cancelled
if: ${{ always() }}
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
PythonUnitTests:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -40,273 +34,235 @@ jobs:
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"

echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"

{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
if: ${{ !failure() && !cancelled() }}
needs:
- BuilderDebRelease
- RunConfig
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebDebug
- BuilderDebRelease
- BuilderDebTsan
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
if: ${{ !failure() && !cancelled() }}
needs:
- RunConfig
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
- FunctionalStatelessTestAsan
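Note: the change set above repeats one pattern for every job. Reduced to a minimal sketch (job and test names are illustrative; the wiring matches the diff), RunConfig publishes the CI configuration once as a JSON step output, and each downstream job both depends on it and forwards that blob to a reusable workflow:

jobs:
  RunConfig:
    runs-on: [self-hosted, style-checker]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - id: runconfig
        run: |
          # expose the prepared JSON as a multiline step output
          {
            echo 'CI_DATA<<EOF'
            cat "$RUNNER_TEMP/ci_run_data.json"
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
  SomeCheck:                                  # stands in for any job above
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}     # run even when sibling jobs were skipped
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Some check                   # illustrative
      runner_type: func-tester
      data: ${{ needs.RunConfig.outputs.data }}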
.github/workflows/docs_check.yml (deleted, 138 lines)
@@ -1,138 +0,0 @@
name: DocsCheck

env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1

on: # yamllint disable-line rule:truthy
pull_request:
types:
- synchronize
- reopened
- opened
branches:
- master
paths:
- '**.md'
- 'docker/docs/**'
- 'docs/**'
- 'utils/check-style/aspell-ignore/**'
- 'tests/ci/docs_check.py'
- '.github/workflows/docs_check.yml'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
DockerHubPushAarch64:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
# We need additional `&& ! cancelled()` to have the job being able to cancel
if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
DocsCheck:
needs: DockerHubPush
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docs check
runner_type: func-tester-aarch64
additional_envs: |
run_command: |
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
FinishCheck:
needs:
- StyleCheck
- DockerHubPush
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved
.github/workflows/jepsen.yml (6 changes)
@@ -11,16 +11,14 @@ on: # yamllint disable-line rule:truthy
workflow_call:
jobs:
KeeperJepsenRelease:
uses: ./.github/workflows/reusable_test.yml
uses: ./.github/workflows/reusable_simple_job.yml
with:
test_name: Jepsen keeper check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
# ServerJepsenRelease:
#   runs-on: [self-hosted, style-checker]
#   uses: ./.github/workflows/reusable_test.yml
#   uses: ./.github/workflows/reusable_simple_job.yml
#   with:
#     test_name: Jepsen server check
#     runner_type: style-checker
.github/workflows/libfuzzer.yml (9 changes)
@@ -8,19 +8,26 @@ on: # yamllint disable-line rule:truthy
# schedule:
#   - cron: '0 0 2 31 1' # never for now
workflow_call:
inputs:
data:
description: json ci data
type: string
required: true

jobs:
BuilderFuzzers:
uses: ./.github/workflows/reusable_build.yml
with:
build_name: fuzzers
data: ${{ inputs.data }}
libFuzzerTest:
needs: [BuilderFuzzers]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: libFuzzer tests
runner_type: func-tester
data: ${{ inputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
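Note: since data is now a required input here, a caller has to thread its CI JSON through when invoking this workflow; a hedged sketch (the caller job name is illustrative):

jobs:
  libFuzzer:                       # hypothetical caller job
    needs: [RunConfig]             # a RunConfig job as introduced in this commit
    uses: ./.github/workflows/libfuzzer.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}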
.github/workflows/master.yml (637 changes; diff suppressed because it is too large)
.github/workflows/nightly.yml (79 changes)
@@ -13,67 +13,38 @@ jobs:
Debug:
# The task for having a preserved ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --rebuild-all-docker --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"

echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true
SonarCloud:
runs-on: [self-hosted, builder]
env:
.github/workflows/pull_request.yml (814 changes; diff suppressed because it is too large)
.github/workflows/release_branches.yml (444 changes)
@@ -13,171 +13,190 @@ on: # yamllint disable-line rule:truthy
- '2[1-9].[1-9]'

jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 run_check.py
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebUBsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_ubsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebMsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_msan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebUBsan
- BuilderDebMsan
- BuilderDebDebug
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ !failure() && !cancelled() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
@@ -186,33 +205,18 @@ jobs:
- BuilderDebMsan
- BuilderDebDebug
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
@@ -232,282 +236,224 @@ jobs:
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, analyzer)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
.github/workflows/reusable_build.yml (31 changes)
@@ -22,6 +22,10 @@ name: Build ClickHouse
description: the label of runner to use
default: builder
type: string
data:
description: json ci data
type: string
required: true
additional_envs:
description: additional ENV variables to setup the job
type: string
@@ -29,6 +33,7 @@ name: Build ClickHouse
jobs:
Build:
name: Build-${{inputs.build_name}}
if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -37,6 +42,7 @@ jobs:
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
@@ -44,6 +50,9 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
@@ -60,20 +69,18 @@ jobs:
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
- name: Build
run: |
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
- name: Post
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
- name: Mark as done
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
- name: Clean
if: always()
uses: ./.github/actions/clean
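Note: the rewritten Build job now brackets the build itself between ci.py bookkeeping calls. Schematically, the steps run these commands in order (commands and flags are taken from the hunks above; the JSON that GitHub inlines for ${{ toJson(inputs.data) }} is abbreviated here as <ci-data>):

      - name: Pre                    # preparation driven by the CI data
        run: python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile <ci-data> --pre --job-name '${{inputs.build_name}}'
      - name: Build
        run: python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
      - name: Post                   # post-processing of build results
        run: python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile <ci-data> --post --job-name '${{inputs.build_name}}'
      - name: Mark as done           # record success for the job
        run: python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile <ci-data> --mark-success --job-name '${{inputs.build_name}}'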
.github/workflows/reusable_docker.yml (new file, 68 lines)
@ -0,0 +1,68 @@
|
||||
name: Build docker images
|
||||
'on':
|
||||
workflow_call:
|
||||
inputs:
|
||||
data:
|
||||
description: json with ci data from todo job
|
||||
required: true
|
||||
type: string
|
||||
set_latest:
|
||||
description: set latest tag for resulting multiarch manifest
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
jobs:
|
||||
DockerBuildAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
if: |
|
||||
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
ref: ${{ fromJson(inputs.data).git_ref }}
|
||||
- name: Build images
|
||||
run: |
|
||||
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
|
||||
--suffix aarch64 \
|
||||
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
|
||||
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
|
||||
DockerBuildAmd64:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
if: |
|
||||
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
ref: ${{ fromJson(inputs.data).git_ref }}
|
||||
- name: Build images
|
||||
run: |
|
||||
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
|
||||
--suffix amd64 \
|
||||
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
|
||||
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
|
||||
DockerMultiArchManifest:
|
||||
needs: [DockerBuildAmd64, DockerBuildAarch64]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
if: |
|
||||
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]'
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
ref: ${{ fromJson(inputs.data).git_ref }}
|
||||
- name: Build images
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
if [ "${{ inputs.set_latest }}" == "true" ]; then
|
||||
echo "latest tag will be set for resulting manifests"
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
|
||||
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
|
||||
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
|
||||
--set-latest
|
||||
else
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
|
||||
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
|
||||
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}'
|
||||
fi
|
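All three jobs key off `docker_data` fields inside the `data` input. A sketch of the assumed payload shape (field names inferred from the expressions above; the values are illustrative only), probed with `jq` the way the `!= '[]'` guards do:

```bash
#!/bin/bash
# Hypothetical ci data payload; only the fields referenced above are shown.
data='{"git_ref":"refs/heads/master",
       "docker_data":{"images":{"clickhouse/test-base":"123-amd64"},
                      "missing_aarch64":[],
                      "missing_amd64":["clickhouse/test-base"],
                      "missing_multi":["clickhouse/test-base"]}}'
# A job is skipped when its missing_* list serializes to the empty array,
# mirroring the `toJson(...) != '[]'` condition in the workflow.
echo "$data" | jq -c '.docker_data.missing_amd64'   # ["clickhouse/test-base"]
echo "$data" | jq -e '.docker_data.missing_aarch64 | length > 0' >/dev/null \
  || echo "aarch64 build would be skipped"
```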
90 .github/workflows/reusable_simple_job.yml vendored Normal file
@ -0,0 +1,90 @@
### For the pure soul wishes to move it to another place
# https://github.com/orgs/community/discussions/9050

name: Simple job
'on':
  workflow_call:
    inputs:
      test_name:
        description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
        required: true
        type: string
      runner_type:
        description: the label of runner to use
        required: true
        type: string
      run_command:
        description: the command to launch the check
        default: ""
        required: false
        type: string
      checkout_depth:
        description: the value of the git shallow checkout
        required: false
        type: number
        default: 1
      submodules:
        description: if the submodules should be checked out
        required: false
        type: boolean
        default: false
      additional_envs:
        description: additional ENV variables to setup the job
        type: string
      working-directory:
        description: sets custom working directory
        type: string
        default: ""
      git_ref:
        description: commit to use, merge commit for pr or head
        required: false
        type: string
        default: ${{ github.event.after }} # no merge commit
    secrets:
      secret_envs:
        description: if given, it's passed to the environments
        required: false

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  CHECK_NAME: ${{inputs.test_name}}

jobs:
  Test:
    runs-on: [self-hosted, '${{inputs.runner_type}}']
    name: ${{inputs.test_name}}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          ref: ${{ inputs.git_ref }}
          submodules: ${{inputs.submodules}}
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          CHECK_NAME=${{ inputs.test_name }}
          ${{inputs.additional_envs}}
          ${{secrets.secret_envs}}
          EOF
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test
      - name: Run
        run: |
          if [ -n '${{ inputs.working-directory }}' ]; then
            cd "${{ inputs.working-directory }}"
          else
            cd "$GITHUB_WORKSPACE/tests/ci"
          fi
          ${{ inputs.run_command }}
      - name: Clean
        if: always()
        uses: ./.github/actions/clean
88 .github/workflows/reusable_test.yml vendored
@ -14,13 +14,10 @@ name: Testing workflow
        required: true
        type: string
      run_command:
        description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
        required: true
        description: the command to launch the check
        default: ""
        required: false
        type: string
      batches:
        description: how many batches for the test will be launched
        default: 1
        type: number
      checkout_depth:
        description: the value of the git shallow checkout
        required: false
@ -34,80 +31,89 @@ name: Testing workflow
      additional_envs:
        description: additional ENV variables to setup the job
        type: string
      data:
        description: ci data
        type: string
        required: true
      working-directory:
        description: sets custom working directory
        type: string
        default: ""
    secrets:
      secret_envs:
        description: if given, it's passed to the environments
        required: false

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  CHECK_NAME: ${{inputs.test_name}}

jobs:
  PrepareStrategy:
    # batches < 1 is misconfiguration,
    # and we need this step only for batches > 1
    if: ${{ inputs.batches > 1 }}
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      batches: ${{steps.batches.outputs.batches}}
    steps:
      - name: Calculate batches
        id: batches
        run: |
          batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
          echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
  Test:
    # If PrepareStrategy is skipped for batches == 1,
    # we still need to launch the test.
    # `! failure()` is mandatory here to launch on skipped Job
    # `&& !cancelled()` to allow the be cancelable
    if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
    # Do not add `-0` to the end, if there's only one batch
    name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
    runs-on: [self-hosted, '${{inputs.runner_type}}']
    needs: [PrepareStrategy]
    if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
    name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
    strategy:
      fail-fast: false  # we always wait for entire matrix
      matrix:
        # if PrepareStrategy does not have batches, we use 0
        batch: ${{ needs.PrepareStrategy.outputs.batches
            && fromJson(needs.PrepareStrategy.outputs.batches)
            || fromJson('[0]')}}
        batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          ref: ${{ fromJson(inputs.data).git_ref }}
          submodules: ${{inputs.submodules}}
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          CHECK_NAME=${{ inputs.test_name }}
          ${{inputs.additional_envs}}
          ${{secrets.secret_envs}}
          DOCKER_TAG<<DOCKER_JSON
          ${{ toJson(fromJson(inputs.data).docker_data.images) }}
          DOCKER_JSON
          EOF
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Setup batch
        if: ${{ inputs.batches > 1}}
        if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          RUN_BY_HASH_NUM=${{matrix.batch}}
          RUN_BY_HASH_TOTAL=${{inputs.batches}}
          RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
          EOF
      - name: Run test
        run: ${{inputs.run_command}}
      - name: Pre run
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
      - name: Run
        run: |
          if [ -n "${{ inputs.working-directory }}" ]; then
            cd "${{ inputs.working-directory }}"
          else
            cd "$GITHUB_WORKSPACE/tests/ci"
          fi
          if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then
            echo "Running command from workflow input"
            ${{ inputs.run_command }}
          else
            echo "Running command from job config"
            python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}'
          fi
      - name: Post run
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
      - name: Mark as done
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
      - name: Clean
        if: always()
        uses: ./.github/actions/clean
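The `Test` job now derives its matrix from `jobs_data.jobs_params` instead of a `batches` input. A sketch of the assumed shape of that part of the `data` payload (key names taken from the expressions above, values invented) and how the batch matrix falls out of it:

```bash
#!/bin/bash
# Hypothetical jobs_data fragment for a two-batch test.
data='{"jobs_data":{"jobs_to_do":["Stateless tests (release)"],
                    "jobs_params":{"Stateless tests (release)":
                                   {"num_batches":2,"batches":[0,1]}}}}'
# The workflow expands `batches` into the matrix, so the job runs once
# per element with RUN_BY_HASH_NUM=<batch> and RUN_BY_HASH_TOTAL=2.
echo "$data" | jq -c '.jobs_data.jobs_params["Stateless tests (release)"].batches'
```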
3 .gitmodules vendored
@ -354,6 +354,9 @@
[submodule "contrib/aklomp-base64"]
	path = contrib/aklomp-base64
	url = https://github.com/aklomp/base64.git
[submodule "contrib/pocketfft"]
	path = contrib/pocketfft
	url = https://github.com/mreineck/pocketfft.git
[submodule "contrib/sqids-cpp"]
	path = contrib/sqids-cpp
	url = https://github.com/sqids/sqids-cpp.git
@ -105,7 +105,6 @@
* Rewrite equality with `is null` check in JOIN ON section. Experimental *Analyzer only*. [#56538](https://github.com/ClickHouse/ClickHouse/pull/56538) ([vdimir](https://github.com/vdimir)).
* Function `concat` now supports arbitrary argument types (instead of only String and FixedString arguments). This makes it behave more similarly to MySQL's `concat` implementation. For example, `SELECT concat('ab', 42)` now returns `ab42`. [#56540](https://github.com/ClickHouse/ClickHouse/pull/56540) ([Serge Klochkov](https://github.com/slvrtrn)).
* Allow getting cache configuration from the 'named_collection' section in the config or from SQL-created named collections. [#56541](https://github.com/ClickHouse/ClickHouse/pull/56541) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update `query_masking_rules` when reloading the config ([#56449](https://github.com/ClickHouse/ClickHouse/issues/56449)). [#56573](https://github.com/ClickHouse/ClickHouse/pull/56573) ([Mikhail Koviazin](https://github.com/mkmkme)).
* PostgreSQL database engine: Make the removal of outdated tables less aggressive with unsuccessful postgres connection. [#56609](https://github.com/ClickHouse/ClickHouse/pull/56609) ([jsc0218](https://github.com/jsc0218)).
* It took too much time to connect to PG when the URL is not right, so the relevant query got stuck there and was cancelled. [#56648](https://github.com/ClickHouse/ClickHouse/pull/56648) ([jsc0218](https://github.com/jsc0218)).
* Keeper improvement: disable compressed logs by default in Keeper. [#56763](https://github.com/ClickHouse/ClickHouse/pull/56763) ([Antonio Andelic](https://github.com/antonio2368)).
1 contrib/CMakeLists.txt vendored
@ -44,6 +44,7 @@ else ()
endif ()
add_contrib (miniselect-cmake miniselect)
add_contrib (pdqsort-cmake pdqsort)
add_contrib (pocketfft-cmake pocketfft)
add_contrib (crc32-vpmsum-cmake crc32-vpmsum)
add_contrib (sparsehash-c11-cmake sparsehash-c11)
add_contrib (abseil-cpp-cmake abseil-cpp)
1 contrib/pocketfft vendored Submodule
@ -0,0 +1 @@
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
10 contrib/pocketfft-cmake/CMakeLists.txt Normal file
@ -0,0 +1,10 @@
option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES})

if (NOT ENABLE_POCKETFFT)
    message(STATUS "Not using pocketfft")
    return()
endif()

add_library(_pocketfft INTERFACE)
target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft)
add_library(ch_contrib::pocketfft ALIAS _pocketfft)
@ -125,6 +125,7 @@
            "docker/test/server-jepsen",
            "docker/test/sqllogic",
            "docker/test/sqltest",
            "docker/test/clickbench",
            "docker/test/stateless"
        ]
    },
@ -145,6 +146,10 @@
        "name": "clickhouse/server-jepsen-test",
        "dependent": []
    },
    "docker/test/clickbench": {
        "name": "clickhouse/clickbench",
        "dependent": []
    },
    "docker/test/install/deb": {
        "name": "clickhouse/install-deb-test",
        "dependent": []
@ -34,8 +34,9 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@ -47,15 +48,27 @@ ARG PACKAGES="clickhouse-keeper"

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
    && for package in ${PACKAGES}; do \
        ( \
            cd /tmp \
            && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
    && cd /tmp && rm -f /tmp/*tgz && rm -f /tmp/*tgz.sha512 ||: \
    && if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
        echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
        && for url in $DIRECT_DOWNLOAD_URLS; do \
            echo "Get ${url}" \
            && wget -c -q "$url" \
        ; done \
    else \
        for package in ${PACKAGES}; do \
            cd /tmp \
            && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
            && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
            && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
            && sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
            && tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
        ) \
        ; done \
    fi \
    && cat *.tgz.sha512 | sha512sum -c \
    && for file in *.tgz; do \
        if [ -f "$file" ]; then \
            echo "Unpacking $file"; \
            tar xvzf "$file" --strip-components=1 -C /; \
        fi \
    ; done \
    && rm /tmp/*.tgz /install -r \
    && addgroup -S -g 101 clickhouse \
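The new `DIRECT_DOWNLOAD_URLS` build argument lets the image install tgz packages from arbitrary URLs instead of the release repository. A hypothetical invocation, assuming the build context is the repository's `docker/keeper` directory; both URLs are made-up placeholders, not real artifacts:

```bash
#!/bin/bash
# Sketch: build the keeper image from CI-produced tarballs rather than
# packages.clickhouse.com.
docker build docker/keeper \
  --build-arg DIRECT_DOWNLOAD_URLS="\
https://example.com/builds/clickhouse-keeper-23.11.2.11-amd64.tgz \
https://example.com/builds/clickhouse-keeper-23.11.2.11-amd64.tgz.sha512" \
  -t clickhouse-keeper:local
```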
@ -49,6 +49,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
    chmod 777 -R /rust && \
    rustup toolchain install nightly-2023-07-04 && \
    rustup default nightly-2023-07-04 && \
    rustup toolchain remove stable && \
    rustup component add rust-src && \
    rustup target add x86_64-unknown-linux-gnu && \
    rustup target add aarch64-unknown-linux-gnu && \
@ -149,7 +149,7 @@ then
    mkdir -p "$PERF_OUTPUT"
    cp -r ../tests/performance "$PERF_OUTPUT"
    cp -r ../tests/config/top_level_domains "$PERF_OUTPUT"
    cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||:
    cp -r ../tests/performance/scripts/config "$PERF_OUTPUT" ||:
    for SRC in /output/clickhouse*; do
        # Copy all clickhouse* files except packages and bridges
        [[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \
@ -160,7 +160,7 @@ then
        ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper
    fi

    cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||:
    cp -r ../tests/performance/scripts "$PERF_OUTPUT"/scripts ||:
    prepare_combined_output "$PERF_OUTPUT"

    # We have to know the revision that corresponds to this binary build.
@ -32,8 +32,9 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@ -43,15 +44,26 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# The same uid / gid (101) is used both for alpine and ubuntu.

RUN arch=${TARGETARCH:-amd64} \
    && for package in ${PACKAGES}; do \
        ( \
            cd /tmp \
            && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
    && cd /tmp \
    && if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
        echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
        && for url in $DIRECT_DOWNLOAD_URLS; do \
            echo "Get ${url}" \
            && wget -c -q "$url" \
        ; done \
    else \
        for package in ${PACKAGES}; do \
            echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
            && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
            && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
            && sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
            && tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
        ) \
        ; done \
    fi \
    && cat *.tgz.sha512 | sed 's:/output/:/tmp/:' | sha512sum -c \
    && for file in *.tgz; do \
        if [ -f "$file" ]; then \
            echo "Unpacking $file"; \
            tar xvzf "$file" --strip-components=1 -C /; \
        fi \
    ; done \
    && rm /tmp/*.tgz /install -r \
    && addgroup -S -g 101 clickhouse \
@ -30,13 +30,14 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
ARG deb_location_url=""
ARG DIRECT_DOWNLOAD_URLS=""

# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
@ -44,6 +45,18 @@ ARG single_binary_location_url=""

ARG TARGETARCH

# install from direct URL
RUN if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
        echo "installing from custom predefined urls with deb packages: ${DIRECT_DOWNLOAD_URLS}" \
        && rm -rf /tmp/clickhouse_debs \
        && mkdir -p /tmp/clickhouse_debs \
        && for url in $DIRECT_DOWNLOAD_URLS; do \
            wget --progress=bar:force:noscroll "$url" -P /tmp/clickhouse_debs || exit 1 \
        ; done \
        && dpkg -i /tmp/clickhouse_debs/*.deb \
        && rm -rf /tmp/* ; \
    fi

# install from a web location with deb packages
RUN arch="${TARGETARCH:-amd64}" \
    && if [ -n "${deb_location_url}" ]; then \
@ -12,6 +12,7 @@ RUN apt-get update \
        ripgrep \
        zstd \
        locales \
        sudo \
        --yes --no-install-recommends

# Sanitizer options for services (clickhouse-server)
@ -21,7 +21,7 @@ EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}

# trace_log needs more columns for symbolization
EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), "
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> toLowCardinality(demangle(addressToSymbol(x))), trace) AS symbols, arrayMap(x -> toLowCardinality(addressToLine(x)), trace) AS lines"
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines"


function __set_connection_args
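The change above replaces per-element `toLowCardinality` wrapping with a single cast of the whole array. A hedged sketch of the same idea as a standalone query; the `::Array(LowCardinality(String))` cast and the introspection functions are standard ClickHouse, and the query only returns rows if `system.trace_log` is populated:

```bash
#!/bin/bash
# Cast the entire mapped array once instead of wrapping each element.
# addressToSymbol/demangle require allow_introspection_functions=1.
clickhouse-client --allow_introspection_functions=1 --query "
    SELECT arrayMap(x -> demangle(addressToSymbol(x)), trace)
               ::Array(LowCardinality(String)) AS symbols
    FROM system.trace_log
    LIMIT 1"
```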
10 docker/test/clickbench/Dockerfile Normal file
@ -0,0 +1,10 @@
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY *.sh /
COPY *.sql /

CMD ["/bin/bash", "/run.sh"]
112 docker/test/clickbench/create.sql Normal file
@ -0,0 +1,112 @@
ATTACH TABLE hits UUID 'c449dfbf-ba06-4d13-abec-8396559eb955'
(
    WatchID BIGINT NOT NULL,
    JavaEnable SMALLINT NOT NULL,
    Title TEXT NOT NULL,
    GoodEvent SMALLINT NOT NULL,
    EventTime TIMESTAMP NOT NULL,
    EventDate Date NOT NULL,
    CounterID INTEGER NOT NULL,
    ClientIP INTEGER NOT NULL,
    RegionID INTEGER NOT NULL,
    UserID BIGINT NOT NULL,
    CounterClass SMALLINT NOT NULL,
    OS SMALLINT NOT NULL,
    UserAgent SMALLINT NOT NULL,
    URL TEXT NOT NULL,
    Referer TEXT NOT NULL,
    IsRefresh SMALLINT NOT NULL,
    RefererCategoryID SMALLINT NOT NULL,
    RefererRegionID INTEGER NOT NULL,
    URLCategoryID SMALLINT NOT NULL,
    URLRegionID INTEGER NOT NULL,
    ResolutionWidth SMALLINT NOT NULL,
    ResolutionHeight SMALLINT NOT NULL,
    ResolutionDepth SMALLINT NOT NULL,
    FlashMajor SMALLINT NOT NULL,
    FlashMinor SMALLINT NOT NULL,
    FlashMinor2 TEXT NOT NULL,
    NetMajor SMALLINT NOT NULL,
    NetMinor SMALLINT NOT NULL,
    UserAgentMajor SMALLINT NOT NULL,
    UserAgentMinor VARCHAR(255) NOT NULL,
    CookieEnable SMALLINT NOT NULL,
    JavascriptEnable SMALLINT NOT NULL,
    IsMobile SMALLINT NOT NULL,
    MobilePhone SMALLINT NOT NULL,
    MobilePhoneModel TEXT NOT NULL,
    Params TEXT NOT NULL,
    IPNetworkID INTEGER NOT NULL,
    TraficSourceID SMALLINT NOT NULL,
    SearchEngineID SMALLINT NOT NULL,
    SearchPhrase TEXT NOT NULL,
    AdvEngineID SMALLINT NOT NULL,
    IsArtifical SMALLINT NOT NULL,
    WindowClientWidth SMALLINT NOT NULL,
    WindowClientHeight SMALLINT NOT NULL,
    ClientTimeZone SMALLINT NOT NULL,
    ClientEventTime TIMESTAMP NOT NULL,
    SilverlightVersion1 SMALLINT NOT NULL,
    SilverlightVersion2 SMALLINT NOT NULL,
    SilverlightVersion3 INTEGER NOT NULL,
    SilverlightVersion4 SMALLINT NOT NULL,
    PageCharset TEXT NOT NULL,
    CodeVersion INTEGER NOT NULL,
    IsLink SMALLINT NOT NULL,
    IsDownload SMALLINT NOT NULL,
    IsNotBounce SMALLINT NOT NULL,
    FUniqID BIGINT NOT NULL,
    OriginalURL TEXT NOT NULL,
    HID INTEGER NOT NULL,
    IsOldCounter SMALLINT NOT NULL,
    IsEvent SMALLINT NOT NULL,
    IsParameter SMALLINT NOT NULL,
    DontCountHits SMALLINT NOT NULL,
    WithHash SMALLINT NOT NULL,
    HitColor CHAR NOT NULL,
    LocalEventTime TIMESTAMP NOT NULL,
    Age SMALLINT NOT NULL,
    Sex SMALLINT NOT NULL,
    Income SMALLINT NOT NULL,
    Interests SMALLINT NOT NULL,
    Robotness SMALLINT NOT NULL,
    RemoteIP INTEGER NOT NULL,
    WindowName INTEGER NOT NULL,
    OpenerName INTEGER NOT NULL,
    HistoryLength SMALLINT NOT NULL,
    BrowserLanguage TEXT NOT NULL,
    BrowserCountry TEXT NOT NULL,
    SocialNetwork TEXT NOT NULL,
    SocialAction TEXT NOT NULL,
    HTTPError SMALLINT NOT NULL,
    SendTiming INTEGER NOT NULL,
    DNSTiming INTEGER NOT NULL,
    ConnectTiming INTEGER NOT NULL,
    ResponseStartTiming INTEGER NOT NULL,
    ResponseEndTiming INTEGER NOT NULL,
    FetchTiming INTEGER NOT NULL,
    SocialSourceNetworkID SMALLINT NOT NULL,
    SocialSourcePage TEXT NOT NULL,
    ParamPrice BIGINT NOT NULL,
    ParamOrderID TEXT NOT NULL,
    ParamCurrency TEXT NOT NULL,
    ParamCurrencyID SMALLINT NOT NULL,
    OpenstatServiceName TEXT NOT NULL,
    OpenstatCampaignID TEXT NOT NULL,
    OpenstatAdID TEXT NOT NULL,
    OpenstatSourceID TEXT NOT NULL,
    UTMSource TEXT NOT NULL,
    UTMMedium TEXT NOT NULL,
    UTMCampaign TEXT NOT NULL,
    UTMContent TEXT NOT NULL,
    UTMTerm TEXT NOT NULL,
    FromTag TEXT NOT NULL,
    HasGCLID SMALLINT NOT NULL,
    RefererHash BIGINT NOT NULL,
    URLHash BIGINT NOT NULL,
    CLID INTEGER NOT NULL,
    PRIMARY KEY (CounterID, EventDate, UserID, EventTime, WatchID)
)
ENGINE = MergeTree
SETTINGS disk = disk(type = cache, path = '/dev/shm/clickhouse/', max_size = '16G',
    disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));
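The notable part of this schema is the inline nested disk: a local cache layered over a read-only web disk, declared directly in `SETTINGS`. A minimal standalone sketch of the same layering; the table, UUID, and endpoint below are placeholders, and since the web disk is read-only, the ATTACH only succeeds if the endpoint actually hosts the table's metadata and parts:

```bash
#!/bin/bash
# Sketch: attach a MergeTree table served from a web endpoint, with a
# local cache on top for reads. Everything named here is hypothetical.
clickhouse-client --query "
    ATTACH TABLE web_backed UUID '00000000-0000-0000-0000-000000000001'
    (id UInt64) ENGINE = MergeTree ORDER BY id
    SETTINGS disk = disk(
        type = cache, path = '/dev/shm/clickhouse/', max_size = '4G',
        disk = disk(type = web, endpoint = 'https://example.com/datasets/'))"
```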
43 docker/test/clickbench/queries.sql Normal file
@ -0,0 +1,43 @@
SELECT COUNT(*) FROM hits;
SELECT COUNT(*) FROM hits WHERE AdvEngineID <> 0;
SELECT SUM(AdvEngineID), COUNT(*), AVG(ResolutionWidth) FROM hits;
SELECT AVG(UserID) FROM hits;
SELECT COUNT(DISTINCT UserID) FROM hits;
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
SELECT MIN(EventDate), MAX(EventDate) FROM hits;
SELECT AdvEngineID, COUNT(*) FROM hits WHERE AdvEngineID <> 0 GROUP BY AdvEngineID ORDER BY COUNT(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, SUM(AdvEngineID), COUNT(*) AS c, AVG(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, COUNT(*) FROM hits GROUP BY UserID ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID FROM hits WHERE UserID = 435090932899640449;
SELECT COUNT(*) FROM hits WHERE URL LIKE '%google%';
SELECT SearchPhrase, MIN(URL), COUNT(*) AS c FROM hits WHERE URL LIKE '%google%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, MIN(URL), MIN(Title), COUNT(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title LIKE '%Google%' AND URL NOT LIKE '%.google.%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM hits WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, AVG(length(URL)) AS l, COUNT(*) AS c FROM hits WHERE URL <> '' GROUP BY CounterID HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS k, AVG(length(Referer)) AS l, COUNT(*) AS c, MIN(Referer) FROM hits WHERE Referer <> '' GROUP BY k HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT SUM(ResolutionWidth), SUM(ResolutionWidth + 1), SUM(ResolutionWidth + 2), SUM(ResolutionWidth + 3), SUM(ResolutionWidth + 4), SUM(ResolutionWidth + 5), SUM(ResolutionWidth + 6), SUM(ResolutionWidth + 7), SUM(ResolutionWidth + 8), SUM(ResolutionWidth + 9), SUM(ResolutionWidth + 10), SUM(ResolutionWidth + 11), SUM(ResolutionWidth + 12), SUM(ResolutionWidth + 13), SUM(ResolutionWidth + 14), SUM(ResolutionWidth + 15), SUM(ResolutionWidth + 16), SUM(ResolutionWidth + 17), SUM(ResolutionWidth + 18), SUM(ResolutionWidth + 19), SUM(ResolutionWidth + 20), SUM(ResolutionWidth + 21), SUM(ResolutionWidth + 22), SUM(ResolutionWidth + 23), SUM(ResolutionWidth + 24), SUM(ResolutionWidth + 25), SUM(ResolutionWidth + 26), SUM(ResolutionWidth + 27), SUM(ResolutionWidth + 28), SUM(ResolutionWidth + 29), SUM(ResolutionWidth + 30), SUM(ResolutionWidth + 31), SUM(ResolutionWidth + 32), SUM(ResolutionWidth + 33), SUM(ResolutionWidth + 34), SUM(ResolutionWidth + 35), SUM(ResolutionWidth + 36), SUM(ResolutionWidth + 37), SUM(ResolutionWidth + 38), SUM(ResolutionWidth + 39), SUM(ResolutionWidth + 40), SUM(ResolutionWidth + 41), SUM(ResolutionWidth + 42), SUM(ResolutionWidth + 43), SUM(ResolutionWidth + 44), SUM(ResolutionWidth + 45), SUM(ResolutionWidth + 46), SUM(ResolutionWidth + 47), SUM(ResolutionWidth + 48), SUM(ResolutionWidth + 49), SUM(ResolutionWidth + 50), SUM(ResolutionWidth + 51), SUM(ResolutionWidth + 52), SUM(ResolutionWidth + 53), SUM(ResolutionWidth + 54), SUM(ResolutionWidth + 55), SUM(ResolutionWidth + 56), SUM(ResolutionWidth + 57), SUM(ResolutionWidth + 58), SUM(ResolutionWidth + 59), SUM(ResolutionWidth + 60), SUM(ResolutionWidth + 61), SUM(ResolutionWidth + 62), SUM(ResolutionWidth + 63), SUM(ResolutionWidth + 64), SUM(ResolutionWidth + 65), SUM(ResolutionWidth + 66), SUM(ResolutionWidth + 67), SUM(ResolutionWidth + 68), SUM(ResolutionWidth + 69), SUM(ResolutionWidth + 70), SUM(ResolutionWidth + 71), SUM(ResolutionWidth + 72), SUM(ResolutionWidth + 73), SUM(ResolutionWidth + 74), SUM(ResolutionWidth + 75), SUM(ResolutionWidth + 76), SUM(ResolutionWidth + 77), SUM(ResolutionWidth + 78), SUM(ResolutionWidth + 79), SUM(ResolutionWidth + 80), SUM(ResolutionWidth + 81), SUM(ResolutionWidth + 82), SUM(ResolutionWidth + 83), SUM(ResolutionWidth + 84), SUM(ResolutionWidth + 85), SUM(ResolutionWidth + 86), SUM(ResolutionWidth + 87), SUM(ResolutionWidth + 88), SUM(ResolutionWidth + 89) FROM hits;
SELECT SearchEngineID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, COUNT(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, COUNT(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, COUNT(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10 OFFSET 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 10 OFFSET 1000;
SELECT URLHash, EventDate, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 3594120000172545465 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 10 OFFSET 100;
SELECT WindowClientWidth, WindowClientHeight, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND DontCountHits = 0 AND URLHash = 2868770270353813622 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10 OFFSET 10000;
SELECT DATE_TRUNC('minute', EventTime) AS M, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-14' AND EventDate <= '2013-07-15' AND IsRefresh = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime) LIMIT 10 OFFSET 1000;
79 docker/test/clickbench/run.sh Executable file
@ -0,0 +1,79 @@
#!/bin/bash

SCRIPT_PID=$!
(sleep 1200 && kill -9 $SCRIPT_PID) &

# shellcheck disable=SC1091
source /setup_export_logs.sh

# fail on errors, verbose and export all env variables
set -e -x -a

dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb

# A directory for cache
mkdir /dev/shm/clickhouse
chown clickhouse:clickhouse /dev/shm/clickhouse

# Allow introspection functions, needed for sending the logs
echo "
profiles:
    default:
        allow_introspection_functions: 1
" > /etc/clickhouse-server/users.d/allow_introspection_functions.yaml

# Enable text_log
echo "
text_log:
" > /etc/clickhouse-server/config.d/text_log.yaml

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

clickhouse start

# Wait for the server to start, but not for too long.
for _ in {1..100}
do
    clickhouse-client --query "SELECT 1" && break
    sleep 1
done

setup_logs_replication

# Load the data

clickhouse-client --time < /create.sql

# Run the queries

set +x

TRIES=3
QUERY_NUM=1
while read -r query; do
    echo -n "["
    for i in $(seq 1 $TRIES); do
        RES=$(clickhouse-client --query_id "q${QUERY_NUM}-${i}" --time --format Null --query "$query" --progress 0 2>&1 ||:)
        echo -n "${RES}"
        [[ "$i" != "$TRIES" ]] && echo -n ", "

        echo "${QUERY_NUM},${i},${RES}" >> /test_output/test_results.tsv
    done
    echo "],"

    QUERY_NUM=$((QUERY_NUM + 1))
done < /queries.sql

set -x

clickhouse-client --query "SELECT total_bytes FROM system.tables WHERE name = 'hits' AND database = 'default'"

clickhouse-client -q "system flush logs" ||:
stop_logs_replication
clickhouse stop

mv /var/log/clickhouse-server/* /test_output/

echo -e "success\tClickBench finished" > /test_output/check_status.tsv
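The first two lines of `run.sh` set up a self-destruct watchdog. Note that `$!` expands to the PID of the most recent background job, which is empty at the top of a fresh script; a sketch of the presumably intended pattern uses `$$` (the script's own PID) instead:

```bash
#!/bin/bash
# Watchdog sketch: kill this very script if it runs longer than 20 minutes.
# $$ is the shell's own PID, and it keeps that value inside the subshell.
(sleep 1200 && kill -9 $$) &
WATCHDOG_PID=$!          # now $! is meaningful: the watchdog subshell's PID

# ... benchmark work would go here ...

kill "$WATCHDOG_PID" 2>/dev/null || :   # disarm on normal completion
```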
@ -74,7 +74,7 @@ RUN python3 -m pip install --no-cache-dir \
    delta-spark==2.3.0 \
    dict2xml \
    dicttoxml \
    docker \
    docker==6.1.3 \
    docker-compose==1.29.2 \
    grpcio \
    grpcio-tools \
@ -39,18 +39,8 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY * /
COPY run.sh /

# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
CMD ["bash", "/run.sh"]

# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
18 docker/test/performance-comparison/run.sh Normal file
@ -0,0 +1,18 @@
#!/bin/bash

entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
[ ! -e "$entry" ] && echo "ERROR: test scripts are not found" && exit 1

# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
numactl --cpunodebind=$node --membind=$node $entry
@ -30,7 +30,7 @@ def build_url(base_url, dataset):
    return os.path.join(base_url, dataset, "partitions", AVAILABLE_DATASETS[dataset])


def dowload_with_progress(url, path):
def download_with_progress(url, path):
    logging.info("Downloading from %s to temp path %s", url, path)
    for i in range(RETRIES_COUNT):
        try:
@ -110,7 +110,7 @@ if __name__ == "__main__":
        temp_archive_path = _get_temp_file_name()
        try:
            download_url_for_dataset = build_url(args.url_prefix, dataset)
            dowload_with_progress(download_url_for_dataset, temp_archive_path)
            download_with_progress(download_url_for_dataset, temp_archive_path)
            unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path)
        except Exception as ex:
            logging.info("Some exception occured %s", str(ex))
@ -300,9 +300,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
    zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
    zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.zst ||:
    # FIXME: remove once only github actions will be left
    rm /var/log/clickhouse-server/clickhouse-server1.log
    rm /var/log/clickhouse-server/clickhouse-server2.log
    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
@ -23,6 +23,7 @@ echo "Check submodules" | ts
./check-submodules |& tee /test_output/submodules_output.txt
echo "Check shell scripts with shellcheck" | ts
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt

/process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
echo "Check help for changelog generator works" | ts
cd ../changelog || exit 1
@ -77,6 +77,7 @@ remove_keeper_config "create_if_not_exists" "[01]"
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@ -115,6 +116,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
|
22
docs/changelogs/v23.11.2.11-stable.md
Normal file
22
docs/changelogs/v23.11.2.11-stable.md
Normal file
@ -0,0 +1,22 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.11.2.11-stable (6e5411358c8) FIXME as compared to v23.11.1.2711-stable (05bc8ef1e02)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#57661](https://github.com/ClickHouse/ClickHouse/issues/57661): Handle sigabrt case when getting PostgreSQl table structure with empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||
* Fix SIGSEGV for aggregation of sparse columns with any() RESPECT NULL [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631) [#57766](https://github.com/ClickHouse/ClickHouse/pull/57766) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
|
@ -489,7 +489,7 @@ When using functions with response codes or `errno`, always check the result and

``` cpp
if (0 != close(fd))
    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
    throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```

You can use assert to check invariants in code.
@ -520,7 +520,7 @@ Indexes of type `set` can be utilized by all functions. The other index types ar
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
@ -25,7 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    [ORDER BY expr]
    [PRIMARY KEY expr]
    [SAMPLE BY expr]
    [SETTINGS name=value, clean_deleted_rows=value, ...]
    [SETTINGS name=value, ...]
```

For a description of request parameters, see [statement description](../../../sql-reference/statements/create/table.md).
@ -88,53 +88,6 @@ SELECT * FROM mySecondReplacingMT FINAL;
└─────┴─────────┴─────────────────────┘
```

### is_deleted

`is_deleted` — Name of a column used during a merge to determine whether the data in this row represents the state or is to be deleted; `1` is a “deleted“ row, `0` is a “state“ row.

Column data type — `UInt8`.

:::note
`is_deleted` can only be enabled when `ver` is used.

The row is deleted when `OPTIMIZE ... FINAL CLEANUP` or `OPTIMIZE ... FINAL` is used, or if the engine setting `clean_deleted_rows` has been set to `Always`.

No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is the one kept.

:::

Example:
```sql
-- with ver and is_deleted
CREATE OR REPLACE TABLE myThirdReplacingMT
(
    `key` Int64,
    `someCol` String,
    `eventTime` DateTime,
    `is_deleted` UInt8
)
ENGINE = ReplacingMergeTree(eventTime, is_deleted)
ORDER BY key;

INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 0);
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 1);

select * from myThirdReplacingMT final;

0 rows in set. Elapsed: 0.003 sec.

-- delete rows with is_deleted
OPTIMIZE TABLE myThirdReplacingMT FINAL CLEANUP;

INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 00:00:00', 0);

select * from myThirdReplacingMT final;

┌─key─┬─someCol─┬───────────eventTime─┬─is_deleted─┐
│   1 │ first   │ 2020-01-01 00:00:00 │          0 │
└─────┴─────────┴─────────────────────┴────────────┘
```

## Query clauses

When creating a `ReplacingMergeTree` table the same [clauses](../../../engines/table-engines/mergetree-family/mergetree.md) are required, as when creating a `MergeTree` table.
@ -406,7 +406,7 @@ RESTORE TABLE data AS data_restored FROM Disk('s3_plain', 'cloud_backup');
:::note
But keep in mind that:
- This disk should not be used for `MergeTree` itself, only for `BACKUP`/`RESTORE`
- If your tables are backed by S3 storage, it doesn't use `CopyObject` calls to copy parts to the destination bucket, instead, it downloads and uploads them, which is very inefficient. Prefer to use `BACKUP ... TO S3(<endpoint>)` syntax for this use-case.
- If your tables are backed by S3 storage and types of the disks are different, it doesn't use `CopyObject` calls to copy parts to the destination bucket, instead, it downloads and uploads them, which is very inefficient. Prefer to use `BACKUP ... TO S3(<endpoint>)` syntax for this use-case.
:::

## Alternatives
@ -1683,7 +1683,7 @@ Default value: `0.5`.

Asynchronous loading of databases and tables.

If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up. See `system.async_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up.
If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up. See `system.asynchronous_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up.

If `false`, all databases are loaded when the server starts.
@ -852,16 +852,6 @@ If the file name for column is too long (more than `max_file_name_length` bytes)

The maximal length of the file name to keep it as is without hashing. Takes effect only if setting `replace_long_file_name_to_hash` is enabled. The value of this setting does not include the length of file extension. So, it is recommended to set it below the maximum filename length (usually 255 bytes) with some gap to avoid filesystem errors. Default value: 127.

## clean_deleted_rows

Enable/disable automatic deletion of rows flagged as `is_deleted` when perform `OPTIMIZE ... FINAL` on a table using the ReplacingMergeTree engine. When disabled, the `CLEANUP` keyword has to be added to the `OPTIMIZE ... FINAL` to have the same behaviour.

Possible values:

- `Always` or `Never`.

Default value: `Never`

## allow_experimental_block_number_column

Persists virtual column `_block_number` on merges.
@ -1578,9 +1578,15 @@ Default value: `default`.

## allow_experimental_parallel_reading_from_replicas

If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`). It will work for any kind of MergeTree table.
Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table.

Default value: `false`.
Possible values:

- 0 - Disabled.
- 1 - Enabled, silently disabled in case of failure.
- 2 - Enabled, throws an exception in case of failure.

## compile_expressions {#compile-expressions}
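A quick illustration of the new tri-state setting in a session; the table name is a placeholder, and both settings shown are the ones documented above:

```bash
#!/bin/bash
# Sketch: fan a SELECT out across up to three replicas and throw an
# exception if parallel reading cannot be used (mode 2).
clickhouse-client \
  --allow_experimental_parallel_reading_from_replicas=2 \
  --max_parallel_replicas=3 \
  --query "SELECT count() FROM some_replicated_table"
```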
@ -1,7 +1,7 @@
---
slug: /en/operations/system-tables/async_loader
slug: /en/operations/system-tables/asynchronous_loader
---
# async_loader
# asynchronous_loader

Contains information and status for recent asynchronous jobs (e.g. for tables loading). The table contains a row for every job. There is a tool for visualizing information from this table `utils/async_loader_graph`.

@ -9,7 +9,7 @@ Example:

``` sql
SELECT *
FROM system.async_loader
FROM system.asynchronous_loader
FORMAT Vertical
LIMIT 1
```
14 docs/en/operations/system-tables/dropped_tables_parts.md Normal file
@ -0,0 +1,14 @@
---
slug: /en/operations/system-tables/dropped_tables_parts
---
# dropped_tables_parts {#system_tables-dropped_tables_parts}

Contains information about parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) dropped tables from [system.dropped_tables](./dropped_tables.md)

The schema of this table is the same as [system.parts](./parts.md)

**See Also**

- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [system.parts](./parts.md)
- [system.dropped_tables](./dropped_tables.md)
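Since the schema mirrors `system.parts`, the new table can be probed the same way; a minimal sketch, with the column choice assumed from `system.parts`:

```bash
#!/bin/bash
# Sketch: inspect parts still held by tables that were dropped but not
# yet purged. `table`, `name`, and `rows` exist in system.parts, whose
# schema this table shares.
clickhouse-client --query "
    SELECT table, name, rows
    FROM system.dropped_tables_parts
    LIMIT 10"
```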
@ -53,7 +53,6 @@ clickhouse-benchmark [keys] < queries_file;
- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren’t different with the selected level of confidence.
- `--cumulative` — Printing cumulative data instead of data per interval.
- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
- `--json=FILEPATH` — `JSON` output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file.
- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
- `--password=PSWD` — ClickHouse user password. Default value: empty string.
- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
@ -216,6 +216,7 @@ Arguments:

- `--logger.level` — Log level.
- `--ignore-error` — do not stop processing if a query failed.
- `-c`, `--config-file` — path to a configuration file in the same format as for the ClickHouse server; by default the configuration is empty.
- `--no-system-tables` — do not attach system tables.
- `--help` — arguments references for `clickhouse-local`.
- `-V`, `--version` — print version information and exit.
@ -1809,6 +1809,8 @@ Alias: `dateTrunc`.

- `quarter`
- `year`

`unit` argument is case-insensitive.

- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../../sql-reference/data-types/string.md).
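A brief sketch of the optional `timezone` argument described above (results depend on the current time, so no output is shown):

``` sql
-- Truncate the current time to the start of the hour, locally and in UTC.
SELECT
    date_trunc('hour', now()) AS local_hour,
    date_trunc('hour', now(), 'UTC') AS utc_hour;
```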
@ -533,8 +533,8 @@ Result:

```result
┌─concatWithSeparator('a', '1', '2', '3', '4')─┐
│ 1a2a3a4 │
└───────────────────────────────────┘
│ 1a2a3a4                                      │
└──────────────────────────────────────────────┘
```

## concatWithSeparatorAssumeInjective
@ -1251,7 +1251,7 @@ This function also replaces numeric character references with Unicode characters

**Syntax**

``` sql
decodeHTMComponent(x)
decodeHTMLComponent(x)
```

**Arguments**

@ -1268,7 +1268,7 @@ Type: [String](../../sql-reference/data-types/string.md).

``` sql
SELECT decodeHTMLComponent('&apos;CH');
SELECT decodeHMLComponent('I&hearts;ClickHouse');
SELECT decodeHTMLComponent('I&hearts;ClickHouse');
```

Result:
59
docs/en/sql-reference/functions/time-series-functions.md
Normal file

@ -0,0 +1,59 @@

---
slug: /en/sql-reference/functions/time-series-functions
sidebar_position: 172
sidebar_label: Time Series
---

# Time Series Functions

The functions below are used for time series analysis.

## seriesPeriodDetectFFT

Finds the period of the given time series data using FFT ([Fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform)).

**Syntax**

``` sql
seriesPeriodDetectFFT(series);
```

**Arguments**

- `series` - An array of numeric values.

**Returned value**

- A real value equal to the period of the time series.
- Returns NaN when the number of data points is less than four.

Type: [Float64](../../sql-reference/data-types/float.md).

**Examples**

Query:

``` sql
SELECT seriesPeriodDetectFFT([1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6]) AS print_0;
```

Result:

``` text
┌─print_0─┐
│       3 │
└─────────┘
```

``` sql
SELECT seriesPeriodDetectFFT(arrayMap(x -> abs((x % 6) - 3), range(1000))) AS print_0;
```

Result:

``` text
┌─print_0─┐
│       6 │
└─────────┘
```
@ -128,17 +128,17 @@ Reading data from `table.csv`, located in `archive1.zip` or/and `archive2.zip`:

SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv');
```

## Globbing {#globs_in_path}
## Globs in path {#globs_in_path}

Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.

- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.
- `{some_string,another_string,yet_another_one}` — Represents any of alternative strings `'some_string', 'another_string', 'yet_another_one'`. The strings may contain `/`.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol.
- `{N..M}` — Represents any number `>= N` and `<= M`.
- `**` - Represents all files inside a folder recursively.

Constructions with `{}` are similar to the [remote](remote.md) table function.
Constructions with `{}` are similar to the [remote](remote.md) and [hdfs](hdfs.md) table functions.

**Example**
@ -19,6 +19,7 @@ fuzzJSON({ named_collection [option=value [,..]] | json_str[, random_seed] })

- `json_str` (String) - The source string representing structured data in JSON format.
- `random_seed` (UInt64) - Manual random seed for producing stable results.
- `reuse_output` (boolean) - Reuse the output from a fuzzing process as input for the next fuzzer.
- `malform_output` (boolean) - Generate a string that cannot be parsed as a JSON object.
- `max_output_length` (UInt64) - Maximum allowable length of the generated or perturbed JSON string.
- `probability` (Float64) - The probability to fuzz a JSON field (a key-value pair). Must be within [0, 1] range.
- `max_nesting_level` (UInt64) - The maximum allowed depth of nested structures within the JSON data.

@ -84,3 +85,13 @@ SELECT * FROM fuzzJSON('{"id":1}', 1234) LIMIT 3;

{"BRjE":16137826149911306846}
{"XjKE":15076727133550123563}
```

``` sql
SELECT * FROM fuzzJSON(json_nc, json_str='{"name" : "FuzzJSON"}', random_seed=1337, malform_output=true) LIMIT 3;
```

``` text
U"name":"FuzzJSON*"SpByjZKtr2VAyHCO"falseh
{"name"keFuzzJSON, "g6vVO7TCIk":jTt^
{"DBhz":YFuzzJSON5}
```
@ -41,14 +41,14 @@ LIMIT 2

## Globs in path {#globs_in_path}

Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix).
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.

- `*` — Substitutes any number of any characters except `/` including empty string.
- `?` — Substitutes any single character.
- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol.
- `{N..M}` — Substitutes any number in range from N to M including both borders.
- `{N..M}` — Represents any number `>= N` and `<= M`.

Constructions with `{}` are similar to the [remote](../../sql-reference/table-functions/remote.md) table function.
Constructions with `{}` are similar to the [remote](remote.md) and [file](file.md) table functions.

**Example**
@ -493,7 +493,7 @@ catch (const DB::Exception & e)

``` cpp
if (0 != close(fd))
    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
    throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```

`assert` is not used.
@ -369,6 +369,9 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT

| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |

Functions with a constant argument that is smaller than the ngram size cannot use the `ngrambf_v1` index for query optimization.
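A hedged illustration of the constraint above; the table and column names are hypothetical, and the index parameters follow the documented `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` form:

``` sql
CREATE TABLE docs
(
    id UInt64,
    body String,
    INDEX body_ngrams body TYPE ngrambf_v1(4, 1024, 3, 7) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;

-- The constant 'clickhouse' is longer than the ngram size (4),
-- so the skip index can be used to prune granules.
SELECT count() FROM docs WHERE body LIKE '%clickhouse%';

-- A constant shorter than the ngram size ('ch') cannot use the index.
SELECT count() FROM docs WHERE body LIKE '%ch%';
```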
@ -86,59 +86,6 @@ SELECT * FROM mySecondReplacingMT FINAL;

│ 1 │ first │ 2020-01-01 01:01:01 │
└─────┴─────────┴─────────────────────┘
```

### is_deleted

`is_deleted` — the name of the column used during merges to mark whether a row should be kept or deleted: `1` means the row is deleted, `0` means it is kept.

The column data type is `UInt8`.

:::note
`is_deleted` can be used only when `ver` is used.

A row is deleted in the following cases:

- the `OPTIMIZE ... FINAL CLEANUP` statement is used
- the `OPTIMIZE ... FINAL` statement is used
- the engine setting `clean_deleted_rows` is set to `Always` (the default is `Never`)
- newer versions of the row exist

Running `FINAL CLEANUP` or using the engine setting `clean_deleted_rows` with the value `Always` is not recommended; it may lead to unexpected results, for example deleted rows may reappear.

Regardless of the changes made to the data, the version must increase. If two rows have the same version, the last inserted row is kept.
:::

Example:

```sql
-- with ver and is_deleted
CREATE OR REPLACE TABLE myThirdReplacingMT
(
    `key` Int64,
    `someCol` String,
    `eventTime` DateTime,
    `is_deleted` UInt8
)
ENGINE = ReplacingMergeTree(eventTime, is_deleted)
ORDER BY key;

INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 0);
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 1);

select * from myThirdReplacingMT final;

0 rows in set. Elapsed: 0.003 sec.

-- delete rows with is_deleted
OPTIMIZE TABLE myThirdReplacingMT FINAL CLEANUP;

INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 00:00:00', 0);

select * from myThirdReplacingMT final;

┌─key─┬─someCol─┬───────────eventTime─┬─is_deleted─┐
│   1 │ first   │ 2020-01-01 00:00:00 │          0 │
└─────┴─────────┴─────────────────────┴────────────┘
```

## Query clauses
@ -45,6 +45,7 @@ $ clickhouse-local --structure "table_structure" --input-format "format_of_incom

- `--logger.level` — logging level.
- `--ignore-error` — do not stop processing if a query failed.
- `-c`, `--config-file` — path to the configuration file. By default `clickhouse-local` starts with an empty configuration. The configuration file has the same format as for the ClickHouse server, and all server configuration parameters can be used in it. Usually no configuration is needed; if a single parameter needs to be set, this can be done with a key named after the parameter.
- `--no-system-tables` — run without system tables.
- `--help` — print reference information about `clickhouse-local`.
- `-V`, `--version` — print the current version and exit.
@ -76,14 +76,16 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U

## Search patterns in path components {#globs-in-path}

Glob patterns may be used in the file path. Only files whose full path and name match the pattern are processed (not only the prefix or suffix).
The file path may contain glob patterns in read-only mode.
Patterns may appear in different parts of the path.
Exactly those files are processed that exist in the file system and match the whole path pattern.

- `*` — substitutes any number of any characters except `/`, including none.
- `?` — substitutes exactly one arbitrary character.
- `{some_string,another_string,yet_another_one}` — substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`. These strings may also contain the `/` symbol.
- `{N..M}` — substitutes any number in the range from `N` to `M` inclusive (may contain leading zeros).

The construction with `{}` is similar to the [remote](remote.md) table function.
The construction with `{}` is similar to the [remote](remote.md) and [hdfs](hdfs.md) table functions.

**Example**
@ -14,7 +14,7 @@ hdfs(URI, format, structure)

**Input parameters**

- `URI` — the URI of the file in HDFS. The file path supports the following glob patterns in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}`, where `N`, `M` are numbers and `'abc', 'def'` are strings.
- `URI` — the URI of the file in HDFS.
- `format` — the [format](../../interfaces/formats.md#formats) of the file.
- `structure` — the table structure. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
@ -41,19 +41,22 @@ LIMIT 2

## Search patterns in path components {#globs-in-path}

- `*` — substitutes any number of any characters except `/`, including none.
The file path may contain glob patterns in read-only mode.
Patterns may appear in different parts of the path.
Exactly those files are processed that exist in the file system and match the whole path pattern.


- `*` — substitutes any number of any characters (except `/`), including none.
- `?` — substitutes exactly one arbitrary character.
- `{some_string,another_string,yet_another_one}` — substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`. These strings may also contain the `/` symbol.
- `{N..M}` — substitutes any number in the range from `N` to `M` inclusive (may contain leading zeros).

The construction with `{}` is similar to the [remote](remote.md) table function.
The construction with `{}` is similar to the [remote](remote.md) and [file](file.md) table functions.

:::danger Warning
If your list of files contains a number range with leading zeros, use the brace construction for each digit separately, or use `?`.
If your list of files contains a number range with leading zeros, use a separate brace construction for each digit, or use `?`.
:::

Patterns may appear in different parts of the path. Exactly those files are processed that both match the whole path pattern and exist in the file system.
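A hedged sketch of these patterns with the [hdfs](hdfs.md) table function; the namenode address and directory layout are hypothetical:

``` sql
-- Reads file_1.csv, file_2.csv and file_3.csv from HDFS in a single query.
SELECT *
FROM hdfs('hdfs://namenode:9000/data/file_{1..3}.csv', 'CSV', 'id UInt32, value String');
```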
## Virtual columns {#virtualnye-stolbtsy}

- `_path` — the path to the file.
@ -485,7 +485,7 @@ catch (const DB::Exception & e)

``` cpp
if (0 != close(fd))
    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
    throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```

Do not use `assert`.
@ -364,6 +364,9 @@ Conditions in the WHERE clause may contain function expressions that operate on column data

| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |

Functions with a constant argument that is smaller than the ngram size cannot use `ngrambf_v1` for query optimization.
@ -45,6 +45,7 @@ clickhouse-local --structure "table_structure" --input-format "format_of_incomin

- `--logger.level` — log level.
- `--ignore-error` — do not stop processing when a query fails.
- `-c`, `--config-file` — path to a configuration file in the same format as for the ClickHouse server; the configuration is empty by default.
- `--no-system-tables` — do not attach system tables.
- `--help` — usage help for `clickhouse-local`.
- `-V`, `--version` — print version information and exit.
@ -405,7 +405,7 @@ private:
    || sigaddset(&sig_set, SIGINT)
    || pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
{
    throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL);
    throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal");
}

while (true)
@ -328,7 +328,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
        fs::create_symlink(binary_self_canonical_path, main_bin_path);

        if (0 != chmod(binary_self_canonical_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
            throwFromErrno(fmt::format("Cannot chmod {}", binary_self_canonical_path.string()), ErrorCodes::SYSTEM_ERROR);
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", binary_self_canonical_path.string());
    }
}
else
@ -361,7 +361,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
    if (already_installed)
    {
        if (0 != chmod(main_bin_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
            throwFromErrno(fmt::format("Cannot chmod {}", main_bin_path.string()), ErrorCodes::SYSTEM_ERROR);
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_path.string());
    }
    else
    {
@ -395,7 +395,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
        }

        if (0 != chmod(destination.c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
            throwFromErrno(fmt::format("Cannot chmod {}", main_bin_tmp_path.string()), ErrorCodes::SYSTEM_ERROR);
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_tmp_path.string());
    }
    catch (const Exception & e)
    {
@ -1122,7 +1122,7 @@ namespace
            return 0;
        }
        else
            throwFromErrno(fmt::format("Cannot obtain the status of pid {} with `kill`", pid), ErrorCodes::CANNOT_KILL);
            throw ErrnoException(ErrorCodes::CANNOT_KILL, "Cannot obtain the status of pid {} with `kill`", pid);
    }

    if (!pid)
@ -1143,7 +1143,7 @@ namespace
        if (0 == kill(pid, signal))
            fmt::print("Sent {} signal to process with pid {}.\n", signal_name, pid);
        else
            throwFromErrno(fmt::format("Cannot send {} signal", signal_name), ErrorCodes::SYSTEM_ERROR);
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot send {} signal", signal_name);

        size_t try_num = 0;
        for (; try_num < max_tries; ++try_num)
@ -115,6 +115,7 @@ if (BUILD_STANDALONE_KEEPER)
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/LocalDirectorySyncGuard.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/TemporaryFileOnDisk.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/loadLocalDiskConfig.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskType.cpp

    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/IObjectStorage.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp
@ -43,7 +43,7 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTInsertQuery.h>
#include <Common/ErrorHandlers.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
@ -744,7 +744,7 @@ void LocalServer::processConfig()

    LOG_DEBUG(log, "Loading metadata from {}", path);
    auto startup_system_tasks = loadMetadataSystem(global_context);
    attachSystemTablesLocal</* lazy= */ true>(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));
    attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));
    attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA));
    attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
    waitLoad(TablesLoaderForegroundPoolId, startup_system_tasks);
@ -757,13 +757,13 @@ void LocalServer::processConfig()
    }

    /// For ClickHouse local if path is not set the loader will be disabled.
    global_context->getUserDefinedSQLObjectsLoader().loadObjects();
    global_context->getUserDefinedSQLObjectsStorage().loadObjects();

    LOG_DEBUG(log, "Loaded metadata.");
}
else
else if (!config().has("no-system-tables"))
{
    attachSystemTablesLocal</* lazy= */ true>(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));
    attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));
    attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA));
    attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
}
@ -842,6 +842,7 @@ void LocalServer::addOptions(OptionsDescription & options_description)
    ("logger.log", po::value<std::string>(), "Log file name")
    ("logger.level", po::value<std::string>(), "Log level")

    ("no-system-tables", "do not attach system tables (better startup time)")
    ("path", po::value<std::string>(), "Storage path")
    ("only-system-tables", "attach only system tables from specified path")
    ("top_level_domains_path", po::value<std::string>(), "Path to lists with custom TLDs")
@ -870,6 +871,8 @@ void LocalServer::processOptions(const OptionsDescription &, const CommandLineOp
        config().setString("table-file", options["file"].as<std::string>());
    if (options.count("structure"))
        config().setString("table-structure", options["structure"].as<std::string>());
    if (options.count("no-system-tables"))
        config().setBool("no-system-tables", true);
    if (options.count("only-system-tables"))
        config().setBool("only-system-tables", true);
    if (options.count("database"))
@ -1307,7 +1307,7 @@ try
    /// stdin must be seekable
    auto res = lseek(file->getFD(), 0, SEEK_SET);
    if (-1 == res)
        throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
        throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)");

    SingleReadBufferIterator read_buffer_iterator(std::move(file));
    schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, false, context_const);
@ -1336,7 +1336,7 @@ try
    /// stdin must be seekable
    auto res = lseek(file_in.getFD(), 0, SEEK_SET);
    if (-1 == res)
        throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
        throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)");
}

Obfuscator obfuscator(header, seed, markov_model_params);
@ -45,6 +45,7 @@
#include <Common/makeSocketAddress.h>
#include <Common/FailPoint.h>
#include <Server/waitServersToFinish.h>
#include <Interpreters/Cache/FileCacheFactory.h>
#include <Core/ServerUUID.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromFile.h>
@ -66,7 +67,7 @@
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>
@ -1450,8 +1451,6 @@ try

    global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config);

    global_context->reloadQueryMaskingRulesIfChanged(config);

    std::lock_guard lock(servers_lock);
    updateServers(*config, server_pool, async_metrics, servers, servers_to_start_before_tables);
}
@ -1472,6 +1471,8 @@ try
#endif
    NamedCollectionUtils::reloadFromConfig(*config);

    FileCacheFactory::instance().updateSettingsFromConfig(*config);

    ProfileEvents::increment(ProfileEvents::MainConfigLoads);

    /// Must be the last.
@ -1755,7 +1756,7 @@ try
    /// After loading validate that default database exists
    database_catalog.assertDatabaseExists(default_database);
    /// Load user-defined SQL functions.
    global_context->getUserDefinedSQLObjectsLoader().loadObjects();
    global_context->getUserDefinedSQLObjectsStorage().loadObjects();
}
catch (...)
{
1
programs/server/config.d/graphite_alternative.xml
Symbolic link

@ -0,0 +1 @@

../../../tests/config/config.d/graphite_alternative.xml
@ -455,6 +455,7 @@
<div id="username-password">
    <input spellcheck="false" id="user" type="text" value="" placeholder="user" />
    <input spellcheck="false" id="password" type="password" placeholder="password" />
    <input id="hidden-submit" type="submit" hidden="true"/>
</div>
</div>
<div id="button-options">
@ -720,7 +721,7 @@ function insertChart(i) {
    query_editor_confirm.addEventListener('click', editConfirm);

    /// Ctrl+Enter (or Cmd+Enter on Mac) will also confirm editing.
    query_editor.addEventListener('keydown', e => {
    query_editor.addEventListener('keydown', event => {
        if ((event.metaKey || event.ctrlKey) && (event.keyCode == 13 || event.keyCode == 10)) {
            editConfirm();
        }
@ -895,7 +896,7 @@ document.getElementById('add').addEventListener('click', e => {
});

document.getElementById('reload').addEventListener('click', e => {
    reloadAll(false);
    reloadAll(queries.length == 0);
});

document.getElementById('search').addEventListener('click', e => {
@ -1291,6 +1292,7 @@ async function drawAll() {
    document.getElementById('add').style.display = 'inline-block';
    document.getElementById('edit').style.display = 'inline-block';
    document.getElementById('search-span').style.display = '';
    hideError();
}
else {
    const charts = document.getElementById('charts')
@ -1317,9 +1319,11 @@ function disableButtons() {
    reloadButton.classList.add('disabled');

    const runButton = document.getElementById('run');
    runButton.value = 'Reloading…';
    runButton.disabled = true;
    runButton.classList.add('disabled');
    if (runButton) {
        runButton.value = 'Reloading…';
        runButton.disabled = true;
        runButton.classList.add('disabled');
    }

    const searchButton = document.getElementById('search');
    searchButton.value = '…';
@ -1334,9 +1338,11 @@ function enableButtons() {
    reloadButton.classList.remove('disabled');

    const runButton = document.getElementById('run');
    runButton.value = 'Ok';
    runButton.disabled = false;
    runButton.classList.remove('disabled');
    if (runButton) {
        runButton.value = 'Ok';
        runButton.disabled = false;
        runButton.classList.remove('disabled');
    }

    const searchButton = document.getElementById('search');
    searchButton.value = '🔎';
@ -1359,14 +1365,17 @@ async function reloadAll(do_search) {
    }
    await drawAll();
} catch (e) {
    showError(e.toString());
    showError(e.message);
}
enableButtons();
}

document.getElementById('params').onsubmit = function(event) {
    let do_search = document.activeElement === document.getElementById('search-query');
    reloadAll(do_search);
    if (document.activeElement === document.getElementById('search-query')) {
        reloadAll(true);
    } else {
        reloadAll(queries.length == 0);
    }
    event.preventDefault();
}

@ -1405,13 +1414,15 @@ function refreshCustomized(value) {
    document.getElementById('search-span').style.opacity = customized ? 0.5 : 1.0;
}

function regenerate() {
function updateFromState() {
    document.getElementById('url').value = host;
    document.getElementById('user').value = user;
    document.getElementById('password').value = password;
    document.getElementById('search-query').value = search_query;
    refreshCustomized();
}

function regenerate() {
    findParamsInQueries();
    buildParams();

@ -1430,7 +1441,7 @@ function regenerate() {
window.onpopstate = function(event) {
    if (!event.state) { return; }
    ({host, user, queries, params, search_query, customized} = event.state);

    updateFromState();
    regenerate();
    drawAll();
};
@ -1447,6 +1458,7 @@ if (window.location.hash) {

async function start() {
    try {
        updateFromState();
        if (queries.length == 0) {
            await searchQueries();
        } else {
@ -1460,7 +1472,7 @@ async function start() {
            drawAll();
        }
    } catch (e) {
        showError(e.toString());
        showError(e.message);
    }
}
@ -56,7 +56,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
    group * result{};

    if (0 != getgrnam_r(arg_gid.data(), &entry, buf.get(), buf_size, &result))
        throwFromErrno(fmt::format("Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);

    if (!result)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
@ -68,7 +68,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense");

    if (0 != setgid(gid))
        throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setgid' to user ({})", arg_gid);
}

if (!arg_uid.empty())
@ -81,7 +81,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
    passwd * result{};

    if (0 != getpwnam_r(arg_uid.data(), &entry, buf.get(), buf_size, &result))
        throwFromErrno(fmt::format("Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);

    if (!result)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
@ -93,7 +93,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense");

    if (0 != setuid(uid))
        throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setuid' to user ({})", arg_uid);
}
}

@ -136,7 +136,7 @@ try

    execvp(new_argv.front(), new_argv.data());

    throwFromErrno("Cannot execvp", ErrorCodes::SYSTEM_ERROR);
    throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot execvp");
}
catch (...)
{
@ -1,24 +1,25 @@
extern crate blake3;
extern crate libc;

use std::ffi::{CStr, CString};
use std::ffi::{CString};
use std::slice;
use std::os::raw::c_char;

#[no_mangle]
pub unsafe extern "C" fn blake3_apply_shim(
    begin: *const c_char,
    _size: u32,
    size: u32,
    out_char_data: *mut u8,
) -> *mut c_char {
    if begin.is_null() {
        let err_str = CString::new("input was a null pointer").unwrap();
        return err_str.into_raw();
    }
    let input_res = slice::from_raw_parts(begin as *const u8, size as usize);
    let mut hasher = blake3::Hasher::new();
    let input_bytes = CStr::from_ptr(begin);
    let input_res = input_bytes.to_bytes();
    hasher.update(input_res);
    let mut reader = hasher.finalize_xof();

    reader.fill(std::slice::from_raw_parts_mut(out_char_data, blake3::OUT_LEN));
    std::ptr::null_mut()
}
@ -183,6 +183,7 @@ enum class AccessType
    M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP REPLICATION QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \
    M(SYSTEM_DROP_REPLICA, "DROP REPLICA", TABLE, SYSTEM) \
    M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \
    M(SYSTEM_REPLICA_READINESS, "SYSTEM REPLICA READY, SYSTEM REPLICA UNREADY", GLOBAL, SYSTEM) \
    M(SYSTEM_RESTART_REPLICA, "RESTART REPLICA", TABLE, SYSTEM) \
    M(SYSTEM_RESTORE_REPLICA, "RESTORE REPLICA", TABLE, SYSTEM) \
    M(SYSTEM_WAIT_LOADING_PARTS, "WAIT LOADING PARTS", TABLE, SYSTEM) \
@ -43,14 +43,6 @@ namespace Stage = BackupCoordinationStage;

namespace
{
    /// Uppercases the first character of a passed string.
    String toUpperFirst(const String & str)
    {
        String res = str;
        res[0] = std::toupper(res[0]);
        return res;
    }

    /// Outputs "table <name>" or "temporary table <name>"
    String tableNameWithTypeToString(const String & database_name, const String & table_name, bool first_upper)
    {
@ -164,7 +156,7 @@ BackupEntries BackupEntriesCollector::run()

Strings BackupEntriesCollector::setStage(const String & new_stage, const String & message)
{
    LOG_TRACE(log, fmt::runtime(toUpperFirst(new_stage)));
    LOG_TRACE(log, "Setting stage: {}", new_stage);
    current_stage = new_stage;

    backup_coordination->setStage(new_stage, message);
@ -436,6 +436,10 @@ dbms_target_link_libraries(PRIVATE ch_contrib::zstd)
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::zstd)
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::xz)

if (TARGET ch_contrib::pocketfft)
    target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::pocketfft)
endif ()

if (TARGET ch_contrib::icu)
    dbms_target_link_libraries (PRIVATE ch_contrib::icu)
endif ()
@ -318,14 +318,14 @@ void ClientBase::setupSignalHandler()
    sigemptyset(&new_act.sa_mask);
#else
    if (sigemptyset(&new_act.sa_mask))
        throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
#endif

    if (sigaction(SIGINT, &new_act, nullptr))
        throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");

    if (sigaction(SIGQUIT, &new_act, nullptr))
        throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler");
}


@ -543,16 +543,16 @@ try
    if (!pager.empty())
    {
        if (SIG_ERR == signal(SIGPIPE, SIG_IGN))
            throwFromErrno("Cannot set signal handler for SIGPIPE.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
            throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE");
        /// We need to reset signals that had been installed in the
        /// setupSignalHandler() since terminal will send signals to both
        /// processes and so signals will be delivered to the
        /// clickhouse-client/local as well, which will be terminated when
        /// signal will be delivered second time.
        if (SIG_ERR == signal(SIGINT, SIG_IGN))
            throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
            throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT");
        if (SIG_ERR == signal(SIGQUIT, SIG_IGN))
            throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
            throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT");

        ShellCommand::Config config(pager);
        config.pipe_stdin_only = true;
@ -1306,11 +1306,11 @@ void ClientBase::resetOutput()
    pager_cmd->wait();

    if (SIG_ERR == signal(SIGPIPE, SIG_DFL))
        throwFromErrno("Cannot set signal handler for SIIGPIEP.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE");
    if (SIG_ERR == signal(SIGINT, SIG_DFL))
        throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT");
    if (SIG_ERR == signal(SIGQUIT, SIG_DFL))
        throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT");

    setupSignalHandler();
}
@ -248,7 +248,7 @@ void ColumnFunction::appendArguments(const ColumnsWithTypeAndName & columns)
    auto wanna_capture = columns.size();

    if (were_captured + wanna_capture > args)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} columns because function {} has {} arguments{}.",
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} column(s) because function {} has {} arguments{}.",
            wanna_capture, function->getName(), args,
            (were_captured ? " and " + toString(were_captured) + " columns have already been captured" : ""));
@ -18,9 +18,11 @@ void AlignedBuffer::alloc(size_t size, size_t alignment)
    void * new_buf;
    int res = ::posix_memalign(&new_buf, std::max(alignment, sizeof(void*)), size);
    if (0 != res)
        throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign), size: {}, alignment: {}.",
            ReadableSize(size), ReadableSize(alignment)),
            ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
        throw ErrnoException(
            ErrorCodes::CANNOT_ALLOCATE_MEMORY,
            "Cannot allocate memory (posix_memalign), size: {}, alignment: {}.",
            ReadableSize(size),
            ReadableSize(alignment));
    buf = new_buf;
}
@ -118,8 +118,11 @@ public:
    void * new_buf = ::realloc(buf, new_size);
    if (nullptr == new_buf)
    {
        DB::throwFromErrno(
            fmt::format("Allocator: Cannot realloc from {} to {}.", ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
        throw DB::ErrnoException(
            DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY,
            "Allocator: Cannot realloc from {} to {}",
            ReadableSize(old_size),
            ReadableSize(new_size));
    }

    buf = new_buf;
@ -164,7 +167,7 @@ private:
    buf = ::malloc(size);

    if (nullptr == buf)
        DB::throwFromErrno(fmt::format("Allocator: Cannot malloc {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
        throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot malloc {}.", ReadableSize(size));
}
else
{
@ -172,8 +175,8 @@ private:
    int res = posix_memalign(&buf, alignment, size);

    if (0 != res)
        DB::throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign) {}.", ReadableSize(size)),
            DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
        throw DB::ErrnoException(
            DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Cannot allocate memory (posix_memalign) {}.", ReadableSize(size));

    if constexpr (clear_memory)
        memset(buf, 0, size);
@ -179,13 +179,13 @@ private:
    {
        ptr = mmap(address_hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (MAP_FAILED == ptr)
            DB::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
            throw ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot mmap {}", ReadableSize(size));
    }

    ~Chunk()
    {
        if (ptr && 0 != munmap(ptr, size))
            DB::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_MUNMAP);
            throw ErrnoException(DB::ErrorCodes::CANNOT_MUNMAP, "Allocator: Cannot munmap {}", ReadableSize(size));
    }

    Chunk(Chunk && other) noexcept : ptr(other.ptr), size(other.size)
@ -362,7 +362,7 @@ public:
    bool is_executing = false;
};

// For introspection and debug only, see `system.async_loader` table.
// For introspection and debug only, see `system.asynchronous_loader` table.
std::vector<JobState> getJobStates() const;

// For deadlock resolution. Should not be used directly.
@ -797,7 +797,7 @@ void AsynchronousMetrics::update(TimePoint update_time)

    int64_t hz = sysconf(_SC_CLK_TCK);
    if (-1 == hz)
        throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF);
        throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ");

    double multiplier = 1.0 / hz / (std::chrono::duration_cast<std::chrono::nanoseconds>(time_after_previous_update).count() / 1e9);
    size_t num_cpus = 0;
@ -519,8 +519,9 @@ void ConfigProcessor::doIncludesRecursive(

    if (attr_nodes["from_zk"]) /// we have zookeeper subst
    {
        if (node->hasChildNodes()) /// only allow substitution for nodes with no value
            throw Poco::Exception("Element <" + node->nodeName() + "> has value, can't process from_zk substitution");
        /// only allow substitution for nodes with no value and without "replace"
        if (node->hasChildNodes() && !replace)
            throw Poco::Exception("Element <" + node->nodeName() + "> has value and does not have 'replace' attribute, can't process from_zk substitution");

        contributing_zk_paths.insert(attr_nodes["from_zk"]->getNodeValue());

@ -544,8 +545,9 @@ void ConfigProcessor::doIncludesRecursive(

    if (attr_nodes["from_env"]) /// we have env subst
    {
        if (node->hasChildNodes()) /// only allow substitution for nodes with no value
            throw Poco::Exception("Element <" + node->nodeName() + "> has value, can't process from_env substitution");
        /// only allow substitution for nodes with no value and without "replace"
        if (node->hasChildNodes() && !replace)
            throw Poco::Exception("Element <" + node->nodeName() + "> has value and does not have 'replace' attribute, can't process from_env substitution");

        XMLDocumentPtr env_document;
        auto get_env_node = [&](const std::string & name) -> const Node *
@ -69,13 +69,13 @@ public:

    int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
    if (-1 == fd)
        DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
        DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path);

    try
    {
        int flock_ret = flock(fd, LOCK_EX);
        if (-1 == flock_ret)
            DB::throwFromErrnoWithPath("Cannot lock file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
            DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot lock file {}", path);

        if (!file_doesnt_exists)
        {
@ -145,7 +145,7 @@ public:

    int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
    if (-1 == fd)
        DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
        DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path);

    try
    {
@ -260,6 +260,7 @@
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M)
#endif


namespace CurrentMetrics
{
    #define M(NAME, DOCUMENTATION) extern const Metric NAME = Metric(__COUNTER__);
@ -19,7 +19,7 @@ Epoll::Epoll() : events_count(0)
{
    epoll_fd = epoll_create1(0);
    if (epoll_fd == -1)
        throwFromErrno("Cannot open epoll descriptor", DB::ErrorCodes::EPOLL_ERROR);
        throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor");
}

Epoll::Epoll(Epoll && other) noexcept : epoll_fd(other.epoll_fd), events_count(other.events_count.load())
@ -47,7 +47,7 @@ void Epoll::add(int fd, void * ptr, uint32_t events)
    ++events_count;

    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1)
        throwFromErrno("Cannot add new descriptor to epoll", DB::ErrorCodes::EPOLL_ERROR);
        throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll");
}

void Epoll::remove(int fd)
@ -55,7 +55,7 @@ void Epoll::remove(int fd)
    --events_count;

    if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr) == -1)
        throwFromErrno("Cannot remove descriptor from epoll", DB::ErrorCodes::EPOLL_ERROR);
        throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll");
}

size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const
@ -82,7 +82,7 @@ size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout)
        continue;
    }
    else
        throwFromErrno("Error in epoll_wait", DB::ErrorCodes::EPOLL_ERROR);
        throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Error in epoll_wait");
}
else
    break;
@ -21,7 +21,7 @@ EventFD::EventFD()
{
    fd = eventfd(0 /* initval */, 0 /* flags */);
    if (fd == -1)
        throwFromErrno("Cannot create eventfd", ErrorCodes::CANNOT_PIPE);
        throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create eventfd");
}

uint64_t EventFD::read() const
@ -33,7 +33,7 @@ uint64_t EventFD::read() const
        break;

    if (errno != EINTR)
        throwFromErrno("Cannot read from eventfd", ErrorCodes::CANNOT_READ_FROM_SOCKET);
        throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from eventfd");
}

return buf;
@ -47,7 +47,7 @@ bool EventFD::write(uint64_t increase) const
        return false;

    if (errno != EINTR)
        throwFromErrno("Cannot write to eventfd", ErrorCodes::CANNOT_WRITE_TO_SOCKET);
        throw ErrnoException(ErrorCodes::CANNOT_WRITE_TO_SOCKET, "Cannot write to eventfd");
}

return true;
@ -1,25 +1,24 @@
#include "Exception.h"

#include <algorithm>
#include <cstring>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>
#include <Common/logger_useful.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <base/demangle.h>
#include <base/errnoToString.h>
#include <Common/formatReadable.h>
#include <Common/filesystemHelpers.h>
#include <Poco/String.h>
#include <Common/ErrorCodes.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <Common/MemorySanitizer.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <filesystem>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>

#include <Common/config_version.h>

@ -212,17 +211,6 @@ Exception::FramePointers Exception::getStackFramePointers() const
thread_local bool Exception::enable_job_stack_trace = false;
thread_local std::vector<StackTrace::FramePointers> Exception::thread_frame_pointers = {};


void throwFromErrno(const std::string & s, int code, int the_errno)
{
    throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno);
}

void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, int the_errno)
{
    throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno, path);
}

static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message)
{
    try
@ -7,9 +7,10 @@
#include <Poco/Exception.h>

#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/scope_guard.h>
#include <Common/StackTrace.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/StackTrace.h>

#include <fmt/format.h>

@ -173,12 +174,61 @@ std::string getExceptionStackTraceString(const std::exception & e);
std::string getExceptionStackTraceString(std::exception_ptr e);


/// Contains an additional member `saved_errno`. See the throwFromErrno function.
/// Contains an additional member `saved_errno`
class ErrnoException : public Exception
{
public:
    ErrnoException(const std::string & msg, int code, int saved_errno_, const std::optional<std::string> & path_ = {})
        : Exception(msg, code), saved_errno(saved_errno_), path(path_) {}
    ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno)
    {
        capture_thread_frame_pointers = thread_frame_pointers;
        addMessage(", {}", errnoToString(saved_errno));
    }

    /// Message must be a compile-time constant
    template <typename T>
    requires std::is_convertible_v<T, String>
    ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno)
    {
        capture_thread_frame_pointers = thread_frame_pointers;
        addMessage(", {}", errnoToString(saved_errno));
    }

    // Format message with fmt::format, like the logging functions.
    template <typename... Args>
    ErrnoException(int code, FormatStringHelper<Args...> fmt, Args &&... args)
        : Exception(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code), saved_errno(errno)
    {
        capture_thread_frame_pointers = thread_frame_pointers;
        message_format_string = fmt.message_format_string;
        addMessage(", {}", errnoToString(saved_errno));
    }

    template <typename... Args>
    [[noreturn]] static void throwWithErrno(int code, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
    {
        auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
        e.message_format_string = fmt.message_format_string;
        throw e;
    }

    template <typename... Args>
    [[noreturn]] static void throwFromPath(int code, const std::string & path, FormatStringHelper<Args...> fmt, Args &&... args)
    {
        auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, errno);
        e.message_format_string = fmt.message_format_string;
        e.path = path;
        throw e;
    }

    template <typename... Args>
    [[noreturn]] static void
    throwFromPathWithErrno(int code, const std::string & path, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
    {
        auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
        e.message_format_string = fmt.message_format_string;
        e.path = path;
        throw e;
    }

    ErrnoException * clone() const override { return new ErrnoException(*this); }
    void rethrow() const override { throw *this; } // NOLINT
@ -188,7 +238,7 @@ public:

private:
    int saved_errno;
    std::optional<std::string> path;
    std::optional<std::string> path{};

    const char * name() const noexcept override { return "DB::ErrnoException"; }
    const char * className() const noexcept override { return "DB::ErrnoException"; }
@ -233,13 +283,6 @@ private:

using Exceptions = std::vector<std::exception_ptr>;


[[noreturn]] void throwFromErrno(const std::string & s, int code, int the_errno = errno);
/// Useful to produce some extra information about available space and inodes on device
[[noreturn]] void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code,
    int the_errno = errno);


/** Try to write an exception to the log (and forget about it).
 * Can be used in destructors in the catch-all block.
 */
@ -28,13 +28,14 @@ static struct InitFiu

/// We should define different types of failpoints here. There are four types of them:
/// - ONCE: the failpoint will only be triggered once.
/// - REGULAR: the failpoint will always be triggered util disableFailPoint is called.
/// - PAUSAEBLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, util disableFailPoint is called.
/// - PAUSAEBLE: the failpoint will be blocked every time when pauseFailPoint is called, util disableFailPoint is called.
/// - REGULAR: the failpoint will always be triggered until disableFailPoint is called.
/// - PAUSEABLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, until disableFailPoint is called.
/// - PAUSEABLE: the failpoint will be blocked every time when pauseFailPoint is called, until disableFailPoint is called.

#define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \
    ONCE(replicated_merge_tree_commit_zk_fail_after_op) \
    ONCE(replicated_merge_tree_insert_quorum_fail_0) \
    REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \
    REGULAR(use_delayed_remote_source) \
    REGULAR(cluster_discovery_faults) \
    REGULAR(check_table_query_delay_for_part) \
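Each name in that list expands into a failpoint that tests can toggle at runtime. A hedged sketch of how a REGULAR failpoint might be exercised at a call site; the include path and the fiu_do_on macro follow libfiu conventions, which this header wraps, but the exact spelling here is an assumption:

```cpp
#include <chrono>
#include <thread>
#include <Common/FailPoint.h> /// assumed include path

namespace DB::FailPoints
{
    extern const char check_table_query_delay_for_part[]; /// declared via APPLY_FOR_FAILPOINTS
}

void checkPartWithDelay()
{
    /// The block runs only while the failpoint is enabled (e.g. by a test);
    /// a REGULAR failpoint keeps firing until it is disabled again.
    fiu_do_on(DB::FailPoints::check_table_query_delay_for_part,
    {
        std::this_thread::sleep_for(std::chrono::seconds(1));
    });
}
```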
@ -46,14 +46,14 @@ public:

        void * vp = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (MAP_FAILED == vp)
            DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
            throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: Cannot mmap {}.", ReadableSize(num_bytes));

        /// TODO: make reports on illegal guard page access more clear.
        /// Currently we will see segfault and almost random stacktrace.
        if (-1 == ::mprotect(vp, page_size, PROT_NONE))
        {
            ::munmap(vp, num_bytes);
            DB::throwFromErrno("FiberStack: cannot protect guard page", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
            throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: cannot protect guard page");
        }

        /// Do not count guard page in memory usage.
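The guard-page idiom above, mapping the stack with one extra page and revoking access to it so that an overflow faults deterministically, is self-contained enough to show standalone. A minimal sketch, with our own helper name and plain std::runtime_error in place of the DB exceptions:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <stdexcept>

/// Allocate `stack_bytes` of fiber stack preceded by one inaccessible guard page,
/// so running off the end of the stack traps immediately instead of corrupting memory.
void * allocateGuardedStack(size_t stack_bytes)
{
    const size_t page_size = static_cast<size_t>(::sysconf(_SC_PAGESIZE));
    const size_t total = stack_bytes + page_size;

    void * base = ::mmap(nullptr, total, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (MAP_FAILED == base)
        throw std::runtime_error("cannot mmap fiber stack");

    if (0 != ::mprotect(base, page_size, PROT_NONE)) /// the first page becomes the guard
    {
        ::munmap(base, total);
        throw std::runtime_error("cannot protect guard page");
    }
    return static_cast<char *>(base) + page_size; /// usable stack starts above the guard
}
```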
@ -58,9 +58,8 @@ private:
public:
    InterruptListener() : active(false)
    {
        if (sigemptyset(&sig_set)
            || sigaddset(&sig_set, SIGINT))
            throwFromErrno("Cannot manipulate with signal set.", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
        if (sigemptyset(&sig_set) || sigaddset(&sig_set, SIGINT))
            throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Cannot manipulate with signal set");

        block();
    }
@ -82,7 +81,7 @@ public:
            if (errno == EAGAIN)
                return false;
            else
                throwFromErrno("Cannot poll signal (sigtimedwait).", ErrorCodes::CANNOT_WAIT_FOR_SIGNAL);
                throw ErrnoException(ErrorCodes::CANNOT_WAIT_FOR_SIGNAL, "Cannot poll signal (sigtimedwait)");
        }

        return true;
@ -93,7 +92,7 @@ public:
        if (!active)
        {
            if (pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
                throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL);
                throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal");

            active = true;
        }
@ -105,7 +104,7 @@ public:
        if (active)
        {
            if (pthread_sigmask(SIG_UNBLOCK, &sig_set, nullptr))
                throwFromErrno("Cannot unblock signal.", ErrorCodes::CANNOT_UNBLOCK_SIGNAL);
                throw ErrnoException(ErrorCodes::CANNOT_UNBLOCK_SIGNAL, "Cannot unblock signal");

            active = false;
        }
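The listener blocks SIGINT for the thread and then polls for it with sigtimedwait, so Ctrl+C becomes an ordinary return value instead of an asynchronous handler. A compact sketch of the same pattern outside the class (the pollSigint helper is ours):

```cpp
#include <csignal>
#include <cerrno>
#include <ctime>
#include <stdexcept>

/// Returns true if a SIGINT was pending. The signal must be blocked first,
/// otherwise it would be delivered asynchronously instead of being queued
/// for sigtimedwait to pick up.
bool pollSigint()
{
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGINT);
    if (pthread_sigmask(SIG_BLOCK, &set, nullptr))
        throw std::runtime_error("cannot block SIGINT");

    timespec timeout{0, 0}; /// zero timeout: poll and return immediately
    if (-1 == sigtimedwait(&set, nullptr, &timeout))
    {
        if (errno == EAGAIN) /// nothing pending
            return false;
        throw std::runtime_error("sigtimedwait failed");
    }
    return true;
}
```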
@ -39,7 +39,8 @@ MemoryStatisticsOS::MemoryStatisticsOS()
    fd = ::open(filename, O_RDONLY | O_CLOEXEC);

    if (-1 == fd)
        throwFromErrno("Cannot open file " + std::string(filename), errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
        ErrnoException::throwFromPath(
            errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename);
}

MemoryStatisticsOS::~MemoryStatisticsOS()
@ -48,9 +49,8 @@ MemoryStatisticsOS::~MemoryStatisticsOS()
    {
        try
        {
            throwFromErrno(
                "File descriptor for \"" + std::string(filename) + "\" could not be closed. "
                "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
            ErrnoException::throwFromPath(
                ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for '{}' could not be closed", filename);
        }
        catch (const ErrnoException &)
        {
@ -77,7 +77,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
            if (errno == EINTR)
                continue;

            throwFromErrno("Cannot read from file " + std::string(filename), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
            ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename);
        }

        assert(res >= 0);
@ -136,7 +136,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
    size_t len = sizeof(struct kinfo_proc);

    if (-1 == ::sysctl(mib, 4, &kp, &len, nullptr, 0))
        throwFromErrno("Cannot sysctl(kern.proc.pid." + std::to_string(self) + ")", ErrorCodes::SYSTEM_ERROR);
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot sysctl(kern.proc.pid.{})", std::to_string(self));

    if (sizeof(struct kinfo_proc) != len)
        throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Kernel returns structure of {} bytes instead of expected {}",
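The get() path reads a small procfs file and retries when the read is interrupted by a signal. That EINTR-retry loop is worth isolating; a sketch under the assumption of a small fixed-size buffer (the readProcFile helper and its buffer size are ours):

```cpp
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstring>
#include <stdexcept>
#include <string>

/// Read a small procfs file in one shot, retrying reads interrupted by signals.
std::string readProcFile(const char * path)
{
    int fd = ::open(path, O_RDONLY | O_CLOEXEC);
    if (-1 == fd)
        throw std::runtime_error(std::string("cannot open ") + path + ": " + strerror(errno));

    char buf[4096];
    ssize_t res;
    do
    {
        res = ::pread(fd, buf, sizeof(buf), 0); /// procfs regenerates content on each read
    } while (-1 == res && errno == EINTR);      /// a signal interrupted the read: just retry

    int saved_errno = errno;
    ::close(fd);
    if (-1 == res)
        throw std::runtime_error(std::string("cannot read ") + path + ": " + strerror(saved_errno));
    return std::string(buf, static_cast<size_t>(res));
}
```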
@ -117,7 +117,7 @@ struct NetlinkMessage
            if (errno == EAGAIN)
                continue;
            else
                throwFromErrno("Can't send a Netlink command", ErrorCodes::NETLINK_ERROR);
                throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't send a Netlink command");
        }

        if (bytes_sent > request_size)
@ -255,7 +255,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
{
    netlink_socket_fd = ::socket(PF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
    if (netlink_socket_fd < 0)
        throwFromErrno("Can't create PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
        throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't create PF_NETLINK socket");

    try
    {
@ -267,7 +267,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
        tv.tv_usec = 50000;

        if (0 != ::setsockopt(netlink_socket_fd, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast<const char *>(&tv), sizeof(tv)))
            throwFromErrno("Can't set timeout on PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
            throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't set timeout on PF_NETLINK socket");

        union
        {
@ -277,7 +277,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider()
        addr.nl_family = AF_NETLINK;

        if (::bind(netlink_socket_fd, &sockaddr, sizeof(addr)) < 0)
            throwFromErrno("Can't bind PF_NETLINK socket", ErrorCodes::NETLINK_ERROR);
            throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't bind PF_NETLINK socket");

        taskstats_family_id = getFamilyId(netlink_socket_fd);
    }
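The constructor wires up a generic-netlink socket with a short receive timeout before resolving the taskstats family id. The socket setup, stripped of the class around it, looks roughly like this (openNetlinkSocket is our name; error handling simplified to std::runtime_error):

```cpp
#include <sys/socket.h>
#include <linux/netlink.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdexcept>

/// Open a NETLINK_GENERIC socket with a 50 ms receive timeout and bind it
/// to this process, as the constructor above does.
int openNetlinkSocket()
{
    int fd = ::socket(PF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
    if (fd < 0)
        throw std::runtime_error("cannot create PF_NETLINK socket");

    timeval tv{};
    tv.tv_usec = 50000; /// do not hang forever if the kernel never answers
    if (0 != ::setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
    {
        ::close(fd);
        throw std::runtime_error("cannot set timeout on PF_NETLINK socket");
    }

    sockaddr_nl addr{};
    addr.nl_family = AF_NETLINK; /// nl_pid of 0 lets the kernel assign our port id
    if (::bind(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0)
    {
        ::close(fd);
        throw std::runtime_error("cannot bind PF_NETLINK socket");
    }
    return fd;
}
```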
@ -209,7 +209,7 @@ protected:
    {
        size_t length = right_rounded_down - left_rounded_up;
        if (0 != mprotect(left_rounded_up, length, prot))
            throwFromErrno("Cannot mprotect memory region", ErrorCodes::CANNOT_MPROTECT);
            throw ErrnoException(ErrorCodes::CANNOT_MPROTECT, "Cannot mprotect memory region");
    }
}
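mprotect accepts only page-aligned addresses, which is why the region is rounded inward (left edge up, right edge down) before the call. The arithmetic, as a standalone sketch (protectInnerPages and its signature are ours):

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

/// Change protection only on the whole pages fully inside [addr, addr + size):
/// round the left edge up and the right edge down to page boundaries.
int protectInnerPages(void * addr, size_t size, int prot)
{
    const uintptr_t page_size = static_cast<uintptr_t>(::sysconf(_SC_PAGESIZE));
    const uintptr_t begin = reinterpret_cast<uintptr_t>(addr);

    uintptr_t left_rounded_up = (begin + page_size - 1) & ~(page_size - 1);
    uintptr_t right_rounded_down = (begin + size) & ~(page_size - 1);

    if (left_rounded_up >= right_rounded_down)
        return 0; /// the range does not cover a single complete page

    return ::mprotect(reinterpret_cast<void *>(left_rounded_up), right_rounded_down - left_rounded_up, prot);
}
```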
@ -29,14 +29,14 @@ void LazyPipeFDs::open()

#ifndef OS_DARWIN
    if (0 != pipe2(fds_rw, O_CLOEXEC))
        throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE);
        throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe");
#else
    if (0 != pipe(fds_rw))
        throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE);
        throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe");
    if (0 != fcntl(fds_rw[0], F_SETFD, FD_CLOEXEC))
        throwFromErrno("Cannot setup auto-close on exec for read end of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for read end of pipe");
    if (0 != fcntl(fds_rw[1], F_SETFD, FD_CLOEXEC))
        throwFromErrno("Cannot setup auto-close on exec for write end of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for write end of pipe");
#endif
}

@ -47,7 +47,7 @@ void LazyPipeFDs::close()
        if (fd < 0)
            continue;
        if (0 != ::close(fd))
            throwFromErrno("Cannot close pipe", ErrorCodes::CANNOT_PIPE);
            throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot close pipe");
        fd = -1;
    }
}
@ -74,18 +74,18 @@ void LazyPipeFDs::setNonBlockingWrite()
{
    int flags = fcntl(fds_rw[1], F_GETFL, 0);
    if (-1 == flags)
        throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe");
    if (-1 == fcntl(fds_rw[1], F_SETFL, flags | O_NONBLOCK))
        throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe");
}

void LazyPipeFDs::setNonBlockingRead()
{
    int flags = fcntl(fds_rw[0], F_GETFL, 0);
    if (-1 == flags)
        throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe");
    if (-1 == fcntl(fds_rw[0], F_SETFL, flags | O_NONBLOCK))
        throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
        throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe");
}

void LazyPipeFDs::setNonBlockingReadWrite()
@ -110,13 +110,13 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size)
            /// It will work nevertheless.
        }
        else
            throwFromErrno("Cannot get pipe capacity", ErrorCodes::CANNOT_FCNTL);
            throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get pipe capacity");
    }
    else
    {
        for (errno = 0; errno != EPERM && pipe_size < desired_size; pipe_size *= 2)
            if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM)
                throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL);
                throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot increase pipe capacity to {}", pipe_size * 2);

        LOG_TRACE(log, "Pipe capacity is {}", ReadableSize(std::min(pipe_size, desired_size)));
    }
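tryIncreaseSize doubles the pipe buffer with the Linux-specific F_SETPIPE_SZ until the target is reached or the kernel refuses. For unprivileged processes the request fails with EPERM once it exceeds /proc/sys/fs/pipe-max-size, and the code deliberately settles for whatever was granted. A sketch of that negotiation (tryGrowPipe is our name):

```cpp
#include <fcntl.h>
#include <cerrno>

/// Grow a pipe's kernel buffer to at least `desired` bytes, best effort.
/// Returns the resulting capacity, or -1 on a real error.
int tryGrowPipe(int write_fd, int desired)
{
    int size = fcntl(write_fd, F_GETPIPE_SZ);
    if (-1 == size)
        return -1; /// F_GETPIPE_SZ is Linux 2.6.35+; treat absence as failure

    while (size < desired)
    {
        int granted = fcntl(write_fd, F_SETPIPE_SZ, size * 2);
        if (-1 == granted)
        {
            if (errno == EPERM)
                break; /// hit the unprivileged cap: keep the size we already have
            return -1;
        }
        size = granted; /// the kernel may round the request up
    }
    return size;
}
```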
@ -37,18 +37,15 @@ namespace
{
    [[noreturn]] inline void throwWithFailedToOpenFile(const std::string & filename)
    {
        throwFromErrno(
            "Cannot open file " + filename,
            errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
        ErrnoException::throwFromPath(
            errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename);
    }

    inline void emitErrorMsgWithFailedToCloseFile(const std::string & filename)
    {
        try
        {
            throwFromErrno(
                "File descriptor for \"" + filename + "\" could not be closed. "
                "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
            ErrnoException::throwFromPath(ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for {} could not be closed", filename);
        }
        catch (const ErrnoException &)
        {
@ -69,9 +66,7 @@ ssize_t readFromFD(const int fd, const char * filename, char * buf, size_t buf_s
        if (errno == EINTR)
            continue;

        throwFromErrno(
            "Cannot read from file " + std::string(filename),
            ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
        ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename);
    }

    assert(res >= 0);
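emitErrorMsgWithFailedToCloseFile throws and immediately catches so that the error is formatted and logged without ever escaping, which matters when the helper runs from a destructor. The shape of that pattern, reduced to standard C++ (closeQuietly and the std::cerr logging are ours; the real code presumably routes through the project's exception-logging helpers):

```cpp
#include <unistd.h>
#include <iostream>
#include <stdexcept>
#include <string>

/// Destructor-safe close: report the failure, never let it propagate.
void closeQuietly(int fd, const std::string & filename)
{
    if (0 != ::close(fd))
    {
        try
        {
            throw std::runtime_error("File descriptor for " + filename + " could not be closed");
        }
        catch (const std::exception & e)
        {
            std::cerr << e.what() << '\n'; /// swallow: we may be inside a destructor
        }
    }
}
```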
@ -451,6 +451,7 @@ The server successfully detected this situation and will download merged part fr
    M(ThreadpoolReaderSubmitReadSynchronously, "How many times we haven't scheduled a task on the thread pool and read synchronously instead") \
    M(ThreadpoolReaderSubmitReadSynchronouslyBytes, "How many bytes were read synchronously") \
    M(ThreadpoolReaderSubmitReadSynchronouslyMicroseconds, "How much time we spent reading synchronously") \
    M(ThreadpoolReaderSubmitLookupInCacheMicroseconds, "How much time we spent checking if content is cached") \
    M(AsynchronousReaderIgnoredBytes, "Number of bytes ignored during asynchronous reading") \
    \
    M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \
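These M(...) entries follow the X-macro pattern: the event list is written once, and each consumer defines M to expand the same list into an enum, a name table, or counters. A minimal self-contained illustration with invented event names:

```cpp
#include <cstdio>

/// Define the list once...
#define APPLY_FOR_DEMO_EVENTS(M) \
    M(ReadsSync,  "Synchronous reads")  \
    M(ReadsAsync, "Asynchronous reads")

/// ...expand it into an enum of event ids...
enum DemoEvent
{
#define M(NAME, DOC) NAME,
    APPLY_FOR_DEMO_EVENTS(M)
#undef M
    DEMO_EVENT_COUNT
};

/// ...and again into a parallel table of descriptions.
static const char * demo_event_docs[DEMO_EVENT_COUNT] =
{
#define M(NAME, DOC) DOC,
    APPLY_FOR_DEMO_EVENTS(M)
#undef M
};

int main()
{
    std::printf("%s\n", demo_event_docs[ReadsSync]); /// prints "Synchronous reads"
}
```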