Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-28 18:42:26 +00:00)

Commit e7becedb03: Merge remote-tracking branch 'ClickHouse/master' into bump-rocksdb
@@ -13,3 +13,6 @@
 # dbms/ → src/
 # (though it is unlikely that you will see it in blame)
 06446b4f08a142d6f1bc30664c47ded88ab51782
+
+# Applied Black formatter for Python code
+e6f5a3f98b21ba99cf274a9833797889e020a2b3
.github/actionlint.yml (1 line changed, vendored)

@@ -7,3 +7,4 @@ self-hosted-runner:
   - stress-tester
   - style-checker
   - style-checker-aarch64
+  - release-maker
.github/workflows/create_release.yml (151 lines changed, vendored)

@@ -6,8 +6,8 @@ concurrency:
 'on':
   workflow_dispatch:
     inputs:
-      sha:
-        description: 'The SHA hash of the commit from which to create the release'
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
         required: true
         type: string
       type:
@@ -15,15 +15,152 @@ concurrency:
         required: true
         type: choice
         options:
-          - new
           - patch
+          - new
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean

 jobs:
-  Release:
-    runs-on: [self-hosted, style-checker-aarch64]
+  CreateRelease:
+    env:
+      GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+    runs-on: [self-hosted, release-maker]
     steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
+      - name: Set envs
+        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
+          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
+          RCSK
+          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
+          EOF
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
-      - name: Print greeting
+        with:
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          fetch-depth: 0
+      - name: Prepare Release Info
         run: |
-          python3 ./tests/ci/release.py --commit ${{ inputs.sha }} --type ${{ inputs.type }} --dry-run
+          python3 ./tests/ci/create_release.py --prepare-release-info \
+            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
+            --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          echo "::group::Release Info"
+          python3 -m json.tool "$RELEASE_INFO_FILE"
+          echo "::endgroup::"
+          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
+          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
+          echo "Release Tag: $release_tag"
+          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
+          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+      - name: Download All Release Artifacts
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Push Git Tag for the Release
+        run: |
+          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Push New Release Branch
+        if: ${{ inputs.type == 'new' }}
+        run: |
+          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Bump CH Version and Update Contributors' List
+        run: |
+          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Checkout master
+        run: |
+          git checkout master
+      - name: Bump Docker versions, Changelog, Security
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
+          echo "List versions"
+          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+          echo "Update docker version"
+          ./utils/list-versions/update-docker-version.sh
+          echo "Generate ChangeLog"
+          export CI=1
+          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
+            --volume=".:/ClickHouse" clickhouse/style-test \
+            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
+            --gh-user-or-token="$GH_TOKEN" --jobs=5 \
+            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
+          echo "Generate Security"
+          python3 ./utils/security-generator/generate_security.py > SECURITY.md
+          git diff HEAD
+      - name: Generate ChangeLog
+        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
+        uses: peter-evans/create-pull-request@v6
+        with:
+          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+          branch: auto/${{ env.RELEASE_TAG }}
+          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
+          delete-branch: true
+          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
+          labels: do not test
+          body: |
+            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+            ### Changelog category (leave one):
+            - Not for changelog (changelog entry is not required)
+      - name: Reset changes if Dry-run
+        if: ${{ inputs.dry-run }}
+        run: |
+          git reset --hard HEAD
+      - name: Checkout back to GITHUB_REF
+        run: |
+          git checkout "$GITHUB_REF_NAME"
+      - name: Create GH Release
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/create_release.py --create-gh-release \
+            --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Export TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Test TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Export RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Test RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Export Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Test Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+      - name: Docker clickhouse/clickhouse-server building
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          cd "./tests/ci"
+          export CHECK_NAME="Docker server image"
+          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+      - name: Docker clickhouse/clickhouse-keeper building
+        if: ${{ inputs.type == 'patch' }}
+        run: |
+          cd "./tests/ci"
+          export CHECK_NAME="Docker keeper image"
+          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+      - name: Post Slack Message
+        if: always()
+        run: |
+          echo Slack Message
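The workflow above drives the new tests/ci/create_release.py script purely through command-line flags (--prepare-release-info, --ref, --release-type, --outfile, --dry-run) and a release_info.json file whose release_tag and commit_sha fields later steps read back with jq. The following is a minimal sketch of that flag-and-JSON contract only; the real script does far more (tagging, packaging, GitHub API calls), and the tag format and helper names here are assumptions for illustration.

```python
# Hedged sketch of the CLI/JSON contract used by the workflow above.
# Assumes it runs inside a git checkout; the tag naming is hypothetical.
import argparse
import json
import subprocess


def prepare_release_info(ref: str, release_type: str) -> dict:
    # Resolve the user-supplied ref (branch or commit sha) to a commit.
    commit_sha = subprocess.check_output(["git", "rev-parse", ref], text=True).strip()
    # Hypothetical tag scheme; the actual one lives in the real script.
    release_tag = f"v-{release_type}-{commit_sha[:10]}"
    return {"release_tag": release_tag, "commit_sha": commit_sha}


def main() -> None:
    parser = argparse.ArgumentParser(description="create_release.py sketch")
    parser.add_argument("--prepare-release-info", action="store_true")
    parser.add_argument("--ref", help="branch name or commit sha")
    parser.add_argument("--release-type", choices=["patch", "new"])
    parser.add_argument("--outfile", help="where to write release_info.json")
    parser.add_argument("--dry-run", action="store_true")
    args = parser.parse_args()

    if args.prepare_release_info:
        info = prepare_release_info(args.ref, args.release_type)
        with open(args.outfile, "w") as f:
            json.dump(info, f, indent=2)
        # Later workflow steps read the same file back: jq -r '.release_tag' ...
        print(f"prepared {args.outfile} (dry-run={args.dry_run})")


if __name__ == "__main__":
    main()
```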
.github/workflows/pull_request.yml (2 lines changed, vendored)

@@ -172,7 +172,7 @@ jobs:
 ################################# Stage Final #################################
 #
   FinishCheck:
-    if: ${{ !failure() }}
+    if: ${{ !failure() && !cancelled() }}
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
@@ -42,9 +42,19 @@ endif ()
 # But use 2 parallel jobs, since:
 # - this is what llvm does
 # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
-    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-    set (PARALLEL_LINK_JOBS 2)
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
+    if (ARCH_AARCH64)
+        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
+        set (PARALLEL_LINK_JOBS 1)
+        if (LINKER_NAME MATCHES "lld")
+            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
+            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
+        endif()
+    elseif (PARALLEL_LINK_JOBS GREATER 2)
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+        set (PARALLEL_LINK_JOBS 2)
+    endif ()
 endif()

 message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
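The CMake hunk above caps concurrent link jobs (1 on aarch64, 2 elsewhere) and, when linking with lld, hands the real parallelism back to ThinLTO via -Wl,--thinlto-jobs set to a quarter of the logical cores. A small sketch of that arithmetic, with os.cpu_count() standing in for NUMBER_OF_LOGICAL_CORES; this only illustrates the values from the diff, it is not the build system itself.

```python
# Sketch of the link-job arithmetic from the CMake change above.
import os


def link_job_settings(is_aarch64: bool, linker_name: str, parallel_link_jobs: int):
    cores = os.cpu_count() or 1
    extra_linker_flags = []
    if is_aarch64:
        # aarch64 builds were hitting OOMs, so concurrency is pinned to 1 link job.
        parallel_link_jobs = 1
        if "lld" in linker_name:
            # ThinLTO does its own backend parallelism: one quarter of the cores.
            lto_jobs = max(1, cores // 4)
            extra_linker_flags.append(f"-Wl,--thinlto-jobs={lto_jobs}")
    elif parallel_link_jobs > 2:
        # On other platforms ThinLTO still limits concurrent links to 2.
        parallel_link_jobs = 2
    return parallel_link_jobs, extra_linker_flags


print(link_job_settings(is_aarch64=True, linker_name="ld.lld", parallel_link_jobs=8))
```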
contrib/openssl (2 lines changed, vendored)

@@ -1 +1 @@
-Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
+Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
@@ -27,19 +27,19 @@ def run_fuzzer(fuzzer: str):
     parser.read(path)

     if parser.has_section("asan"):
-        os.environ[
-            "ASAN_OPTIONS"
-        ] = f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}"
+        os.environ["ASAN_OPTIONS"] = (
+            f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}"
+        )

     if parser.has_section("msan"):
-        os.environ[
-            "MSAN_OPTIONS"
-        ] = f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}"
+        os.environ["MSAN_OPTIONS"] = (
+            f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}"
+        )

     if parser.has_section("ubsan"):
-        os.environ[
-            "UBSAN_OPTIONS"
-        ] = f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}"
+        os.environ["UBSAN_OPTIONS"] = (
+            f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}"
+        )

     if parser.has_section("libfuzzer"):
         custom_libfuzzer_options = " ".join(
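The reformatting above (Black style) does not change behaviour: each sanitizer section of the fuzzer config is flattened into key=value pairs and appended to the matching *_OPTIONS environment variable. Below is a standalone sketch of that merge using a made-up config snippet and a copy of the environment rather than os.environ, so it can be run in isolation.

```python
# Standalone sketch of the ASAN/MSAN/UBSAN option merge performed above.
# The config contents and the initial ASAN_OPTIONS value are illustrative only.
import configparser
import os

config_text = """
[asan]
detect_leaks = 1
abort_on_error = 1
"""

parser = configparser.ConfigParser()
parser.read_string(config_text)

env = dict(os.environ)
env.setdefault("ASAN_OPTIONS", "log_path=./asan.log")

if parser.has_section("asan"):
    merged = ":".join("%s=%s" % (key, value) for key, value in parser["asan"].items())
    env["ASAN_OPTIONS"] = f"{env['ASAN_OPTIONS']}:{merged}"

print(env["ASAN_OPTIONS"])
# -> log_path=./asan.log:detect_leaks=1:abort_on_error=1
```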
@@ -23,7 +23,10 @@ source /utils.lib
 /usr/share/clickhouse-test/config/install.sh

 azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &

 ./setup_minio.sh stateful

+./mc admin trace clickminio > /test_output/rubbish.log &
+MC_ADMIN_PID=$!
+
 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

@@ -254,6 +257,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
 sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
 fi

+# Kill minio admin client to stop collecting logs
+kill $MC_ADMIN_PID

 rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:

 zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||:
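Both test scripts use the same pattern: start `./mc admin trace clickminio` in the background with its output redirected to a log file, remember the PID, and kill it once the servers are stopped so the trace stops collecting. A hedged Python equivalent of that start/stop bookkeeping is sketched below; the binary and log path are taken from the diff, everything else is illustrative.

```python
# Sketch of the "background log collector" pattern added in the hunks above.
# Assumes an `mc` binary exists in the working directory.
import subprocess


def start_minio_trace(log_path: str = "/test_output/rubbish.log"):
    log = open(log_path, "w")
    # Equivalent of: ./mc admin trace clickminio > /test_output/rubbish.log &
    proc = subprocess.Popen(
        ["./mc", "admin", "trace", "clickminio"],
        stdout=log,
        stderr=subprocess.STDOUT,
    )
    return proc, log


def stop_minio_trace(proc, log):
    # Equivalent of: kill $MC_ADMIN_PID, once the tests are done.
    proc.terminate()
    proc.wait(timeout=10)
    log.close()
```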
@@ -54,6 +54,9 @@ source /utils.lib
 /usr/share/clickhouse-test/config/install.sh

 ./setup_minio.sh stateless
+./mc admin trace clickminio > /test_output/rubbish.log &
+MC_ADMIN_PID=$!
+
 ./setup_hdfs_minicluster.sh

 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

@@ -383,6 +386,9 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
 sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
 fi

+# Kill minio admin client to stop collecting logs
+kill $MC_ADMIN_PID
+
 rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
 rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
 zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
@@ -3,7 +3,7 @@ aiosignal==1.3.1
 astroid==3.1.0
 async-timeout==4.0.3
 attrs==23.2.0
-black==23.12.0
+black==24.4.2
 boto3==1.34.131
 botocore==1.34.131
 certifi==2024.6.2
@@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual

 ## Writing Code {#writing-code}

-The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
-
-The Code Style Guide: https://clickhouse.com/docs/en/development/style/
-
-Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries
-
-Writing tests: https://clickhouse.com/docs/en/development/tests/
-
-List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
+Below you can find some quick links which may be useful when writing code for ClickHouse:
+
+- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/).
+- [The code style guide](https://clickhouse.com/docs/en/development/style/).
+- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries)
+- [Writing tests](https://clickhouse.com/docs/en/development/tests/)
+- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest)
+
+## Writing Documentation {#writing-documentation}
+
+As part of every pull request which adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes the instructions for how to build the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse you can use the template below as a guide:
+
+```markdown
+# newFunctionName
+
+A short description of the function goes here. It should describe briefly what it does and a typical usage case.
+
+**Syntax**
+
+\```sql
+newFunctionName(arg1, arg2[, arg3])
+\```
+
+**Arguments**
+
+- `arg1` — Description of the argument. [DataType](../data-types/float.md)
+- `arg2` — Description of the argument. [DataType](../data-types/float.md)
+- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md)
+
+**Implementation Details**
+
+A description of implementation details if relevant.
+
+**Returned value**
+
+- Returns {insert what the function returns here}. [DataType](../data-types/float.md)
+
+**Example**
+
+Query:
+
+\```sql
+SELECT 'write your example query here';
+\```
+
+Response:
+
+\```response
+┌───────────────────────────────────┐
+│ the result of the query           │
+└───────────────────────────────────┘
+\```
+```

 ## Test Data {#test-data}

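The new documentation template above has a fixed set of headings (**Syntax**, **Arguments**, **Returned value**, **Example**). Purely as an illustration of how the template could be checked mechanically, here is a small hypothetical sketch; this is not an existing ClickHouse CI check, and the heading list is taken directly from the template.

```python
# Hypothetical sketch: verify a markdown page follows the template above.
REQUIRED_HEADINGS = ["**Syntax**", "**Arguments**", "**Returned value**", "**Example**"]


def missing_headings(markdown_text: str) -> list[str]:
    # Return the template headings that the page does not contain.
    return [heading for heading in REQUIRED_HEADINGS if heading not in markdown_text]


sample = "# newFunctionName\n\n**Syntax**\n\n**Arguments**\n\n**Example**\n"
print(missing_headings(sample))  # -> ['**Returned value**']
```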
@@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse:

 - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
 - **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
-- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture
+- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
 - **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub

 ## ClickHouse Cloud
@@ -1030,7 +1030,7 @@ A table with no primary key represents the extreme case of a single equivalence

 The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows.

-The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemir, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
+The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemire, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
 It performs three steps:
 1. Find all equivalence classes based on the row values in primary key columns.
 2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.
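The corrected paragraph describes a concrete procedure: group rows into equivalence classes by their primary key values, estimate the cardinality of each non-primary-key column within a class, and sort the rows of each class by the lower-cardinality columns first. A compact sketch of those three steps on toy Python rows follows; it is purely illustrative and not ClickHouse code, which operates on columns rather than row dicts.

```python
# Illustrative sketch of the Lemire/Kaser row-reordering heuristic described above.
from collections import defaultdict

rows = [
    {"pk": 1, "city": "Berlin", "flag": 0},
    {"pk": 1, "city": "Paris", "flag": 0},
    {"pk": 1, "city": "Berlin", "flag": 1},
    {"pk": 2, "city": "Oslo", "flag": 1},
]
pk_cols = ["pk"]
other_cols = ["city", "flag"]

# 1. Equivalence classes: rows sharing the same primary key values.
classes = defaultdict(list)
for row in rows:
    classes[tuple(row[c] for c in pk_cols)].append(row)

reordered = []
for key in sorted(classes):
    group = classes[key]
    # 2. Estimate per-column cardinality inside this class.
    cardinality = {c: len({r[c] for r in group}) for c in other_cols}
    # 3. Sort rows by the non-primary-key columns, lowest cardinality first.
    order = sorted(other_cols, key=lambda c: cardinality[c])
    group.sort(key=lambda r: tuple(r[c] for c in order))
    reordered.extend(group)

for row in reordered:
    print(row)
```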
@@ -2698,6 +2698,204 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../data-types

 Accepts an additional, optional `precision` parameter after the `timezone` parameter.

+## changeYear
+
+Changes the year component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeYear(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the year. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- The same type as `date_or_datetime`.
+
+**Example**
+
+``` sql
+SELECT changeYear(toDate('1999-01-01'), 2000), changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000);
+```
+
+Result:
+
+```
+┌─changeYear(toDate('1999-01-01'), 2000)─┬─changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000)─┐
+│ 2000-01-01                             │ 2000-01-01 00:00:00.000                                       │
+└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
+```
+
+## changeMonth
+
+Changes the month component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeMonth(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the month. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Returns a value of same type as `date_or_datetime`.
+
+**Example**
+
+``` sql
+SELECT changeMonth(toDate('1999-01-01'), 2), changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2);
+```
+
+Result:
+
+```
+┌─changeMonth(toDate('1999-01-01'), 2)─┬─changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2)─┐
+│ 1999-02-01                           │ 1999-02-01 00:00:00.000                                     │
+└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
+```
+
+## changeDay
+
+Changes the day component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeDay(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the day. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Returns a value of same type as `date_or_datetime`.
+
+**Example**
+
+``` sql
+SELECT changeDay(toDate('1999-01-01'), 5), changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5);
+```
+
+Result:
+
+```
+┌─changeDay(toDate('1999-01-01'), 5)─┬─changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5)─┐
+│ 1999-01-05                         │ 1999-01-05 00:00:00.000                                   │
+└────────────────────────────────────┴──────────────────────────────────────────────────────────┘
+```
+
+## changeHour
+
+Changes the hour component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeHour(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the hour. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).
+
+**Example**
+
+``` sql
+SELECT changeHour(toDate('1999-01-01'), 14), changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14);
+```
+
+Result:
+
+```
+┌─changeHour(toDate('1999-01-01'), 14)─┬─changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14)─┐
+│ 1999-01-01 14:00:00                  │ 1999-01-01 14:00:00.000                                     │
+└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
+```
+
+## changeMinute
+
+Changes the minute component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeMinute(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the minute. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).
+
+**Example**
+
+``` sql
+SELECT changeMinute(toDate('1999-01-01'), 15), changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
+```
+
+Result:
+
+```
+┌─changeMinute(toDate('1999-01-01'), 15)─┬─changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
+│ 1999-01-01 00:15:00                    │ 1999-01-01 00:15:00.000                                       │
+└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
+```
+
+## changeSecond
+
+Changes the second component of a date or date time.
+
+**Syntax**
+
+``` sql
+changeSecond(date_or_datetime, value)
+```
+
+**Arguments**
+
+- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a new value of the second. [Integer](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).
+
+**Example**
+
+``` sql
+SELECT changeSecond(toDate('1999-01-01'), 15), changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
+```
+
+Result:
+
+```
+┌─changeSecond(toDate('1999-01-01'), 15)─┬─changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
+│ 1999-01-01 00:00:15                    │ 1999-01-01 00:00:15.000                                       │
+└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
+```
+
 ## addYears

 Adds a specified number of years to a date, a date with time or a string-encoded date / date with time.
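The new change* functions each replace a single component of a date or date-time value and leave the rest unchanged; the SQL examples above show the behaviour. For intuition only, here is a tiny Python analogue using datetime.replace. It mirrors the documented semantics, not ClickHouse's implementation, and it ignores the overflow and saturation rules ClickHouse applies to out-of-range components.

```python
# Python intuition for the documented change* semantics (not ClickHouse code).
from datetime import datetime

d = datetime(1999, 1, 1, 0, 0, 0)

print(d.replace(year=2000))  # like changeYear(d, 2000)  -> 2000-01-01 00:00:00
print(d.replace(month=2))    # like changeMonth(d, 2)    -> 1999-02-01 00:00:00
print(d.replace(hour=14))    # like changeHour(d, 14)    -> 1999-01-01 14:00:00
```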
@@ -2714,6 +2912,7 @@ addYears(date, num)
 - `num`: Number of years to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2751,6 +2950,7 @@ addQuarters(date, num)
 - `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2788,6 +2988,7 @@ addMonths(date, num)
 - `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2825,6 +3026,7 @@ addWeeks(date, num)
 - `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2862,6 +3064,7 @@ addDays(date, num)
 - `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2899,6 +3102,7 @@ addHours(date, num)
 - `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2936,6 +3140,7 @@ addMinutes(date, num)
 - `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -2973,6 +3178,7 @@ addSeconds(date, num)
 - `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3010,6 +3216,7 @@ addMilliseconds(date_time, num)
 - `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
@@ -3045,6 +3252,7 @@ addMicroseconds(date_time, num)
 - `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md).
@@ -3080,6 +3288,7 @@ addNanoseconds(date_time, num)
 - `num`: Number of nanoseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
@@ -3115,6 +3324,7 @@ addInterval(interval_1, interval_2)
 - `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md).
 - Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
@@ -3161,6 +3371,7 @@ addTupleOfIntervals(interval_1, interval_2)
 - `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
 - Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
@@ -3195,6 +3406,7 @@ subtractYears(date, num)
 - `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3232,6 +3444,7 @@ subtractQuarters(date, num)
 - `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3269,6 +3482,7 @@ subtractMonths(date, num)
 - `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3306,6 +3520,7 @@ subtractWeeks(date, num)
 - `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3343,6 +3558,7 @@ subtractDays(date, num)
 - `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3380,6 +3596,7 @@ subtractHours(date, num)
 - `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[Datetime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3417,6 +3634,7 @@ subtractMinutes(date, num)
 - `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3454,6 +3672,7 @@ subtractSeconds(date, num)
 - `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -3491,6 +3710,7 @@ subtractMilliseconds(date_time, num)
 - `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
@@ -3526,6 +3746,7 @@ subtractMicroseconds(date_time, num)
 - `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md).
@@ -3561,6 +3782,7 @@ subtractNanoseconds(date_time, num)
 - `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
 - Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
@@ -3596,6 +3818,7 @@ subtractInterval(interval_1, interval_2)
 - `interval_2`: Second interval to be negated. [interval](../data-types/special-data-types/interval.md).
 - Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
@@ -3642,6 +3865,7 @@ subtractTupleOfIntervals(interval_1, interval_2)
 - `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
 - Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
@@ -376,6 +376,7 @@ void LocalServer::setupUsers()
 " </networks>"
 " <profile>default</profile>"
 " <quota>default</quota>"
+" <named_collection_control>1</named_collection_control>"
 " </default>"
 " </users>"
 " <quotas>"
@@ -516,6 +516,9 @@
     /// Save query in history only if it is different.
     let previous_query = '';

+    /// Start of the last query
+    let last_query_start = 0;
+
     const current_url = new URL(window.location);
     const opened_locally = location.protocol == 'file:';

@@ -567,6 +570,8 @@
             '&password=' + encodeURIComponent(password)
     }

+    last_query_start = performance.now();
+
     const xhr = new XMLHttpRequest;

     xhr.open('POST', url, true);
@@ -579,7 +584,8 @@
         if (posted_request_num != request_num) {
             return;
         } else if (this.readyState === XMLHttpRequest.DONE) {
-            renderResponse(this.status, this.response);
+            const elapsed_msec = performance.now() - last_query_start;
+            renderResponse(this.status, this.response, elapsed_msec);

             /// The query is saved in browser history (in state JSON object)
             /// as well as in URL fragment identifier.
@@ -587,7 +593,8 @@
             const state = {
                 query: query,
                 status: this.status,
-                response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
+                response: this.response.length > 100000 ? null : this.response, /// Lower than the browser's limit.
+                elapsed_msec: elapsed_msec,
             };
             const title = "ClickHouse Query: " + query;

@@ -617,7 +624,7 @@
         xhr.send(query);
     }

-    function renderResponse(status, response) {
+    function renderResponse(status, response, elapsed_msec) {
         document.getElementById('hourglass').style.display = 'none';

         if (status === 200) {
@@ -632,6 +639,7 @@
             renderChart(json);
         } else {
             renderUnparsedResult(response);
+            stats.innerText = `Elapsed (client-side): ${(elapsed_msec / 1000).toFixed(3)} sec.`;
         }
         document.getElementById('check-mark').style.display = 'inline';
     } else {
@@ -651,7 +659,7 @@
         clear();
         return;
     }
-    renderResponse(event.state.status, event.state.response);
+    renderResponse(event.state.status, event.state.response, event.state.elapsed_msec);
 };

 if (window.location.hash) {
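The change above records performance.now() before the request is sent, computes elapsed_msec when the response completes, renders it as "Elapsed (client-side): N sec." and carries the value along in the browser history state so it can be re-rendered on navigation. The same measure-format-carry pattern, sketched in Python around an arbitrary HTTP call; the URL is a placeholder and only the timing idea is taken from the diff.

```python
# Sketch of the client-side timing pattern added to play.html, in Python.
import time
import urllib.request

url = "http://localhost:8123/?query=SELECT%201"  # placeholder endpoint

start = time.monotonic()
try:
    with urllib.request.urlopen(url, timeout=5) as resp:
        status = resp.status
        body = resp.read()
except OSError as exc:
    status, body = None, str(exc).encode()
elapsed_msec = (time.monotonic() - start) * 1000.0

# Same string the UI now renders; the value is kept next to the response,
# much like play.html stores elapsed_msec in the history state.
state = {"status": status, "response_len": len(body), "elapsed_msec": elapsed_msec}
print(f"Elapsed (client-side): {elapsed_msec / 1000:.3f} sec.")
print(state)
```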
@ -17,6 +17,8 @@ src_paths = ["src", "tests/ci", "tests/sqllogic"]
|
|||||||
[tool.pylint.'MESSAGES CONTROL']
|
[tool.pylint.'MESSAGES CONTROL']
|
||||||
# pytest.mark.parametrize is not callable (not-callable)
|
# pytest.mark.parametrize is not callable (not-callable)
|
||||||
disable = '''
|
disable = '''
|
||||||
|
pointless-string-statement,
|
||||||
|
line-too-long,
|
||||||
missing-docstring,
|
missing-docstring,
|
||||||
too-few-public-methods,
|
too-few-public-methods,
|
||||||
invalid-name,
|
invalid-name,
|
||||||
|
@@ -1490,6 +1490,10 @@ void QueryAnalyzer::qualifyColumnNodesWithProjectionNames(const QueryTreeNodes &
         additional_column_qualification_parts = {table_expression_node->getAlias()};
     else if (auto * table_node = table_expression_node->as<TableNode>())
         additional_column_qualification_parts = {table_node->getStorageID().getDatabaseName(), table_node->getStorageID().getTableName()};
+    else if (auto * query_node = table_expression_node->as<QueryNode>(); query_node && query_node->isCTE())
+        additional_column_qualification_parts = {query_node->getCTEName()};
+    else if (auto * union_node = table_expression_node->as<UnionNode>(); union_node && union_node->isCTE())
+        additional_column_qualification_parts = {union_node->getCTEName()};

     size_t additional_column_qualification_parts_size = additional_column_qualification_parts.size();
     const auto & table_expression_data = scope.getTableExpressionDataOrThrow(table_expression_node);
@@ -4455,9 +4459,8 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
     {
         auto left_table_expression = extractLeftTableExpression(scope_query_node->getJoinTree());
         if (table_expression_node.get() == left_table_expression.get() &&
-            scope.joins_count == 1 &&
-            scope.context->getSettingsRef().single_join_prefer_left_table)
+            scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
             table_expression_data.should_qualify_columns = false;
     }

     scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data));
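The analyzer hunk above makes columns originating from a CTE or a CTE-union qualifiable by the CTE's own name. A quick end-to-end check is to run a qualified query over HTTP; this is an illustrative sketch, not part of the change, and it assumes a locally reachable server where the new analyzer is enabled.

```python
import urllib.request

# Columns coming from a CTE should be addressable as <cte_name>.<column>.
query = (
    "WITH cte AS (SELECT 1 AS x) "
    "SELECT cte.x FROM cte "
    "SETTINGS allow_experimental_analyzer = 1 "
    "FORMAT TSV"
).encode()

with urllib.request.urlopen("http://localhost:8123/", data=query) as resp:
    print(resp.read().decode().strip())  # expected output: 1
```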
@@ -508,6 +508,7 @@ The server successfully detected this situation and will download merged part fr
     M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \
     M(FileSegmentFailToIncreasePriority, "Number of times the priority was not increased due to a high contention on the cache lock") \
     M(FilesystemCacheFailToReserveSpaceBecauseOfLockContention, "Number of times space reservation was skipped due to a high contention on the cache lock") \
+    M(FilesystemCacheFailToReserveSpaceBecauseOfCacheResize, "Number of times space reservation was skipped due to the cache is being resized") \
     M(FilesystemCacheHoldFileSegments, "Filesystem cache file segments count, which were hold") \
     M(FilesystemCacheUnusedHoldFileSegments, "Filesystem cache file segments count, which were hold, but not used (because of seek or LIMIT n, etc)") \
     M(FilesystemCacheFreeSpaceKeepingThreadRun, "Number of times background thread executed free space keeping job") \
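The counter added above shows up in `system.events` like any other profile event. As a rough sketch (assuming a reachable server; this is not part of the patch), the cache-related events can be polled over HTTP:

```python
import urllib.request

# Poll the filesystem-cache profile events; an empty result simply means
# none of them has fired yet on this server.
query = (
    "SELECT event, value FROM system.events "
    "WHERE event LIKE 'FilesystemCache%' FORMAT TSV"
).encode()

with urllib.request.urlopen("http://localhost:8123/", data=query) as resp:
    for line in resp.read().decode().splitlines():
        print(line)
```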
src/Functions/changeDate.cpp (new file, 399 lines)
@@ -0,0 +1,399 @@
#include "Common/DateLUTImpl.h"
#include "Common/Exception.h"
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnsDateTime.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/IColumn.h>
#include <Common/DateLUT.h>
#include <Common/typeid_cast.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/IDataType.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <Interpreters/castColumn.h>

#include <memory>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

namespace
{

enum class Component
{
    Year,
    Month,
    Day,
    Hour,
    Minute,
    Second
};

}

template <typename Traits>
class FunctionChangeDate : public IFunction
{
public:
    static constexpr auto name = Traits::name;

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionChangeDate>(); }
    String getName() const override { return Traits::name; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
    size_t getNumberOfArguments() const override { return 2; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        FunctionArgumentDescriptors args{
            {"date_or_datetime", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date or date with time"},
            {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), nullptr, "Integer"}
        };
        validateFunctionArguments(*this, arguments, args);

        const auto & input_type = arguments[0].type;

        if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second)
        {
            if (isDate(input_type))
                return std::make_shared<DataTypeDateTime>();
            if (isDate32(input_type))
                return std::make_shared<DataTypeDateTime64>(DataTypeDateTime64::default_scale);
        }

        return input_type;
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
        const auto & input_type = arguments[0].type;
        if (isDate(input_type))
        {
            if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second)
                return execute<DataTypeDate, DataTypeDateTime>(arguments, input_type, result_type, input_rows_count);
            return execute<DataTypeDate, DataTypeDate>(arguments, input_type, result_type, input_rows_count);
        }
        if (isDate32(input_type))
        {
            if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second)
                return execute<DataTypeDate32, DataTypeDateTime64>(arguments, input_type, result_type, input_rows_count);
            return execute<DataTypeDate32, DataTypeDate32>(arguments, input_type, result_type, input_rows_count);
        }
        if (isDateTime(input_type))
            return execute<DataTypeDateTime, DataTypeDateTime>(arguments, input_type, result_type, input_rows_count);
        if (isDateTime64(input_type))
            return execute<DataTypeDateTime64, DataTypeDateTime64>(arguments, input_type, result_type, input_rows_count);

        throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid input type");
    }

    template <typename InputDataType, typename ResultDataType>
    ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & input_type, const DataTypePtr & result_type, size_t input_rows_count) const
    {
        typename ResultDataType::ColumnType::MutablePtr result_col;
        if constexpr (std::is_same_v<ResultDataType, DataTypeDateTime64>)
        {
            auto scale = DataTypeDateTime64::default_scale;
            if constexpr (std::is_same_v<InputDataType, DateTime64>)
                scale = typeid_cast<const DataTypeDateTime64 &>(*result_type).getScale();
            result_col = ResultDataType::ColumnType::create(input_rows_count, scale);
        }
        else
            result_col = ResultDataType::ColumnType::create(input_rows_count);

        auto date_time_col = arguments[0].column->convertToFullIfNeeded();
        const auto & date_time_col_data = typeid_cast<const typename InputDataType::ColumnType &>(*date_time_col).getData();

        auto value_col = castColumn(arguments[1], std::make_shared<DataTypeFloat64>());
        value_col = value_col->convertToFullIfNeeded();
        const auto & value_col_data = typeid_cast<const ColumnFloat64 &>(*value_col).getData();

        auto & result_col_data = result_col->getData();

        if constexpr (std::is_same_v<InputDataType, DataTypeDateTime64>)
        {
            const auto scale = typeid_cast<const DataTypeDateTime64 &>(*result_type).getScale();
            const auto & date_lut = typeid_cast<const DataTypeDateTime64 &>(*result_type).getTimeZone();

            Int64 deg = 1;
            for (size_t j = 0; j < scale; ++j)
                deg *= 10;

            for (size_t i = 0; i < input_rows_count; ++i)
            {
                Int64 time = date_lut.toNumYYYYMMDDhhmmss(date_time_col_data[i] / deg);
                Int64 fraction = date_time_col_data[i] % deg;

                result_col_data[i] = getChangedDate(time, value_col_data[i], result_type, date_lut, scale, fraction);
            }
        }
        else if constexpr (std::is_same_v<InputDataType, DataTypeDate32> && std::is_same_v<ResultDataType, DataTypeDateTime64>)
        {
            const auto & date_lut = typeid_cast<const DataTypeDateTime64 &>(*result_type).getTimeZone();
            for (size_t i = 0; i < input_rows_count; ++i)
            {
                Int64 time = static_cast<Int64>(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000;
                result_col_data[i] = getChangedDate(time, value_col_data[i], result_type, date_lut, 3, 0);
            }
        }
        else if constexpr (std::is_same_v<InputDataType, DataTypeDate> && std::is_same_v<ResultDataType, DataTypeDateTime>)
        {
            const auto & date_lut = typeid_cast<const DataTypeDateTime &>(*result_type).getTimeZone();
            for (size_t i = 0; i < input_rows_count; ++i)
            {
                Int64 time = static_cast<Int64>(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000;
                result_col_data[i] = static_cast<UInt32>(getChangedDate(time, value_col_data[i], result_type, date_lut));
            }
        }
        else if constexpr (std::is_same_v<InputDataType, DataTypeDateTime>)
        {
            const auto & date_lut = typeid_cast<const DataTypeDateTime &>(*result_type).getTimeZone();
            for (size_t i = 0; i < input_rows_count; ++i)
            {
                Int64 time = date_lut.toNumYYYYMMDDhhmmss(date_time_col_data[i]);
                result_col_data[i] = static_cast<UInt32>(getChangedDate(time, value_col_data[i], result_type, date_lut));
            }
        }
        else
        {
            const auto & date_lut = DateLUT::instance();
            for (size_t i = 0; i < input_rows_count; ++i)
            {
                Int64 time;
                if (isDate(input_type))
                    time = static_cast<Int64>(date_lut.toNumYYYYMMDD(DayNum(date_time_col_data[i]))) * 1'000'000;
                else
                    time = static_cast<Int64>(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000;

                if (isDate(result_type))
                    result_col_data[i] = static_cast<UInt16>(getChangedDate(time, value_col_data[i], result_type, date_lut));
                else
                    result_col_data[i] = static_cast<Int32>(getChangedDate(time, value_col_data[i], result_type, date_lut));
            }
        }

        return result_col;
    }

    Int64 getChangedDate(Int64 time, Float64 new_value, const DataTypePtr & result_type, const DateLUTImpl & date_lut, Int64 scale = 0, Int64 fraction = 0) const
    {
        auto year = time / 10'000'000'000;
        auto month = (time % 10'000'000'000) / 100'000'000;
        auto day = (time % 100'000'000) / 1'000'000;
        auto hours = (time % 1'000'000) / 10'000;
        auto minutes = (time % 10'000) / 100;
        auto seconds = time % 100;

        Int64 min_date = 0, max_date = 0;
        Int16 min_year, max_year;
        if (isDate(result_type))
        {
            min_date = date_lut.makeDayNum(1970, 1, 1);
            max_date = date_lut.makeDayNum(2149, 6, 6);
            min_year = 1970;
            max_year = 2149;
        }
        else if (isDate32(result_type))
        {
            min_date = date_lut.makeDayNum(1900, 1, 1);
            max_date = date_lut.makeDayNum(2299, 12, 31);
            min_year = 1900;
            max_year = 2299;
        }
        else if (isDateTime(result_type))
        {
            min_date = 0;
            max_date = 0x0FFFFFFFFLL;
            min_year = 1970;
            max_year = 2106;
        }
        else
        {
            min_date = DecimalUtils::decimalFromComponents<DateTime64>(
                date_lut.makeDateTime(1900, 1, 1, 0, 0, 0),
                static_cast<Int64>(0),
                static_cast<UInt32>(scale));
            Int64 deg = 1;
            for (Int64 j = 0; j < scale; ++j)
                deg *= 10;
            max_date = DecimalUtils::decimalFromComponents<DateTime64>(
                date_lut.makeDateTime(2299, 12, 31, 23, 59, 59),
                static_cast<Int64>(deg - 1),
                static_cast<UInt32>(scale));
            min_year = 1900;
            max_year = 2299;
        }

        switch (Traits::component)
        {
            case Component::Year:
                if (new_value < min_year)
                    return min_date;
                else if (new_value > max_year)
                    return max_date;
                year = static_cast<Int16>(new_value);
                break;
            case Component::Month:
                if (new_value < 1 || new_value > 12)
                    return min_date;
                month = static_cast<UInt8>(new_value);
                break;
            case Component::Day:
                if (new_value < 1 || new_value > 31)
                    return min_date;
                day = static_cast<UInt8>(new_value);
                break;
            case Component::Hour:
                if (new_value < 0 || new_value > 23)
                    return min_date;
                hours = static_cast<UInt8>(new_value);
                break;
            case Component::Minute:
                if (new_value < 0 || new_value > 59)
                    return min_date;
                minutes = static_cast<UInt8>(new_value);
                break;
            case Component::Second:
                if (new_value < 0 || new_value > 59)
                    return min_date;
                seconds = static_cast<UInt8>(new_value);
                break;
        }

        Int64 result;
        if (isDate(result_type) || isDate32(result_type))
            result = date_lut.makeDayNum(year, month, day);
        else if (isDateTime(result_type))
            result = date_lut.makeDateTime(year, month, day, hours, minutes, seconds);
        else
#ifndef __clang_analyzer__
        /// ^^ This looks funny. It is the least terrible suppression of a false positive reported by clang-analyzer (a sub-class
        /// of clang-tidy checks) deep down in 'decimalFromComponents'. Usual suppressions of the form NOLINT* don't work here (they
        /// would only affect code in _this_ file), and suppressing the issue in 'decimalFromComponents' may suppress true positives.
            result = DecimalUtils::decimalFromComponents<DateTime64>(
                date_lut.makeDateTime(year, month, day, hours, minutes, seconds),
                fraction,
                static_cast<UInt32>(scale));
#else
        {
            UNUSED(fraction);
            result = 0;
        }
#endif

        if (result < min_date)
            return min_date;

        if (result > max_date)
            return max_date;

        return result;
    }
};


struct ChangeYearTraits
{
    static constexpr auto name = "changeYear";
    static constexpr auto component = Component::Year;
};

struct ChangeMonthTraits
{
    static constexpr auto name = "changeMonth";
    static constexpr auto component = Component::Month;
};

struct ChangeDayTraits
{
    static constexpr auto name = "changeDay";
    static constexpr auto component = Component::Day;
};

struct ChangeHourTraits
{
    static constexpr auto name = "changeHour";
    static constexpr auto component = Component::Hour;
};

struct ChangeMinuteTraits
{
    static constexpr auto name = "changeMinute";
    static constexpr auto component = Component::Minute;
};

struct ChangeSecondTraits
{
    static constexpr auto name = "changeSecond";
    static constexpr auto component = Component::Second;
};

REGISTER_FUNCTION(ChangeDate)
{
    {
        FunctionDocumentation::Description description = "Changes the year component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeYear(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeYearTraits>>(function_documentation);
    }
    {
        FunctionDocumentation::Description description = "Changes the month component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeMonth(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeMonthTraits>>(function_documentation);
    }
    {
        FunctionDocumentation::Description description = "Changes the day component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeDay(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeDayTraits>>(function_documentation);
    }
    {
        FunctionDocumentation::Description description = "Changes the hour component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeHour(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. If the input is a Date32, return DateTime64.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeHourTraits>>(function_documentation);
    }
    {
        FunctionDocumentation::Description description = "Changes the minute component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeMinute(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. If the input is a Date32, return DateTime64.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeMinuteTraits>>(function_documentation);
    }
    {
        FunctionDocumentation::Description description = "Changes the second component of a date or date time.";
        FunctionDocumentation::Syntax syntax = "changeSecond(date_or_datetime, value);";
        FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}};
        FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. If the input is a Date32, return DateTime64.";
        FunctionDocumentation::Categories categories = {"Dates and Times"};
        FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories};
        factory.registerFunction<FunctionChangeDate<ChangeSecondTraits>>(function_documentation);
    }
}

}
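The listing above registers six functions (changeYear through changeSecond). A minimal smoke test over HTTP could look like the sketch below; it assumes a running server that already includes this function family and is illustrative only.

```python
import urllib.request

# changeHour on a Date promotes the result to DateTime, as getReturnTypeImpl above describes.
queries = [
    "SELECT changeYear(toDate('2024-07-01'), 2025)",
    "SELECT changeHour(toDate('2024-07-01'), 12)",
    "SELECT changeSecond(toDateTime('2024-07-01 10:20:30'), 5)",
]

for q in queries:
    with urllib.request.urlopen("http://localhost:8123/", data=q.encode()) as resp:
        print(q, "->", resp.read().decode().strip())
```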
@@ -30,6 +30,7 @@ namespace ProfileEvents
     extern const Event FilesystemCacheFailToReserveSpaceBecauseOfLockContention;
     extern const Event FilesystemCacheFreeSpaceKeepingThreadRun;
     extern const Event FilesystemCacheFreeSpaceKeepingThreadWorkMilliseconds;
+    extern const Event FilesystemCacheFailToReserveSpaceBecauseOfCacheResize;
 }

 namespace DB
@@ -813,7 +814,7 @@ bool FileCache::tryReserve(
     /// ok compared to the number of cases this check will help.
     if (cache_is_being_resized.load(std::memory_order_relaxed))
     {
-        ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention);
+        ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfCacheResize);
         return false;
     }

@@ -1281,10 +1281,6 @@ void DatabaseCatalog::rescheduleDropTableTask()
     auto min_drop_time = getMinDropTime();
     time_t schedule_after_ms = min_drop_time > current_time ? (min_drop_time - current_time) * 1000 : 0;

-    LOG_TRACE(
-        log,
-        "Have {} tables in queue to drop. Schedule background task in {} seconds",
-        tables_marked_dropped.size(), schedule_after_ms / 1000);
     (*drop_task)->scheduleAfter(schedule_after_ms);
 }

@@ -445,6 +445,9 @@ bool NpyRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & /*
         elements_in_current_column *= header.shape[i];
     }

+    if (typeid_cast<ColumnArray *>(current_column))
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected nesting level of column '{}', expected {}", column->getName(), header.shape.size() - 1);
+
     for (size_t i = 0; i != elements_in_current_column; ++i)
         readValue(current_column);

@@ -196,6 +196,16 @@ void DistributedAsyncInsertBatch::readText(ReadBuffer & in)
         UInt64 idx;
         in >> idx >> "\n";
         files.push_back(std::filesystem::absolute(fmt::format("{}/{}.bin", parent.path, idx)).string());
+
+        ReadBufferFromFile header_buffer(files.back());
+        const DistributedAsyncInsertHeader & header = DistributedAsyncInsertHeader::read(header_buffer, parent.log);
+        total_bytes += total_bytes;
+
+        if (header.rows)
+        {
+            total_rows += header.rows;
+            total_bytes += header.bytes;
+        }
     }

     recovered = true;
tests/ci/artifactory.py (new file, 355 lines)
@@ -0,0 +1,355 @@
import argparse
import time
from pathlib import Path
from typing import Optional
from shutil import copy2
from create_release import PackageDownloader, ReleaseInfo, ShellRunner
from ci_utils import WithIter


class MountPointApp(metaclass=WithIter):
    RCLONE = "rclone"
    S3FS = "s3fs"


class R2MountPoint:
    _TEST_BUCKET_NAME = "repo-test"
    _PROD_BUCKET_NAME = "packages"
    _CACHE_MAX_SIZE_GB = 20
    MOUNT_POINT = "/home/ubuntu/mountpoint"
    API_ENDPOINT = "https://d4fd593eebab2e3a58a599400c4cd64d.r2.cloudflarestorage.com"
    LOG_FILE = "/home/ubuntu/fuse_mount.log"
    # mod time is not required by reprepro and createrepo - disable to simplify bucket's mount sync (applicable fro rclone)
    NOMODTIME = True
    # enable debug messages in mount log
    DEBUG = True
    # enable cache for mountpoint
    CACHE_ENABLED = False
    # TODO: which mode is better: minimal/writes/full/off
    _RCLONE_CACHE_MODE = "minimal"
    UMASK = "0000"

    def __init__(self, app: str, dry_run: bool) -> None:
        assert app in MountPointApp
        self.app = app
        if dry_run:
            self.bucket_name = self._TEST_BUCKET_NAME
        else:
            self.bucket_name = self._PROD_BUCKET_NAME

        self.aux_mount_options = ""
        self.async_mount = False
        if self.app == MountPointApp.S3FS:
            self.cache_dir = "/home/ubuntu/s3fs_cache"
            # self.aux_mount_options += "-o nomodtime " if self.NOMODTIME else "" not for s3fs
            self.aux_mount_options += "--debug " if self.DEBUG else ""
            self.aux_mount_options += (
                f"-o use_cache={self.cache_dir} -o cache_size_mb={self._CACHE_MAX_SIZE_GB * 1024} "
                if self.CACHE_ENABLED
                else ""
            )
            # without -o nomultipart there are errors like "Error 5 writing to /home/ubuntu/***.deb: Input/output error"
            self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}"
        elif self.app == MountPointApp.RCLONE:
            # run rclone mount process asynchronously, otherwise subprocess.run(daemonized command) will not return
            self.async_mount = True
            self.cache_dir = "/home/ubuntu/rclone_cache"
            self.aux_mount_options += "--no-modtime " if self.NOMODTIME else ""
            self.aux_mount_options += "-v " if self.DEBUG else ""  # -vv too verbose
            self.aux_mount_options += (
                f"--vfs-cache-mode {self._RCLONE_CACHE_MODE} --vfs-cache-max-size {self._CACHE_MAX_SIZE_GB}G"
                if self.CACHE_ENABLED
                else "--vfs-cache-mode off"
            )
            # Use --no-modtime to try to avoid: ERROR : rpm/lts/clickhouse-client-24.3.6.5.x86_64.rpm: Failed to apply pending mod time
            self.mount_cmd = f"rclone mount remote:{self.bucket_name} {self.MOUNT_POINT} --daemon --cache-dir {self.cache_dir} --umask 0000 --log-file {self.LOG_FILE} {self.aux_mount_options}"
        else:
            assert False

    def init(self):
        print(f"Mount bucket [{self.bucket_name}] to [{self.MOUNT_POINT}]")
        _CLEAN_LOG_FILE_CMD = f"tail -n 1000 {self.LOG_FILE} > {self.LOG_FILE}_tmp && mv {self.LOG_FILE}_tmp {self.LOG_FILE} ||:"
        _MKDIR_CMD = f"mkdir -p {self.MOUNT_POINT}"
        _MKDIR_FOR_CACHE = f"mkdir -p {self.cache_dir}"
        _UNMOUNT_CMD = (
            f"mount | grep -q {self.MOUNT_POINT} && umount {self.MOUNT_POINT} ||:"
        )

        _TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}"
        ShellRunner.run(_CLEAN_LOG_FILE_CMD)
        ShellRunner.run(_UNMOUNT_CMD)
        ShellRunner.run(_MKDIR_CMD)
        ShellRunner.run(_MKDIR_FOR_CACHE)
        ShellRunner.run(self.mount_cmd, async_=self.async_mount)
        if self.async_mount:
            time.sleep(3)
        ShellRunner.run(_TEST_MOUNT_CMD)

    @classmethod
    def teardown(cls):
        print(f"Unmount [{cls.MOUNT_POINT}]")
        ShellRunner.run(f"umount {cls.MOUNT_POINT}")


class RepoCodenames(metaclass=WithIter):
    LTS = "lts"
    STABLE = "stable"


class DebianArtifactory:
    _TEST_REPO_URL = "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/deb"
    _PROD_REPO_URL = "https://packages.clickhouse.com/deb"

    def __init__(self, release_info: ReleaseInfo, dry_run: bool):
        self.codename = release_info.codename
        self.version = release_info.version
        if dry_run:
            self.repo_url = self._TEST_REPO_URL
        else:
            self.repo_url = self._PROD_REPO_URL
        assert self.codename in RepoCodenames
        self.pd = PackageDownloader(
            release=release_info.release_branch,
            commit_sha=release_info.commit_sha,
            version=release_info.version,
        )

    def export_packages(self):
        assert self.pd.local_deb_packages_ready(), "BUG: Packages are not downloaded"
        print("Start adding packages")
        paths = [
            self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_deb_packages_files()
        ]
        REPREPRO_CMD_PREFIX = f"reprepro --basedir {R2MountPoint.MOUNT_POINT}/configs/deb --outdir {R2MountPoint.MOUNT_POINT}/deb --verbose"
        cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}"
        print("Running export command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)
        ShellRunner.run("sync")

        if self.codename == RepoCodenames.LTS:
            packages_with_version = [
                package + "=" + self.version for package in self.pd.get_packages_names()
            ]
            print(
                f"Copy packages from {RepoCodenames.LTS} to {RepoCodenames.STABLE} repository"
            )
            cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}"
            print("Running copy command:")
            print(f"  {cmd}")
            ShellRunner.run(cmd)
            ShellRunner.run("sync")

    def test_packages(self):
        ShellRunner.run("docker pull ubuntu:latest")
        print(f"Test packages installation, version [{self.version}]")
        cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\""
        print("Running test command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)


def _copy_if_not_exists(src: Path, dst: Path) -> Path:
    if dst.is_dir():
        dst = dst / src.name
    if not dst.exists():
        return copy2(src, dst)  # type: ignore
    if src.stat().st_size == dst.stat().st_size:
        return dst
    return copy2(src, dst)  # type: ignore


class RpmArtifactory:
    _TEST_REPO_URL = (
        "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/rpm/clickhouse.repo"
    )
    _PROD_REPO_URL = "https://packages.clickhouse.com/rpm/clickhouse.repo"
    _SIGN_KEY = "885E2BDCF96B0B45ABF058453E4AD4719DDE9A38"

    def __init__(self, release_info: ReleaseInfo, dry_run: bool):
        self.codename = release_info.codename
        self.version = release_info.version
        if dry_run:
            self.repo_url = self._TEST_REPO_URL
        else:
            self.repo_url = self._PROD_REPO_URL
        assert self.codename in RepoCodenames
        self.pd = PackageDownloader(
            release=release_info.release_branch,
            commit_sha=release_info.commit_sha,
            version=release_info.version,
        )

    def export_packages(self, codename: Optional[str] = None) -> None:
        assert self.pd.local_rpm_packages_ready(), "BUG: Packages are not downloaded"
        codename = codename or self.codename
        print(f"Start adding packages to [{codename}]")
        paths = [
            self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_rpm_packages_files()
        ]

        dest_dir = Path(R2MountPoint.MOUNT_POINT) / "rpm" / codename

        for package in paths:
            _copy_if_not_exists(Path(package), dest_dir)

        commands = (
            f"createrepo_c --local-sqlite --workers=2 --update --verbose {dest_dir}",
            f"gpg --sign-with {self._SIGN_KEY} --detach-sign --batch --yes --armor {dest_dir / 'repodata' / 'repomd.xml'}",
        )
        print(f"Exporting RPM packages into [{codename}]")

        for command in commands:
            print("Running command:")
            print(f"  {command}")
            ShellRunner.run(command)

        update_public_key = f"gpg --armor --export {self._SIGN_KEY}"
        pub_key_path = dest_dir / "repodata" / "repomd.xml.key"
        print("Updating repomd.xml.key")
        pub_key_path.write_text(ShellRunner.run(update_public_key)[1])
        if codename == RepoCodenames.LTS:
            self.export_packages(RepoCodenames.STABLE)
        ShellRunner.run("sync")

    def test_packages(self):
        ShellRunner.run("docker pull fedora:latest")
        print(f"Test package installation, version [{self.version}]")
        cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"'
        print("Running test command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)


class TgzArtifactory:
    _TEST_REPO_URL = "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/tgz"
    _PROD_REPO_URL = "https://packages.clickhouse.com/tgz"

    def __init__(self, release_info: ReleaseInfo, dry_run: bool):
        self.codename = release_info.codename
        self.version = release_info.version
        if dry_run:
            self.repo_url = self._TEST_REPO_URL
        else:
            self.repo_url = self._PROD_REPO_URL
        assert self.codename in RepoCodenames
        self.pd = PackageDownloader(
            release=release_info.release_branch,
            commit_sha=release_info.commit_sha,
            version=release_info.version,
        )

    def export_packages(self, codename: Optional[str] = None) -> None:
        assert self.pd.local_tgz_packages_ready(), "BUG: Packages are not downloaded"
        codename = codename or self.codename

        paths = [
            self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_tgz_packages_files()
        ]

        dest_dir = Path(R2MountPoint.MOUNT_POINT) / "tgz" / codename

        print(f"Exporting TGZ packages into [{codename}]")

        for package in paths:
            _copy_if_not_exists(Path(package), dest_dir)

        if codename == RepoCodenames.LTS:
            self.export_packages(RepoCodenames.STABLE)
        ShellRunner.run("sync")

    def test_packages(self):
        tgz_file = "/tmp/tmp.tgz"
        tgz_sha_file = "/tmp/tmp.tgz.sha512"
        ShellRunner.run(
            f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
        )
        ShellRunner.run(
            f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512"
        )
        expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}")
        actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
        assert (
            expected_checksum == actual_checksum
        ), f"[{actual_checksum} != {expected_checksum}]"
        ShellRunner.run("rm /tmp/tmp.tgz*")


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Adds release packages to the repository",
    )
    parser.add_argument(
        "--infile",
        type=str,
        required=True,
        help="input file with release info",
    )
    parser.add_argument(
        "--export-debian",
        action="store_true",
        help="Export debian packages to repository",
    )
    parser.add_argument(
        "--export-rpm",
        action="store_true",
        help="Export rpm packages to repository",
    )
    parser.add_argument(
        "--export-tgz",
        action="store_true",
        help="Export tgz packages to repository",
    )
    parser.add_argument(
        "--test-debian",
        action="store_true",
        help="Test debian packages installation",
    )
    parser.add_argument(
        "--test-rpm",
        action="store_true",
        help="Test rpm packages installation",
    )
    parser.add_argument(
        "--test-tgz",
        action="store_true",
        help="Test tgz packages installation",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Dry run mode",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    assert args.dry_run

    release_info = ReleaseInfo.from_file(args.infile)
    """
    Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve:
    ERROR : IO error: NotImplemented: versionId not implemented
    Failed to copy: NotImplemented: versionId not implemented
    """
    mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run)
    if args.export_debian:
        mp.init()
        DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
    if args.export_rpm:
        mp.init()
        RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
    if args.export_tgz:
        mp.init()
        TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
    if args.test_debian:
        DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
    if args.test_tgz:
        TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
    if args.test_rpm:
        RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()
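For orientation, artifactory.py is driven by a release-info file produced by create_release.py (the --outfile/--infile handshake in the workflow). The sketch below is a hypothetical minimal invocation in dry-run mode; the JSON field names mirror the attributes the script reads (release_branch, commit_sha, version, codename) and are assumptions, since the real file is generated by create_release.py and may contain more fields.

```python
import json
import subprocess
from pathlib import Path

# Hypothetical minimal release info; normally produced by create_release.py --prepare-release-info.
release_info = {
    "release_branch": "24.7",
    "commit_sha": "0000000000000000000000000000000000000000",
    "version": "24.7.1.1",
    "codename": "stable",
}
Path("/tmp/release_info.json").write_text(json.dumps(release_info))

# Export and test TGZ packages against the test bucket (dry run, as the script asserts).
subprocess.run(
    ["python3", "tests/ci/artifactory.py", "--infile", "/tmp/release_info.json",
     "--export-tgz", "--test-tgz", "--dry-run"],
    check=True,
)
```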
@@ -6,6 +6,7 @@ import os
 import re
 import subprocess
 import sys
+import time
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, Dict, List, Optional
@@ -15,7 +16,7 @@ import upload_result_helper
 from build_check import get_release_or_pr
 from ci_config import CI
 from ci_metadata import CiMetadata
-from ci_utils import GHActions, normalize_string, Shell
+from ci_utils import GHActions, normalize_string, Utils
 from clickhouse_helper import (
     CiLogsCredentials,
     ClickHouseHelper,
@@ -264,7 +265,7 @@ def check_missing_images_on_dockerhub(

 def _pre_action(s3, indata, pr_info):
     print("Clear dmesg")
-    Shell.run("sudo dmesg --clear ||:")
+    Utils.clear_dmesg()
     CommitStatusData.cleanup()
     JobReport.cleanup()
     BuildResult.cleanup()
@@ -550,7 +551,17 @@ def _update_gh_statuses_action(indata: Dict, s3: S3Helper) -> None:
     except Exception as e:
         raise e
     print("Going to update overall CI report")
-    set_status_comment(commit, pr_info)
+    for retry in range(2):
+        try:
+            set_status_comment(commit, pr_info)
+            break
+        except Exception as e:
+            print(
+                f"WARNING: Failed to update CI Running status, attempt [{retry + 1}], exception [{e}]"
+            )
+            time.sleep(1)
+    else:
+        print("ERROR: All retry attempts failed.")
     print("... CI report update - done")

@@ -996,10 +1007,10 @@ def main() -> int:
         args.skip_jobs,
     )

-    if IS_CI and pr_info.is_pr:
-        ci_cache.filter_out_not_affected_jobs()
-
     ci_cache.print_status()
+    if IS_CI and pr_info.is_pr and not ci_settings.no_ci_cache:
+        ci_cache.filter_out_not_affected_jobs()
+        ci_cache.print_status()

     if IS_CI and not pr_info.is_merge_queue:
         # wait for pending jobs to be finished, await_jobs is a long blocking call
@@ -1035,6 +1046,7 @@ def main() -> int:
     elif args.pre:
         assert indata, "Run config must be provided via --infile"
         _pre_action(s3, indata, pr_info)
+        JobReport.create_pre_report().dump()

     ### RUN action: start
     elif args.run:
@@ -1086,6 +1098,16 @@ def main() -> int:
                 print(status)
                 print("::endgroup::")
                 previous_status = status.state
+                print("Create dummy job report with job_skipped flag")
+                JobReport(
+                    status=status.state,
+                    description="",
+                    test_results=[],
+                    start_time="",
+                    duration=0.0,
+                    additional_files=[],
+                    job_skipped=True,
+                ).dump()

             # ci cache check
             if not previous_status and not ci_settings.no_ci_cache:
@@ -1121,22 +1143,22 @@ def main() -> int:
                 exit_code = 1
             else:
                 exit_code = _run_test(check_name, args.run_command)
+            job_report = JobReport.load() if JobReport.exist() else None
+            assert (
+                job_report
+            ), "BUG. There must be job report either real report, or pre-report if job was killed"
+            job_report.exit_code = exit_code
+            job_report.dump()
         ### RUN action: end

         ### POST action: start
         elif args.post:
-            has_oom_error = False
-            if Shell.check(
-                "sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'"
-            ):
-                print("WARNING: OOM while job execution")
-                CIBuddy(dry_run=not pr_info.is_release).post_error(
-                    "Out Of Memory", job_name=_get_ext_check_name(args.job_name)
-                )
-                has_oom_error = True
-
             job_report = JobReport.load() if JobReport.exist() else None
-            if job_report:
+            assert (
+                job_report
+            ), "BUG. There must be job report either real report, or pre-report if job was killed"
+            if not job_report.job_skipped and not job_report.pre_report:
+                # it's a real job report
                 ch_helper = ClickHouseHelper()
                 check_url = ""

@@ -1236,26 +1258,32 @@ def main() -> int:
                     indata["build"],
                     ch_helper,
                 )
-            else:
+            elif job_report.job_skipped:
+                print(f"Skipped after rerun check {[args.job_name]} - do nothing")
+            elif job_report.job_skipped:
+                print(f"Job was skipped {[args.job_name]} - do nothing")
+            elif job_report.pre_report:
+                print(f"ERROR: Job was killed - generate evidence")
+                job_report.update_duration()
+                # Job was killed!
+                if Utils.is_killed_with_oom():
+                    print("WARNING: OOM while job execution")
+                    error = f"Out Of Memory, exit_code {job_report.exit_code}, after {job_report.duration}s"
+                else:
+                    error = f"Unknown, exit_code {job_report.exit_code}, after {job_report.duration}s"
+                CIBuddy().post_error(error, job_name=_get_ext_check_name(args.job_name))
                 if CI.is_test_job(args.job_name):
-                    if has_oom_error:
-                        description = "ERROR: Out Of Memory"
-                    else:
-                        description = "ERROR: Unknown job status"
                     gh = GitHub(get_best_robot_token(), per_page=100)
                     commit = get_commit(gh, pr_info.sha)
                     post_commit_status(
                         commit,
                         ERROR,
                         "",
-                        description,
-                        job_report.check_name or _get_ext_check_name(args.job_name),
+                        "Error: " + error,
+                        _get_ext_check_name(args.job_name),
                         pr_info,
                         dump_to_file=True,
                     )
-        else:
-            # no job report
-            print(f"No job report for {[args.job_name]} - do nothing")
         ### POST action: end

         ### MARK SUCCESS action: start
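The retry loop added to _update_gh_statuses_action above is a plain Python for/else retry. The standalone sketch below reproduces the same shape with a stubbed action, purely to illustrate that the else branch runs only when every attempt fails; it is not code from the repository.

```python
import time

def flaky_action(state={"calls": 0}):
    # Stub that fails once and then succeeds, to exercise the retry path.
    state["calls"] += 1
    if state["calls"] < 2:
        raise RuntimeError("transient GitHub API error")

for retry in range(2):
    try:
        flaky_action()
        break
    except Exception as e:
        print(f"WARNING: attempt [{retry + 1}] failed: {e}")
        time.sleep(1)
else:
    # Reached only if no attempt hit `break`.
    print("ERROR: All retry attempts failed.")
```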
@ -520,6 +520,35 @@ class CiCache:
|
|||||||
self.RecordType.SUCCESSFUL, job, batch, num_batches, release_branch
|
self.RecordType.SUCCESSFUL, job, batch, num_batches, release_branch
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def has_evidence(self, job: str, job_config: CI.JobConfig) -> bool:
|
||||||
|
"""
|
||||||
|
checks if the job has been seen in master/release CI
|
||||||
|
function is to be used to check if change did not affect the job
|
||||||
|
:param job_config:
|
||||||
|
:param job:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
return (
|
||||||
|
self.is_successful(
|
||||||
|
job=job,
|
||||||
|
batch=0,
|
||||||
|
num_batches=job_config.num_batches,
|
||||||
|
release_branch=True,
|
||||||
|
)
|
||||||
|
or self.is_pending(
|
||||||
|
job=job,
|
||||||
|
batch=0,
|
||||||
|
num_batches=job_config.num_batches,
|
||||||
|
release_branch=True,
|
||||||
|
)
|
||||||
|
or self.is_failed(
|
||||||
|
job=job,
|
||||||
|
batch=0,
|
||||||
|
num_batches=job_config.num_batches,
|
||||||
|
release_branch=True,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def is_failed(
|
def is_failed(
|
||||||
self, job: str, batch: int, num_batches: int, release_branch: bool
|
self, job: str, batch: int, num_batches: int, release_branch: bool
|
||||||
) -> bool:
|
) -> bool:
|
||||||
@ -677,74 +706,47 @@ class CiCache:
|
|||||||
def filter_out_not_affected_jobs(self):
|
def filter_out_not_affected_jobs(self):
|
||||||
"""
|
"""
|
||||||
Filter is to be applied in PRs to remove jobs that are not affected by the change
|
Filter is to be applied in PRs to remove jobs that are not affected by the change
|
||||||
It removes jobs from @jobs_to_do if it is a:
|
|
||||||
1. test job and it is in @jobs_to_wait (no need to wait not affected jobs in PRs)
|
|
||||||
2. test job and it has finished on release branch (even if failed)
|
|
||||||
3. build job which is not required by any test job that is left in @jobs_to_do
|
|
||||||
|
|
||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
# 1.
|
|
||||||
remove_from_await_list = []
|
|
||||||
for job_name, job_config in self.jobs_to_wait.items():
|
|
||||||
if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK:
|
|
||||||
remove_from_await_list.append(job_name)
|
|
||||||
for job in remove_from_await_list:
|
|
||||||
print(f"Filter job [{job}] - test job and not affected by the change")
|
|
||||||
del self.jobs_to_wait[job]
|
|
||||||
del self.jobs_to_do[job]
|
|
||||||
|
|
||||||
# 2.
|
|
||||||
remove_from_to_do = []
|
remove_from_to_do = []
|
||||||
|
required_builds = []
|
||||||
for job_name, job_config in self.jobs_to_do.items():
|
for job_name, job_config in self.jobs_to_do.items():
|
||||||
if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK:
|
if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK:
|
||||||
batches_to_remove = []
|
if job_config.reference_job_name:
|
||||||
assert job_config.batches is not None
|
reference_name = job_config.reference_job_name
|
||||||
for batch in job_config.batches:
|
reference_config = CI.JOB_CONFIGS[reference_name]
|
||||||
if self.is_failed(
|
else:
|
||||||
job_name, batch, job_config.num_batches, release_branch=True
|
reference_name = job_name
|
||||||
):
|
reference_config = job_config
|
||||||
print(
|
if self.has_evidence(
|
||||||
f"Filter [{job_name}/{batch}] - not affected by the change (failed on release branch)"
|
job=reference_name,
|
||||||
)
|
job_config=reference_config,
|
||||||
batches_to_remove.append(batch)
|
):
|
||||||
for batch in batches_to_remove:
|
|
||||||
job_config.batches.remove(batch)
|
|
||||||
if not job_config.batches:
|
|
||||||
print(
|
|
||||||
f"Filter [{job_name}] - not affected by the change (failed on release branch)"
|
|
||||||
)
|
|
||||||
remove_from_to_do.append(job_name)
|
remove_from_to_do.append(job_name)
|
||||||
for job in remove_from_to_do:
|
else:
|
||||||
del self.jobs_to_do[job]
|
required_builds += (
|
||||||
|
job_config.required_builds if job_config.required_builds else []
|
||||||
|
)
|
||||||
|
|
||||||
# 3.
|
|
||||||
required_builds = [] # type: List[str]
|
|
||||||
for job_name, job_config in self.jobs_to_do.items():
|
|
||||||
if CI.is_test_job(job_name) and job_config.required_builds:
|
|
||||||
required_builds += job_config.required_builds
|
|
||||||
required_builds = list(set(required_builds))
|
|
||||||
|
|
||||||
remove_builds = [] # type: List[str]
|
|
||||||
has_builds_to_do = False
|
has_builds_to_do = False
|
||||||
for job_name, job_config in self.jobs_to_do.items():
|
for job_name, job_config in self.jobs_to_do.items():
|
||||||
if CI.is_build_job(job_name):
|
if CI.is_build_job(job_name):
|
||||||
if job_name not in required_builds:
|
if job_name not in required_builds:
|
||||||
remove_builds.append(job_name)
|
remove_from_to_do.append(job_name)
|
||||||
else:
|
else:
|
||||||
has_builds_to_do = True
|
has_builds_to_do = True
|
||||||
|
|
||||||
for build_job in remove_builds:
|
if not has_builds_to_do:
|
||||||
print(
|
remove_from_to_do.append(CI.JobNames.BUILD_CHECK)
|
||||||
f"Filter build job [{build_job}] - not affected and not required by test jobs"
|
|
||||||
)
|
|
||||||
del self.jobs_to_do[build_job]
|
|
||||||
if build_job in self.jobs_to_wait:
|
|
||||||
del self.jobs_to_wait[build_job]
|
|
||||||
|
|
||||||
if not has_builds_to_do and CI.JobNames.BUILD_CHECK in self.jobs_to_do:
|
for job in remove_from_to_do:
|
||||||
print(f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds to do")
|
print(f"Filter job [{job}] - not affected by the change")
|
||||||
del self.jobs_to_do[CI.JobNames.BUILD_CHECK]
|
if job in self.jobs_to_do:
|
||||||
|
del self.jobs_to_do[job]
|
||||||
|
if job in self.jobs_to_wait:
|
||||||
|
del self.jobs_to_wait[job]
|
||||||
|
if job in self.jobs_to_skip:
|
||||||
|
self.jobs_to_skip.remove(job)
|
||||||
|
|
||||||
def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
|
def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
|
||||||
"""
|
"""
|
||||||
@@ -763,14 +765,19 @@ class CiCache:
         MAX_JOB_NUM_TO_WAIT = 3
         round_cnt = 0
 
-        # FIXME: temporary experiment: lets enable await for PR' workflows but for a shorter time
+        def _has_build_job():
+            for job in self.jobs_to_wait:
+                if CI.is_build_job(job):
+                    return True
+            return False
+
         if not is_release:
-            MAX_ROUNDS_TO_WAIT = 3
+            # in PRs we can wait only for builds, TIMEOUT*MAX_ROUNDS_TO_WAIT=100min is enough
+            MAX_ROUNDS_TO_WAIT = 2
 
         while (
-            len(self.jobs_to_wait) > MAX_JOB_NUM_TO_WAIT
-            and round_cnt < MAX_ROUNDS_TO_WAIT
-        ):
+            len(self.jobs_to_wait) > MAX_JOB_NUM_TO_WAIT or _has_build_job()
+        ) and round_cnt < MAX_ROUNDS_TO_WAIT:
             round_cnt += 1
             GHActions.print_in_group(
                 f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:",
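The two changes above work together: in a PR a test job survives the filter only when the CI cache holds a record ("evidence") for it or for its reference job, and the wait loop now keeps waiting while any build job is still pending. A minimal standalone sketch of the evidence decision, with invented job names and a plain set standing in for the cache:

    # Illustrative sketch only; job names and `cache_records` are made up.
    def job_is_affected(job_name: str, reference_job_name: str, cache_records: set) -> bool:
        checked = reference_job_name or job_name
        # an existing record for the same digest is evidence that the change did not affect the job
        return checked not in cache_records

    records = {"Integration tests (tsan)"}
    print(job_is_affected("Integration tests flaky check (asan)", "Integration tests (tsan)", records))
    # False -> the flaky check would be filtered out of the PR workflow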
@@ -413,7 +413,9 @@ class CI:
             release_only=True,
         ),
         JobNames.INTEGRATION_TEST_FLAKY: CommonJobConfigs.INTEGRATION_TEST.with_properties(
-            required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True
+            required_builds=[BuildNames.PACKAGE_ASAN],
+            pr_only=True,
+            reference_job_name=JobNames.INTEGRATION_TEST_TSAN,
         ),
         JobNames.COMPATIBILITY_TEST: CommonJobConfigs.COMPATIBILITY_TEST.with_properties(
             required_builds=[BuildNames.PACKAGE_RELEASE],
@@ -455,7 +457,10 @@ class CI:
             required_builds=[BuildNames.PACKAGE_UBSAN],
         ),
         JobNames.STATELESS_TEST_FLAKY_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties(
-            required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True, timeout=3600
+            required_builds=[BuildNames.PACKAGE_ASAN],
+            pr_only=True,
+            timeout=3600,
+            reference_job_name=JobNames.STATELESS_TEST_RELEASE,
         ),
         JobNames.JEPSEN_KEEPER: JobConfig(
             required_builds=[BuildNames.BINARY_RELEASE],
@@ -640,7 +645,7 @@ class CI:
 
     @classmethod
     def is_test_job(cls, job: str) -> bool:
-        return not cls.is_build_job(job) and job != cls.JobNames.STYLE_CHECK
+        return not cls.is_build_job(job)
 
     @classmethod
     def is_docs_job(cls, job: str) -> bool:
@@ -284,8 +284,12 @@ class JobConfig:
 
     # GH Runner type (tag from @Runners)
     runner_type: str
-    # used for config validation in ci unittests
+    # used in ci unittests for config validation
     job_name_keyword: str = ""
+    # name of another job that (if provided) should be used to check if job was affected by the change or not (in CiCache.has_evidence(job=@reference_job_name) call)
+    # for example: "Stateless flaky check" can use reference_job_name="Stateless tests (release)". "Stateless flaky check" does not run on master
+    # and there cannot be an evidence for it, so instead "Stateless tests (release)" job name can be used to check the evidence
+    reference_job_name: str = ""
     # builds required for the job (applicable for test jobs)
     required_builds: Optional[List[str]] = None
     # build config for the build job (applicable for builds)
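For illustration only, a flaky PR-only check could now be declared with a reference job roughly like this; the runner tag and names below are placeholders, not the full production entry:

    # hypothetical, abbreviated config entry
    flaky_check = JobConfig(
        runner_type="func-tester",                       # assumed runner tag
        pr_only=True,
        required_builds=["package_asan"],
        reference_job_name="Stateless tests (release)",  # evidence is looked up under this name
    )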
@@ -96,3 +96,15 @@ class Utils:
         if match:
             return int(match.group(1))
         return None
+
+    @staticmethod
+    def is_killed_with_oom():
+        if Shell.check(
+            "sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'"
+        ):
+            return True
+        return False
+
+    @staticmethod
+    def clear_dmesg():
+        Shell.run("sudo dmesg --clear ||:")
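A plausible way a job wrapper could use the new helpers, assuming it owns the start and end of the tested process (that wiring is not part of this hunk):

    # illustrative only
    Utils.clear_dmesg()            # start from a clean kernel log
    exit_code = run_the_job()      # hypothetical job entry point
    if exit_code != 0 and Utils.is_killed_with_oom():
        print("Job failed and the kernel reports an OOM kill - treat as infrastructure error")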
 710  tests/ci/create_release.py  Executable file
@@ -0,0 +1,710 @@
import argparse
import dataclasses
import json
import os
import subprocess

from contextlib import contextmanager
from copy import copy
from pathlib import Path
from typing import Iterator, List

from git_helper import Git, GIT_PREFIX
from ssh import SSHAgent
from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET
from s3_helper import S3Helper
from autoscale_runners_lambda.lambda_shared.pr import Labels
from version_helper import (
    FILE_WITH_VERSION_PATH,
    GENERATED_CONTRIBUTORS,
    get_abs_path,
    get_version_from_repo,
    update_cmake_version,
    update_contributors,
    VersionType,
)
from ci_config import CI

CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH)
CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS)


class ShellRunner:

    @classmethod
    def run(
        cls, command, check_retcode=True, print_output=True, async_=False, dry_run=False
    ):
        if dry_run:
            print(f"Dry-run: Would run shell command: [{command}]")
            return 0, ""
        print(f"Running shell command: [{command}]")
        if async_:
            subprocess.Popen(command.split(" "))  # pylint:disable=consider-using-with
            return 0, ""
        result = subprocess.run(
            command + " 2>&1",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True,
        )
        if print_output:
            print(result.stdout)
        if check_retcode:
            assert result.returncode == 0, f"Return code [{result.returncode}]"
        return result.returncode, result.stdout
@dataclasses.dataclass
class ReleaseInfo:
    version: str
    release_tag: str
    release_branch: str
    commit_sha: str
    # lts or stable
    codename: str

    @staticmethod
    def from_file(file_path: str) -> "ReleaseInfo":
        with open(file_path, "r", encoding="utf-8") as json_file:
            res = json.load(json_file)
        return ReleaseInfo(**res)

    @staticmethod
    def prepare(commit_ref: str, release_type: str, outfile: str) -> None:
        Path(outfile).parent.mkdir(parents=True, exist_ok=True)
        Path(outfile).unlink(missing_ok=True)
        version = None
        release_branch = None
        release_tag = None
        codename = None
        assert release_type in ("patch", "new")
        if release_type == "new":
            # check commit_ref is right and on a right branch
            ShellRunner.run(
                f"git merge-base --is-ancestor origin/{commit_ref} origin/master"
            )
            with checkout(commit_ref):
                _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
                # Git() must be inside "with checkout" contextmanager
                git = Git()
                version = get_version_from_repo(git=git)
                release_branch = "master"
                expected_prev_tag = f"v{version.major}.{version.minor}.1.1-new"
                version.bump().with_description(VersionType.NEW)
                assert (
                    git.latest_tag == expected_prev_tag
                ), f"BUG: latest tag [{git.latest_tag}], expected [{expected_prev_tag}]"
                release_tag = version.describe
                codename = (
                    VersionType.STABLE
                )  # dummy value (artifactory won't be updated for new release)
        if release_type == "patch":
            with checkout(commit_ref):
                _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
                # Git() must be inside "with checkout" contextmanager
                git = Git()
                version = get_version_from_repo(git=git)
                codename = version.get_stable_release_type()
                version.with_description(codename)
                release_branch = f"{version.major}.{version.minor}"
                release_tag = version.describe
            ShellRunner.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags")
            # check commit is right and on a right branch
            ShellRunner.run(
                f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}"
            )
            if version.patch == 1:
                expected_version = copy(version)
                expected_version.bump()
                expected_tag_prefix = (
                    f"v{expected_version.major}.{expected_version.minor}-"
                )
                expected_tag_suffix = "-new"
            else:
                expected_tag_prefix = (
                    f"v{version.major}.{version.minor}.{version.patch-1}."
                )
                expected_tag_suffix = f"-{version.get_stable_release_type()}"
            if git.latest_tag.startswith(
                expected_tag_prefix
            ) and git.latest_tag.endswith(expected_tag_suffix):
                pass
            else:
                assert (
                    False
                ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]"

        assert (
            release_branch
            and commit_sha
            and release_tag
            and version
            and codename in ("lts", "stable")
        )
        res = ReleaseInfo(
            release_branch=release_branch,
            commit_sha=commit_sha,
            release_tag=release_tag,
            version=version.string,
            codename=codename,
        )
        with open(outfile, "w", encoding="utf-8") as f:
            print(json.dumps(dataclasses.asdict(res), indent=2), file=f)

    def push_release_tag(self, dry_run: bool) -> None:
        if dry_run:
            # remove locally created tag from prev run
            ShellRunner.run(
                f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:"
            )
        # Create release tag
        print(
            f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]"
        )
        tag_message = f"Release {self.release_tag}"
        ShellRunner.run(
            f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}"
        )
        cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}"
        ShellRunner.run(cmd_push_tag, dry_run=dry_run)

    @staticmethod
    def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None:
        cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}"
        ShellRunner.run(cmd, dry_run=dry_run)

    def push_new_release_branch(self, dry_run: bool) -> None:
        assert (
            self.release_branch == "master"
        ), "New release branch can be created only for release type [new]"
        git = Git()
        version = get_version_from_repo(git=git)
        new_release_branch = f"{version.major}.{version.minor}"
        stable_release_type = version.get_stable_release_type()
        version_after_release = copy(version)
        version_after_release.bump()
        assert (
            version_after_release.string == self.version
        ), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]"
        if dry_run:
            # remove locally created branch from prev run
            ShellRunner.run(
                f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch} ||:"
            )
        print(
            f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]"
        )
        with checkout(self.release_branch):
            with checkout_new(new_release_branch):
                pr_labels = f"--label {Labels.RELEASE}"
                if stable_release_type == VersionType.LTS:
                    pr_labels += f" --label {Labels.RELEASE_LTS}"
                cmd_push_branch = (
                    f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}"
                )
                ShellRunner.run(cmd_push_branch, dry_run=dry_run)

        print("Create and push backport tags for new release branch")
        ReleaseInfo._create_gh_label(
            f"v{new_release_branch}-must-backport", "10dbed", dry_run=dry_run
        )
        ReleaseInfo._create_gh_label(
            f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run
        )
        ShellRunner.run(
            f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}'
            --head {new_release_branch} {pr_labels}
            --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.'
            """,
            dry_run=dry_run,
        )

    def update_version_and_contributors_list(self, dry_run: bool) -> None:
        # Bump version, update contributors list, create PR
        branch_upd_version_contributors = f"bump_version_{self.version}"
        with checkout(self.commit_sha):
            git = Git()
            version = get_version_from_repo(git=git)
            if self.release_branch == "master":
                version.bump()
                version.with_description(VersionType.TESTING)
            else:
                version.with_description(version.get_stable_release_type())
            assert (
                version.string == self.version
            ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]"
        with checkout(self.release_branch):
            with checkout_new(branch_upd_version_contributors):
                update_cmake_version(version)
                update_contributors(raise_error=True)
                cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'"
                cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}"
                body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
                actor = os.getenv("GITHUB_ACTOR", "") or "me"
                cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file}' --label 'do not test' --assignee @{actor}"
                ShellRunner.run(cmd_commit_version_upd, dry_run=dry_run)
                ShellRunner.run(cmd_push_branch, dry_run=dry_run)
                ShellRunner.run(cmd_create_pr, dry_run=dry_run)
                if dry_run:
                    ShellRunner.run(
                        f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
                    )
                    ShellRunner.run(
                        f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
                    )

    def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None:
        repo = os.getenv("GITHUB_REPOSITORY")
        assert repo
        cmds = []
        cmds.append(
            f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}"
        )
        for file in packages_files:
            cmds.append(f"gh release upload {self.release_tag} {file}")
        if not dry_run:
            for cmd in cmds:
                ShellRunner.run(cmd)
        else:
            print("Dry-run, would run commands:")
            print("\n * ".join(cmds))
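ReleaseInfo.prepare() writes the dataclass as JSON to --outfile, and every later step reads it back with from_file(); for a patch release the payload would look roughly like the following (all values below are illustrative, not taken from a real release):

    # shape of the generated release_info.json, shown as a Python literal
    release_info = {
        "version": "24.4.3.25",
        "release_tag": "v24.4.3.25-stable",
        "release_branch": "24.4",
        "commit_sha": "0123456789abcdef0123456789abcdef01234567",
        "codename": "stable",
    }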
class RepoTypes:
    RPM = "rpm"
    DEBIAN = "deb"
    TGZ = "tgz"


class PackageDownloader:
    PACKAGES = (
        "clickhouse-client",
        "clickhouse-common-static",
        "clickhouse-common-static-dbg",
        "clickhouse-keeper",
        "clickhouse-keeper-dbg",
        "clickhouse-server",
    )

    EXTRA_PACKAGES = (
        "clickhouse-library-bridge",
        "clickhouse-odbc-bridge",
    )
    PACKAGE_TYPES = (CI.BuildNames.PACKAGE_RELEASE, CI.BuildNames.PACKAGE_AARCH64)
    MACOS_PACKAGE_TO_BIN_SUFFIX = {
        CI.BuildNames.BINARY_DARWIN: "macos",
        CI.BuildNames.BINARY_DARWIN_AARCH64: "macos-aarch64",
    }
    LOCAL_DIR = "/tmp/packages"

    @classmethod
    def _get_arch_suffix(cls, package_arch, repo_type):
        if package_arch == CI.BuildNames.PACKAGE_RELEASE:
            return (
                "amd64" if repo_type in (RepoTypes.DEBIAN, RepoTypes.TGZ) else "x86_64"
            )
        elif package_arch == CI.BuildNames.PACKAGE_AARCH64:
            return (
                "arm64" if repo_type in (RepoTypes.DEBIAN, RepoTypes.TGZ) else "aarch64"
            )
        else:
            assert False, "BUG"

    def __init__(self, release, commit_sha, version):
        assert version.startswith(release), "Invalid release branch or version"
        major, minor = map(int, release.split("."))
        self.package_names = list(self.PACKAGES)
        if major > 24 or (major == 24 and minor > 3):
            self.package_names += list(self.EXTRA_PACKAGES)
        self.release = release
        self.commit_sha = commit_sha
        self.version = version
        self.s3 = S3Helper()
        self.deb_package_files = []
        self.rpm_package_files = []
        self.tgz_package_files = []
        # just binaries for macos
        self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"]
        self.file_to_type = {}

        ShellRunner.run(f"mkdir -p {self.LOCAL_DIR}")

        for package_type in self.PACKAGE_TYPES:
            for package in self.package_names:
                deb_package_file_name = f"{package}_{self.version}_{self._get_arch_suffix(package_type, RepoTypes.DEBIAN)}.deb"
                self.deb_package_files.append(deb_package_file_name)
                self.file_to_type[deb_package_file_name] = package_type

                rpm_package_file_name = f"{package}-{self.version}.{self._get_arch_suffix(package_type, RepoTypes.RPM)}.rpm"
                self.rpm_package_files.append(rpm_package_file_name)
                self.file_to_type[rpm_package_file_name] = package_type

                tgz_package_file_name = f"{package}-{self.version}-{self._get_arch_suffix(package_type, RepoTypes.TGZ)}.tgz"
                self.tgz_package_files.append(tgz_package_file_name)
                self.file_to_type[tgz_package_file_name] = package_type
                tgz_package_file_name += ".sha512"
                self.tgz_package_files.append(tgz_package_file_name)
                self.file_to_type[tgz_package_file_name] = package_type

    def get_deb_packages_files(self):
        return self.deb_package_files

    def get_rpm_packages_files(self):
        return self.rpm_package_files

    def get_tgz_packages_files(self):
        return self.tgz_package_files

    def get_macos_packages_files(self):
        return self.macos_package_files

    def get_packages_names(self):
        return self.package_names

    def get_all_packages_files(self):
        assert self.local_tgz_packages_ready()
        assert self.local_deb_packages_ready()
        assert self.local_rpm_packages_ready()
        assert self.local_macos_packages_ready()
        res = []
        for package_file in (
            self.deb_package_files
            + self.rpm_package_files
            + self.tgz_package_files
            + self.macos_package_files
        ):
            res.append(self.LOCAL_DIR + "/" + package_file)
        return res

    def run(self):
        ShellRunner.run(f"rm -rf {self.LOCAL_DIR}/*")
        for package_file in (
            self.deb_package_files + self.rpm_package_files + self.tgz_package_files
        ):
            print(f"Downloading: [{package_file}]")
            s3_path = "/".join(
                [
                    self.release,
                    self.commit_sha,
                    self.file_to_type[package_file],
                    package_file,
                ]
            )
            self.s3.download_file(
                bucket=S3_BUILDS_BUCKET,
                s3_path=s3_path,
                local_file_path="/".join([self.LOCAL_DIR, package_file]),
            )

        for macos_package, bin_suffix in self.MACOS_PACKAGE_TO_BIN_SUFFIX.items():
            binary_name = "clickhouse"
            destination_binary_name = f"{binary_name}-{bin_suffix}"
            assert destination_binary_name in self.macos_package_files
            print(
                f"Downloading: [{macos_package}] binary to [{destination_binary_name}]"
            )
            s3_path = "/".join(
                [
                    self.release,
                    self.commit_sha,
                    macos_package,
                    binary_name,
                ]
            )
            self.s3.download_file(
                bucket=S3_BUILDS_BUCKET,
                s3_path=s3_path,
                local_file_path="/".join([self.LOCAL_DIR, destination_binary_name]),
            )

    def local_deb_packages_ready(self) -> bool:
        assert self.deb_package_files
        for package_file in self.deb_package_files:
            print(f"Check package is downloaded [{package_file}]")
            if not Path(self.LOCAL_DIR + "/" + package_file).is_file():
                return False
        return True

    def local_rpm_packages_ready(self) -> bool:
        assert self.rpm_package_files
        for package_file in self.rpm_package_files:
            print(f"Check package is downloaded [{package_file}]")
            if not Path(self.LOCAL_DIR + "/" + package_file).is_file():
                return False
        return True

    def local_tgz_packages_ready(self) -> bool:
        assert self.tgz_package_files
        for package_file in self.tgz_package_files:
            print(f"Check package is downloaded [{package_file}]")
            if not Path(self.LOCAL_DIR + "/" + package_file).is_file():
                return False
        return True

    def local_macos_packages_ready(self) -> bool:
        assert self.macos_package_files
        for package_file in self.macos_package_files:
            print(f"Check package is downloaded [{package_file}]")
            if not Path(self.LOCAL_DIR + "/" + package_file).is_file():
                return False
        return True
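Given release branch "24.6", some commit sha and version "24.6.1.100", the constructor above derives package file names of the following shape (examples only, not the full list):

    # illustrative names built from the patterns in __init__
    expected = [
        "clickhouse-server_24.6.1.100_amd64.deb",
        "clickhouse-server-24.6.1.100.x86_64.rpm",
        "clickhouse-server-24.6.1.100-arm64.tgz",
        "clickhouse-server-24.6.1.100-arm64.tgz.sha512",
        "clickhouse-macos",  # macOS artifacts are bare binaries
    ]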
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Creates release",
    )
    parser.add_argument(
        "--prepare-release-info",
        action="store_true",
        help="Initial step to prepare info like release branch, release tag, etc.",
    )
    parser.add_argument(
        "--push-release-tag",
        action="store_true",
        help="Creates and pushes git tag",
    )
    parser.add_argument(
        "--push-new-release-branch",
        action="store_true",
        help="Creates and pushes new release branch and corresponding service gh tags for backports",
    )
    parser.add_argument(
        "--create-bump-version-pr",
        action="store_true",
        help="Updates version, contributors' list and creates PR",
    )
    parser.add_argument(
        "--download-packages",
        action="store_true",
        help="Downloads all required packages from s3",
    )
    parser.add_argument(
        "--create-gh-release",
        action="store_true",
        help="Create GH Release object and attach all packages",
    )
    parser.add_argument(
        "--ref",
        type=str,
        help="the commit hash or branch",
    )
    parser.add_argument(
        "--release-type",
        choices=("new", "patch"),
        # dest="release_type",
        help="a release type to bump the major.minor.patch version part, "
        "new branch is created only for the value 'new'",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="do not make any actual changes in the repo, just show what will be done",
    )
    parser.add_argument(
        "--outfile",
        default="",
        type=str,
        help="output file to write json result to, if not set - stdout",
    )
    parser.add_argument(
        "--infile",
        default="",
        type=str,
        help="input file with release info",
    )

    return parser.parse_args()
@contextmanager
def checkout(ref: str) -> Iterator[None]:
    _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
    rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
    assert orig_ref
    if ref not in (orig_ref,):
        ShellRunner.run(f"{GIT_PREFIX} checkout {ref}")
    try:
        yield
    except (Exception, KeyboardInterrupt) as e:
        print(f"ERROR: Exception [{e}]")
        ShellRunner.run(rollback_cmd)
        raise
    ShellRunner.run(rollback_cmd)


@contextmanager
def checkout_new(ref: str) -> Iterator[None]:
    _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
    rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
    assert orig_ref
    ShellRunner.run(f"{GIT_PREFIX} checkout -b {ref}")
    try:
        yield
    except (Exception, KeyboardInterrupt) as e:
        print(f"ERROR: Exception [{e}]")
        ShellRunner.run(rollback_cmd)
        raise
    ShellRunner.run(rollback_cmd)
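Both context managers restore the originally checked-out ref on exit, including on exceptions, so release steps can be nested without leaving the runner on a stray branch; a minimal usage sketch with placeholder branch names:

    # illustrative only
    with checkout("24.6"):                           # existing release branch
        with checkout_new("bump_version_24.6.2.1"):  # temporary branch for the PR
            pass  # commit and push here
    # after the blocks exit, the previous ref is checked out again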
if __name__ == "__main__":
    args = parse_args()
    assert args.dry_run

    # prepare ssh for git if needed
    _ssh_agent = None
    _key_pub = None
    if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""):
        _key = os.getenv("ROBOT_CLICKHOUSE_SSH_KEY")
        _ssh_agent = SSHAgent()
        _key_pub = _ssh_agent.add(_key)
        _ssh_agent.print_keys()

    if args.prepare_release_info:
        assert (
            args.ref and args.release_type and args.outfile
        ), "--ref, --release-type and --outfile must be provided with --prepare-release-info"
        ReleaseInfo.prepare(
            commit_ref=args.ref, release_type=args.release_type, outfile=args.outfile
        )
    if args.push_release_tag:
        assert args.infile, "--infile <release info file path> must be provided"
        release_info = ReleaseInfo.from_file(args.infile)
        release_info.push_release_tag(dry_run=args.dry_run)
    if args.push_new_release_branch:
        assert args.infile, "--infile <release info file path> must be provided"
        release_info = ReleaseInfo.from_file(args.infile)
        release_info.push_new_release_branch(dry_run=args.dry_run)
    if args.create_bump_version_pr:
        # TODO: store link to PR in release info
        assert args.infile, "--infile <release info file path> must be provided"
        release_info = ReleaseInfo.from_file(args.infile)
        release_info.update_version_and_contributors_list(dry_run=args.dry_run)
    if args.download_packages:
        assert args.infile, "--infile <release info file path> must be provided"
        release_info = ReleaseInfo.from_file(args.infile)
        p = PackageDownloader(
            release=release_info.release_branch,
            commit_sha=release_info.commit_sha,
            version=release_info.version,
        )
        p.run()
    if args.create_gh_release:
        assert args.infile, "--infile <release info file path> must be provided"
        release_info = ReleaseInfo.from_file(args.infile)
        p = PackageDownloader(
            release=release_info.release_branch,
            commit_sha=release_info.commit_sha,
            version=release_info.version,
        )
        release_info.create_gh_release(p.get_all_packages_files(), args.dry_run)

    # tear down ssh
    if _ssh_agent and _key_pub:
        _ssh_agent.remove(_key_pub)
"""
|
||||||
|
Prepare release machine:
|
||||||
|
|
||||||
|
### INSTALL PACKAGES
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install --yes --no-install-recommends python3-dev python3-pip gh unzip
|
||||||
|
sudo apt install --yes python3-boto3
|
||||||
|
sudo apt install --yes python3-github
|
||||||
|
sudo apt install --yes python3-unidiff
|
||||||
|
sudo apt install --yes s3fs
|
||||||
|
|
||||||
|
### INSTALL AWS CLI
|
||||||
|
cd /tmp
|
||||||
|
curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip"
|
||||||
|
unzip awscliv2.zip
|
||||||
|
sudo ./aws/install
|
||||||
|
rm -rf aws*
|
||||||
|
cd -
|
||||||
|
|
||||||
|
### INSTALL GH ACTIONS RUNNER:
|
||||||
|
# Create a folder
|
||||||
|
RUNNER_VERSION=2.317.0
|
||||||
|
cd ~
|
||||||
|
mkdir actions-runner && cd actions-runner
|
||||||
|
# Download the latest runner package
|
||||||
|
runner_arch() {
|
||||||
|
case $(uname -m) in
|
||||||
|
x86_64 )
|
||||||
|
echo x64;;
|
||||||
|
aarch64 )
|
||||||
|
echo arm64;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
curl -O -L https://github.com/actions/runner/releases/download/v$RUNNER_VERSION/actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz
|
||||||
|
# Extract the installer
|
||||||
|
tar xzf ./actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz
|
||||||
|
rm ./actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz
|
||||||
|
|
||||||
|
### Install reprepro:
|
||||||
|
cd ~
|
||||||
|
sudo apt install dpkg-dev libgpgme-dev libdb-dev libbz2-dev liblzma-dev libarchive-dev shunit2 db-util debhelper
|
||||||
|
git clone https://salsa.debian.org/debian/reprepro.git
|
||||||
|
cd reprepro
|
||||||
|
dpkg-buildpackage -b --no-sign && sudo dpkg -i ../reprepro_$(dpkg-parsechangelog --show-field Version)_$(dpkg-architecture -q DEB_HOST_ARCH).deb
|
||||||
|
|
||||||
|
### Install createrepo-c:
|
||||||
|
sudo apt install createrepo-c
|
||||||
|
createrepo_c --version
|
||||||
|
#Version: 0.17.3 (Features: DeltaRPM LegacyWeakdeps )
|
||||||
|
|
||||||
|
### Import gpg sign key
|
||||||
|
gpg --import key.pgp
|
||||||
|
gpg --list-secret-keys
|
||||||
|
|
||||||
|
### Install docker
|
||||||
|
sudo su; cd ~
|
||||||
|
|
||||||
|
deb_arch() {
|
||||||
|
case $(uname -m) in
|
||||||
|
x86_64 )
|
||||||
|
echo amd64;;
|
||||||
|
aarch64 )
|
||||||
|
echo arm64;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
|
|
||||||
|
echo "deb [arch=$(deb_arch) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||||
|
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install --yes --no-install-recommends docker-ce docker-buildx-plugin docker-ce-cli containerd.io
|
||||||
|
|
||||||
|
sudo usermod -aG docker ubuntu
|
||||||
|
|
||||||
|
# enable ipv6 in containers (fixed-cidr-v6 is some random network mask)
|
||||||
|
cat <<EOT > /etc/docker/daemon.json
|
||||||
|
{
|
||||||
|
"ipv6": true,
|
||||||
|
"fixed-cidr-v6": "2001:db8:1::/64",
|
||||||
|
"log-driver": "json-file",
|
||||||
|
"log-opts": {
|
||||||
|
"max-file": "5",
|
||||||
|
"max-size": "1000m"
|
||||||
|
},
|
||||||
|
"insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
|
||||||
|
"registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
|
||||||
|
}
|
||||||
|
EOT
|
||||||
|
|
||||||
|
# if docker build does not work:
|
||||||
|
sudo systemctl restart docker
|
||||||
|
docker buildx rm mybuilder
|
||||||
|
docker buildx create --name mybuilder --driver docker-container --use
|
||||||
|
docker buildx inspect mybuilder --bootstrap
|
||||||
|
|
||||||
|
### Install tailscale
|
||||||
|
|
||||||
|
### Configure GH runner
|
||||||
|
"""
|
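Taken together, the script is meant to be invoked once per step, with every step after the first consuming the JSON produced by --prepare-release-info; a dry-run sketch of one plausible ordering for a patch release (only the first step is wired into the workflow in this commit, the rest of the ordering is an assumption):

    # illustrative only
    import subprocess
    info = "/tmp/release_info.json"
    steps = [
        ["--prepare-release-info", "--ref", "24.6", "--release-type", "patch", "--outfile", info],
        ["--push-release-tag", "--infile", info],
        ["--create-bump-version-pr", "--infile", info],
        ["--download-packages", "--infile", info],
        ["--create-gh-release", "--infile", info],
    ]
    for step in steps:
        subprocess.check_call(["python3", "tests/ci/create_release.py", "--dry-run", *step])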
@@ -11,7 +11,6 @@ from os import path as p
 from pathlib import Path
 from typing import Dict, List
 
-from build_check import get_release_or_pr
 from build_download_helper import read_build_urls
 from docker_images_helper import DockerImageData, docker_login
 from env_helper import (
@@ -22,7 +21,7 @@ from env_helper import (
     TEMP_PATH,
 )
 from git_helper import Git
-from pr_info import PRInfo
+from pr_info import PRInfo, EventType
 from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults
 from stopwatch import Stopwatch
 from tee_popen import TeePopen
@@ -63,6 +62,12 @@ def parse_args() -> argparse.Namespace:
         help="a version to build, automaticaly got from version_helper, accepts either "
         "tag ('refs/tags/' is removed automatically) or a normal 22.2.2.2 format",
     )
+    parser.add_argument(
+        "--sha",
+        type=str,
+        default="",
+        help="sha of the commit to use packages from",
+    )
     parser.add_argument(
         "--release-type",
         type=str,
@@ -122,7 +127,7 @@ def parse_args() -> argparse.Namespace:
 
 
 def retry_popen(cmd: str, log_file: Path) -> int:
-    max_retries = 5
+    max_retries = 2
     for retry in range(max_retries):
         # From time to time docker build may failed. Curl issues, or even push
         # It will sleep progressively 5, 15, 30 and 50 seconds between retries
@@ -370,13 +375,22 @@ def main():
     tags = gen_tags(args.version, args.release_type)
     repo_urls = {}
     direct_urls: Dict[str, List[str]] = {}
-    release_or_pr, _ = get_release_or_pr(pr_info, args.version)
+    if pr_info.event_type == EventType.PULL_REQUEST:
+        release_or_pr = str(pr_info.number)
+        sha = pr_info.sha
+    elif pr_info.event_type == EventType.PUSH and pr_info.is_master:
+        release_or_pr = str(0)
+        sha = pr_info.sha
+    else:
+        release_or_pr = f"{args.version.major}.{args.version.minor}"
+        sha = args.sha
+        assert sha
 
     for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")):
         if not args.bucket_prefix:
             repo_urls[arch] = (
                 f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/"
-                f"{release_or_pr}/{pr_info.sha}/{build_name}"
+                f"{release_or_pr}/{sha}/{build_name}"
             )
         else:
             repo_urls[arch] = f"{args.bucket_prefix}/{build_name}"
@@ -23,7 +23,7 @@ from typing import (
 from build_download_helper import get_gh_api
 from ci_config import CI
 from ci_utils import normalize_string
-from env_helper import REPORT_PATH, TEMP_PATH
+from env_helper import REPORT_PATH, GITHUB_WORKSPACE
 
 logger = logging.getLogger(__name__)
 
@@ -244,7 +244,8 @@ HTML_TEST_PART = """
 """
 
 BASE_HEADERS = ["Test name", "Test status"]
-JOB_REPORT_FILE = Path(TEMP_PATH) / "job_report.json"
+# should not be in TEMP directory or any directory that may be cleaned during the job execution
+JOB_REPORT_FILE = Path(GITHUB_WORKSPACE) / "job_report.json"
 
 
 @dataclass
@@ -296,6 +297,33 @@ class JobReport:
     build_dir_for_upload: Union[Path, str] = ""
     # if False no GH commit status will be created by CI
     need_commit_status: bool = True
+    # indicates that this is not real job report but report for the job that was skipped by rerun check
+    job_skipped: bool = False
+    # indicates that report generated by CI script in order to check later if job was killed before real report is generated
+    pre_report: bool = False
+    exit_code: int = -1
+
+    @staticmethod
+    def create_pre_report() -> "JobReport":
+        return JobReport(
+            status=ERROR,
+            description="",
+            test_results=[],
+            start_time=datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
+            duration=0.0,
+            additional_files=[],
+            pre_report=True,
+        )
+
+    def update_duration(self):
+        if not self.start_time:
+            self.duration = 0.0
+        else:
+            start_time = datetime.datetime.strptime(
+                self.start_time, "%Y-%m-%d %H:%M:%S"
+            )
+            current_time = datetime.datetime.utcnow()
+            self.duration = (current_time - start_time).total_seconds()
 
     def __post_init__(self):
         assert self.status in (SUCCESS, ERROR, FAILURE, PENDING)
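The pre-report is intended to be written at job start so that a killed job still leaves a parsable report behind, and to be replaced by the real one on success; a sketch of the expected call pattern (persisting the object is left to the caller and is not shown in this hunk):

    # illustrative only
    report = JobReport.create_pre_report()   # status is ERROR until proven otherwise
    # ... run the job ...
    report.exit_code = 0
    report.update_duration()                 # duration recomputed from start_time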
@@ -37,9 +37,9 @@ class SSHAgent:
         ssh_options = (
             "," + os.environ["SSH_OPTIONS"] if os.environ.get("SSH_OPTIONS") else ""
         )
-        os.environ[
-            "SSH_OPTIONS"
-        ] = f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no"
+        os.environ["SSH_OPTIONS"] = (
+            f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no"
+        )
 
     def add(self, key):
         key_pub = self._key_pub(key)
@@ -16,7 +16,15 @@ from docker_images_helper import get_docker_image, pull_image
 from env_helper import IS_CI, REPO_COPY, TEMP_PATH, GITHUB_EVENT_PATH
 from git_helper import GIT_PREFIX, git_runner
 from pr_info import PRInfo
-from report import ERROR, FAILURE, SUCCESS, JobReport, TestResults, read_test_results
+from report import (
+    ERROR,
+    FAILURE,
+    SUCCESS,
+    JobReport,
+    TestResults,
+    read_test_results,
+    FAIL,
+)
 from ssh import SSHKey
 from stopwatch import Stopwatch
 
@@ -192,15 +200,6 @@ def main():
             future = executor.submit(subprocess.run, cmd_shell, shell=True)
             _ = future.result()
 
-    autofix_description = ""
-    if args.push:
-        try:
-            commit_push_staged(pr_info)
-        except subprocess.SubprocessError:
-            # do not fail the whole script if the autofix didn't work out
-            logging.error("Unable to push the autofix. Continue.")
-            autofix_description = "Failed to push autofix to the PR. "
-
     subprocess.check_call(
         f"python3 ../../utils/check-style/process_style_check_result.py --in-results-dir {temp_path} "
         f"--out-results-file {temp_path}/test_results.tsv --out-status-file {temp_path}/check_status.tsv || "
@@ -210,6 +209,21 @@ def main():
 
     state, description, test_results, additional_files = process_result(temp_path)
 
+    autofix_description = ""
+    fail_cnt = 0
+    for result in test_results:
+        if result.status in (FAILURE, FAIL):
+            # do not autofix if not only black failed
+            fail_cnt += 1
+
+    if args.push and fail_cnt == 1:
+        try:
+            commit_push_staged(pr_info)
+        except subprocess.SubprocessError:
+            # do not fail the whole script if the autofix didn't work out
+            logging.error("Unable to push the autofix. Continue.")
+            autofix_description = "Failed to push autofix to the PR. "
+
     JobReport(
         description=f"{autofix_description}{description}",
         test_results=test_results,
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3
 
 import unittest
+import random
+
 from ci_config import CI
 import ci as CIPY
 from ci_settings import CiSettings
@@ -57,6 +59,18 @@ class TestCIConfig(unittest.TestCase):
                 f"Job [{job}] apparently uses wrong common config with job keyword [{CI.JOB_CONFIGS[job].job_name_keyword}]",
             )
 
+    def test_job_config_has_proper_values(self):
+        for job in CI.JobNames:
+            if CI.JOB_CONFIGS[job].reference_job_name:
+                reference_job_config = CI.JOB_CONFIGS[
+                    CI.JOB_CONFIGS[job].reference_job_name
+                ]
+                # reference job must run in all workflows and has digest
+                self.assertTrue(reference_job_config.pr_only == False)
+                self.assertTrue(reference_job_config.release_only == False)
+                self.assertTrue(reference_job_config.run_always == False)
+                self.assertTrue(reference_job_config.digest != CI.DigestConfig())
+
     def test_required_checks(self):
         for job in CI.REQUIRED_CHECKS:
             if job in (CI.StatusNames.PR_CHECK, CI.StatusNames.SYNC):
@@ -497,79 +511,68 @@ class TestCIConfig(unittest.TestCase):
         settings = CiSettings()
         settings.no_ci_cache = True
         pr_info = PRInfo(github_event=_TEST_EVENT_JSON)
-        pr_info.event_type = EventType.PUSH
-        pr_info.number = 0
-        assert pr_info.is_release and not pr_info.is_merge_queue
+        pr_info.event_type = EventType.PULL_REQUEST
+        pr_info.number = 123
+        assert pr_info.is_pr
         ci_cache = CIPY._configure_jobs(
             S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True
         )
         self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list")
-        all_jobs_in_wf = list(ci_cache.jobs_to_do)
         assert not ci_cache.jobs_to_wait
         assert not ci_cache.jobs_to_skip
 
+        MOCK_AFFECTED_JOBS = [
+            CI.JobNames.STATELESS_TEST_S3_DEBUG,
+            CI.JobNames.STRESS_TEST_TSAN,
+        ]
+        MOCK_REQUIRED_BUILDS = []
+
         # pretend there are pending jobs that we need to wait
         for job, job_config in ci_cache.jobs_to_do.items():
-            ci_cache.jobs_to_wait[job] = job_config
-
-        # remove couple tests from to_wait and
-        # expect they are preserved in @jobs_to_to along with required package_asan
-        del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_ASAN]
-        del ci_cache.jobs_to_wait[CI.JobNames.INTEGRATION_TEST_TSAN]
-        del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_MSAN]
-
-        # pretend we have some batches failed for one of the job from the to_do list
-        failed_job = CI.JobNames.INTEGRATION_TEST_TSAN
-        failed_job_config = ci_cache.jobs_to_do[failed_job]
-        FAILED_BATCHES = [0, 3]
-        for batch in FAILED_BATCHES:
-            assert batch < failed_job_config.num_batches
-            record = CiCache.Record(
-                record_type=CiCache.RecordType.FAILED,
-                job_name=failed_job,
-                job_digest=ci_cache.job_digests[failed_job],
-                batch=batch,
-                num_batches=failed_job_config.num_batches,
-                release_branch=True,
-            )
-            for record_t_, records_ in ci_cache.records.items():
-                if record_t_.value == CiCache.RecordType.FAILED.value:
-                    records_[record.to_str_key()] = record
-
-        # pretend we have all batches failed for one of the job from the to_do list
-        failed_job = CI.JobNames.STATELESS_TEST_MSAN
-        failed_job_config = ci_cache.jobs_to_do[failed_job]
-        assert failed_job_config.num_batches > 1
-        for batch in range(failed_job_config.num_batches):
-            record = CiCache.Record(
-                record_type=CiCache.RecordType.FAILED,
-                job_name=failed_job,
-                job_digest=ci_cache.job_digests[failed_job],
-                batch=batch,
-                num_batches=failed_job_config.num_batches,
-                release_branch=True,
-            )
-            for record_t_, records_ in ci_cache.records.items():
-                if record_t_.value == CiCache.RecordType.FAILED.value:
-                    records_[record.to_str_key()] = record
+            if job in MOCK_AFFECTED_JOBS:
+                MOCK_REQUIRED_BUILDS += job_config.required_builds
+            elif job not in MOCK_AFFECTED_JOBS:
+                ci_cache.jobs_to_wait[job] = job_config
+
+        for job, job_config in ci_cache.jobs_to_do.items():
+            if job_config.reference_job_name:
+                # jobs with reference_job_name in config are not supposed to have records in the cache - continue
+                continue
+            if job in MOCK_AFFECTED_JOBS:
+                continue
+            for batch in range(job_config.num_batches):
+                # add any record into cache
+                record = CiCache.Record(
+                    record_type=random.choice(
+                        [
+                            CiCache.RecordType.FAILED,
+                            CiCache.RecordType.PENDING,
+                            CiCache.RecordType.SUCCESSFUL,
+                        ]
+                    ),
+                    job_name=job,
+                    job_digest=ci_cache.job_digests[job],
+                    batch=batch,
+                    num_batches=job_config.num_batches,
+                    release_branch=True,
+                )
+                for record_t_, records_ in ci_cache.records.items():
+                    if record_t_.value == CiCache.RecordType.FAILED.value:
+                        records_[record.to_str_key()] = record
 
         ci_cache.filter_out_not_affected_jobs()
-        expected_to_do = [
-            CI.JobNames.STATELESS_TEST_ASAN,
-            CI.BuildNames.PACKAGE_ASAN,
-            CI.JobNames.INTEGRATION_TEST_TSAN,
-            CI.BuildNames.PACKAGE_TSAN,
-            CI.JobNames.BUILD_CHECK,
-        ]
+        expected_to_do = (
+            [
+                CI.JobNames.BUILD_CHECK,
+            ]
+            + MOCK_AFFECTED_JOBS
+            + MOCK_REQUIRED_BUILDS
+        )
         self.assertCountEqual(
             list(ci_cache.jobs_to_wait),
             [
-                CI.BuildNames.PACKAGE_ASAN,
-                CI.BuildNames.PACKAGE_TSAN,
                 CI.JobNames.BUILD_CHECK,
-            ],
+            ]
+            + MOCK_REQUIRED_BUILDS,
         )
         self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)
-        self.assertTrue(ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches)
-        for batch in ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches:
-            self.assertTrue(batch not in FAILED_BATCHES)
|
@ -172,14 +172,10 @@ class TestCIOptions(unittest.TestCase):
|
|||||||
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
|
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
|
||||||
for job in _TEST_JOB_LIST
|
for job in _TEST_JOB_LIST
|
||||||
}
|
}
|
||||||
jobs_configs[
|
jobs_configs["fuzzers"].run_by_label = (
|
||||||
"fuzzers"
|
|
||||||
].run_by_label = (
|
|
||||||
"TEST_LABEL" # check "fuzzers" appears in the result due to the label
|
"TEST_LABEL" # check "fuzzers" appears in the result due to the label
|
||||||
)
|
)
|
||||||
jobs_configs[
|
jobs_configs["Integration tests (asan)"].release_only = (
|
||||||
"Integration tests (asan)"
|
|
||||||
].release_only = (
|
|
||||||
True # still must be included as it's set with include keywords
|
True # still must be included as it's set with include keywords
|
||||||
)
|
)
|
||||||
filtered_jobs = list(
|
filtered_jobs = list(
|
||||||
@ -311,9 +307,9 @@ class TestCIOptions(unittest.TestCase):
|
|||||||
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
|
job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER)
|
||||||
for job in _TEST_JOB_LIST
|
for job in _TEST_JOB_LIST
|
||||||
}
|
}
|
||||||
jobs_configs[
|
jobs_configs["fuzzers"].run_by_label = (
|
||||||
"fuzzers"
|
"TEST_LABEL" # check "fuzzers" does not appears in the result
|
||||||
].run_by_label = "TEST_LABEL" # check "fuzzers" does not appears in the result
|
)
|
||||||
jobs_configs["Integration tests (asan)"].release_only = True
|
jobs_configs["Integration tests (asan)"].release_only = True
|
||||||
filtered_jobs = list(
|
filtered_jobs = list(
|
||||||
ci_options.apply(
|
ci_options.apply(
|
||||||
|
@@ -72,6 +72,19 @@ class ClickHouseVersion:
             return self.patch_update()
         raise KeyError(f"wrong part {part} is used")
 
+    def bump(self) -> "ClickHouseVersion":
+        if self.minor < 12:
+            self._minor += 1
+            self._revision += 1
+            self._patch = 1
+            self._tweak = 1
+        else:
+            self._major += 1
+            self._revision += 1
+            self._patch = 1
+            self._tweak = 1
+        return self
+
     def major_update(self) -> "ClickHouseVersion":
         if self._git is not None:
             self._git.update()
@@ -148,6 +161,11 @@ class ClickHouseVersion:
         """our X.3 and X.8 are LTS"""
         return self.minor % 5 == 3
 
+    def get_stable_release_type(self) -> str:
+        if self.is_lts:
+            return VersionType.LTS
+        return VersionType.STABLE
+
     def as_dict(self) -> VERSIONS:
         return {
             "revision": self.revision,
@@ -168,6 +186,7 @@ class ClickHouseVersion:
             raise ValueError(f"version type {version_type} not in {VersionType.VALID}")
         self._description = version_type
         self._describe = f"v{self.string}-{version_type}"
+        return self
 
     def copy(self) -> "ClickHouseVersion":
         copy = ClickHouseVersion(
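bump() moves to the next release series - the minor part grows until .12, after which the major is bumped - and get_stable_release_type() picks "lts" for the X.3/X.8 series; a small illustration with assumed starting values:

    # illustrative only; concrete versions are examples
    v = get_version_from_repo()          # suppose it reads 24.7.1.1
    v.bump()                             # -> 24.8.1.1, revision incremented as well
    print(v.get_stable_release_type())   # "lts", because 24.8 is an LTS series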
@@ -711,9 +711,9 @@ def get_localzone():
 
 class SettingsRandomizer:
     settings = {
-        "max_insert_threads": lambda: 12
-        if random.random() < 0.03
-        else random.randint(1, 3),
+        "max_insert_threads": lambda: (
+            12 if random.random() < 0.03 else random.randint(1, 3)
+        ),
         "group_by_two_level_threshold": threshold_generator(0.2, 0.2, 1, 1000000),
         "group_by_two_level_threshold_bytes": threshold_generator(
             0.2, 0.2, 1, 50000000
@@ -1454,9 +1454,9 @@ class ClickHouseCluster:
     def setup_azurite_cmd(self, instance, env_variables, docker_compose_yml_dir):
         self.with_azurite = True
         env_variables["AZURITE_PORT"] = str(self.azurite_port)
-        env_variables[
-            "AZURITE_STORAGE_ACCOUNT_URL"
-        ] = f"http://azurite1:{env_variables['AZURITE_PORT']}/devstoreaccount1"
+        env_variables["AZURITE_STORAGE_ACCOUNT_URL"] = (
+            f"http://azurite1:{env_variables['AZURITE_PORT']}/devstoreaccount1"
+        )
         env_variables["AZURITE_CONNECTION_STRING"] = (
             f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;"
             f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;"
@@ -1653,9 +1653,9 @@ class ClickHouseCluster:
 
         # Code coverage files will be placed in database directory
         # (affect only WITH_COVERAGE=1 build)
-        env_variables[
-            "LLVM_PROFILE_FILE"
-        ] = "/var/lib/clickhouse/server_%h_%p_%m.profraw"
+        env_variables["LLVM_PROFILE_FILE"] = (
+            "/var/lib/clickhouse/server_%h_%p_%m.profraw"
+        )
 
         clickhouse_start_command = CLICKHOUSE_START_COMMAND
         if clickhouse_log_file:
@@ -1668,9 +1668,9 @@ class ClickHouseCluster:
             cluster=self,
             base_path=self.base_dir,
             name=name,
-            base_config_dir=base_config_dir
-            if base_config_dir
-            else self.base_config_dir,
+            base_config_dir=(
+                base_config_dir if base_config_dir else self.base_config_dir
+            ),
             custom_main_configs=main_configs or [],
             custom_user_configs=user_configs or [],
             custom_dictionaries=dictionaries or [],
@ -19,9 +19,9 @@ def cluster():
|
|||||||
cluster = ClickHouseCluster(__file__)
|
cluster = ClickHouseCluster(__file__)
|
||||||
cluster.add_instance(
|
cluster.add_instance(
|
||||||
"node",
|
"node",
|
||||||
main_configs=["configs/storage_arm.xml"]
|
main_configs=(
|
||||||
if is_arm()
|
["configs/storage_arm.xml"] if is_arm() else ["configs/storage_amd.xml"]
|
||||||
else ["configs/storage_amd.xml"],
|
),
|
||||||
with_minio=True,
|
with_minio=True,
|
||||||
with_hdfs=not is_arm(),
|
with_hdfs=not is_arm(),
|
||||||
)
|
)
|
||||||
|
@ -5,6 +5,7 @@ in this test we write into per-node tables and read from the distributed table.
|
|||||||
The default database in the distributed table definition is left empty on purpose to test
|
The default database in the distributed table definition is left empty on purpose to test
|
||||||
default database deduction.
|
default database deduction.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from helpers.client import QueryRuntimeException
|
from helpers.client import QueryRuntimeException
|
||||||
|
@ -1,4 +0,0 @@
|
|||||||
<clickhouse>
|
|
||||||
<disable_internal_dns_cache>1</disable_internal_dns_cache>
|
|
||||||
<max_concurrent_queries>250</max_concurrent_queries>
|
|
||||||
</clickhouse>
|
|
@ -1,11 +0,0 @@
|
|||||||
<clickhouse>
|
|
||||||
<users>
|
|
||||||
<test_dns>
|
|
||||||
<password/>
|
|
||||||
<networks>
|
|
||||||
<host_regexp>test1\.example\.com$</host_regexp>
|
|
||||||
</networks>
|
|
||||||
<profile>default</profile>
|
|
||||||
</test_dns>
|
|
||||||
</users>
|
|
||||||
</clickhouse>
|
|
@ -1,5 +0,0 @@
|
|||||||
<clickhouse>
|
|
||||||
<listen_host>::</listen_host>
|
|
||||||
<listen_host>0.0.0.0</listen_host>
|
|
||||||
<listen_try>1</listen_try>
|
|
||||||
</clickhouse>
|
|
@ -1,8 +0,0 @@
|
|||||||
. {
|
|
||||||
hosts /example.com {
|
|
||||||
reload "20ms"
|
|
||||||
fallthrough
|
|
||||||
}
|
|
||||||
forward . 127.0.0.11
|
|
||||||
log
|
|
||||||
}
|
|
@ -1 +0,0 @@
|
|||||||
filled in runtime, but needs to exist in order to be volume mapped in docker
|
|
@ -1,62 +0,0 @@
|
|||||||
import pycurl
|
|
||||||
import threading
|
|
||||||
from io import BytesIO
|
|
||||||
import sys
|
|
||||||
|
|
||||||
client_ip = sys.argv[1]
|
|
||||||
server_ip = sys.argv[2]
|
|
||||||
|
|
||||||
mutex = threading.Lock()
|
|
||||||
success_counter = 0
|
|
||||||
number_of_threads = 100
|
|
||||||
number_of_iterations = 50
|
|
||||||
|
|
||||||
|
|
||||||
def perform_request():
|
|
||||||
buffer = BytesIO()
|
|
||||||
crl = pycurl.Curl()
|
|
||||||
crl.setopt(pycurl.INTERFACE, client_ip)
|
|
||||||
crl.setopt(crl.WRITEDATA, buffer)
|
|
||||||
crl.setopt(crl.URL, f"http://{server_ip}:8123/?query=select+1&user=test_dns")
|
|
||||||
|
|
||||||
crl.perform()
|
|
||||||
|
|
||||||
# End curl session
|
|
||||||
crl.close()
|
|
||||||
|
|
||||||
str_response = buffer.getvalue().decode("iso-8859-1")
|
|
||||||
expected_response = "1\n"
|
|
||||||
|
|
||||||
mutex.acquire()
|
|
||||||
|
|
||||||
global success_counter
|
|
||||||
|
|
||||||
if str_response == expected_response:
|
|
||||||
success_counter += 1
|
|
||||||
|
|
||||||
mutex.release()
|
|
||||||
|
|
||||||
|
|
||||||
def perform_multiple_requests(n):
|
|
||||||
for request_number in range(n):
|
|
||||||
perform_request()
|
|
||||||
|
|
||||||
|
|
||||||
threads = []
|
|
||||||
|
|
||||||
|
|
||||||
for i in range(number_of_threads):
|
|
||||||
thread = threading.Thread(
|
|
||||||
target=perform_multiple_requests, args=(number_of_iterations,)
|
|
||||||
)
|
|
||||||
thread.start()
|
|
||||||
threads.append(thread)
|
|
||||||
|
|
||||||
for thread in threads:
|
|
||||||
thread.join()
|
|
||||||
|
|
||||||
|
|
||||||
if success_counter == number_of_threads * number_of_iterations:
|
|
||||||
exit(0)
|
|
||||||
|
|
||||||
exit(1)
|
|
@ -1,88 +0,0 @@
|
|||||||
import pytest
|
|
||||||
import socket
|
|
||||||
from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
|
|
||||||
from time import sleep
|
|
||||||
import os
|
|
||||||
|
|
||||||
DOCKER_COMPOSE_PATH = get_docker_compose_path()
|
|
||||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
|
||||||
|
|
||||||
cluster = ClickHouseCluster(__file__)
|
|
||||||
|
|
||||||
ch_server = cluster.add_instance(
|
|
||||||
"clickhouse-server",
|
|
||||||
with_coredns=True,
|
|
||||||
main_configs=["configs/config.xml", "configs/listen_host.xml"],
|
|
||||||
user_configs=["configs/host_regexp.xml"],
|
|
||||||
)
|
|
||||||
|
|
||||||
client = cluster.add_instance(
|
|
||||||
"clickhouse-client",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def started_cluster():
|
|
||||||
global cluster
|
|
||||||
try:
|
|
||||||
cluster.start()
|
|
||||||
yield cluster
|
|
||||||
|
|
||||||
finally:
|
|
||||||
cluster.shutdown()
|
|
||||||
|
|
||||||
|
|
||||||
def check_ptr_record(ip, hostname):
|
|
||||||
try:
|
|
||||||
host, aliaslist, ipaddrlist = socket.gethostbyaddr(ip)
|
|
||||||
if hostname.lower() == host.lower():
|
|
||||||
return True
|
|
||||||
except socket.herror:
|
|
||||||
pass
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def setup_dns_server(ip):
|
|
||||||
domains_string = "test3.example.com test2.example.com test1.example.com"
|
|
||||||
example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com'
|
|
||||||
run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True)
|
|
||||||
|
|
||||||
# DNS server takes time to reload the configuration.
|
|
||||||
for try_num in range(10):
|
|
||||||
if all(check_ptr_record(ip, host) for host in domains_string.split()):
|
|
||||||
break
|
|
||||||
sleep(1)
|
|
||||||
|
|
||||||
|
|
||||||
def setup_ch_server(dns_server_ip):
|
|
||||||
ch_server.exec_in_container(
|
|
||||||
(["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"])
|
|
||||||
)
|
|
||||||
ch_server.exec_in_container(
|
|
||||||
(["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"])
|
|
||||||
)
|
|
||||||
ch_server.query("SYSTEM DROP DNS CACHE")
|
|
||||||
|
|
||||||
|
|
||||||
def build_endpoint_v4(ip):
|
|
||||||
return f"'http://{ip}:8123/?query=SELECT+1&user=test_dns'"
|
|
||||||
|
|
||||||
|
|
||||||
def build_endpoint_v6(ip):
|
|
||||||
return build_endpoint_v4(f"[{ip}]")
|
|
||||||
|
|
||||||
|
|
||||||
def test_host_regexp_multiple_ptr_v4(started_cluster):
|
|
||||||
server_ip = cluster.get_instance_ip("clickhouse-server")
|
|
||||||
client_ip = cluster.get_instance_ip("clickhouse-client")
|
|
||||||
dns_server_ip = cluster.get_instance_ip(cluster.coredns_host)
|
|
||||||
|
|
||||||
setup_dns_server(client_ip)
|
|
||||||
setup_ch_server(dns_server_ip)
|
|
||||||
|
|
||||||
current_dir = os.path.dirname(__file__)
|
|
||||||
client.copy_file_to_container(
|
|
||||||
os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py"
|
|
||||||
)
|
|
||||||
|
|
||||||
client.exec_in_container(["python3", f"stress_test.py", client_ip, server_ip])
|
|
@ -2,6 +2,7 @@
|
|||||||
This test makes sure interserver cluster queries handle invalid DNS
|
This test makes sure interserver cluster queries handle invalid DNS
|
||||||
records for replicas.
|
records for replicas.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from helpers.client import QueryRuntimeException
|
from helpers.client import QueryRuntimeException
|
||||||
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
|
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
|
||||||
|
|
||||||
|
@ -197,7 +197,9 @@ def test_partition_by_string_column(started_cluster):
|
|||||||
started_cluster, bucket, "test_foo/bar.csv"
|
started_cluster, bucket, "test_foo/bar.csv"
|
||||||
)
|
)
|
||||||
assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
|
assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
|
||||||
assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv")
|
assert '78,"你好"\n' == get_s3_file_content(
|
||||||
|
started_cluster, bucket, "test_你好.csv"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def test_partition_by_const_column(started_cluster):
|
def test_partition_by_const_column(started_cluster):
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
"""Test HTTP responses given by the TCP Handler."""
|
"""Test HTTP responses given by the TCP Handler."""
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import pytest
|
import pytest
|
||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
"""Test Interserver responses on configured IP."""
|
"""Test Interserver responses on configured IP."""
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import pytest
|
import pytest
|
||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster
|
||||||
|
@ -50,7 +50,7 @@ TYPES = {
|
|||||||
"UInt32": {"bits": 32, "sign": False, "float": False},
|
"UInt32": {"bits": 32, "sign": False, "float": False},
|
||||||
"Int32": {"bits": 32, "sign": True, "float": False},
|
"Int32": {"bits": 32, "sign": True, "float": False},
|
||||||
"UInt64": {"bits": 64, "sign": False, "float": False},
|
"UInt64": {"bits": 64, "sign": False, "float": False},
|
||||||
"Int64": {"bits": 64, "sign": True, "float": False}
|
"Int64": {"bits": 64, "sign": True, "float": False},
|
||||||
# "Float32" : { "bits" : 32, "sign" : True, "float" : True },
|
# "Float32" : { "bits" : 32, "sign" : True, "float" : True },
|
||||||
# "Float64" : { "bits" : 64, "sign" : True, "float" : True }
|
# "Float64" : { "bits" : 64, "sign" : True, "float" : True }
|
||||||
}
|
}
|
||||||
|
@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
|||||||
# shellcheck source=../shell_config.sh
|
# shellcheck source=../shell_config.sh
|
||||||
. "$CURDIR"/../shell_config.sh
|
. "$CURDIR"/../shell_config.sh
|
||||||
|
|
||||||
DB_SUFFIX=$RANDOM
|
DB_SUFFIX=${RANDOM}${RANDOM}${RANDOM}${RANDOM}
|
||||||
${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl_${DB_SUFFIX}"
|
${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl_${DB_SUFFIX}"
|
||||||
|
|
||||||
function query()
|
function query()
|
||||||
|
@ -41,7 +41,7 @@ function thread3()
|
|||||||
|
|
||||||
function thread4()
|
function thread4()
|
||||||
{
|
{
|
||||||
while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table0 FINAL"; done
|
while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done
|
||||||
}
|
}
|
||||||
|
|
||||||
function thread5()
|
function thread5()
|
||||||
|
@ -1,2 +0,0 @@
|
|||||||
0
|
|
||||||
1
|
|
@ -1,43 +0,0 @@
|
|||||||
-- Tags: distributed, no-parallel
|
|
||||||
|
|
||||||
CREATE DATABASE IF NOT EXISTS shard_0;
|
|
||||||
CREATE DATABASE IF NOT EXISTS shard_1;
|
|
||||||
CREATE DATABASE IF NOT EXISTS main_01487;
|
|
||||||
CREATE DATABASE IF NOT EXISTS test_01487;
|
|
||||||
|
|
||||||
USE main_01487;
|
|
||||||
|
|
||||||
DROP TABLE IF EXISTS shard_0.l;
|
|
||||||
DROP TABLE IF EXISTS shard_1.l;
|
|
||||||
DROP TABLE IF EXISTS d;
|
|
||||||
DROP TABLE IF EXISTS t;
|
|
||||||
|
|
||||||
CREATE TABLE shard_0.l (value UInt8) ENGINE = MergeTree ORDER BY value;
|
|
||||||
CREATE TABLE shard_1.l (value UInt8) ENGINE = MergeTree ORDER BY value;
|
|
||||||
CREATE TABLE t (value UInt8) ENGINE = Memory;
|
|
||||||
|
|
||||||
INSERT INTO shard_0.l VALUES (0);
|
|
||||||
INSERT INTO shard_1.l VALUES (1);
|
|
||||||
INSERT INTO t VALUES (0), (1), (2);
|
|
||||||
|
|
||||||
CREATE TABLE d AS t ENGINE = Distributed(test_cluster_two_shards_different_databases, currentDatabase(), t);
|
|
||||||
|
|
||||||
USE test_01487;
|
|
||||||
DROP DATABASE test_01487;
|
|
||||||
|
|
||||||
-- After the default database is dropped QueryAnalysisPass cannot process the following SELECT query.
|
|
||||||
-- That query is invalid on the initiator node.
|
|
||||||
set allow_experimental_analyzer = 0;
|
|
||||||
|
|
||||||
SELECT * FROM main_01487.d WHERE value IN (SELECT l.value FROM l) ORDER BY value;
|
|
||||||
|
|
||||||
USE main_01487;
|
|
||||||
|
|
||||||
DROP TABLE IF EXISTS shard_0.l;
|
|
||||||
DROP TABLE IF EXISTS shard_1.l;
|
|
||||||
DROP TABLE IF EXISTS d;
|
|
||||||
DROP TABLE IF EXISTS t;
|
|
||||||
|
|
||||||
DROP DATABASE shard_0;
|
|
||||||
DROP DATABASE shard_1;
|
|
||||||
DROP DATABASE main_01487;
|
|
@ -443,6 +443,18 @@ SELECT '--';
|
|||||||
--
|
--
|
||||||
DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table);
|
DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table);
|
||||||
in(id, test_table_in_cte) UInt8
|
in(id, test_table_in_cte) UInt8
|
||||||
|
SELECT '--';
|
||||||
|
--
|
||||||
|
DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1) SELECT *
|
||||||
|
FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1);
|
||||||
|
test_table_in_cte_1.c1 UInt8
|
||||||
|
test_table_in_cte_2.c1 UInt8
|
||||||
|
SELECT '--';
|
||||||
|
--
|
||||||
|
DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1 UNION ALL SELECT 1 AS c1) SELECT *
|
||||||
|
FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1);
|
||||||
|
test_table_in_cte_1.c1 UInt8
|
||||||
|
test_table_in_cte_2.c1 UInt8
|
||||||
SELECT 'Joins';
|
SELECT 'Joins';
|
||||||
Joins
|
Joins
|
||||||
DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2);
|
DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2);
|
||||||
|
@ -408,6 +408,16 @@ SELECT '--';
|
|||||||
|
|
||||||
DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table);
|
DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table);
|
||||||
|
|
||||||
|
SELECT '--';
|
||||||
|
|
||||||
|
DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1) SELECT *
|
||||||
|
FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1);
|
||||||
|
|
||||||
|
SELECT '--';
|
||||||
|
|
||||||
|
DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1 UNION ALL SELECT 1 AS c1) SELECT *
|
||||||
|
FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1);
|
||||||
|
|
||||||
SELECT 'Joins';
|
SELECT 'Joins';
|
||||||
|
|
||||||
DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2);
|
DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2);
|
||||||
|
@ -85,12 +85,12 @@ c
|
|||||||
[4,5,6]
|
[4,5,6]
|
||||||
[[1,2],[3,4]]
|
[[1,2],[3,4]]
|
||||||
[[5,6],[7,8]]
|
[[5,6],[7,8]]
|
||||||
0
|
1
|
||||||
0
|
1
|
||||||
0
|
1
|
||||||
0
|
1
|
||||||
0
|
1
|
||||||
0
|
1
|
||||||
1
|
1
|
||||||
[2.199219,1.099609,3.300781]
|
[2.199219,1.099609,3.300781]
|
||||||
[4.25,3.34961,6.628906]
|
[4.25,3.34961,6.628906]
|
||||||
|
@ -52,14 +52,14 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/two_dim.npy', Npy, 'v
|
|||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/three_dim.npy', Npy, 'value Array(Array(Int8))')"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/three_dim.npy', Npy, 'value Array(Array(Int8))')"
|
||||||
|
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Array(Float32)')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Array(Float32)')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value UUID')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value UUID')" 2>&1 | grep -c "UNKNOWN_TYPE"
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Tuple(UInt8)')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Tuple(UInt8)')" 2>&1 | grep -c "UNKNOWN_TYPE"
|
||||||
|
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Int8')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Int8')" 2>&1 | grep -c "ILLEGAL_COLUMN"
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_str.npy', Npy, 'value Int8')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_str.npy', Npy, 'value Int8')" 2>&1 | grep -c "ILLEGAL_COLUMN"
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_unicode.npy', Npy, 'value Float32')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_unicode.npy', Npy, 'value Float32')" 2>&1 | grep -c "ILLEGAL_COLUMN"
|
||||||
|
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/complex.npy')" 2>&1 | grep -c "BAD_ARGUMENTS"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/complex.npy')" 2>&1 | grep -c "CANNOT_EXTRACT_TABLE_STRUCTURE"
|
||||||
|
|
||||||
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/float_16.npy')"
|
$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/float_16.npy')"
|
||||||
|
|
||||||
|
@ -71,4 +71,3 @@ select toTypeName(res), array([1, 2, 3], [[1, 2, 3]]) as res;
|
|||||||
select toTypeName(res), map('a', 1, 'b', 'str_1') as res;
|
select toTypeName(res), map('a', 1, 'b', 'str_1') as res;
|
||||||
select toTypeName(res), map('a', 1, 'b', map('c', 2, 'd', 'str_1')) as res;
|
select toTypeName(res), map('a', 1, 'b', map('c', 2, 'd', 'str_1')) as res;
|
||||||
select toTypeName(res), map('a', 1, 'b', [1, 2, 3], 'c', [[4, 5, 6]]) as res;
|
select toTypeName(res), map('a', 1, 'b', [1, 2, 3], 'c', [[4, 5, 6]]) as res;
|
||||||
|
|
||||||
|
tests/queries/0_stateless/02982_changeDate.reference (new file, 169 lines)
@@ -0,0 +1,169 @@
+Negative tests
+changeYear
+-- Date
+2001-01-01
+1970-01-01
+1970-01-01
+2149-06-06
+-- Date32
+2001-01-01
+1900-01-01
+1900-01-01
+2299-12-31
+-- DateTime
+2001-01-01 11:22:33
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+2106-02-07 07:28:15
+-- DateTime64
+2001-01-01 11:22:33.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+2299-12-31 23:59:59.9999
+changeMonth
+-- Date
+2000-01-01
+2000-02-01
+2000-12-01
+1970-01-01
+1970-01-01
+1970-01-01
+-- Date32
+2000-01-01
+2000-02-01
+2000-12-01
+1900-01-01
+1900-01-01
+1900-01-01
+-- DateTime
+2000-01-01 11:22:33
+2000-02-01 11:22:33
+2000-12-01 11:22:33
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- DateTime64
+2000-01-01 11:22:33.4444
+2000-02-01 11:22:33.4444
+2000-12-01 11:22:33.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+changeDay
+-- Date
+2000-01-01
+2000-01-02
+2000-01-31
+1970-01-01
+1970-01-01
+1970-01-01
+-- Date32
+2000-01-01
+2000-01-02
+2000-01-31
+1900-01-01
+1900-01-01
+1900-01-01
+-- DateTime
+2000-01-01 11:22:33
+2000-01-02 11:22:33
+2000-01-31 11:22:33
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- DateTime64
+2000-01-01 11:22:33.4444
+2000-01-02 11:22:33.4444
+2000-01-31 11:22:33.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+-- Special case: change to 29 Feb in a leap year
+2000-02-29
+2000-02-29
+2000-02-29 11:22:33
+2000-02-29 11:22:33.4444
+changeHour
+-- Date
+2000-01-01 00:00:00
+2000-01-01 02:00:00
+2000-01-01 23:00:00
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- Date32
+2000-01-01 00:00:00.000
+2000-01-01 02:00:00.000
+2000-01-01 23:00:00.000
+1900-01-01 00:00:00.000
+1900-01-01 00:00:00.000
+-- DateTime
+2000-01-01 00:22:33
+2000-01-01 02:22:33
+2000-01-01 23:22:33
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- DateTime64
+2000-01-01 00:22:33.4444
+2000-01-01 02:22:33.4444
+2000-01-01 23:22:33.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+-- With different timezone
+1970-01-01 07:00:00
+1970-01-01 07:00:00
+changeMinute
+-- Date
+2000-01-01 00:00:00
+2000-01-01 00:02:00
+2000-01-01 00:59:00
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- Date32
+2000-01-01 00:00:00.000
+2000-01-01 00:02:00.000
+2000-01-01 00:59:00.000
+1900-01-01 00:00:00.000
+1900-01-01 00:00:00.000
+-- DateTime
+2000-01-01 11:00:33
+2000-01-01 11:02:33
+2000-01-01 11:59:33
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- DateTime64
+2000-01-01 11:00:33.4444
+2000-01-01 11:02:33.4444
+2000-01-01 11:59:33.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+-- With different timezone
+1970-01-01 07:00:00
+1970-01-01 07:00:00
+changeSecond
+-- Date
+2000-01-01 00:00:00
+2000-01-01 00:00:02
+2000-01-01 00:00:59
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- Date32
+2000-01-01 00:00:00.000
+2000-01-01 00:00:02.000
+2000-01-01 00:00:59.000
+1900-01-01 00:00:00.000
+1900-01-01 00:00:00.000
+-- DateTime
+2000-01-01 11:22:00
+2000-01-01 11:22:02
+2000-01-01 11:22:59
+1970-01-01 01:00:00
+1970-01-01 01:00:00
+-- DateTime64
+2000-01-01 11:22:00.4444
+2000-01-01 11:22:02.4444
+2000-01-01 11:22:59.4444
+1900-01-01 00:00:00.0000
+1900-01-01 00:00:00.0000
+-- With different timezone
+1970-01-01 07:00:00
+1970-01-01 07:00:00
tests/queries/0_stateless/02982_changeDate.sql (new file, 185 lines)
@@ -0,0 +1,185 @@
+SELECT 'Negative tests';
+-- as changeYear, changeMonth, changeDay, changeMinute, changeSecond share the same implementation, just testing one of them
+SELECT changeYear(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT changeYear(toDate('2000-01-01')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT changeYear(toDate('2000-01-01'), 2000, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT changeYear(1999, 2000); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT changeYear(toDate('2000-01-01'), 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT changeYear(toDate('2000-01-01'), 1.5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+
+-- Disable timezone randomization
+SET session_timezone='CET';
+
+SELECT 'changeYear';
+SELECT '-- Date';
+SELECT changeYear(toDate('2000-01-01'), 2001);
+SELECT changeYear(toDate('2000-01-01'), 1800); -- out-of-bounds
+SELECT changeYear(toDate('2000-01-01'), -5000); -- out-of-bounds
+SELECT changeYear(toDate('2000-01-01'), 2500); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeYear(toDate32('2000-01-01'), 2001);
+SELECT changeYear(toDate32('2000-01-01'), 1800); -- out-of-bounds
+SELECT changeYear(toDate32('2000-01-01'), -5000); -- out-of-bounds
+SELECT changeYear(toDate32('2000-01-01'), 2500); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2001);
+SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 1800); -- out-of-bounds
+SELECT changeYear(toDateTime('2000-01-01 11:22:33'), -5000); -- out-of-bounds
+SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2500); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2001);
+SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 1800); -- out-of-bounds
+SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), -5000); -- out-of-bounds
+SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2500); -- out-of-bounds
+
+SELECT 'changeMonth';
+SELECT '-- Date';
+SELECT changeMonth(toDate('2000-01-01'), 1);
+SELECT changeMonth(toDate('2000-01-01'), 2);
+SELECT changeMonth(toDate('2000-01-01'), 12);
+SELECT changeMonth(toDate('2000-01-01'), 0); -- out-of-bounds
+SELECT changeMonth(toDate('2000-01-01'), -1); -- out-of-bounds
+SELECT changeMonth(toDate('2000-01-01'), 13); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeMonth(toDate32('2000-01-01'), 1);
+SELECT changeMonth(toDate32('2000-01-01'), 2);
+SELECT changeMonth(toDate32('2000-01-01'), 12);
+SELECT changeMonth(toDate32('2000-01-01'), 0); -- out-of-bounds
+SELECT changeMonth(toDate32('2000-01-01'), -1); -- out-of-bounds
+SELECT changeMonth(toDate32('2000-01-01'), 13); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 1);
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 2);
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 12);
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds
+SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 13); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 1);
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 2);
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 12);
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds
+SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 13); -- out-of-bounds
+
+SELECT 'changeDay';
+SELECT '-- Date';
+SELECT changeDay(toDate('2000-01-01'), 1);
+SELECT changeDay(toDate('2000-01-01'), 2);
+SELECT changeDay(toDate('2000-01-01'), 31);
+SELECT changeDay(toDate('2000-01-01'), 0); -- out-of-bounds
+SELECT changeDay(toDate('2000-01-01'), -1); -- out-of-bounds
+SELECT changeDay(toDate('2000-01-01'), 32); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeDay(toDate32('2000-01-01'), 1);
+SELECT changeDay(toDate32('2000-01-01'), 2);
+SELECT changeDay(toDate32('2000-01-01'), 31);
+SELECT changeDay(toDate32('2000-01-01'), 0); -- out-of-bounds
+SELECT changeDay(toDate32('2000-01-01'), -1); -- out-of-bounds
+SELECT changeDay(toDate32('2000-01-01'), 32); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 1);
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 2);
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 31);
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds
+SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 32); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 1);
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 2);
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 31);
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds
+SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 32); -- out-of-bounds
+SELECT '-- Special case: change to 29 Feb in a leap year';
+SELECT changeDay(toDate('2000-02-28'), 29);
+SELECT changeDay(toDate32('2000-02-01'), 29);
+SELECT changeDay(toDateTime('2000-02-01 11:22:33'), 29);
+SELECT changeDay(toDateTime64('2000-02-01 11:22:33.4444', 4), 29);
+
+SELECT 'changeHour';
+SELECT '-- Date';
+SELECT changeHour(toDate('2000-01-01'), 0);
+SELECT changeHour(toDate('2000-01-01'), 2);
+SELECT changeHour(toDate('2000-01-01'), 23);
+SELECT changeHour(toDate('2000-01-01'), -1); -- out-of-bounds
+SELECT changeHour(toDate('2000-01-01'), 24); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeHour(toDate32('2000-01-01'), 0);
+SELECT changeHour(toDate32('2000-01-01'), 2);
+SELECT changeHour(toDate32('2000-01-01'), 23);
+SELECT changeHour(toDate32('2000-01-01'), -1); -- out-of-bounds
+SELECT changeHour(toDate32('2000-01-01'), 24); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 0);
+SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 2);
+SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 23);
+SELECT changeHour(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds
+SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 24); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 0);
+SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 2);
+SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 23);
+SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds
+SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 24); -- out-of-bounds
+SELECT '-- With different timezone';
+SELECT changeHour(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk';
+SELECT changeHour(toDate('2000-01-01'), 24) SETTINGS session_timezone = 'Asia/Novosibirsk';
+
+SELECT 'changeMinute';
+SELECT '-- Date';
+SELECT changeMinute(toDate('2000-01-01'), 0);
+SELECT changeMinute(toDate('2000-01-01'), 2);
+SELECT changeMinute(toDate('2000-01-01'), 59);
+SELECT changeMinute(toDate('2000-01-01'), -1); -- out-of-bounds
+SELECT changeMinute(toDate('2000-01-01'), 60); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeMinute(toDate32('2000-01-01'), 0);
+SELECT changeMinute(toDate32('2000-01-01'), 2);
+SELECT changeMinute(toDate32('2000-01-01'), 59);
+SELECT changeMinute(toDate32('2000-01-01'), -1); -- out-of-bounds
+SELECT changeMinute(toDate32('2000-01-01'), 60); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 0);
+SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 2);
+SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 59);
+SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds
+SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 0);
+SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 2);
+SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 59);
+SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds
+SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds
+SELECT '-- With different timezone';
+SELECT changeMinute(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk';
+SELECT changeMinute(toDate('2000-01-01'), 60) SETTINGS session_timezone = 'Asia/Novosibirsk';
+
+SELECT 'changeSecond';
+SELECT '-- Date';
+SELECT changeSecond(toDate('2000-01-01'), 0);
+SELECT changeSecond(toDate('2000-01-01'), 2);
+SELECT changeSecond(toDate('2000-01-01'), 59);
+SELECT changeSecond(toDate('2000-01-01'), -1); -- out-of-bounds
+SELECT changeSecond(toDate('2000-01-01'), 60); -- out-of-bounds
+SELECT '-- Date32';
+SELECT changeSecond(toDate32('2000-01-01'), 0);
+SELECT changeSecond(toDate32('2000-01-01'), 2);
+SELECT changeSecond(toDate32('2000-01-01'), 59);
+SELECT changeSecond(toDate32('2000-01-01'), -1); -- out-of-bounds
+SELECT changeSecond(toDate32('2000-01-01'), 60); -- out-of-bounds
+SELECT '-- DateTime';
+SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 0);
+SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 2);
+SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 59);
+SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds
+SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds
+SELECT '-- DateTime64';
+SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 0);
+SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 2);
+SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 59);
+SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds
+SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds
+SELECT '-- With different timezone';
+SELECT changeSecond(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk';
+SELECT changeSecond(toDate('2000-01-01'), 60) SETTINGS session_timezone = 'Asia/Novosibirsk';
@@ -1 +1 @@
-10000
+30000
@@ -6,15 +6,17 @@ drop table if exists dist_out;
 
 create table ephemeral (key Int, value Int) engine=Null();
 create table dist_in as ephemeral engine=Distributed(test_shard_localhost, currentDatabase(), ephemeral, key) settings background_insert_batch=1;
-create table data (key Int, uniq_values Int) engine=Memory();
-create materialized view mv to data as select key, uniqExact(value) uniq_values from ephemeral group by key;
+create table data (key Int, uniq_values Int) engine=TinyLog();
+create materialized view mv to data as select key, uniqExact(value::String) uniq_values from ephemeral group by key;
 system stop distributed sends dist_in;
 create table dist_out as data engine=Distributed(test_shard_localhost, currentDatabase(), data);
 
 set prefer_localhost_replica=0;
 SET optimize_trivial_insert_select = 1;
 
-insert into dist_in select number/100, number from system.numbers limit 1e6 settings max_memory_usage='20Mi';
+-- due to pushing to MV with aggregation the query needs ~300MiB
+-- but it will be done in background via "system flush distributed"
+insert into dist_in select number/100, number from system.numbers limit 3e6 settings max_block_size=3e6, max_memory_usage='100Mi';
 system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED }
 system flush distributed dist_in settings max_memory_usage=0;
 select count() from dist_out;
@@ -1 +0,0 @@
-1 1 1
@@ -1,17 +0,0 @@
--- https://github.com/ClickHouse/ClickHouse/issues/22627
-SET allow_experimental_analyzer=1;
-WITH
-x AS
-(
-SELECT 1 AS a
-),
-xx AS
-(
-SELECT *
-FROM x
-, x AS x1
-, x AS x2
-)
-SELECT *
-FROM xx
-WHERE a = 1;
@@ -0,0 +1 @@
+Hello, world!
tests/queries/0_stateless/03201_local_named_collections.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --multiquery "
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (s String) ORDER BY ();
+INSERT INTO test VALUES ('Hello, world!');
+"
+
+${CLICKHOUSE_LOCAL} --multiquery "
+CREATE NAMED COLLECTION mydb AS host = '${CLICKHOUSE_HOST}', port = ${CLICKHOUSE_PORT_TCP}, user = 'default', password = '', db = '${CLICKHOUSE_DATABASE}';
+SELECT * FROM remote(mydb, table = 'test');
+" 2>&1 | grep --text -F -v "ASan doesn't fully support makecontext/swapcontext functions"
+
+${CLICKHOUSE_CLIENT} --multiquery "
+DROP TABLE test;
+"
@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS t_c3oollc8r;
+CREATE TABLE t_c3oollc8r (c_k37 Int32, c_y String, c_bou Int32, c_g1 Int32, c_lfntfzg Int32, c_kntw50q Int32) ENGINE = MergeTree ORDER BY ();
+
+SELECT (
+    SELECT c_k37
+    FROM t_c3oollc8r
+) > c_lfntfzg
+FROM remote('127.0.0.{1,2}', currentDatabase(), t_c3oollc8r);
+
+DROP TABLE t_c3oollc8r;
@@ -467,7 +467,7 @@ LOCALTIME
 LOCALTIMESTAMP
 LONGLONG
 LOONGARCH
-Lemir
+Lemire
 Levenshtein
 Liao
 LibFuzzer
@@ -1319,6 +1319,12 @@ cfg
 cgroup
 cgroups
 chadmin
+changeDay
+changeHour
+changeMinute
+changeMonth
+changeSecond
+changeYear
 changelog
 changelogs
 charset