Merge branch 'master' into miscellaneous

Alexey Milovidov 2024-07-22 06:42:50 +02:00
commit 6f9be58f09
33 changed files with 1452 additions and 569 deletions

.github/actions/release/action.yml

@ -0,0 +1,173 @@
name: Release
description: Makes patch releases and creates a new release branch
inputs:
ref:
description: 'Git reference (branch or commit sha) from which to create the release'
required: true
type: string
type:
description: 'The type of release: "new" for a new release or "patch" for a patch release'
required: true
type: choice
options:
- patch
- new
dry-run:
description: 'Dry run'
required: false
default: true
type: boolean
token:
required: true
type: string
runs:
using: "composite"
steps:
- name: Prepare Release Info
shell: bash
run: |
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
${{ inputs.dry-run && '--dry-run' || '' }}
echo "::group::Release Info"
python3 -m json.tool /tmp/release_info.json
echo "::endgroup::"
release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
echo "Release Tag: $release_tag"
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push Git Tag for the Release
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "update ChangeLog"
[ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
echo "List versions"
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
echo "Update docker version"
./utils/list-versions/update-docker-version.sh
echo "Generate ChangeLog"
export CI=1
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume=".:/ClickHouse" clickhouse/style-test \
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token=${{ inputs.token }} --jobs=5 \
--output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ inputs.token }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
branch: auto/${{ env.RELEASE_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Reset changes if Dry-run
if: ${{ inputs.dry-run }}
shell: bash
run: |
git reset --hard HEAD
- name: Checkout back to GITHUB_REF
shell: bash
run: |
git checkout "$GITHUB_REF_NAME"
# set current progress to OK
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Create GH Release
shell: bash
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "docker server release"
cd "./tests/ci"
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "docker keeper release"
cd "./tests/ci"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Set Release progress completed
shell: bash
run: |
# If we are here, set the completed status so that the next step posts the proper Slack OK or FAIL message
python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Post Slack Message
if: ${{ !cancelled() }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
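For orientation: the steps above rely on `/tmp/release_info.json`, written by `create_release.py --prepare-release-info`, and consume only its `release_tag` and `commit_sha` fields here. A minimal sketch of that contract (the field values are placeholders, not a real release):

```python
# Hypothetical illustration of the /tmp/release_info.json contract used by the
# "Prepare Release Info" step; only release_tag and commit_sha are consumed above.
import json

release_info = {
    "release_tag": "v24.7.2.13-stable",   # placeholder tag
    "commit_sha": "0123456789abcdef",     # placeholder sha
}

with open("/tmp/release_info.json", "w", encoding="utf-8") as f:
    json.dump(release_info, f, indent=2)

# Equivalent of the jq extraction done in the step:
with open("/tmp/release_info.json", encoding="utf-8") as f:
    info = json.load(f)
print(info["release_tag"], info["commit_sha"])
```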


@ -1,44 +1,110 @@
name: AutoRelease
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
DRY_RUN: true
concurrency:
group: auto-release
group: release
on: # yamllint disable-line rule:truthy
# schedule:
# - cron: '0 10-16 * * 1-5'
# Workflow uses a test bucket for packages and dry run mode (no real releases)
schedule:
- cron: '0 9 * * *'
- cron: '0 15 * * *'
workflow_dispatch:
inputs:
dry-run:
description: 'Dry run'
required: false
default: true
type: boolean
jobs:
CherryPick:
runs-on: [self-hosted, style-checker-aarch64]
AutoRelease:
runs-on: [self-hosted, release-maker]
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Set envs
# https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/cherry_pick
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
REPO_OWNER=ClickHouse
REPO_NAME=ClickHouse
REPO_TEAM=core
EOF
- name: Set DRY_RUN for schedule
if: ${{ github.event_name == 'schedule' }}
run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
- name: Set DRY_RUN for dispatch
if: ${{ github.event_name == 'workflow_dispatch' }}
run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Auto-release
- name: Auto Release Prepare
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --release-after-days=3
- name: Cleanup
if: always()
python3 auto_release.py --prepare
echo "::group::Auto Release Info"
python3 -m json.tool /tmp/autorelease_info.json
echo "::endgroup::"
{
echo 'AUTO_RELEASE_PARAMS<<EOF'
cat /tmp/autorelease_info.json
echo 'EOF'
} >> "$GITHUB_ENV"
- name: Post Release Branch statuses
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --post-status
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Post Slack Message
if: ${{ !cancelled() }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
- name: Clean up
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
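The `AUTO_RELEASE_PARAMS` expressions above index into the `releases` array of `/tmp/autorelease_info.json`; its shape mirrors the `ReleaseParams` dataclass from `tests/ci/auto_release.py` further down in this commit. A minimal sketch of what the workflow expects (placeholder values):

```python
# Hypothetical /tmp/autorelease_info.json consumed via fromJson(env.AUTO_RELEASE_PARAMS);
# field names mirror the ReleaseParams dataclass in tests/ci/auto_release.py.
import json

autorelease_info = {
    "releases": [
        {
            "ready": True,                  # gate for the per-branch release step
            "ci_status": "success",
            "num_patches": 12,              # placeholder
            "release_branch": "24.6",       # placeholder branch name
            "commit_sha": "0123456789abcdef",
            "commits_to_branch_head": 0,
            "latest": True,
        }
    ]
}

with open("/tmp/autorelease_info.json", "w", encoding="utf-8") as f:
    json.dump(autorelease_info, f, indent=2)
```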


@ -2,7 +2,6 @@ name: CreateRelease
concurrency:
group: release
'on':
workflow_dispatch:
inputs:
@ -31,136 +30,15 @@ jobs:
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Set envs
# https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
run: |
cat >> "$GITHUB_ENV" << 'EOF'
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Prepare Release Info
run: |
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
--outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
echo "::group::Release Info"
python3 -m json.tool "$RELEASE_INFO_FILE"
echo "::endgroup::"
release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
echo "Release Tag: $release_tag"
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push Git Tag for the Release
run: |
python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' }}
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Checkout master
run: |
git checkout master
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' }}
run: |
[ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
echo "List versions"
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
echo "Update docker version"
./utils/list-versions/update-docker-version.sh
echo "Generate ChangeLog"
export CI=1
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume=".:/ClickHouse" clickhouse/style-test \
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token="$GH_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
- name: Call Release Action
uses: ./.github/actions/release
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
branch: auto/${{ env.RELEASE_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Reset changes if Dry-run
if: ${{ inputs.dry-run }}
run: |
git reset --hard HEAD
- name: Checkout back to GITHUB_REF
run: |
git checkout "$GITHUB_REF_NAME"
- name: Create GH Release
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --create-gh-release \
--infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
run: |
cd "./tests/ci"
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
run: |
cd "./tests/ci"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
- name: Post Slack Message
if: always()
run: |
echo Slack Message
ref: ${{ inputs.ref }}
type: ${{ inputs.type }}
dry-run: ${{ inputs.dry-run }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}


@ -6,38 +6,38 @@ sidebar_label: Playground
# ClickHouse Playground {#clickhouse-playground}
[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
Several example datasets are available in Playground.
[ClickHouse Playground](https://play.clickhouse.com/play?user=play) позволяет пользователям экспериментировать с ClickHouse, выполняя запросы мгновенно, без необходимости настройки сервера или кластера.
В Playground доступны несколько примеров наборов данных.
You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).
Вы можете выполнять запросы к Playground, используя любой HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или настроить соединение, используя драйверы [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здесь](../interfaces/index.md).
## Credentials {#credentials}
## Учетные данные {#credentials}
| Parameter | Value |
| Параметр | Значение |
|:--------------------|:-----------------------------------|
| HTTPS endpoint | `https://play.clickhouse.com:443/` |
| Native TCP endpoint | `play.clickhouse.com:9440` |
| User | `explorer` or `play` |
| Password | (empty) |
| HTTPS-адрес | `https://play.clickhouse.com:443/` |
| TCP-адрес | `play.clickhouse.com:9440` |
| Пользователь | `explorer` или `play` |
| Пароль | (пусто) |
## Limitations {#limitations}
## Ограничения {#limitations}
The queries are executed as a read-only user. It implies some limitations:
Запросы выполняются от имени пользователя с правами только на чтение. Это предполагает некоторые ограничения:
- DDL queries are not allowed
- INSERT queries are not allowed
- DDL-запросы не разрешены
- INSERT-запросы не разрешены
The service also has quotas on its usage.
Сервис также имеет квоты на использование.
## Examples {#examples}
## Примеры {#examples}
HTTPS endpoint example with `curl`:
Пример использования HTTPS-адреса с `curl`:
``` bash
```bash
curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
```
TCP endpoint example with [CLI](../interfaces/cli.md):
Пример использования TCP-адреса с [CLI](../interfaces/cli.md):
``` bash
clickhouse client --secure --host play.clickhouse.com --user explorer


@ -4124,7 +4124,9 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
if (!column_to_interpolate)
throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"INTERPOLATE can work only for identifiers, but {} is found",
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();


@ -23,8 +23,20 @@ namespace DB
LazyPipeFDs TraceSender::pipe;
static thread_local bool inside_send = false;
void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras)
{
/** The method shouldn't be called recursively or throw exceptions.
* There are several reasons:
* - avoid infinite recursion when some of the subsequent functions invoke tracing;
* - avoid inconsistent writes if the method was interrupted by a signal handler in the middle of writing,
* and then another trace is sent (e.g., from the query profiler).
*/
if (unlikely(inside_send))
return;
inside_send = true;
DENY_ALLOCATIONS_IN_SCOPE;
constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag
+ sizeof(UInt8) /// String size
+ QUERY_ID_MAX_LEN /// Maximum query_id length
@ -80,6 +92,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
writePODBinary(extras.increment, out);
out.next();
inside_send = false;
}
}
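The guard added above is a plain thread-local re-entrancy flag. A minimal sketch of the same idea in Python (not the actual TraceSender code), showing why nested or signal-triggered calls become no-ops:

```python
# Minimal sketch of the re-entrancy guard pattern used in TraceSender::send;
# a thread-local flag makes nested calls return immediately instead of recursing.
import threading

_state = threading.local()

def send_trace(payload):
    if getattr(_state, "inside_send", False):
        return  # already sending on this thread: bail out to avoid recursion
    _state.inside_send = True
    try:
        # ... write the trace; this may itself trigger tracing,
        # but any nested call now returns immediately ...
        pass
    finally:
        _state.inside_send = False
```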


@ -47,54 +47,85 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args)
return true;
}
/// Replaces single low cardinality column in a function call by its dictionary
/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType
/// as it's only possible if there is one low cardinality column and, optionally, const columns
ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count)
{
size_t num_rows = input_rows_count;
/// We return the LC indexes so the LC can be reconstructed with the function result
ColumnPtr indexes;
/// Find first LowCardinality column and replace it to nested dictionary.
for (auto & column : args)
size_t number_low_cardinality_columns = 0;
size_t last_low_cardinality = 0;
size_t number_const_columns = 0;
size_t number_full_columns = 0;
for (size_t i = 0; i < args.size(); i++)
{
if (const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(column.column.get()))
auto const & arg = args[i];
if (checkAndGetColumn<ColumnLowCardinality>(arg.column.get()))
{
/// Single LowCardinality column is supported now.
if (indexes)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");
const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());
if (!low_cardinality_type)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Incompatible type for LowCardinality column: {}",
column.type->getName());
if (can_be_executed_on_default_arguments)
{
/// Normal case, when function can be executed on values' default.
column.column = low_cardinality_column->getDictionary().getNestedColumn();
indexes = low_cardinality_column->getIndexesPtr();
}
else
{
/// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
/// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
column.column = dict_encoded.dictionary;
indexes = dict_encoded.indexes;
}
num_rows = column.column->size();
column.type = low_cardinality_type->getDictionaryType();
number_low_cardinality_columns++;
last_low_cardinality = i;
}
else if (checkAndGetColumn<ColumnConst>(arg.column.get()))
number_const_columns++;
else
number_full_columns++;
}
/// Change size of constants.
if (!number_low_cardinality_columns && !number_const_columns)
return nullptr;
if (number_full_columns > 0 || number_low_cardinality_columns > 1)
{
/// This should not be possible but currently there are multiple tests in CI failing because of it
/// TODO: Fix those cases, then enable this exception
#if 0
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}",
number_low_cardinality_columns, number_full_columns, number_const_columns);
#else
return nullptr;
#endif
}
else if (number_low_cardinality_columns == 1)
{
auto & lc_arg = args[last_low_cardinality];
const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(lc_arg.type.get());
if (!low_cardinality_type)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName());
const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(lc_arg.column.get());
chassert(low_cardinality_column);
if (can_be_executed_on_default_arguments)
{
/// Normal case, when function can be executed on values' default.
lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn();
indexes = low_cardinality_column->getIndexesPtr();
}
else
{
/// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
/// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
lc_arg.column = dict_encoded.dictionary;
indexes = dict_encoded.indexes;
}
/// The new column will have a different number of rows, normally less but occasionally it might be more (NULL)
input_rows_count = lc_arg.column->size();
lc_arg.type = low_cardinality_type->getDictionaryType();
}
/// Change size of constants
for (auto & column : args)
{
if (const auto * column_const = checkAndGetColumn<ColumnConst>(column.column.get()))
{
column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows);
column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count);
column.type = recursiveRemoveLowCardinality(column.type);
}
}
@ -270,6 +301,8 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType
bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments();
const auto & dictionary_type = res_low_cardinality_type->getDictionaryType();
/// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType
/// So there is only one low cardinality column (and optionally some const columns) and no full column
ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count);


@ -7,7 +7,6 @@
#include <Common/FieldVisitorToString.h>
#include <Common/KnownObjectNames.h>
#include <Common/SipHash.h>
#include <Common/typeid_cast.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
@ -19,9 +18,6 @@
#include <Parsers/queryToString.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/FunctionSecretArgumentsFinderAST.h>
#include <Core/QualifiedTableName.h>
#include <boost/algorithm/string.hpp>
using namespace std::literals;
@ -632,6 +628,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
settings.ostr << ", ";
if (arguments->children[i]->as<ASTSetQuery>())
settings.ostr << "SETTINGS ";
nested_dont_need_parens.list_element_index = i;
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
}
settings.ostr << (settings.hilite ? hilite_operator : "") << ']' << (settings.hilite ? hilite_none : "");
@ -642,12 +639,14 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
{
settings.ostr << (settings.hilite ? hilite_operator : "") << ((frame.need_parens && !alias.empty()) ? "tuple" : "") << '('
<< (settings.hilite ? hilite_none : "");
for (size_t i = 0; i < arguments->children.size(); ++i)
{
if (i != 0)
settings.ostr << ", ";
if (arguments->children[i]->as<ASTSetQuery>())
settings.ostr << "SETTINGS ";
nested_dont_need_parens.list_element_index = i;
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
}
settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");
@ -663,6 +662,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
settings.ostr << ", ";
if (arguments->children[i]->as<ASTSetQuery>())
settings.ostr << "SETTINGS ";
nested_dont_need_parens.list_element_index = i;
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
}
settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");


@ -745,7 +745,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
{
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
PlannerActionsVisitor planner_actions_visitor(planner_context);
PlannerActionsVisitor planner_actions_visitor(
planner_context,
/* use_column_identifier_as_action_node_name_, (default value)*/ true,
/// Prefer the INPUT to CONSTANT nodes (actions must be non constant)
/* always_use_const_column_for_constant_nodes */ false);
auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag,
interpolate_node_typed.getExpression());
if (expression_to_interpolate_expression_nodes.size() != 1)


@ -487,16 +487,33 @@ public:
return node;
}
const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column)
[[nodiscard]] String addConstantIfNecessary(
const std::string & node_name, const ColumnWithTypeAndName & column, bool always_use_const_column_for_constant_nodes)
{
chassert(column.column != nullptr);
auto it = node_name_to_node.find(node_name);
if (it != node_name_to_node.end() && (!always_use_const_column_for_constant_nodes || it->second->column))
return {node_name};
if (it != node_name_to_node.end())
return it->second;
{
/// There is a node with this name, but it doesn't have a column
/// This likely happens because we executed the query up to WithMergeableState with a const node in the
/// WHERE clause and, as the result headers are materialized, the column was removed
/// Let's add a new column under a different name and keep the existing node
String dupped_name{node_name + "_dupped"};
if (node_name_to_node.find(dupped_name) != node_name_to_node.end())
return dupped_name;
const auto * node = &actions_dag.addColumn(column);
node_name_to_node[dupped_name] = node;
return dupped_name;
}
const auto * node = &actions_dag.addColumn(column);
node_name_to_node[node->result_name] = node;
return node;
return {node_name};
}
template <typename FunctionOrOverloadResolver>
@ -525,7 +542,7 @@ public:
}
private:
std::unordered_map<std::string_view, const ActionsDAG::Node *> node_name_to_node;
std::unordered_map<String, const ActionsDAG::Node *> node_name_to_node;
ActionsDAG & actions_dag;
QueryTreeNodePtr scope_node;
};
@ -533,9 +550,11 @@ private:
class PlannerActionsVisitorImpl
{
public:
PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
PlannerActionsVisitorImpl(
ActionsDAG & actions_dag,
const PlannerContextPtr & planner_context_,
bool use_column_identifier_as_action_node_name_);
bool use_column_identifier_as_action_node_name_,
bool always_use_const_column_for_constant_nodes_);
ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node);
@ -595,14 +614,18 @@ private:
const PlannerContextPtr planner_context;
ActionNodeNameHelper action_node_name_helper;
bool use_column_identifier_as_action_node_name;
bool always_use_const_column_for_constant_nodes;
};
PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(
ActionsDAG & actions_dag,
const PlannerContextPtr & planner_context_,
bool use_column_identifier_as_action_node_name_)
bool use_column_identifier_as_action_node_name_,
bool always_use_const_column_for_constant_nodes_)
: planner_context(planner_context_)
, action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_)
, use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
, always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
{
actions_stack.emplace_back(actions_dag, nullptr);
}
@ -725,17 +748,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
column.type = constant_type;
column.column = column.type->createColumnConst(1, constant_literal);
actions_stack[0].addConstantIfNecessary(constant_node_name, column);
String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, always_use_const_column_for_constant_nodes);
size_t actions_stack_size = actions_stack.size();
for (size_t i = 1; i < actions_stack_size; ++i)
{
auto & actions_stack_node = actions_stack[i];
actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column);
actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
}
return {constant_node_name, Levels(0)};
return {final_name, Levels(0)};
}
PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node)
@ -864,16 +886,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma
else
column.column = std::move(column_set);
actions_stack[0].addConstantIfNecessary(column.name, column);
String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, always_use_const_column_for_constant_nodes);
size_t actions_stack_size = actions_stack.size();
for (size_t i = 1; i < actions_stack_size; ++i)
{
auto & actions_stack_node = actions_stack[i];
actions_stack_node.addInputConstantColumnIfNecessary(column.name, column);
actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
}
return {column.name, Levels(0)};
return {final_name, Levels(0)};
}
PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node)
@ -1010,14 +1032,19 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
}
PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_)
PlannerActionsVisitor::PlannerActionsVisitor(
const PlannerContextPtr & planner_context_,
bool use_column_identifier_as_action_node_name_,
bool always_use_const_column_for_constant_nodes_)
: planner_context(planner_context_)
, use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
, always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
{}
ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node)
{
PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context, use_column_identifier_as_action_node_name);
PlannerActionsVisitorImpl actions_visitor_impl(
actions_dag, planner_context, use_column_identifier_as_action_node_name, always_use_const_column_for_constant_nodes);
return actions_visitor_impl.visit(expression_node);
}
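The `_dupped` handling in `addConstantIfNecessary` above reduces to: if a node with the requested name already exists but carries no column, register the constant under `<name>_dupped` rather than overwriting the existing node. A rough Python sketch of that lookup (names simplified; `dag.add_column` is a stand-in for `actions_dag.addColumn`):

```python
# Rough sketch of the addConstantIfNecessary lookup from PlannerActionsVisitor.cpp;
# node_name_to_node maps action-node names to nodes, and a node created from a
# constant always has a column attached.
def add_constant_if_necessary(node_name, column, always_use_const_column, dag, node_name_to_node):
    existing = node_name_to_node.get(node_name)
    if existing is not None and (not always_use_const_column or existing.column is not None):
        return node_name                      # reuse the existing node as-is
    if existing is not None:
        # name is taken by a column-less INPUT: keep it and add the constant under a new name
        dupped_name = node_name + "_dupped"
        if dupped_name not in node_name_to_node:
            node_name_to_node[dupped_name] = dag.add_column(column)
        return dupped_name
    node_name_to_node[node_name] = dag.add_column(column)
    return node_name
```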


@ -27,11 +27,17 @@ using PlannerContextPtr = std::shared_ptr<PlannerContext>;
* During actions build, there is special handling for following functions:
* 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added.
* 2. For function `in` and its variants, already collected sets from planner context are used.
* 3. When building actions that use CONSTANT nodes, by default we ignore pre-existing INPUTs if those don't have
* a column (a const column always has a column). This is for compatibility with previous headers. We disable this
* behaviour when we explicitly want to override CONSTANT nodes with the input (resolving InterpolateNode for example)
*/
class PlannerActionsVisitor
{
public:
explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true);
explicit PlannerActionsVisitor(
const PlannerContextPtr & planner_context_,
bool use_column_identifier_as_action_node_name_ = true,
bool always_use_const_column_for_constant_nodes_ = true);
/** Add actions necessary to calculate expression node into expression dag.
* Necessary actions are not added in actions dag output.
@ -42,6 +48,7 @@ public:
private:
const PlannerContextPtr planner_context;
bool use_column_identifier_as_action_node_name = true;
bool always_use_const_column_for_constant_nodes = true;
};
/** Calculate query tree expression node action dag name and add them into node to name map.


@ -3,8 +3,13 @@ import time
from pathlib import Path
from typing import Optional
from shutil import copy2
from create_release import PackageDownloader, ReleaseInfo, ShellRunner
from ci_utils import WithIter
from create_release import (
PackageDownloader,
ReleaseInfo,
ReleaseContextManager,
ReleaseProgress,
)
from ci_utils import WithIter, Shell
class MountPointApp(metaclass=WithIter):
@ -76,19 +81,20 @@ class R2MountPoint:
)
_TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}"
ShellRunner.run(_CLEAN_LOG_FILE_CMD)
ShellRunner.run(_UNMOUNT_CMD)
ShellRunner.run(_MKDIR_CMD)
ShellRunner.run(_MKDIR_FOR_CACHE)
ShellRunner.run(self.mount_cmd, async_=self.async_mount)
Shell.run(_CLEAN_LOG_FILE_CMD)
Shell.run(_UNMOUNT_CMD)
Shell.run(_MKDIR_CMD)
Shell.run(_MKDIR_FOR_CACHE)
# couldn't use a plain run() here without it blocking or failing
Shell.run_as_daemon(self.mount_cmd)
if self.async_mount:
time.sleep(3)
ShellRunner.run(_TEST_MOUNT_CMD)
Shell.run(_TEST_MOUNT_CMD, check=True)
@classmethod
def teardown(cls):
print(f"Unmount [{cls.MOUNT_POINT}]")
ShellRunner.run(f"umount {cls.MOUNT_POINT}")
Shell.run(f"umount {cls.MOUNT_POINT}")
class RepoCodenames(metaclass=WithIter):
@ -124,8 +130,8 @@ class DebianArtifactory:
cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}"
print("Running export command:")
print(f" {cmd}")
ShellRunner.run(cmd)
ShellRunner.run("sync")
Shell.run(cmd, check=True)
Shell.run("sync")
if self.codename == RepoCodenames.LTS:
packages_with_version = [
@ -137,16 +143,20 @@ class DebianArtifactory:
cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}"
print("Running copy command:")
print(f" {cmd}")
ShellRunner.run(cmd)
ShellRunner.run("sync")
Shell.run(cmd, check=True)
Shell.run("sync")
def test_packages(self):
ShellRunner.run("docker pull ubuntu:latest")
Shell.run("docker pull ubuntu:latest")
print(f"Test packages installation, version [{self.version}]")
cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\""
debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}"
cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"'
print("Running test command:")
print(f" {cmd}")
ShellRunner.run(cmd)
Shell.run(cmd, check=True)
release_info = ReleaseInfo.from_file()
release_info.debian_command = debian_command
release_info.dump()
def _copy_if_not_exists(src: Path, dst: Path) -> Path:
@ -202,23 +212,27 @@ class RpmArtifactory:
for command in commands:
print("Running command:")
print(f" {command}")
ShellRunner.run(command)
Shell.run(command, check=True)
update_public_key = f"gpg --armor --export {self._SIGN_KEY}"
pub_key_path = dest_dir / "repodata" / "repomd.xml.key"
print("Updating repomd.xml.key")
pub_key_path.write_text(ShellRunner.run(update_public_key)[1])
pub_key_path.write_text(Shell.run(update_public_key, check=True))
if codename == RepoCodenames.LTS:
self.export_packages(RepoCodenames.STABLE)
ShellRunner.run("sync")
Shell.run("sync")
def test_packages(self):
ShellRunner.run("docker pull fedora:latest")
Shell.run("docker pull fedora:latest")
print(f"Test package installation, version [{self.version}]")
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"'
rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"'
print("Running test command:")
print(f" {cmd}")
ShellRunner.run(cmd)
Shell.run(cmd, check=True)
release_info = ReleaseInfo.from_file()
release_info.rpm_command = rpm_command
release_info.dump()
class TgzArtifactory:
@ -256,23 +270,29 @@ class TgzArtifactory:
if codename == RepoCodenames.LTS:
self.export_packages(RepoCodenames.STABLE)
ShellRunner.run("sync")
Shell.run("sync")
def test_packages(self):
tgz_file = "/tmp/tmp.tgz"
tgz_sha_file = "/tmp/tmp.tgz.sha512"
ShellRunner.run(
f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
Shell.run(
cmd,
check=True,
)
ShellRunner.run(
f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512"
Shell.run(
f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512",
check=True,
)
expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}")
actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True)
actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
assert (
expected_checksum == actual_checksum
), f"[{actual_checksum} != {expected_checksum}]"
ShellRunner.run("rm /tmp/tmp.tgz*")
Shell.run("rm /tmp/tmp.tgz*")
release_info = ReleaseInfo.from_file()
release_info.tgz_command = cmd
release_info.dump()
def parse_args() -> argparse.Namespace:
@ -280,12 +300,6 @@ def parse_args() -> argparse.Namespace:
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Adds release packages to the repository",
)
parser.add_argument(
"--infile",
type=str,
required=True,
help="input file with release info",
)
parser.add_argument(
"--export-debian",
action="store_true",
@ -328,7 +342,7 @@ if __name__ == "__main__":
args = parse_args()
assert args.dry_run
release_info = ReleaseInfo.from_file(args.infile)
release_info = ReleaseInfo.from_file()
"""
Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve:
ERROR : IO error: NotImplemented: versionId not implemented
@ -336,20 +350,26 @@ if __name__ == "__main__":
"""
mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run)
if args.export_debian:
mp.init()
DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_DEB) as _:
mp.init()
DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
if args.export_rpm:
mp.init()
RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_RPM) as _:
mp.init()
RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
if args.export_tgz:
mp.init()
TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_TGZ) as _:
mp.init()
TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
mp.teardown()
if args.test_debian:
DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_DEB) as _:
DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
if args.test_tgz:
TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_TGZ) as _:
TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
if args.test_rpm:
RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_RPM) as _:
RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()


@ -1,17 +1,17 @@
import argparse
from datetime import timedelta, datetime
import logging
import dataclasses
import json
import os
from commit_status_helper import get_commit_filtered_statuses
import sys
from typing import List
from get_robot_token import get_best_robot_token
from github_helper import GitHub
from release import Release, Repo as ReleaseRepo, RELEASE_READY_STATUS
from ci_utils import Shell
from env_helper import GITHUB_REPOSITORY
from report import SUCCESS
from ssh import SSHKey
LOGGER_NAME = __name__
HELPER_LOGGERS = ["github_helper", LOGGER_NAME]
logger = logging.getLogger(LOGGER_NAME)
from ci_buddy import CIBuddy
from ci_config import CI
def parse_args():
@ -21,120 +21,198 @@ def parse_args():
)
parser.add_argument("--token", help="GitHub token, if not set, used from smm")
parser.add_argument(
"--repo", default="ClickHouse/ClickHouse", help="Repo owner/name"
)
parser.add_argument("--dry-run", action="store_true", help="Do not create anything")
parser.add_argument(
"--release-after-days",
type=int,
default=3,
help="Do automatic release on the latest green commit after the latest "
"release if the newest release is older than the specified days",
)
parser.add_argument(
"--debug-helpers",
"--post-status",
action="store_true",
help="Add debug logging for this script and github_helper",
help="Post release branch statuses",
)
parser.add_argument(
"--remote-protocol",
"-p",
default="ssh",
choices=ReleaseRepo.VALID,
help="repo protocol for git commands remote, 'origin' is a special case and "
"uses 'origin' as a remote",
"--post-auto-release-complete",
action="store_true",
help="Post autorelease completion status",
)
parser.add_argument(
"--prepare",
action="store_true",
help="Prepare autorelease info",
)
parser.add_argument(
"--wf-status",
type=str,
default="",
help="overall workflow status [success|failure]",
)
return parser.parse_args(), parser
return parser.parse_args()
MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5
AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json"
@dataclasses.dataclass
class ReleaseParams:
ready: bool
ci_status: str
num_patches: int
release_branch: str
commit_sha: str
commits_to_branch_head: int
latest: bool
def to_dict(self):
return dataclasses.asdict(self)
@dataclasses.dataclass
class AutoReleaseInfo:
releases: List[ReleaseParams]
def add_release(self, release_params: ReleaseParams) -> None:
self.releases.append(release_params)
def dump(self):
print(f"Dump release info into [{AUTORELEASE_INFO_FILE}]")
with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f:
print(json.dumps(dataclasses.asdict(self), indent=2), file=f)
@staticmethod
def from_file() -> "AutoReleaseInfo":
with open(AUTORELEASE_INFO_FILE, "r", encoding="utf-8") as json_file:
res = json.load(json_file)
releases = [ReleaseParams(**release) for release in res["releases"]]
return AutoReleaseInfo(releases=releases)
def _prepare(token):
assert len(token) > 10
os.environ["GH_TOKEN"] = token
Shell.run("gh auth status", check=True)
gh = GitHub(token)
prs = gh.get_release_pulls(GITHUB_REPOSITORY)
prs.sort(key=lambda x: x.head.ref)
branch_names = [pr.head.ref for pr in prs]
print(f"Found release branches [{branch_names}]")
repo = gh.get_repo(GITHUB_REPOSITORY)
autoRelease_info = AutoReleaseInfo(releases=[])
for pr in prs:
print(f"\nChecking PR [{pr.head.ref}]")
refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
assert refs
refs.sort(key=lambda ref: ref.ref)
latest_release_tag_ref = refs[-1]
latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
commits = Shell.run(
f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}",
check=True,
).split("\n")
commit_num = len(commits)
print(
f"Previous release [{latest_release_tag.tag}] was [{commit_num}] commits ago, date [{latest_release_tag.tagger.date}]"
)
commits_to_check = commits[:-1] # Exclude the version bump commit
commit_sha = ""
commit_ci_status = ""
commits_to_branch_head = 0
for idx, commit in enumerate(
commits_to_check[:MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE]
):
print(
f"Check commit [{commit}] [{pr.head.ref}~{idx+1}] as release candidate"
)
commit_num -= 1
is_completed = CI.GHActions.check_wf_completed(
token=token, commit_sha=commit
)
if not is_completed:
print(f"CI is in progress for [{commit}] - check previous commit")
commits_to_branch_head += 1
continue
commit_ci_status = CI.GHActions.get_commit_status_by_name(
token=token,
commit_sha=commit,
status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"),
)
commit_sha = commit
if commit_ci_status == SUCCESS:
break
print(f"CI status [{commit_ci_status}] - skip")
commits_to_branch_head += 1
ready = False
if commit_ci_status == SUCCESS and commit_sha:
print(
f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]"
)
ready = True
else:
print(f"WARNING: No ready commits found for release branch [{pr.head.ref}]")
autoRelease_info.add_release(
ReleaseParams(
release_branch=pr.head.ref,
commit_sha=commit_sha,
ready=ready,
ci_status=commit_ci_status,
num_patches=commit_num,
commits_to_branch_head=commits_to_branch_head,
latest=False,
)
)
if autoRelease_info.releases:
autoRelease_info.releases[-1].latest = True
autoRelease_info.dump()
def main():
args = parse_args()
logging.basicConfig(level=logging.INFO)
if args.debug_helpers:
for logger_name in HELPER_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.DEBUG)
args, parser = parse_args()
token = args.token or get_best_robot_token()
days_as_timedelta = timedelta(days=args.release_after_days)
now = datetime.now()
gh = GitHub(token)
prs = gh.get_release_pulls(args.repo)
branch_names = [pr.head.ref for pr in prs]
logger.info("Found release branches: %s\n ", " \n".join(branch_names))
repo = gh.get_repo(args.repo)
# In general there is no guarantee on which order the refs/commits are
# returned from the API, so we have to order them.
for pr in prs:
logger.info("Checking PR %s", pr.head.ref)
refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
refs.sort(key=lambda ref: ref.ref)
latest_release_tag_ref = refs[-1]
latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
logger.info("That last release was done at %s", latest_release_tag.tagger.date)
if latest_release_tag.tagger.date + days_as_timedelta > now:
logger.info(
"Not enough days since the last release %s,"
" no automatic release can be done",
latest_release_tag.tag,
if args.post_status:
info = AutoReleaseInfo.from_file()
for release_info in info.releases:
if release_info.ready:
CIBuddy(dry_run=False).post_info(
title=f"Auto Release Status for {release_info.release_branch}",
body=release_info.to_dict(),
)
else:
CIBuddy(dry_run=False).post_warning(
title=f"Auto Release Status for {release_info.release_branch}",
body=release_info.to_dict(),
)
if args.post_auto_release_complete:
assert args.wf_status, "--wf-status Required with --post-auto-release-complete"
if args.wf_status != SUCCESS:
CIBuddy(dry_run=False).post_job_error(
error_description="Autorelease workflow failed",
job_name="Autorelease",
with_instance_info=False,
with_wf_link=True,
critical=True,
)
continue
unreleased_commits = list(
repo.get_commits(sha=pr.head.ref, since=latest_release_tag.tagger.date)
)
unreleased_commits.sort(
key=lambda commit: commit.commit.committer.date, reverse=True
)
for commit in unreleased_commits:
logger.info("Checking statuses of commit %s", commit.sha)
statuses = get_commit_filtered_statuses(commit)
all_success = all(st.state == SUCCESS for st in statuses)
passed_ready_for_release_check = any(
st.context == RELEASE_READY_STATUS and st.state == SUCCESS
for st in statuses
else:
CIBuddy(dry_run=False).post_info(
title=f"Autorelease completed",
body="",
with_wf_link=True,
)
if not (all_success and passed_ready_for_release_check):
logger.info("Commit is not green, thus not suitable for release")
continue
logger.info("Commit is ready for release, let's release!")
release = Release(
ReleaseRepo(args.repo, args.remote_protocol),
commit.sha,
"patch",
args.dry_run,
True,
)
try:
release.do(True, True, True)
except:
if release.has_rollback:
logging.error(
"!!The release process finished with error, read the output carefully!!"
)
logging.error(
"Probably, rollback finished with error. "
"If you don't see any of the following commands in the output, "
"execute them manually:"
)
release.log_rollback()
raise
logging.info("New release is done!")
break
elif args.prepare:
_prepare(token=args.token or get_best_robot_token())
else:
parser.print_help()
sys.exit(2)
if __name__ == "__main__":
if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""):
with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
main()
else:
main()
main()


@ -1110,13 +1110,14 @@ def main() -> int:
ci_cache.print_status()
if IS_CI and not pr_info.is_merge_queue:
# wait for pending jobs to be finished, await_jobs is a long blocking call
ci_cache.await_pending_jobs(pr_info.is_release)
if pr_info.is_release:
print("Release/master: CI Cache add pending records for all todo jobs")
ci_cache.push_pending_all(pr_info.is_release)
# wait for pending jobs to be finished, await_jobs is a long blocking call
ci_cache.await_pending_jobs(pr_info.is_release)
# conclude results
result["git_ref"] = git_ref
result["version"] = version
@ -1296,7 +1297,7 @@ def main() -> int:
error_description = f"Out Of Memory, exit_code {job_report.exit_code}"
else:
error_description = f"Unknown, exit_code {job_report.exit_code}"
CIBuddy().post_error(
CIBuddy().post_job_error(
error_description + f" after {int(job_report.duration)}s",
job_name=_get_ext_check_name(args.job_name),
)


@ -1,5 +1,6 @@
import json
import os
from typing import Union, Dict
import boto3
import requests
@ -60,14 +61,64 @@ class CIBuddy:
except Exception as e:
print(f"ERROR: Failed to post message, ex {e}")
def post_error(self, error_description, job_name="", with_instance_info=True):
def _post_formatted(
self, title: str, body: Union[Dict, str], with_wf_link: bool
) -> None:
message = title
if isinstance(body, dict):
for name, value in body.items():
if "commit_sha" in name:
value = (
f"<https://github.com/{self.repo}/commit/{value}|{value[:8]}>"
)
message += f" *{name}*: {value}\n"
else:
message += body + "\n"
run_id = os.getenv("GITHUB_RUN_ID", "")
if with_wf_link and run_id:
message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
self.post(message)
def post_info(
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
) -> None:
title_extended = f":white_circle: *{title}*\n\n"
self._post_formatted(title_extended, body, with_wf_link)
def post_done(
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
) -> None:
title_extended = f":white_check_mark: *{title}*\n\n"
self._post_formatted(title_extended, body, with_wf_link)
def post_warning(
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
) -> None:
title_extended = f":warning: *{title}*\n\n"
self._post_formatted(title_extended, body, with_wf_link)
def post_critical(
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
) -> None:
title_extended = f":black_circle: *{title}*\n\n"
self._post_formatted(title_extended, body, with_wf_link)
def post_job_error(
self,
error_description: str,
job_name: str = "",
with_instance_info: bool = True,
with_wf_link: bool = True,
critical: bool = False,
) -> None:
instance_id, instance_type = "unknown", "unknown"
if with_instance_info:
instance_id = Shell.run("ec2metadata --instance-id") or instance_id
instance_type = Shell.run("ec2metadata --instance-type") or instance_type
if not job_name:
job_name = os.getenv("CHECK_NAME", "unknown")
line_err = f":red_circle: *Error: {error_description}*\n\n"
sign = ":red_circle:" if not critical else ":black_circle:"
line_err = f"{sign} *Error: {error_description}*\n\n"
line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n"
line_job = f" *Job:* `{job_name}`\n"
line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>, <{self.commit_url}|{self.sha}>\n"
@ -82,10 +133,13 @@ class CIBuddy:
message += line_pr_
else:
message += line_br_
run_id = os.getenv("GITHUB_RUN_ID", "")
if with_wf_link and run_id:
message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
self.post(message)
if __name__ == "__main__":
# test
buddy = CIBuddy(dry_run=True)
buddy.post_error("TEst")
buddy.post_job_error("TEst")
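A minimal usage sketch of the new helpers (class, method, and parameter names are taken from the diff above; the module name matches the import used elsewhere in this commit, and all message values are illustrative):

    from ci_buddy import CIBuddy

    buddy = CIBuddy(dry_run=True)
    # dict bodies are rendered line by line; keys containing "commit_sha" are turned into
    # commit links, and a workflow link is appended when GITHUB_RUN_ID is set
    buddy.post_done(
        "New release issued",
        {
            "release_tag": "v24.7.1.1-stable",
            "commit_sha": "0123456789abcdef0123456789abcdef01234567",
        },
    )
    # renamed job-error helper; critical=True swaps the red circle for a black one
    buddy.post_job_error(
        "Job timed out", job_name="Stateless tests (asan)", with_instance_info=False
    )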

View File

@ -638,7 +638,14 @@ class CiCache:
pushes pending records for all jobs that are supposed to be run
"""
for job, job_config in self.jobs_to_do.items():
if not job_config.has_digest():
if (
job in self.jobs_to_wait
or not job_config.has_digest()
or job_config.disable_await
):
# 1. "job in self.jobs_to_wait" - this job already has a pending record in cache
# 2. "not job_config.has_digest()" - cache is not used for these jobs
# 3. "job_config.disable_await" - await is explicitly disabled
continue
pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL)
assert job_config.batches
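The three conditions above can be folded into a single predicate; a small sketch (the helper name is illustrative and not part of the source):

    def should_skip_pending_record(job, job_config, jobs_to_wait):
        # 1. the job already has a pending record in the cache
        # 2. the cache is not used for this job (no digest)
        # 3. awaiting is explicitly disabled for this job
        return (
            job in jobs_to_wait
            or not job_config.has_digest()
            or job_config.disable_await
        )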
@ -708,7 +715,7 @@ class CiCache:
Filter is to be applied in PRs to remove jobs that are not affected by the change
:return:
"""
remove_from_to_do = []
remove_from_workflow = []
required_builds = []
has_test_jobs_to_skip = False
for job_name, job_config in self.jobs_to_do.items():
@ -723,26 +730,41 @@ class CiCache:
job=reference_name,
job_config=reference_config,
):
remove_from_to_do.append(job_name)
remove_from_workflow.append(job_name)
has_test_jobs_to_skip = True
else:
required_builds += (
job_config.required_builds if job_config.required_builds else []
)
if has_test_jobs_to_skip:
# If there are tests to skip, it means build digest has not been changed.
# If there are tests to skip, it means the builds are not affected either.
# No need to test builds. Let's keep all builds required for test jobs and skip the others
for job_name, job_config in self.jobs_to_do.items():
if CI.is_build_job(job_name):
if job_name not in required_builds:
remove_from_to_do.append(job_name)
remove_from_workflow.append(job_name)
for job in remove_from_to_do:
for job in remove_from_workflow:
print(f"Filter job [{job}] - not affected by the change")
if job in self.jobs_to_do:
del self.jobs_to_do[job]
if job in self.jobs_to_wait:
del self.jobs_to_wait[job]
if job in self.jobs_to_skip:
self.jobs_to_skip.remove(job)
# special handling for the BUILD_CHECK job
has_builds = False
for job in list(self.jobs_to_do) + self.jobs_to_skip:
if CI.is_build_job(job):
has_builds = True
break
if not has_builds:
if CI.JobNames.BUILD_CHECK in self.jobs_to_do:
print(
f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds are required in the workflow"
)
del self.jobs_to_do[CI.JobNames.BUILD_CHECK]
def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
"""
@ -884,3 +906,87 @@ class CiCache:
self.jobs_to_wait[job] = job_config
return self
if __name__ == "__main__":
# for testing
job_digest = {
"package_release": "bbbd3519d1",
"package_aarch64": "bbbd3519d1",
"package_asan": "bbbd3519d1",
"package_ubsan": "bbbd3519d1",
"package_tsan": "bbbd3519d1",
"package_msan": "bbbd3519d1",
"package_debug": "bbbd3519d1",
"package_release_coverage": "bbbd3519d1",
"binary_release": "bbbd3519d1",
"binary_tidy": "bbbd3519d1",
"binary_darwin": "bbbd3519d1",
"binary_aarch64": "bbbd3519d1",
"binary_aarch64_v80compat": "bbbd3519d1",
"binary_freebsd": "bbbd3519d1",
"binary_darwin_aarch64": "bbbd3519d1",
"binary_ppc64le": "bbbd3519d1",
"binary_amd64_compat": "bbbd3519d1",
"binary_amd64_musl": "bbbd3519d1",
"binary_riscv64": "bbbd3519d1",
"binary_s390x": "bbbd3519d1",
"binary_loongarch64": "bbbd3519d1",
"Builds": "f5dffeecb8",
"Install packages (release)": "ba0c89660e",
"Install packages (aarch64)": "ba0c89660e",
"Stateful tests (asan)": "32a9a1aba9",
"Stateful tests (tsan)": "32a9a1aba9",
"Stateful tests (msan)": "32a9a1aba9",
"Stateful tests (ubsan)": "32a9a1aba9",
"Stateful tests (debug)": "32a9a1aba9",
"Stateful tests (release)": "32a9a1aba9",
"Stateful tests (coverage)": "32a9a1aba9",
"Stateful tests (aarch64)": "32a9a1aba9",
"Stateful tests (release, ParallelReplicas)": "32a9a1aba9",
"Stateful tests (debug, ParallelReplicas)": "32a9a1aba9",
"Stateless tests (asan)": "deb6778b88",
"Stateless tests (tsan)": "deb6778b88",
"Stateless tests (msan)": "deb6778b88",
"Stateless tests (ubsan)": "deb6778b88",
"Stateless tests (debug)": "deb6778b88",
"Stateless tests (release)": "deb6778b88",
"Stateless tests (coverage)": "deb6778b88",
"Stateless tests (aarch64)": "deb6778b88",
"Stateless tests (release, old analyzer, s3, DatabaseReplicated)": "deb6778b88",
"Stateless tests (debug, s3 storage)": "deb6778b88",
"Stateless tests (tsan, s3 storage)": "deb6778b88",
"Stress test (debug)": "aa298abf10",
"Stress test (tsan)": "aa298abf10",
"Upgrade check (debug)": "5ce4d3ee02",
"Integration tests (asan, old analyzer)": "42e58be3aa",
"Integration tests (tsan)": "42e58be3aa",
"Integration tests (aarch64)": "42e58be3aa",
"Integration tests flaky check (asan)": "42e58be3aa",
"Compatibility check (release)": "ecb69d8c4b",
"Compatibility check (aarch64)": "ecb69d8c4b",
"Unit tests (release)": "09d00b702e",
"Unit tests (asan)": "09d00b702e",
"Unit tests (msan)": "09d00b702e",
"Unit tests (tsan)": "09d00b702e",
"Unit tests (ubsan)": "09d00b702e",
"AST fuzzer (debug)": "c38ebf947f",
"AST fuzzer (asan)": "c38ebf947f",
"AST fuzzer (msan)": "c38ebf947f",
"AST fuzzer (tsan)": "c38ebf947f",
"AST fuzzer (ubsan)": "c38ebf947f",
"Stateless tests flaky check (asan)": "deb6778b88",
"Performance Comparison (release)": "a8a7179258",
"ClickBench (release)": "45c07c4aa6",
"ClickBench (aarch64)": "45c07c4aa6",
"Docker server image": "6a24d5b187",
"Docker keeper image": "6a24d5b187",
"Docs check": "4764154c62",
"Fast test": "cb269133f2",
"Style check": "ffffffffff",
"Stateful tests (ubsan, ParallelReplicas)": "32a9a1aba9",
"Stress test (msan)": "aa298abf10",
"Upgrade check (asan)": "5ce4d3ee02",
}
ci_cache = CiCache(job_digests=job_digest, cache_enabled=True, s3=S3Helper())
ci_cache.update()

View File

@ -32,6 +32,9 @@ class CI:
from ci_definitions import MQ_JOBS as MQ_JOBS
from ci_definitions import WorkflowStages as WorkflowStages
from ci_definitions import Runners as Runners
from ci_utils import Envs as Envs
from ci_utils import Utils as Utils
from ci_utils import GHActions as GHActions
from ci_definitions import Labels as Labels
from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS
from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL

View File

@ -351,6 +351,8 @@ class JobConfig:
run_by_label: str = ""
# to run always, regardless of the job digest and/or label
run_always: bool = False
# disables CI await for a given job
disable_await: bool = False
# if the job needs to be run on the release branch, including master (building packages, docker server).
# NOTE: Subsequent runs on the same branch with the similar digest are still considered skip-able.
required_on_release_branch: bool = False
@ -395,6 +397,7 @@ class CommonJobConfigs:
],
),
runner_type=Runners.STYLE_CHECKER_ARM,
disable_await=True,
)
COMPATIBILITY_TEST = JobConfig(
job_name_keyword="compatibility",

View File

@ -1,9 +1,16 @@
import os
import re
import subprocess
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Iterator, List, Union, Optional, Tuple
from typing import Any, Iterator, List, Union, Optional, Sequence, Tuple
import requests
class Envs:
GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
LABEL_CATEGORIES = {
@ -80,6 +87,71 @@ class GHActions:
print(line)
print("::endgroup::")
@staticmethod
def get_commit_status_by_name(
token: str, commit_sha: str, status_name: Union[str, Sequence]
) -> str:
assert len(token) == 40
assert len(commit_sha) == 40
assert is_hex(commit_sha)
assert not is_hex(token)
url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses?per_page={200}"
headers = {
"Authorization": f"token {token}",
"Accept": "application/vnd.github.v3+json",
}
response = requests.get(url, headers=headers, timeout=5)
if isinstance(status_name, str):
status_name = (status_name,)
if response.status_code == 200:
assert "next" not in response.links, "Response truncated"
statuses = response.json()
for status in statuses:
if status["context"] in status_name:
return status["state"] # type: ignore
return ""
@staticmethod
def check_wf_completed(token: str, commit_sha: str) -> bool:
headers = {
"Authorization": f"token {token}",
"Accept": "application/vnd.github.v3+json",
}
url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/check-runs?per_page={100}"
for i in range(3):
try:
response = requests.get(url, headers=headers, timeout=5)
response.raise_for_status()
# assert "next" not in response.links, "Response truncated"
data = response.json()
assert data["check_runs"], "?"
for check in data["check_runs"]:
if check["status"] != "completed":
print(
f" Check workflow status: Check not completed [{check['name']}]"
)
return False
return True
except Exception as e:
print(f"ERROR: exception after attempt [{i}]: {e}")
time.sleep(1)
return False
@staticmethod
def get_pr_url_by_branch(repo, branch):
get_url_cmd = (
f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url'"
)
url = Shell.run(get_url_cmd)
if not url:
print(f"ERROR: PR nor found, branch [{branch}]")
return url
class Shell:
@classmethod
@ -95,7 +167,11 @@ class Shell:
return res.stdout.strip()
@classmethod
def run(cls, command):
def run(cls, command, check=False, dry_run=False):
if dry_run:
print(f"Dry-ryn. Would run command [{command}]")
return ""
print(f"Run command [{command}]")
res = ""
result = subprocess.run(
command,
@ -107,12 +183,24 @@ class Shell:
)
if result.returncode == 0:
res = result.stdout
else:
print(
f"ERROR: stdout {result.stdout.strip()}, stderr {result.stderr.strip()}"
)
if check:
assert result.returncode == 0
return res.strip()
@classmethod
def run_as_daemon(cls, command):
print(f"Run daemon command [{command}]")
subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with
return 0, ""
@classmethod
def check(cls, command):
result = subprocess.run(
command + " 2>&1",
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
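A minimal usage sketch of the new GHActions helpers and the extended Shell.run (signatures from the diff above; the module name matches the import used elsewhere in this commit, while the token variable, commit sha, branch, and status context are placeholders):

    import os

    from ci_utils import Envs, GHActions, Shell

    token = os.environ["GH_TOKEN"]  # assumed to hold a 40-character GitHub token
    sha = "0123456789abcdef0123456789abcdef01234567"  # placeholder 40-character commit sha

    state = GHActions.get_commit_status_by_name(token, sha, status_name=("Mergeable Check",))
    print(f"status state: {state!r}")
    if GHActions.check_wf_completed(token=token, commit_sha=sha):
        pr_url = GHActions.get_pr_url_by_branch(
            repo=Envs.GITHUB_REPOSITORY, branch="auto/v24.7.1.1-stable"
        )
        # check=True asserts on a non-zero exit code, dry_run=True only prints the command
        Shell.run(f"echo 'workflow finished, PR: {pr_url}'", check=True, dry_run=True)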

View File

@ -2,7 +2,6 @@ import argparse
import dataclasses
import json
import os
import subprocess
from contextlib import contextmanager
from copy import copy
@ -13,7 +12,8 @@ from git_helper import Git, GIT_PREFIX
from ssh import SSHAgent
from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET
from s3_helper import S3Helper
from ci_utils import Shell
from ci_utils import Shell, GHActions
from ci_buddy import CIBuddy
from version_helper import (
FILE_WITH_VERSION_PATH,
GENERATED_CONTRIBUTORS,
@ -27,34 +27,65 @@ from ci_config import CI
CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH)
CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS)
RELEASE_INFO_FILE = "/tmp/release_info.json"
class ShellRunner:
class ReleaseProgress:
STARTED = "started"
DOWNLOAD_PACKAGES = "download packages"
PUSH_RELEASE_TAG = "push release tag"
PUSH_NEW_RELEASE_BRANCH = "push new release branch"
BUMP_VERSION = "bump version"
CREATE_GH_RELEASE = "create GH release"
EXPORT_TGZ = "export TGZ packages"
EXPORT_RPM = "export RPM packages"
EXPORT_DEB = "export DEB packages"
TEST_TGZ = "test TGZ packages"
TEST_RPM = "test RPM packages"
TEST_DEB = "test DEB packages"
@classmethod
def run(
cls, command, check_retcode=True, print_output=True, async_=False, dry_run=False
):
if dry_run:
print(f"Dry-run: Would run shell command: [{command}]")
return 0, ""
print(f"Running shell command: [{command}]")
if async_:
subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with
return 0, ""
result = subprocess.run(
command + " 2>&1",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=True,
)
if print_output:
print(result.stdout)
if check_retcode:
assert result.returncode == 0, f"Return code [{result.returncode}]"
return result.returncode, result.stdout
class ReleaseProgressDescription:
OK = "OK"
FAILED = "FAILED"
class ReleaseContextManager:
def __init__(self, release_progress):
self.release_progress = release_progress
self.release_info = None
def __enter__(self):
if self.release_progress == ReleaseProgress.STARTED:
# create initial release info
self.release_info = ReleaseInfo(
release_branch="NA",
commit_sha=args.ref,
release_tag="NA",
version="NA",
codename="NA",
previous_release_tag="NA",
previous_release_sha="NA",
release_progress=ReleaseProgress.STARTED,
).dump()
else:
# fetch release info from fs and update
self.release_info = ReleaseInfo.from_file()
assert self.release_info
assert (
self.release_info.progress_description == ReleaseProgressDescription.OK
), "Must be OK on the start of new context"
self.release_info.release_progress = self.release_progress
self.release_info.dump()
return self.release_info
def __exit__(self, exc_type, exc_value, traceback):
assert self.release_info
if exc_type is not None:
self.release_info.progress_description = ReleaseProgressDescription.FAILED
else:
self.release_info.progress_description = ReleaseProgressDescription.OK
self.release_info.dump()
@dataclasses.dataclass
@ -67,17 +98,29 @@ class ReleaseInfo:
codename: str
previous_release_tag: str
previous_release_sha: str
changelog_pr: str = ""
version_bump_pr: str = ""
release_url: str = ""
debian_command: str = ""
rpm_command: str = ""
tgz_command: str = ""
docker_command: str = ""
release_progress: str = ""
progress_description: str = ""
@staticmethod
def from_file(file_path: str) -> "ReleaseInfo":
with open(file_path, "r", encoding="utf-8") as json_file:
def from_file() -> "ReleaseInfo":
with open(RELEASE_INFO_FILE, "r", encoding="utf-8") as json_file:
res = json.load(json_file)
return ReleaseInfo(**res)
@staticmethod
def prepare(commit_ref: str, release_type: str, outfile: str) -> None:
Path(outfile).parent.mkdir(parents=True, exist_ok=True)
Path(outfile).unlink(missing_ok=True)
def dump(self):
print(f"Dump release info into [{RELEASE_INFO_FILE}]")
with open(RELEASE_INFO_FILE, "w", encoding="utf-8") as f:
print(json.dumps(dataclasses.asdict(self), indent=2), file=f)
return self
def prepare(self, commit_ref: str, release_type: str) -> "ReleaseInfo":
version = None
release_branch = None
release_tag = None
@ -87,11 +130,12 @@ class ReleaseInfo:
assert release_type in ("patch", "new")
if release_type == "new":
# check commit_ref is right and on a right branch
ShellRunner.run(
f"git merge-base --is-ancestor origin/{commit_ref} origin/master"
Shell.run(
f"git merge-base --is-ancestor origin/{commit_ref} origin/master",
check=True,
)
with checkout(commit_ref):
_, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
# Git() must be inside "with checkout" contextmanager
git = Git()
version = get_version_from_repo(git=git)
@ -112,7 +156,7 @@ class ReleaseInfo:
assert previous_release_sha
if release_type == "patch":
with checkout(commit_ref):
_, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
# Git() must be inside "with checkout" contextmanager
git = Git()
version = get_version_from_repo(git=git)
@ -120,10 +164,11 @@ class ReleaseInfo:
version.with_description(codename)
release_branch = f"{version.major}.{version.minor}"
release_tag = version.describe
ShellRunner.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags")
Shell.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags", check=True)
# check commit is right and on a right branch
ShellRunner.run(
f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}"
Shell.run(
f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}",
check=True,
)
if version.patch == 1:
expected_version = copy(version)
@ -162,22 +207,22 @@ class ReleaseInfo:
and version
and codename in ("lts", "stable")
)
res = ReleaseInfo(
release_branch=release_branch,
commit_sha=commit_sha,
release_tag=release_tag,
version=version.string,
codename=codename,
previous_release_tag=previous_release_tag,
previous_release_sha=previous_release_sha,
)
with open(outfile, "w", encoding="utf-8") as f:
print(json.dumps(dataclasses.asdict(res), indent=2), file=f)
self.release_branch = release_branch
self.commit_sha = commit_sha
self.release_tag = release_tag
self.version = version.string
self.codename = codename
self.previous_release_tag = previous_release_tag
self.previous_release_sha = previous_release_sha
self.release_progress = ReleaseProgress.STARTED
self.progress_description = ReleaseProgressDescription.OK
return self
def push_release_tag(self, dry_run: bool) -> None:
if dry_run:
# remove locally created tag from prev run
ShellRunner.run(
Shell.run(
f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:"
)
# Create release tag
@ -185,16 +230,17 @@ class ReleaseInfo:
f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]"
)
tag_message = f"Release {self.release_tag}"
ShellRunner.run(
f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}"
Shell.run(
f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}",
check=True,
)
cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}"
ShellRunner.run(cmd_push_tag, dry_run=dry_run)
Shell.run(cmd_push_tag, dry_run=dry_run, check=True)
@staticmethod
def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None:
cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}"
ShellRunner.run(cmd, dry_run=dry_run)
Shell.run(cmd, dry_run=dry_run, check=True)
def push_new_release_branch(self, dry_run: bool) -> None:
assert (
@ -211,8 +257,8 @@ class ReleaseInfo:
), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]"
if dry_run:
# remove locally created branch from prev run
ShellRunner.run(
f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch} ||:"
Shell.run(
f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch}"
)
print(
f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]"
@ -225,7 +271,7 @@ class ReleaseInfo:
cmd_push_branch = (
f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}"
)
ShellRunner.run(cmd_push_branch, dry_run=dry_run)
Shell.run(cmd_push_branch, dry_run=dry_run, check=True)
print("Create and push backport tags for new release branch")
ReleaseInfo._create_gh_label(
@ -234,12 +280,13 @@ class ReleaseInfo:
ReleaseInfo._create_gh_label(
f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run
)
ShellRunner.run(
Shell.run(
f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}'
--head {new_release_branch} {pr_labels}
--body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.'
""",
dry_run=dry_run,
check=True,
)
def update_version_and_contributors_list(self, dry_run: bool) -> None:
@ -265,32 +312,52 @@ class ReleaseInfo:
body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
actor = os.getenv("GITHUB_ACTOR", "") or "me"
cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file} --label 'do not test' --assignee @{actor}"
ShellRunner.run(cmd_commit_version_upd, dry_run=dry_run)
ShellRunner.run(cmd_push_branch, dry_run=dry_run)
ShellRunner.run(cmd_create_pr, dry_run=dry_run)
Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run)
Shell.run(cmd_push_branch, check=True, dry_run=dry_run)
Shell.run(cmd_create_pr, check=True, dry_run=dry_run)
if dry_run:
ShellRunner.run(
f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
)
ShellRunner.run(
Shell.run(f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'")
Shell.run(
f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
)
self.version_bump_pr = GHActions.get_pr_url_by_branch(
repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors
)
def update_release_info(self, dry_run: bool) -> "ReleaseInfo":
branch = f"auto/{release_info.release_tag}"
if not dry_run:
url = GHActions.get_pr_url_by_branch(repo=GITHUB_REPOSITORY, branch=branch)
else:
url = "dry-run"
print(f"ChangeLog PR url [{url}]")
self.changelog_pr = url
print(f"Release url [{url}]")
self.release_url = (
f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
)
self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.release_branch} clickhouse --version"
self.dump()
return self
def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None:
repo = os.getenv("GITHUB_REPOSITORY")
assert repo
cmds = []
cmds.append(
cmds = [
f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}"
)
]
for file in packages_files:
cmds.append(f"gh release upload {self.release_tag} {file}")
if not dry_run:
for cmd in cmds:
ShellRunner.run(cmd)
Shell.run(cmd, check=True)
self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
else:
print("Dry-run, would run commands:")
print("\n * ".join(cmds))
self.release_url = f"dry-run"
self.dump()
class RepoTypes:
@ -350,7 +417,7 @@ class PackageDownloader:
self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"]
self.file_to_type = {}
ShellRunner.run(f"mkdir -p {self.LOCAL_DIR}")
Shell.run(f"mkdir -p {self.LOCAL_DIR}")
for package_type in self.PACKAGE_TYPES:
for package in self.package_names:
@ -400,7 +467,7 @@ class PackageDownloader:
return res
def run(self):
ShellRunner.run(f"rm -rf {self.LOCAL_DIR}/*")
Shell.run(f"rm -rf {self.LOCAL_DIR}/*")
for package_file in (
self.deb_package_files + self.rpm_package_files + self.tgz_package_files
):
@ -473,6 +540,37 @@ class PackageDownloader:
return True
@contextmanager
def checkout(ref: str) -> Iterator[None]:
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
if ref not in (orig_ref,):
Shell.run(f"{GIT_PREFIX} checkout {ref}")
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
Shell.run(rollback_cmd)
raise
Shell.run(rollback_cmd)
@contextmanager
def checkout_new(ref: str) -> Iterator[None]:
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
Shell.run(f"{GIT_PREFIX} checkout -b {ref}", check=True)
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
Shell.run(rollback_cmd)
raise
Shell.run(rollback_cmd)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
@ -508,6 +606,11 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="Create GH Release object and attach all packages",
)
parser.add_argument(
"--post-status",
action="store_true",
help="Post release status into Slack",
)
parser.add_argument(
"--ref",
type=str,
@ -526,55 +629,25 @@ def parse_args() -> argparse.Namespace:
help="do not make any actual changes in the repo, just show what will be done",
)
parser.add_argument(
"--outfile",
default="",
type=str,
help="output file to write json result to, if not set - stdout",
"--set-progress-started",
action="store_true",
help="Set new progress step, --progress <PROGRESS STEP> must be set",
)
parser.add_argument(
"--infile",
default="",
"--progress",
type=str,
help="input file with release info",
help="Progress step name, see @ReleaseProgress",
)
parser.add_argument(
"--set-progress-completed",
action="store_true",
help="Set current progress step to OK (completed)",
)
return parser.parse_args()
@contextmanager
def checkout(ref: str) -> Iterator[None]:
_, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
if ref not in (orig_ref,):
ShellRunner.run(f"{GIT_PREFIX} checkout {ref}")
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
ShellRunner.run(rollback_cmd)
raise
ShellRunner.run(rollback_cmd)
@contextmanager
def checkout_new(ref: str) -> Iterator[None]:
_, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
ShellRunner.run(f"{GIT_PREFIX} checkout -b {ref}")
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
ShellRunner.run(rollback_cmd)
raise
ShellRunner.run(rollback_cmd)
if __name__ == "__main__":
args = parse_args()
assert args.dry_run
# prepare ssh for git if needed
_ssh_agent = None
@ -586,43 +659,82 @@ if __name__ == "__main__":
_ssh_agent.print_keys()
if args.prepare_release_info:
assert (
args.ref and args.release_type and args.outfile
), "--ref, --release-type and --outfile must be provided with --prepare-release-info"
ReleaseInfo.prepare(
commit_ref=args.ref, release_type=args.release_type, outfile=args.outfile
)
if args.push_release_tag:
assert args.infile, "--infile <release info file path> must be provided"
release_info = ReleaseInfo.from_file(args.infile)
release_info.push_release_tag(dry_run=args.dry_run)
if args.push_new_release_branch:
assert args.infile, "--infile <release info file path> must be provided"
release_info = ReleaseInfo.from_file(args.infile)
release_info.push_new_release_branch(dry_run=args.dry_run)
if args.create_bump_version_pr:
# TODO: store link to PR in release info
assert args.infile, "--infile <release info file path> must be provided"
release_info = ReleaseInfo.from_file(args.infile)
release_info.update_version_and_contributors_list(dry_run=args.dry_run)
with ReleaseContextManager(
release_progress=ReleaseProgress.STARTED
) as release_info:
assert (
args.ref and args.release_type
), "--ref and --release-type must be provided with --prepare-release-info"
release_info.prepare(commit_ref=args.ref, release_type=args.release_type)
if args.download_packages:
assert args.infile, "--infile <release info file path> must be provided"
release_info = ReleaseInfo.from_file(args.infile)
p = PackageDownloader(
release=release_info.release_branch,
commit_sha=release_info.commit_sha,
version=release_info.version,
)
p.run()
with ReleaseContextManager(
release_progress=ReleaseProgress.DOWNLOAD_PACKAGES
) as release_info:
p = PackageDownloader(
release=release_info.release_branch,
commit_sha=release_info.commit_sha,
version=release_info.version,
)
p.run()
if args.push_release_tag:
with ReleaseContextManager(
release_progress=ReleaseProgress.PUSH_RELEASE_TAG
) as release_info:
release_info.push_release_tag(dry_run=args.dry_run)
if args.push_new_release_branch:
with ReleaseContextManager(
release_progress=ReleaseProgress.PUSH_NEW_RELEASE_BRANCH
) as release_info:
release_info.push_new_release_branch(dry_run=args.dry_run)
if args.create_bump_version_pr:
with ReleaseContextManager(
release_progress=ReleaseProgress.BUMP_VERSION
) as release_info:
release_info.update_version_and_contributors_list(dry_run=args.dry_run)
if args.create_gh_release:
assert args.infile, "--infile <release info file path> must be provided"
release_info = ReleaseInfo.from_file(args.infile)
p = PackageDownloader(
release=release_info.release_branch,
commit_sha=release_info.commit_sha,
version=release_info.version,
)
release_info.create_gh_release(p.get_all_packages_files(), args.dry_run)
with ReleaseContextManager(
release_progress=ReleaseProgress.CREATE_GH_RELEASE
) as release_info:
p = PackageDownloader(
release=release_info.release_branch,
commit_sha=release_info.commit_sha,
version=release_info.version,
)
release_info.create_gh_release(
packages_files=p.get_all_packages_files(), dry_run=args.dry_run
)
if args.post_status:
release_info = ReleaseInfo.from_file()
release_info.update_release_info(dry_run=args.dry_run)
if release_info.debian_command:
CIBuddy(dry_run=args.dry_run).post_done(
f"New release issued", dataclasses.asdict(release_info)
)
else:
CIBuddy(dry_run=args.dry_run).post_critical(
f"Failed to issue new release", dataclasses.asdict(release_info)
)
if args.set_progress_started:
assert args.progress, "Progress step name must be provided"
ri = ReleaseInfo.from_file()
ri.release_progress = args.progress
ri.progress_description = ReleaseProgressDescription.FAILED
ri.dump()
if args.set_progress_completed:
ri = ReleaseInfo.from_file()
assert (
ri.progress_description == ReleaseProgressDescription.FAILED
), "Must be FAILED before set to OK"
ri.progress_description = ReleaseProgressDescription.OK
ri.dump()
# tear down ssh
if _ssh_agent and _key_pub:
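For orientation, a minimal sketch of how a single release step is driven with the new progress tracking (class, constant, and flag names are from the diff above; the module import and the invocation are illustrative):

    # roughly what the --push-release-tag step does in dry-run mode
    from create_release import ReleaseContextManager, ReleaseProgress  # assumed module name

    with ReleaseContextManager(release_progress=ReleaseProgress.PUSH_RELEASE_TAG) as release_info:
        # __enter__ reloads /tmp/release_info.json and requires the previous step to be OK;
        # __exit__ marks this step FAILED on exception and OK otherwise, then dumps the file
        release_info.push_release_tag(dry_run=True)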

View File

@ -254,11 +254,14 @@ def main():
statuses = get_commit_filtered_statuses(commit)
has_failed_statuses = False
has_native_failed_status = False
for status in statuses:
print(f"Check status [{status.context}], [{status.state}]")
if CI.is_required(status.context) and status.state != SUCCESS:
print(f"WARNING: Failed status [{status.context}], [{status.state}]")
has_failed_statuses = True
if status.context != CI.StatusNames.SYNC:
has_native_failed_status = True
if args.wf_status == SUCCESS or has_failed_statuses:
# set Mergeable check if workflow is successful (green)
@ -280,7 +283,7 @@ def main():
print(
"Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status"
)
if args.wf_status == SUCCESS and not has_failed_statuses:
if args.wf_status == SUCCESS and not has_native_failed_status:
sys.exit(0)
else:
sys.exit(1)

View File

@ -296,13 +296,16 @@ class PRInfo:
else:
if "schedule" in github_event:
self.event_type = EventType.SCHEDULE
else:
elif "inputs" in github_event:
# assume this is a dispatch
self.event_type = EventType.DISPATCH
logging.warning(
"event.json does not match pull_request or push:\n%s",
json.dumps(github_event, sort_keys=True, indent=4),
)
print("PR Info:")
print(self)
else:
logging.warning(
"event.json does not match pull_request or push:\n%s",
json.dumps(github_event, sort_keys=True, indent=4),
)
self.sha = os.getenv(
"GITHUB_SHA", "0000000000000000000000000000000000000000"
)

View File

@ -587,11 +587,11 @@ class TestCIConfig(unittest.TestCase):
for job, job_config in ci_cache.jobs_to_do.items():
if job in MOCK_AFFECTED_JOBS:
MOCK_REQUIRED_BUILDS += job_config.required_builds
elif job not in MOCK_AFFECTED_JOBS:
elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await:
ci_cache.jobs_to_wait[job] = job_config
for job, job_config in ci_cache.jobs_to_do.items():
if job_config.reference_job_name:
if job_config.reference_job_name or job_config.disable_await:
# jobs with reference_job_name in config are not supposed to have records in the cache - continue
continue
if job in MOCK_AFFECTED_JOBS:
@ -624,11 +624,76 @@ class TestCIConfig(unittest.TestCase):
+ MOCK_AFFECTED_JOBS
+ MOCK_REQUIRED_BUILDS
)
self.assertTrue(
CI.JobNames.BUILD_CHECK not in ci_cache.jobs_to_wait,
"We must never await on Builds Report",
)
self.assertCountEqual(
list(ci_cache.jobs_to_wait),
[
CI.JobNames.BUILD_CHECK,
]
+ MOCK_REQUIRED_BUILDS,
MOCK_REQUIRED_BUILDS,
)
self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)
def test_ci_py_filters_not_affected_jobs_in_prs_no_builds(self):
"""
checks that ci.py filters out not-affected jobs in PRs when no builds are required
"""
settings = CiSettings()
settings.no_ci_cache = True
pr_info = PRInfo(github_event=_TEST_EVENT_JSON)
pr_info.event_type = EventType.PULL_REQUEST
pr_info.number = 123
assert pr_info.is_pr
ci_cache = CIPY._configure_jobs(
S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True
)
self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list")
assert not ci_cache.jobs_to_wait
assert not ci_cache.jobs_to_skip
MOCK_AFFECTED_JOBS = [
CI.JobNames.FAST_TEST,
]
MOCK_REQUIRED_BUILDS = []
# pretend there are pending jobs that we need to wait
for job, job_config in ci_cache.jobs_to_do.items():
if job in MOCK_AFFECTED_JOBS:
if job_config.required_builds:
MOCK_REQUIRED_BUILDS += job_config.required_builds
elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await:
ci_cache.jobs_to_wait[job] = job_config
for job, job_config in ci_cache.jobs_to_do.items():
if job_config.reference_job_name or job_config.disable_await:
# jobs with reference_job_name in config are not supposed to have records in the cache - continue
continue
if job in MOCK_AFFECTED_JOBS:
continue
for batch in range(job_config.num_batches):
# add any record into cache
record = CiCache.Record(
record_type=random.choice(
[
CiCache.RecordType.FAILED,
CiCache.RecordType.PENDING,
CiCache.RecordType.SUCCESSFUL,
]
),
job_name=job,
job_digest=ci_cache.job_digests[job],
batch=batch,
num_batches=job_config.num_batches,
release_branch=True,
)
for record_t_, records_ in ci_cache.records.items():
if record_t_.value == CiCache.RecordType.FAILED.value:
records_[record.to_str_key()] = record
ci_cache.filter_out_not_affected_jobs()
expected_to_do = MOCK_AFFECTED_JOBS + MOCK_REQUIRED_BUILDS
self.assertCountEqual(
list(ci_cache.jobs_to_wait),
MOCK_REQUIRED_BUILDS,
)
self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)

View File

@ -10,8 +10,8 @@
PARTITION BY toYYYYMM(d) ORDER BY key
</create_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(5000000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(5000000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
<query>SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 8</query>
<query>SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null</query>

View File

@ -1,5 +1,5 @@
<test>
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByChar(' ', materialize(s)) as w from numbers(1000000)</query>
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(1000000)</query>
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(100000)</query>
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(200000)</query>
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(20000)</query>
</test>

View File

@ -24,10 +24,10 @@
<min_insert_block_size_rows>1</min_insert_block_size_rows>
</settings>
<!-- 100 parts -->
<query>INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(100)</query>
<query>INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(1000)</query>
<query>INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(100)</query>
<!-- 50 parts -->
<query>INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(50)</query>
<query>INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(500)</query>
<query>INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(50)</query>
<drop_query>DROP TABLE IF EXISTS hits_wide</drop_query>
<drop_query>DROP TABLE IF EXISTS hits_compact</drop_query>

View File

@ -555,7 +555,7 @@ if args.report == "main":
"Total client time for measured query runs,&nbsp;s", # 2
"Queries", # 3
"Longest query, total for measured runs,&nbsp;s", # 4
"Wall clock time per query,&nbsp;s", # 5
"Average query wall clock time,&nbsp;s", # 5
"Shortest query, total for measured runs,&nbsp;s", # 6
"", # Runs #7
]

View File

@ -8,13 +8,13 @@
40
41
0
41
2 42
2 42
43
0
43
11
11

View File

@ -12,10 +12,10 @@ ORDER BY (primary_key);
INSERT INTO set_array
select
toString(intDiv(number, 1000000)) as primary_key,
toString(intDiv(number, 100000)) as primary_key,
array(number) as index_array
from system.numbers
limit 10000000;
limit 1000000;
OPTIMIZE TABLE set_array FINAL;

View File

@ -0,0 +1,55 @@
-- { echoOn }
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM system.one
GROUP BY '666';
6.666.8
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM remote('127.0.0.{1,1}', 'system.one')
GROUP BY '666';
6.666.8
-- https://github.com/ClickHouse/ClickHouse/issues/63006
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM system.one
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
6 World666666 \N
6 World666666 \N
6 World666666 \N
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM remote('127.0.0.1')
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
6 World666666 \N
6 World666666 \N
6 World666666 \N
-- { echoOn }
SELECT
'%',
tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)),
(toDecimal128(99.67, 6), 36, 61, 14)
FROM dist_03174
WHERE dummy IN (0, '255')
GROUP BY
toNullable(13),
(99.67, 61, toLowCardinality(14));
% ('%11default10113%AS%id_02%10101010') (99.67,36,61,14)
-- { echoOn }
SELECT
38,
concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3)))
FROM set_index_not__fuzz_0
GROUP BY
toNullable(3),
concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3)))
FORMAT Null
SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1;

View File

@ -0,0 +1,80 @@
-- There are various tests that check that group by keys don't propagate into functions replacing const arguments
-- by full (empty) columns
DROP TABLE IF EXISTS dist_03174;
DROP TABLE IF EXISTS set_index_not__fuzz_0;
-- https://github.com/ClickHouse/ClickHouse/issues/63006
SET allow_experimental_analyzer=1;
-- { echoOn }
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM system.one
GROUP BY '666';
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM remote('127.0.0.{1,1}', 'system.one')
GROUP BY '666';
-- https://github.com/ClickHouse/ClickHouse/issues/63006
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM system.one
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM remote('127.0.0.1')
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
-- https://github.com/ClickHouse/ClickHouse/issues/64945
-- { echoOff }
CREATE TABLE dist_03174 AS system.one ENGINE = Distributed(test_cluster_two_shards, system, one, dummy);
-- { echoOn }
SELECT
'%',
tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)),
(toDecimal128(99.67, 6), 36, 61, 14)
FROM dist_03174
WHERE dummy IN (0, '255')
GROUP BY
toNullable(13),
(99.67, 61, toLowCardinality(14));
-- Parallel replicas
-- { echoOff }
CREATE TABLE set_index_not__fuzz_0
(
`name` String,
`status` Enum8('alive' = 0, 'rip' = 1),
INDEX idx_status status TYPE set(2) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY name;
INSERT INTO set_index_not__fuzz_0 SELECT * FROM generateRandom() LIMIT 10;
-- { echoOn }
SELECT
38,
concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3)))
FROM set_index_not__fuzz_0
GROUP BY
toNullable(3),
concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3)))
FORMAT Null
SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1;
-- { echoOff }
DROP TABLE IF EXISTS dist_03174;
DROP TABLE IF EXISTS set_index_not__fuzz_0;

View File

@ -0,0 +1,7 @@
SET allow_experimental_analyzer = 1;
SELECT [1, (x -> 1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT (1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT map(1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT [1, lambda(x, 1)]; -- { serverError UNKNOWN_IDENTIFIER }
SELECT (1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER }
SELECT map(1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER }