Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Commit 2865c8141d: Merge branch 'master' into array-distance-functions
@@ -16,7 +16,6 @@ Checks: '-*,
    modernize-make-unique,
    modernize-raw-string-literal,
    modernize-redundant-void-arg,
    modernize-replace-auto-ptr,
    modernize-replace-random-shuffle,
    modernize-use-bool-literals,
    modernize-use-nullptr,
@@ -145,6 +144,7 @@ Checks: '-*,
    clang-analyzer-cplusplus.SelfAssignment,
    clang-analyzer-deadcode.DeadStores,
    clang-analyzer-cplusplus.Move,
    clang-analyzer-optin.cplusplus.UninitializedObject,
    clang-analyzer-optin.cplusplus.VirtualCall,
    clang-analyzer-security.insecureAPI.UncheckedReturn,
    clang-analyzer-security.insecureAPI.bcmp,
@@ -164,6 +164,8 @@ Checks: '-*,
    clang-analyzer-unix.cstring.NullArg,

    boost-use-to-string,

    alpha.security.cert.env.InvalidPtr,
'
WarningsAsErrors: '*'

@@ -210,3 +212,6 @@ CheckOptions:
    value: false
  - key: performance-move-const-arg.CheckTriviallyCopyableMove
    value: false
  # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097
  - key: readability-identifier-naming.TypeTemplateParameterIgnoredRegexp
    value: expr-type
.gitattributes (vendored, 1 change)
@@ -1,2 +1,3 @@
contrib/* linguist-vendored
*.h linguist-language=C++
tests/queries/0_stateless/data_json/* binary
.github/ISSUE_TEMPLATE/10_question.md (vendored, 2 changes)
@@ -7,6 +7,6 @@ assignees: ''

---

> Make sure to check documentation https://clickhouse.yandex/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse

> If you still prefer GitHub issues, remove all this text and ask your question here.
.github/ISSUE_TEMPLATE/50_build-issue.md (vendored, 2 changes)
@@ -7,7 +7,7 @@ assignees: ''

---

> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.yandex/docs/en/development/build/
> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/

**Operating system**
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 changes)
@@ -1,4 +1,4 @@
Changelog category (leave one):
### Changelog category (leave one):
- New Feature
- Improvement
- Bug Fix (user-visible misbehaviour in official stable or prestable release)
@@ -9,7 +9,7 @@ Changelog category (leave one):
- Not for changelog (changelog entry is not required)

Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
...
.github/workflows/backport_branches.yml (vendored, 43 changes)
@ -9,6 +9,18 @@ on: # yamllint disable-line rule:truthy
|
||||
branches:
|
||||
- 'backport/**'
|
||||
jobs:
|
||||
PythonUnitTests:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Python unit tests
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 -m unittest discover -s . -p '*_test.py'
|
||||
DockerHubPushAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
@ -143,8 +155,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -184,8 +196,8 @@ jobs:
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -229,8 +241,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -274,8 +286,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -319,8 +331,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -341,10 +353,15 @@ jobs:
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
|
||||
${{ toJSON(needs) }}
|
||||
EOF
|
||||
)
|
||||
echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
@ -360,7 +377,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 build_report_check.py "$CHECK_NAME"
|
||||
python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
|
.github/workflows/debug.yml (vendored, 2 changes)
@@ -2,7 +2,7 @@
name: Debug

'on':
  [push, pull_request, release]
  [push, pull_request, release, workflow_dispatch]

jobs:
  DebugInfo:
.github/workflows/master.yml (vendored, 193 changes)
@ -149,7 +149,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
SplitBuildSmokeTest:
|
||||
needs: [BuilderDebSplitted]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -220,8 +219,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -261,8 +260,8 @@ jobs:
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -306,8 +305,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -316,7 +315,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinRelease:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -352,8 +350,53 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinGCC:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_gcc
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -397,8 +440,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -442,8 +485,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -487,8 +530,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -532,8 +575,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -577,8 +620,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -590,7 +633,6 @@ jobs:
|
||||
##########################################################################################
|
||||
BuilderDebSplitted:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -626,8 +668,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -636,7 +678,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinTidy:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -672,8 +713,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -682,7 +723,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinDarwin:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -718,8 +758,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -728,7 +768,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -764,8 +803,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -774,7 +813,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinFreeBSD:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -810,8 +848,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -820,7 +858,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinDarwinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -856,8 +893,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -866,7 +903,6 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinPPC64:
|
||||
needs: [DockerHubPush]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -902,8 +938,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -911,6 +947,34 @@ jobs:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
DockerServerImages:
|
||||
needs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAarch64
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no version info
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_server.py --release-type head
|
||||
python3 docker_server.py --release-type head --no-ubuntu \
|
||||
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
############################################################################################
|
||||
##################################### BUILD REPORTER #######################################
|
||||
############################################################################################
|
||||
BuilderReport:
|
||||
@ -918,6 +982,7 @@ jobs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAarch64
|
||||
- BuilderBinRelease
|
||||
- BuilderBinGCC
|
||||
- BuilderDebAsan
|
||||
- BuilderDebTsan
|
||||
- BuilderDebUBsan
|
||||
@ -927,10 +992,16 @@ jobs:
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
|
||||
${{ toJSON(needs) }}
|
||||
EOF
|
||||
)
|
||||
echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
@ -946,7 +1017,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 build_report_check.py "$CHECK_NAME"
|
||||
python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -2608,6 +2679,40 @@ jobs:
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UnitTestsReleaseGCC:
|
||||
needs: [BuilderBinGCC]
|
||||
runs-on: [self-hosted, fuzzer-unit-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (release-gcc, actions)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Unit test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 unit_tests_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UnitTestsTsan:
|
||||
needs: [BuilderDebTsan]
|
||||
runs-on: [self-hosted, fuzzer-unit-tester]
|
||||
|
.github/workflows/nightly.yml (vendored, 51 changes)
@@ -7,6 +7,7 @@ env:
"on":
  schedule:
    - cron: '13 3 * * *'
  workflow_dispatch:

jobs:
  DockerHubPushAarch64:
@@ -71,3 +72,53 @@ jobs:
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
  BuilderCoverity:
    needs: DockerHubPush
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          BUILD_NAME=coverity
          CACHES_PATH=${{runner.temp}}/../ccaches
          CHECK_NAME=ClickHouse build check (actions)
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          TEMP_PATH=${{runner.temp}}/build_check
          EOF
          echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        id: coverity-checkout
        uses: actions/checkout@v2
        with:
          submodules: 'true'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
      - name: Upload Coverity Analysis
        if: ${{ success() || failure() }}
        run: |
          curl --form token="${COVERITY_TOKEN}" \
            --form email='security+coverity@clickhouse.com' \
            --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tgz" \
            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
            --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
      - name: Cleanup
        if: always()
        run: |
          docker kill "$(docker ps -q)" ||:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
.github/workflows/pull_request.yml (vendored, 244 changes)
@ -4,7 +4,7 @@ env:
|
||||
# Force the stdout and stderr streams to be unbuffered
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
on: # yamllint disable-line rule:truthy
|
||||
on: # yamllint disable-line rule:truthy
|
||||
pull_request:
|
||||
types:
|
||||
- synchronize
|
||||
@ -153,13 +153,19 @@ jobs:
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Fast Test
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE"
|
||||
mkdir "$GITHUB_WORKSPACE"
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.TEMP_PATH }}
|
||||
- name: Fast Test
|
||||
run: |
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py
|
||||
- name: Cleanup
|
||||
@ -272,8 +278,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -317,8 +323,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -362,8 +368,50 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinGCC:
|
||||
needs: [DockerHubPush, FastTest]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_gcc
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -404,8 +452,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -449,8 +497,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -494,8 +542,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -539,8 +587,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -584,8 +632,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -629,8 +677,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -677,8 +725,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -722,8 +770,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -767,8 +815,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -812,8 +860,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -857,8 +905,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -902,8 +950,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -947,8 +995,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -956,6 +1004,34 @@ jobs:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
DockerServerImages:
|
||||
needs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAarch64
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no version info
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_server.py --release-type head --no-push
|
||||
python3 docker_server.py --release-type head --no-push --no-ubuntu \
|
||||
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
############################################################################################
|
||||
##################################### BUILD REPORTER #######################################
|
||||
############################################################################################
|
||||
BuilderReport:
|
||||
@ -963,6 +1039,7 @@ jobs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAarch64
|
||||
- BuilderBinRelease
|
||||
- BuilderBinGCC
|
||||
- BuilderDebAsan
|
||||
- BuilderDebTsan
|
||||
- BuilderDebUBsan
|
||||
@ -973,10 +1050,15 @@ jobs:
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
|
||||
${{ toJSON(needs) }}
|
||||
EOF
|
||||
)
|
||||
echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
@ -992,7 +1074,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 build_report_check.py "$CHECK_NAME"
|
||||
python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -1733,6 +1815,51 @@ jobs:
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
TestsBugfixCheck:
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/tests_bugfix_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Tests bugfix validate check (actions)
|
||||
KILL_TIMEOUT=3600
|
||||
REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Bugfix test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
|
||||
TEMP_PATH="${TEMP_PATH}/integration" \
|
||||
REPORTS_PATH="${REPORTS_PATH}/integration" \
|
||||
python3 integration_test_check.py "Integration tests bugfix validate check" \
|
||||
--validate-bugfix --post-commit-status=file || echo 'ignore exit code'
|
||||
|
||||
TEMP_PATH="${TEMP_PATH}/stateless" \
|
||||
REPORTS_PATH="${REPORTS_PATH}/stateless" \
|
||||
python3 functional_test_check.py "Stateless tests bugfix validate check" "$KILL_TIMEOUT" \
|
||||
--validate-bugfix --post-commit-status=file || echo 'ignore exit code'
|
||||
|
||||
python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
##############################################################################################
|
||||
############################ FUNCTIONAl STATEFUL TESTS #######################################
|
||||
##############################################################################################
|
||||
@ -2763,6 +2890,40 @@ jobs:
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UnitTestsReleaseGCC:
|
||||
needs: [BuilderBinGCC]
|
||||
runs-on: [self-hosted, fuzzer-unit-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (release-gcc, actions)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Unit test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 unit_tests_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill "$(docker ps -q)" ||:
|
||||
docker rm -f "$(docker ps -a -q)" ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UnitTestsTsan:
|
||||
needs: [BuilderDebTsan]
|
||||
runs-on: [self-hosted, fuzzer-unit-tester]
|
||||
@ -3016,6 +3177,7 @@ jobs:
|
||||
needs:
|
||||
- StyleCheck
|
||||
- DockerHubPush
|
||||
- DockerServerImages
|
||||
- CheckLabels
|
||||
- BuilderReport
|
||||
- FastTest
|
||||
|
.github/workflows/release.yml (vendored, 27 changes)
@@ -32,7 +32,32 @@ jobs:
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: ${{runner.temp}}/release_packages/*
          file: ${{runner.temp}}/push_to_artifactory/*
          overwrite: true
          tag: ${{ github.ref }}
          file_glob: true
  ############################################################################################
  ##################################### Docker images #######################################
  ############################################################################################
  DockerServerImages:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # otherwise we will have no version info
      - name: Check docker clickhouse/clickhouse-server building
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type auto --version "${{ github.ref }}"
          python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
        run: |
          docker kill "$(docker ps -q)" ||:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH"
.github/workflows/release_branches.yml (vendored, 40 changes)
@ -146,8 +146,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -187,8 +187,8 @@ jobs:
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -232,8 +232,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -277,8 +277,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -322,8 +322,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -367,8 +367,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -412,8 +412,8 @@ jobs:
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -436,10 +436,16 @@ jobs:
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
|
||||
${{ toJSON(needs) }}
|
||||
EOF
|
||||
)
|
||||
echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
@ -455,7 +461,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 build_report_check.py "$CHECK_NAME"
|
||||
python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
|
CHANGELOG.md (141 changes)
@@ -1,4 +1,139 @@
### ClickHouse release v22.2, 2022-02-17
### Table of Contents
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
**[Changelog for 2021](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2021.md)**<br>

## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17

#### Backward Incompatible Change

* Make the `arrayCompact` function behave like other higher-order functions: perform compaction not on lambda function results but on the original array. If you're using nontrivial lambda functions in `arrayCompact`, you may restore the old behaviour by wrapping the `arrayCompact` arguments into `arrayMap` (see the illustrative queries after this list). Closes [#34010](https://github.com/ClickHouse/ClickHouse/issues/34010) [#18535](https://github.com/ClickHouse/ClickHouse/issues/18535) [#14778](https://github.com/ClickHouse/ClickHouse/issues/14778). [#34795](https://github.com/ClickHouse/ClickHouse/pull/34795) ([Alexandre Snarskii](https://github.com/snar)).
* Change implementation-specific behavior on overflow of function `toDatetime`. It will be saturated to the nearest min/max supported instant of datetime instead of wraparound. This change is highlighted as "backward incompatible" because someone may unintentionally rely on the old behavior. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).
* Make functions `cast(value, 'IPv4')` and `cast(value, 'IPv6')` behave the same as the `toIPv4` and `toIPv6` functions. Changed the behavior for incorrect IP addresses passed into the functions `toIPv4` and `toIPv6`: an exception is now raised for an invalid IP address, whereas these functions previously returned a default value. Added functions `IPv4StringToNumOrDefault`, `IPv4StringToNumOrNull`, `IPv6StringToNumOrDefault`, `IPv6StringOrNull`, `toIPv4OrDefault`, `toIPv4OrNull`, `toIPv6OrDefault`, `toIPv6OrNull`. The functions `IPv4StringToNumOrDefault`, `toIPv4OrDefault`, `toIPv6OrDefault` should be used if previous logic relied on `IPv4StringToNum`, `toIPv4`, `toIPv6` returning a default value for an invalid address. Added the setting `cast_ipv4_ipv6_default_on_conversion_error`; if it is enabled, IP address conversion functions behave as before. Closes [#22825](https://github.com/ClickHouse/ClickHouse/issues/22825). Closes [#5799](https://github.com/ClickHouse/ClickHouse/issues/5799). Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#35240](https://github.com/ClickHouse/ClickHouse/pull/35240) ([Maksim Kita](https://github.com/kitaisreal)).

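The following queries are a minimal illustration of the two behaviour changes above, assuming ClickHouse 22.3 syntax; the literal values and expected results are only for illustration and are not part of the original changelog:

```sql
-- arrayCompact now collapses consecutive duplicates of the original array values;
-- to compact by the result of a lambda (the old behaviour), apply arrayMap first.
SELECT arrayCompact([1, 1, 2, 2, 1]);                        -- [1, 2, 1]
SELECT arrayCompact(arrayMap(x -> x % 2, [1, 1, 2, 2, 1]));  -- [1, 0, 1]

-- toIPv4 now throws on invalid input; the *OrDefault / *OrNull variants
-- (or the cast_ipv4_ipv6_default_on_conversion_error setting) restore the old behaviour.
SELECT toIPv4OrDefault('not an ip');  -- 0.0.0.0
SELECT toIPv4OrNull('not an ip');     -- NULL
```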
#### New Feature

* Support for caching data locally for remote filesystems. It can be enabled for `s3` disks. Closes [#28961](https://github.com/ClickHouse/ClickHouse/issues/28961). [#33717](https://github.com/ClickHouse/ClickHouse/pull/33717) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, we enabled the test suite on the s3 filesystem and no known issues remain, so it is becoming production ready.
* Add new table function `hive`. It can be used as follows: `hive('<hive metastore url>', '<hive database>', '<hive table name>', '<columns definition>', '<partition columns>')`, for example `SELECT * FROM hive('thrift://hivetest:9083', 'test', 'demo', 'id Nullable(String), score Nullable(Int32), day Nullable(String)', 'day')`. [#34946](https://github.com/ClickHouse/ClickHouse/pull/34946) ([lgbo](https://github.com/lgbo-ustc)).
* Support authentication of users connected via SSL by their X.509 certificate. [#31484](https://github.com/ClickHouse/ClickHouse/pull/31484) ([eungenue](https://github.com/eungenue)).
* Support schema inference for inserting into table functions `file`/`hdfs`/`s3`/`url`. [#34732](https://github.com/ClickHouse/ClickHouse/pull/34732) ([Kruglov Pavel](https://github.com/Avogar)).
* Now you can read the `system.zookeeper` table without restrictions on path or by using a `like` expression. These reads can generate quite a heavy load on ZooKeeper, so to enable this ability you have to enable the setting `allow_unrestricted_reads_from_keeper`. [#34609](https://github.com/ClickHouse/ClickHouse/pull/34609) ([Sergei Trifonov](https://github.com/serxa)).
* Display CPU and memory metrics in clickhouse-local. Closes [#34545](https://github.com/ClickHouse/ClickHouse/issues/34545). [#34605](https://github.com/ClickHouse/ClickHouse/pull/34605) ([李扬](https://github.com/taiyang-li)).
* Implement `startsWith` and `endsWith` functions for arrays (see the example after this list). Closes [#33982](https://github.com/ClickHouse/ClickHouse/issues/33982). [#34368](https://github.com/ClickHouse/ClickHouse/pull/34368) ([usurai](https://github.com/usurai)).
* Add three functions for the Map data type: 1. `mapReplace(map1, map2)` - replaces values for keys in map1 with the values of the corresponding keys in map2 and adds keys from map2 that don't exist in map1; 2. `mapFilter`; 3. `mapMap`. `mapFilter` and `mapMap` are higher-order functions accepting two arguments: the first is a lambda function that takes a (k, v) pair, and the second is a column of type Map. [#33698](https://github.com/ClickHouse/ClickHouse/pull/33698) ([hexiaoting](https://github.com/hexiaoting)).
* Allow getting default user and password for clickhouse-client from the `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables. Closes [#34538](https://github.com/ClickHouse/ClickHouse/issues/34538). [#34947](https://github.com/ClickHouse/ClickHouse/pull/34947) ([DR](https://github.com/freedomDR)).

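As a small, hypothetical illustration of the new array support in `startsWith`/`endsWith` (the values below are invented for the example):

```sql
-- startsWith/endsWith now also accept arrays and check for a prefix/suffix.
SELECT startsWith([1, 2, 3, 4], [1, 2]);   -- 1
SELECT endsWith(['a', 'b', 'c'], ['c']);   -- 1
SELECT startsWith([1, 2, 3], [2, 3]);      -- 0
```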
#### Experimental Feature

* New data type `Object(<schema_format>)`, which supports storing semi-structured data (for now JSON only). Data is written to such types as a string. Then all paths are extracted according to the format of the semi-structured data and written as separate columns in the most optimal types that can store all their values. Those columns can be queried by names that match paths in the source data, e.g. `data.key1.key2`, or with the cast operator, `data.key1.key2::Int64` (a usage sketch follows this list).
* Add `database_replicated_allow_only_replicated_engine` setting. When enabled, it is only allowed to create `Replicated` tables or tables with stateless engines in `Replicated` databases. [#35214](https://github.com/ClickHouse/ClickHouse/pull/35214) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). Note that the `Replicated` database engine is still an experimental feature.

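A rough usage sketch for the experimental `Object` data type; the table definition, the `allow_experimental_object_type` gate and the insert syntax are assumptions for illustration rather than part of the changelog:

```sql
-- Hypothetical example: store semi-structured JSON and query extracted paths.
SET allow_experimental_object_type = 1;

CREATE TABLE events (data Object('json')) ENGINE = MergeTree ORDER BY tuple();

-- Data is provided as a JSON string; the extracted paths become typed subcolumns.
INSERT INTO events VALUES ('{"key1": {"key2": 42}}');

SELECT data.key1.key2, data.key1.key2::Int64 FROM events;
```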
#### Performance Improvement

* Improve performance of insertion into `MergeTree` tables by optimizing sorting. Up to 2x improvement is observed on realistic benchmarks. [#34750](https://github.com/ClickHouse/ClickHouse/pull/34750) ([Maksim Kita](https://github.com/kitaisreal)).
* Columns pruning when reading Parquet, ORC and Arrow files from URL and S3. Closes [#34163](https://github.com/ClickHouse/ClickHouse/issues/34163). [#34849](https://github.com/ClickHouse/ClickHouse/pull/34849) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Columns pruning when reading Parquet, ORC and Arrow files from Hive. [#34954](https://github.com/ClickHouse/ClickHouse/pull/34954) ([lgbo](https://github.com/lgbo-ustc)).
* A bunch of performance optimizations from a performance superhero. Improve performance of processing queries with large `IN` section. Improve performance of `direct` dictionary if its source is `ClickHouse`. Improve performance of `detectCharset`, `detectLanguageUnknown` functions. [#34888](https://github.com/ClickHouse/ClickHouse/pull/34888) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of `any` aggregate function by using more batching. [#34760](https://github.com/ClickHouse/ClickHouse/pull/34760) ([Raúl Marín](https://github.com/Algunenano)).
* Multiple improvements for performance of `clickhouse-keeper`: less locking [#35010](https://github.com/ClickHouse/ClickHouse/pull/35010) ([zhanglistar](https://github.com/zhanglistar)), lower memory usage by streaming reading and writing of snapshot instead of full copy. [#34584](https://github.com/ClickHouse/ClickHouse/pull/34584) ([zhanglistar](https://github.com/zhanglistar)), optimizing compaction of log store in the RAFT implementation. [#34534](https://github.com/ClickHouse/ClickHouse/pull/34534) ([zhanglistar](https://github.com/zhanglistar)), versioning of the internal data structure [#34486](https://github.com/ClickHouse/ClickHouse/pull/34486) ([zhanglistar](https://github.com/zhanglistar)).

#### Improvement
|
||||
|
||||
* Allow asynchronous inserts to table functions. Fixes [#34864](https://github.com/ClickHouse/ClickHouse/issues/34864). [#34866](https://github.com/ClickHouse/ClickHouse/pull/34866) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Implicit type casting of the key argument for functions `dictGetHierarchy`, `dictIsIn`, `dictGetChildren`, `dictGetDescendants`. Closes [#34970](https://github.com/ClickHouse/ClickHouse/issues/34970). [#35027](https://github.com/ClickHouse/ClickHouse/pull/35027) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* `EXPLAIN AST` query can output AST in form of a graph in Graphviz format: `EXPLAIN AST graph = 1 SELECT * FROM system.parts`. [#35173](https://github.com/ClickHouse/ClickHouse/pull/35173) ([李扬](https://github.com/taiyang-li)).
|
||||
* When large files were written with `s3` table function or table engine, the content type on the files was mistakenly set to `application/xml` due to a bug in the AWS SDK. This closes [#33964](https://github.com/ClickHouse/ClickHouse/issues/33964). [#34433](https://github.com/ClickHouse/ClickHouse/pull/34433) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Change restrictive row policies a bit to make them an easier alternative to permissive policies in easy cases. If for a particular table only restrictive policies exist (without permissive policies) users will be able to see some rows. Also `SHOW CREATE ROW POLICY` will always show `AS permissive` or `AS restrictive` in row policy's definition. [#34596](https://github.com/ClickHouse/ClickHouse/pull/34596) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Improve schema inference with globs in File/S3/HDFS/URL engines. Try to use the next path for schema inference in case of error. [#34465](https://github.com/ClickHouse/ClickHouse/pull/34465) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Play UI now correctly detects the preferred light/dark theme from the OS. [#35068](https://github.com/ClickHouse/ClickHouse/pull/35068) ([peledni](https://github.com/peledni)).
|
||||
* Added `date_time_input_format = 'best_effort_us'`. Closes [#34799](https://github.com/ClickHouse/ClickHouse/issues/34799). [#34982](https://github.com/ClickHouse/ClickHouse/pull/34982) ([WenYao](https://github.com/Cai-Yao)).
|
||||
* New settings `allow_plaintext_password` and `allow_no_password` were added to the server configuration; they turn on/off authentication types that can be potentially insecure in some environments. Both are allowed by default. [#34738](https://github.com/ClickHouse/ClickHouse/pull/34738) ([Heena Bansal](https://github.com/HeenaBansal2009)).
|
||||
* Support for `DateTime64` data type in `Arrow` format, closes [#8280](https://github.com/ClickHouse/ClickHouse/issues/8280) and closes [#28574](https://github.com/ClickHouse/ClickHouse/issues/28574). [#34561](https://github.com/ClickHouse/ClickHouse/pull/34561) ([李扬](https://github.com/taiyang-li)).
|
||||
* Reload `remote_url_allow_hosts` (filtering of outgoing connections) on config update. [#35294](https://github.com/ClickHouse/ClickHouse/pull/35294) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Support `--testmode` parameter for `clickhouse-local`. This parameter enables interpretation of test hints that we use in functional tests. [#35264](https://github.com/ClickHouse/ClickHouse/pull/35264) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Add `distributed_depth` to query log. It is like a more detailed variant of `is_initial_query` [#35207](https://github.com/ClickHouse/ClickHouse/pull/35207) ([李扬](https://github.com/taiyang-li)).
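  A hedged example of inspecting it (assuming the new column is exposed as `distributed_depth` in `system.query_log`):

  ```sql
  SELECT query, is_initial_query, distributed_depth
  FROM system.query_log
  WHERE event_date = today() AND type = 'QueryFinish'
  LIMIT 10;
  ```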
|
||||
* Respect `remote_url_allow_hosts` for `MySQL` and `PostgreSQL` table functions. [#35191](https://github.com/ClickHouse/ClickHouse/pull/35191) ([Heena Bansal](https://github.com/HeenaBansal2009)).
|
||||
* Added `disk_name` field to `system.part_log`. [#35178](https://github.com/ClickHouse/ClickHouse/pull/35178) ([Artyom Yurkov](https://github.com/Varinara)).
|
||||
* Do not retry non-retriable errors when querying remote URLs. Closes [#35161](https://github.com/ClickHouse/ClickHouse/issues/35161). [#35172](https://github.com/ClickHouse/ClickHouse/pull/35172) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Support distributed `INSERT SELECT` queries (the setting `parallel_distributed_insert_select`) with the `view()` table function. [#35132](https://github.com/ClickHouse/ClickHouse/pull/35132) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* More precise memory tracking during `INSERT` into `Buffer` with `AggregateFunction`. [#35072](https://github.com/ClickHouse/ClickHouse/pull/35072) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Avoid division by zero in Query Profiler if Linux kernel has a bug. Closes [#34787](https://github.com/ClickHouse/ClickHouse/issues/34787). [#35032](https://github.com/ClickHouse/ClickHouse/pull/35032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add more sanity checks for Keeper configuration: mixing localhost and non-local servers is now not allowed, and a check was added that the internal Raft port and the Keeper client port are not set to the same value. [#35004](https://github.com/ClickHouse/ClickHouse/pull/35004) ([alesapin](https://github.com/alesapin)).
|
||||
* Previously, if the user changed the settings of the system tables, ClickHouse produced a large number of log messages and renamed the tables every minute. This is no longer the case. This fixes [#34929](https://github.com/ClickHouse/ClickHouse/issues/34929). [#34949](https://github.com/ClickHouse/ClickHouse/pull/34949) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Use connection pool for Hive metastore client. [#34940](https://github.com/ClickHouse/ClickHouse/pull/34940) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Ignore per-column `TTL` in `CREATE TABLE AS` if new table engine does not support it (i.e. if the engine is not of `MergeTree` family). [#34938](https://github.com/ClickHouse/ClickHouse/pull/34938) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Allow `LowCardinality` strings for `ngrambf_v1`/`tokenbf_v1` indexes. Closes [#21865](https://github.com/ClickHouse/ClickHouse/issues/21865). [#34911](https://github.com/ClickHouse/ClickHouse/pull/34911) ([Lars Hiller Eidnes](https://github.com/larspars)).
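  A minimal sketch of a table definition that previously could not be created (hypothetical table and column names):

  ```sql
  CREATE TABLE logs
  (
      message LowCardinality(String),
      INDEX idx_message message TYPE tokenbf_v1(512, 3, 0) GRANULARITY 4
  )
  ENGINE = MergeTree
  ORDER BY tuple();
  ```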
|
||||
* Allow opening empty sqlite db if the file doesn't exist. Closes [#33367](https://github.com/ClickHouse/ClickHouse/issues/33367). [#34907](https://github.com/ClickHouse/ClickHouse/pull/34907) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Implement memory statistics for FreeBSD - this is required for `max_server_memory_usage` to work correctly. [#34902](https://github.com/ClickHouse/ClickHouse/pull/34902) ([Alexandre Snarskii](https://github.com/snar)).
|
||||
* In previous versions the progress bar in clickhouse-client could jump forward by nearly 50% for no reason. This closes [#34324](https://github.com/ClickHouse/ClickHouse/issues/34324). [#34801](https://github.com/ClickHouse/ClickHouse/pull/34801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Now `ALTER TABLE DROP COLUMN columnX` queries for `MergeTree` table engines will work instantly when `columnX` is an `ALIAS` column. Fixes [#34660](https://github.com/ClickHouse/ClickHouse/issues/34660). [#34786](https://github.com/ClickHouse/ClickHouse/pull/34786) ([alesapin](https://github.com/alesapin)).
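  For example (hypothetical table), dropping an `ALIAS` column is now a metadata-only change:

  ```sql
  CREATE TABLE t (x UInt64, x2 UInt64 ALIAS x * 2) ENGINE = MergeTree ORDER BY x;
  ALTER TABLE t DROP COLUMN x2;  -- completes instantly, no data rewrite
  ```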
|
||||
* Show hints when user mistyped the name of a data skipping index. Closes [#29698](https://github.com/ClickHouse/ClickHouse/issues/29698). [#34764](https://github.com/ClickHouse/ClickHouse/pull/34764) ([flynn](https://github.com/ucasfl)).
|
||||
* Support `remote()`/`cluster()` table functions for `parallel_distributed_insert_select`. [#34728](https://github.com/ClickHouse/ClickHouse/pull/34728) ([Azat Khuzhin](https://github.com/azat)).
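  A hedged sketch (hypothetical shard addresses and table names) of a distributed `INSERT SELECT` expressed through the `remote()` table function:

  ```sql
  -- With parallel_distributed_insert_select = 2 each shard inserts its local data
  -- directly, instead of funnelling everything through the initiator.
  INSERT INTO FUNCTION remote('127.0.0.{1,2}', default.dst)
  SELECT * FROM remote('127.0.0.{1,2}', default.src)
  SETTINGS parallel_distributed_insert_select = 2;
  ```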
|
||||
* Do not reset logging that is configured via the `--log-file`/`--errorlog-file` command line options in case of an empty configuration in the config file. [#34718](https://github.com/ClickHouse/ClickHouse/pull/34718) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Extract schema only once on table creation and prevent reading from local files/external sources to extract schema on each server startup. [#34684](https://github.com/ClickHouse/ClickHouse/pull/34684) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Allow specifying argument names for executable UDFs. This is necessary for formats where argument name is part of serialization, like `Native`, `JSONEachRow`. Closes [#34604](https://github.com/ClickHouse/ClickHouse/issues/34604). [#34653](https://github.com/ClickHouse/ClickHouse/pull/34653) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* `MaterializedMySQL` (experimental feature) now supports `materialized_mysql_tables_list` (a comma-separated list of MySQL database tables, which will be replicated by the MaterializedMySQL database engine. Default value: empty list — means all the tables will be replicated), mentioned at [#32977](https://github.com/ClickHouse/ClickHouse/issues/32977). [#34487](https://github.com/ClickHouse/ClickHouse/pull/34487) ([zzsmdfj](https://github.com/zzsmdfj)).
|
||||
* Improve OpenTelemetry span logs for INSERT operation on distributed table. [#34480](https://github.com/ClickHouse/ClickHouse/pull/34480) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Make the znode `ctime` and `mtime` consistent between servers in ClickHouse Keeper. [#33441](https://github.com/ClickHouse/ClickHouse/pull/33441) ([小路](https://github.com/nicelulu)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
|
||||
* Package repository is migrated to JFrog Artifactory (**Mikhail f. Shiryaev**).
|
||||
* Randomize some settings in functional tests, so more possible combinations of settings will be tested. This is yet another fuzzing method to ensure better test coverage. This closes [#32268](https://github.com/ClickHouse/ClickHouse/issues/32268). [#34092](https://github.com/ClickHouse/ClickHouse/pull/34092) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Drop PVS-Studio from our CI. [#34680](https://github.com/ClickHouse/ClickHouse/pull/34680) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Add an ability to build stripped binaries with CMake. In previous versions it was performed by dh-tools. [#35196](https://github.com/ClickHouse/ClickHouse/pull/35196) ([alesapin](https://github.com/alesapin)).
|
||||
* Smaller "fat-free" `clickhouse-keeper` build. [#35031](https://github.com/ClickHouse/ClickHouse/pull/35031) ([alesapin](https://github.com/alesapin)).
|
||||
* Use @robot-clickhouse as an author and committer for PRs like https://github.com/ClickHouse/ClickHouse/pull/34685. [#34793](https://github.com/ClickHouse/ClickHouse/pull/34793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Limit DWARF version for debug info by 4 max, because our internal stack symbolizer cannot parse DWARF version 5. This makes sense if you compile ClickHouse with clang-15. [#34777](https://github.com/ClickHouse/ClickHouse/pull/34777) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove the `clickhouse-test` debian package as an unneeded complication. CI uses tests from the repository, and standalone testing via the deb package is no longer supported. [#34606](https://github.com/ClickHouse/ClickHouse/pull/34606) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
|
||||
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
|
||||
|
||||
* A fix for HDFS integration: when the inner buffer size is too small, NEED_MORE_INPUT in `HadoopSnappyDecoder` runs multiple times (>= 3) for one compressed block, which caused the input data to be copied into the wrong place in `HadoopSnappyDecoder::buffer`. [#35116](https://github.com/ClickHouse/ClickHouse/pull/35116) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Ignore obsolete grants in ATTACH GRANT statements. This PR fixes [#34815](https://github.com/ClickHouse/ClickHouse/issues/34815). [#34855](https://github.com/ClickHouse/ClickHouse/pull/34855) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix segfault in Postgres database when getting create table query if database was created using named collections. Closes [#35312](https://github.com/ClickHouse/ClickHouse/issues/35312). [#35313](https://github.com/ClickHouse/ClickHouse/pull/35313) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix partial merge join duplicate rows bug, close [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009). [#35311](https://github.com/ClickHouse/ClickHouse/pull/35311) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix possible `Assertion 'position() != working_buffer.end()' failed` while using bzip2 [#35300](https://github.com/ClickHouse/ClickHouse/pull/35300), lz4 [#35296](https://github.com/ClickHouse/ClickHouse/pull/35296), lzma [#35295](https://github.com/ClickHouse/ClickHouse/pull/35295), or `brotli` [#35281](https://github.com/ClickHouse/ClickHouse/pull/35281) compression with a small `max_read_buffer_size` setting value. The bug was found in https://github.com/ClickHouse/ClickHouse/pull/35047. ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix possible segfault in `JSONEachRow` schema inference. [#35291](https://github.com/ClickHouse/ClickHouse/pull/35291) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix `CHECK TABLE` query in case when sparse columns are enabled in table. [#35274](https://github.com/ClickHouse/ClickHouse/pull/35274) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Avoid std::terminate in case of exception in reading from remote VFS. [#35257](https://github.com/ClickHouse/ClickHouse/pull/35257) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix reading port from config, close [#34776](https://github.com/ClickHouse/ClickHouse/issues/34776). [#35193](https://github.com/ClickHouse/ClickHouse/pull/35193) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix error in query with `WITH TOTALS` in case if `HAVING` returned empty result. This fixes [#33711](https://github.com/ClickHouse/ClickHouse/issues/33711). [#35186](https://github.com/ClickHouse/ClickHouse/pull/35186) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix a corner case of `replaceRegexpAll`, close [#35117](https://github.com/ClickHouse/ClickHouse/issues/35117). [#35182](https://github.com/ClickHouse/ClickHouse/pull/35182) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Schema inference didn't work properly in the case of `INSERT INTO FUNCTION s3(...) FROM ...`: it tried to read the schema from the s3 file instead of from the select query. [#35176](https://github.com/ClickHouse/ClickHouse/pull/35176) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix MaterializedPostgreSQL (experimental feature) `table overrides` for partition by, etc. Closes [#35048](https://github.com/ClickHouse/ClickHouse/issues/35048). [#35162](https://github.com/ClickHouse/ClickHouse/pull/35162) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix MaterializedPostgreSQL (experimental feature) adding new table to replication (ATTACH TABLE) after manually removing (DETACH TABLE). Closes [#33800](https://github.com/ClickHouse/ClickHouse/issues/33800). Closes [#34922](https://github.com/ClickHouse/ClickHouse/issues/34922). Closes [#34315](https://github.com/ClickHouse/ClickHouse/issues/34315). [#35158](https://github.com/ClickHouse/ClickHouse/pull/35158) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix partition pruning error when non-monotonic function is used with IN operator. This fixes [#35136](https://github.com/ClickHouse/ClickHouse/issues/35136). [#35146](https://github.com/ClickHouse/ClickHouse/pull/35146) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fixed slightly incorrect translation of YAML configs to XML. [#35135](https://github.com/ClickHouse/ClickHouse/pull/35135) ([Miel Donkers](https://github.com/mdonkers)).
|
||||
* Fix `optimize_skip_unused_shards_rewrite_in` for signed columns and negative values. [#35134](https://github.com/ClickHouse/ClickHouse/pull/35134) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* The `update_lag` external dictionary configuration option was unusable showing the error message ``Unexpected key `update_lag` in dictionary source configuration``. [#35089](https://github.com/ClickHouse/ClickHouse/pull/35089) ([Jason Chu](https://github.com/1lann)).
|
||||
* Avoid possible deadlock on server shutdown. [#35081](https://github.com/ClickHouse/ClickHouse/pull/35081) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix missing alias after function is optimized to a subcolumn when setting `optimize_functions_to_subcolumns` is enabled. Closes [#33798](https://github.com/ClickHouse/ClickHouse/issues/33798). [#35079](https://github.com/ClickHouse/ClickHouse/pull/35079) ([qieqieplus](https://github.com/qieqieplus)).
|
||||
* Fix reading from `system.asynchronous_inserts` table if there exists asynchronous insert into table function. [#35050](https://github.com/ClickHouse/ClickHouse/pull/35050) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix possible exception `Reading for MergeTree family tables must be done with last position boundary` (relevant to operation on remote VFS). Closes [#34979](https://github.com/ClickHouse/ClickHouse/issues/34979). [#35001](https://github.com/ClickHouse/ClickHouse/pull/35001) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix unexpected result when using a `-State` aggregate function in a window frame. [#34999](https://github.com/ClickHouse/ClickHouse/pull/34999) ([metahys](https://github.com/metahys)).
|
||||
* Fix possible segfault in FileLog (experimental feature). Closes [#30749](https://github.com/ClickHouse/ClickHouse/issues/30749). [#34996](https://github.com/ClickHouse/ClickHouse/pull/34996) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible rare error `Cannot push block to port which already has data`. [#34993](https://github.com/ClickHouse/ClickHouse/pull/34993) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix wrong schema inference for unquoted dates in CSV. Closes [#34768](https://github.com/ClickHouse/ClickHouse/issues/34768). [#34961](https://github.com/ClickHouse/ClickHouse/pull/34961) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Integration with Hive: fix unexpected result when using `IN` in `WHERE` in a Hive query. [#34945](https://github.com/ClickHouse/ClickHouse/pull/34945) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Avoid busy polling in ClickHouse Keeper while searching for changelog files to delete. [#34931](https://github.com/ClickHouse/ClickHouse/pull/34931) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix DateTime64 conversion from PostgreSQL. Closes [#33364](https://github.com/ClickHouse/ClickHouse/issues/33364). [#34910](https://github.com/ClickHouse/ClickHouse/pull/34910) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible "Part directory doesn't exist" during `INSERT` into MergeTree table backed by VFS over s3. [#34876](https://github.com/ClickHouse/ClickHouse/pull/34876) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Support executing DDL queries such as `CREATE USER` on a cross-replicated cluster. [#34860](https://github.com/ClickHouse/ClickHouse/pull/34860) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||
* Fix bugs for multiple columns group by in `WindowView` (experimental feature). [#34859](https://github.com/ClickHouse/ClickHouse/pull/34859) ([vxider](https://github.com/Vxider)).
|
||||
* Fix possible failures in S2 functions when queries contain const columns. [#34745](https://github.com/ClickHouse/ClickHouse/pull/34745) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Fix bug for H3 funcs containing const columns which cause queries to fail. [#34743](https://github.com/ClickHouse/ClickHouse/pull/34743) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Fix `No such file or directory` with enabled `fsync_part_directory` and vertical merge. [#34739](https://github.com/ClickHouse/ClickHouse/pull/34739) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix serialization/printing for system queries `RELOAD MODEL`, `RELOAD FUNCTION`, `RESTART DISK` when used `ON CLUSTER`. Closes [#34514](https://github.com/ClickHouse/ClickHouse/issues/34514). [#34696](https://github.com/ClickHouse/ClickHouse/pull/34696) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix `allow_experimental_projection_optimization` with `enable_global_with_statement` (before, it could lead to a `Stack size too large` error in case of multiple expressions in the `WITH` clause, and it also executed scalar subqueries again and again, so now it is more optimal). [#34650](https://github.com/ClickHouse/ClickHouse/pull/34650) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Stop selecting a part for mutation when the other replica has already updated the transaction log, for the `ReplicatedMergeTree` engine. [#34633](https://github.com/ClickHouse/ClickHouse/pull/34633) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||
* Fix incorrect result of trivial count query when part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
|
||||
* Fix inconsistency of `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).
|
||||
|
||||
|
||||
### <a id="222"></a> ClickHouse release v22.2, 2022-02-17
|
||||
|
||||
#### Upgrade Notes
|
||||
|
||||
@ -174,7 +309,7 @@
|
||||
* This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
|
||||
|
||||
### ClickHouse release v22.1, 2022-01-18
|
||||
### <a id="221"></a> ClickHouse release v22.1, 2022-01-18
|
||||
|
||||
#### Upgrade Notes
|
||||
|
||||
@ -232,7 +367,7 @@
|
||||
|
||||
#### Improvement
|
||||
|
||||
* Now date time conversion functions that generate a time before `1970-01-01 00:00:00` will saturate to zero instead of overflowing. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if a date truncation function would yield a result before the Unix epoch.
|
||||
* Always display resource usage (total CPU usage, total RAM usage and max RAM usage per host) in client. [#33271](https://github.com/ClickHouse/ClickHouse/pull/33271) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Improve `Bool` type serialization and deserialization, check the range of values. [#32984](https://github.com/ClickHouse/ClickHouse/pull/32984) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* If an invalid setting is defined using the `SET` query or using the query parameters in the HTTP request, error message will contain suggestions that are similar to the invalid setting string (if any exists). [#32946](https://github.com/ClickHouse/ClickHouse/pull/32946) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
@ -222,6 +222,12 @@ else ()
|
||||
set(NO_WHOLE_ARCHIVE --no-whole-archive)
|
||||
endif ()
|
||||
|
||||
option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON)
|
||||
if (OS_DARWIN)
|
||||
# Disable the curl, azure, sentry build on MacOS
|
||||
set (ENABLE_CURL_BUILD OFF)
|
||||
endif ()
|
||||
|
||||
# Ignored if `lld` is used
|
||||
option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")
|
||||
|
||||
@ -248,7 +254,9 @@ endif()
|
||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
set(USE_DEBUG_HELPERS ON)
|
||||
endif()
|
||||
|
||||
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
|
||||
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
|
||||
|
||||
# Create BuildID when using lld. For other linkers it is created by default.
|
||||
if (LINKER_NAME MATCHES "lld$")
|
||||
@ -259,13 +267,16 @@ endif ()
|
||||
# Add a section with the hash of the compiled machine code for integrity checks.
|
||||
# Only for official builds, because adding a section can be time consuming (rewrite of several GB).
|
||||
# And cross compiled binaries are not supported (since you cannot execute clickhouse hash-binary)
|
||||
if (OBJCOPY_PATH AND YANDEX_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE))
|
||||
set (USE_BINARY_HASH 1)
|
||||
if (OBJCOPY_PATH AND CLICKHOUSE_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE OR CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64.cmake$"))
|
||||
set (USE_BINARY_HASH 1 CACHE STRING "Calculate binary hash and store it in the separate section")
|
||||
endif ()
|
||||
|
||||
# Allows to build stripped binary in a separate directory
|
||||
if (OBJCOPY_PATH AND READELF_PATH)
|
||||
set(BUILD_STRIPPED_BINARIES_PREFIX "" CACHE STRING "Build stripped binaries with debug info in separate directory")
|
||||
if (OBJCOPY_PATH AND STRIP_PATH)
|
||||
option(INSTALL_STRIPPED_BINARIES "Build stripped binaries with debug info in separate directory" OFF)
|
||||
if (INSTALL_STRIPPED_BINARIES)
|
||||
set(STRIPPED_BINARIES_OUTPUT "stripped" CACHE STRING "A separate directory for stripped information")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd
|
||||
@ -289,14 +300,28 @@ include(cmake/cpu_features.cmake)
|
||||
# Enable it explicitly.
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
|
||||
|
||||
# Reproducible builds
|
||||
# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE().
|
||||
option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON)
|
||||
# Reproducible builds.
|
||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
set (ENABLE_BUILD_PATH_MAPPING_DEFAULT OFF)
|
||||
else ()
|
||||
set (ENABLE_BUILD_PATH_MAPPING_DEFAULT ON)
|
||||
endif ()
|
||||
|
||||
option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})
|
||||
|
||||
if (ENABLE_BUILD_PATH_MAPPING)
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
|
||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
|
||||
if (ENABLE_BUILD_PROFILING)
|
||||
if (COMPILER_CLANG)
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
|
||||
else ()
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
|
||||
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
|
||||
|
@ -15,7 +15,7 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
| 20.x | :x: |
|
||||
| 21.1 | :x: |
|
||||
| 21.2 | :x: |
|
||||
| 21.3 | ✅ |
|
||||
| 21.3 | :x: |
|
||||
| 21.4 | :x: |
|
||||
| 21.5 | :x: |
|
||||
| 21.6 | :x: |
|
||||
@ -23,9 +23,11 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
| 21.8 | ✅ |
|
||||
| 21.9 | :x: |
|
||||
| 21.10 | :x: |
|
||||
| 21.11 | ✅ |
|
||||
| 21.12 | ✅ |
|
||||
| 21.11 | :x: |
|
||||
| 21.12 | :x: |
|
||||
| 22.1 | ✅ |
|
||||
| 22.2 | ✅ |
|
||||
| 22.3 | ✅ |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
|
@ -2,6 +2,7 @@ set (SRCS
|
||||
argsToConfig.cpp
|
||||
coverage.cpp
|
||||
demangle.cpp
|
||||
getAvailableMemoryAmount.cpp
|
||||
getFQDNOrHostName.cpp
|
||||
getMemoryAmount.cpp
|
||||
getPageSize.cpp
|
||||
|
@ -46,9 +46,9 @@ struct StringRef
|
||||
|
||||
constexpr StringRef(const char * data_, size_t size_) : data(data_), size(size_) {}
|
||||
|
||||
StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
|
||||
StringRef(const std::string & s) : data(s.data()), size(s.size()) {} /// NOLINT
|
||||
constexpr explicit StringRef(std::string_view s) : data(s.data()), size(s.size()) {}
|
||||
constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
|
||||
constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {} /// NOLINT
|
||||
constexpr StringRef() = default;
|
||||
|
||||
std::string toString() const { return std::string(data, size); }
|
||||
|
44
base/base/getAvailableMemoryAmount.cpp
Normal file
@ -0,0 +1,44 @@
|
||||
#include <stdexcept>
|
||||
#include <fstream>
|
||||
#include <base/getAvailableMemoryAmount.h>
|
||||
#include <base/getPageSize.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/param.h>
|
||||
#if defined(BSD)
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/vmmeter.h>
|
||||
#endif
|
||||
|
||||
|
||||
uint64_t getAvailableMemoryAmountOrZero()
|
||||
{
|
||||
#if defined(_SC_PHYS_PAGES) // linux
|
||||
return getPageSize() * sysconf(_SC_PHYS_PAGES);
|
||||
#elif defined(__FreeBSD__)
|
||||
struct vmtotal vmt;
|
||||
size_t vmt_size = sizeof(vmt);
|
||||
if (sysctlbyname("vm.vmtotal", &vmt, &vmt_size, NULL, 0) == 0)
|
||||
return getPageSize() * vmt.t_avm;
|
||||
else
|
||||
return 0;
|
||||
#else // darwin
|
||||
unsigned int usermem;
|
||||
size_t len = sizeof(usermem);
|
||||
static int mib[2] = { CTL_HW, HW_USERMEM };
|
||||
if (sysctl(mib, 2, &usermem, &len, nullptr, 0) == 0 && len == sizeof(usermem))
|
||||
return usermem;
|
||||
else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
uint64_t getAvailableMemoryAmount()
|
||||
{
|
||||
auto res = getAvailableMemoryAmountOrZero();
|
||||
if (!res)
|
||||
throw std::runtime_error("Cannot determine available memory amount");
|
||||
return res;
|
||||
}
|
12
base/base/getAvailableMemoryAmount.h
Normal file
@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
/** Returns the size of currently available physical memory (RAM) in bytes.
|
||||
* Returns 0 on unsupported platform or if it cannot determine the size of physical memory.
|
||||
*/
|
||||
uint64_t getAvailableMemoryAmountOrZero();
|
||||
|
||||
/** Throws exception if it cannot determine the size of physical memory.
|
||||
*/
|
||||
uint64_t getAvailableMemoryAmount();
|
@ -2,7 +2,7 @@
|
||||
#pragma clang diagnostic ignored "-Wreserved-identifier"
|
||||
#endif
|
||||
|
||||
/// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex.
|
||||
/// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/
|
||||
|
||||
#include <base/defines.h>
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
/// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex.
|
||||
/// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/
|
||||
|
||||
/** Collects all dl_phdr_info items and caches them in a static array.
|
||||
* Also rewrites dl_iterate_phdr with a lock-free version which consults the above cache
|
||||
|
@ -828,7 +828,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
|
||||
|
||||
/// Setup signal handlers.
|
||||
/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.
|
||||
|
||||
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
|
||||
addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals);
|
||||
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
|
||||
|
@ -76,10 +76,10 @@ public:
|
||||
/// return none if daemon doesn't exist, reference to the daemon otherwise
|
||||
static std::optional<std::reference_wrapper<BaseDaemon>> tryGetInstance() { return tryGetInstance<BaseDaemon>(); }
|
||||
|
||||
/// В Graphite компоненты пути(папки) разделяются точкой.
|
||||
/// У нас принят путь формата root_path.hostname_yandex_ru.key
|
||||
/// root_path по умолчанию one_min
|
||||
/// key - лучше группировать по смыслу. Например "meminfo.cached" или "meminfo.free", "meminfo.total"
|
||||
/// Graphite metric name has components separated by dots.
|
||||
/// We used to have the following format: root_path.hostname_clickhouse_com.key
|
||||
/// root_path - one_min by default
|
||||
/// key - something that makes sense. Examples: "meminfo.cached" or "meminfo.free", "meminfo.total".
|
||||
template <class T>
|
||||
void writeToGraphite(const std::string & key, const T & value, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
|
||||
{
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include "Common/config_version.h"
|
||||
#include <Common/config.h>
|
||||
|
||||
#if USE_SENTRY
|
||||
#if USE_SENTRY && !defined(KEEPER_STANDALONE_BUILD)
|
||||
|
||||
# include <sentry.h>
|
||||
# include <stdio.h>
|
||||
|
@ -51,6 +51,6 @@ if (GLIBC_COMPATIBILITY)
|
||||
|
||||
message (STATUS "Some symbols from glibc will be replaced for compatibility")
|
||||
|
||||
elseif (YANDEX_OFFICIAL_BUILD)
|
||||
elseif (CLICKHOUSE_OFFICIAL_BUILD)
|
||||
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
|
||||
endif ()
|
||||
|
@ -1,5 +1,13 @@
|
||||
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
|
||||
add_headers_and_sources(loggers .)
|
||||
|
||||
# Standard version depends on DBMS and works with text log
|
||||
add_library(loggers ${loggers_sources} ${loggers_headers})
|
||||
target_compile_definitions(loggers PUBLIC WITH_TEXT_LOG=1)
|
||||
target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)
|
||||
target_include_directories(loggers PUBLIC ..)
|
||||
|
||||
# Lightweight version doesn't work with textlog and also doesn't depend on DBMS
|
||||
add_library(loggers_no_text_log ${loggers_sources} ${loggers_headers})
|
||||
target_link_libraries(loggers_no_text_log PRIVATE clickhouse_common_io)
|
||||
target_include_directories(loggers PUBLIC ..)
|
||||
|
@ -9,7 +9,11 @@
|
||||
#include <Poco/ConsoleChannel.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Poco/Net/RemoteSyslogChannel.h>
|
||||
#include <Interpreters/TextLog.h>
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
#include <Interpreters/TextLog.h>
|
||||
#endif
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
@ -30,17 +34,21 @@ static std::string createDirectory(const std::string & file)
|
||||
return path;
|
||||
};
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
|
||||
{
|
||||
text_log = log;
|
||||
text_log_max_priority = max_priority;
|
||||
}
|
||||
#endif
|
||||
|
||||
void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger /*_root*/, const std::string & cmd_name)
|
||||
{
|
||||
#ifdef WITH_TEXT_LOG
|
||||
if (split)
|
||||
if (auto log = text_log.lock())
|
||||
split->addTextLog(log, text_log_max_priority);
|
||||
#endif
|
||||
|
||||
auto current_logger = config.getString("logger", "");
|
||||
if (config_logger == current_logger) //-V1051
|
||||
@ -189,7 +197,6 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
|
||||
|
||||
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(color_enabled);
|
||||
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel);
|
||||
logger.warning("Logging " + console_log_level_string + " to console");
|
||||
log->setLevel(console_log_level);
|
||||
split->addChannel(log, "console");
|
||||
}
|
||||
|
@ -7,10 +7,12 @@
|
||||
#include <Poco/Util/Application.h>
|
||||
#include "OwnSplitChannel.h"
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
namespace DB
|
||||
{
|
||||
class TextLog;
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace Poco::Util
|
||||
{
|
||||
@ -27,7 +29,9 @@ public:
|
||||
/// Close log files. On next log write files will be reopened.
|
||||
void closeLogs(Poco::Logger & logger);
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
|
||||
#endif
|
||||
|
||||
private:
|
||||
Poco::AutoPtr<Poco::FileChannel> log_file;
|
||||
@ -37,8 +41,10 @@ private:
|
||||
/// Previous value of logger element in config. It is used to reinitialize loggers whenever the value changed.
|
||||
std::string config_logger;
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
std::weak_ptr<DB::TextLog> text_log;
|
||||
int text_log_max_priority = -1;
|
||||
#endif
|
||||
|
||||
Poco::AutoPtr<DB::OwnSplitChannel> split;
|
||||
};
|
||||
|
@ -20,10 +20,13 @@ namespace DB
|
||||
{
|
||||
void OwnSplitChannel::log(const Poco::Message & msg)
|
||||
{
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
auto logs_queue = CurrentThread::getInternalTextLogsQueue();
|
||||
|
||||
if (channels.empty() && (logs_queue == nullptr || msg.getPriority() > logs_queue->max_priority))
|
||||
return;
|
||||
#endif
|
||||
|
||||
if (auto * masker = SensitiveDataMasker::getInstance())
|
||||
{
|
||||
@ -86,6 +89,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
|
||||
channel.first->log(msg); // ordinary child
|
||||
}
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
auto logs_queue = CurrentThread::getInternalTextLogsQueue();
|
||||
|
||||
/// Log to "TCP queue" if message is not too noisy
|
||||
@ -137,6 +141,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
|
||||
if (text_log_locked)
|
||||
text_log_locked->add(elem);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -145,12 +150,14 @@ void OwnSplitChannel::addChannel(Poco::AutoPtr<Poco::Channel> channel, const std
|
||||
channels.emplace(name, ExtendedChannelPtrPair(std::move(channel), dynamic_cast<ExtendedLogChannel *>(channel.get())));
|
||||
}
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
void OwnSplitChannel::addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(text_log_mutex);
|
||||
text_log = log;
|
||||
text_log_max_priority.store(max_priority, std::memory_order_relaxed);
|
||||
}
|
||||
#endif
|
||||
|
||||
void OwnSplitChannel::setLevel(const std::string & name, int level)
|
||||
{
|
||||
|
@ -7,10 +7,12 @@
|
||||
#include <Poco/Channel.h>
|
||||
#include "ExtendedLogChannel.h"
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
namespace DB
|
||||
{
|
||||
class TextLog;
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -25,7 +27,9 @@ public:
|
||||
/// Adds a child channel
|
||||
void addChannel(Poco::AutoPtr<Poco::Channel> channel, const std::string & name);
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
void addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
|
||||
#endif
|
||||
|
||||
void setLevel(const std::string & name, int level);
|
||||
|
||||
@ -40,8 +44,10 @@ private:
|
||||
|
||||
std::mutex text_log_mutex;
|
||||
|
||||
#ifdef WITH_TEXT_LOG
|
||||
std::weak_ptr<DB::TextLog> text_log;
|
||||
std::atomic<int> text_log_max_priority = -1;
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -4,11 +4,12 @@
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
def parse_block(block=[], options=[]):
|
||||
|
||||
#print('block is here', block)
|
||||
#show_query = False
|
||||
#show_query = options.show_query
|
||||
# print('block is here', block)
|
||||
# show_query = False
|
||||
# show_query = options.show_query
|
||||
result = []
|
||||
query = block[0].strip()
|
||||
if len(block) > 4:
|
||||
@ -20,9 +21,9 @@ def parse_block(block=[], options=[]):
|
||||
timing2 = block[2].strip().split()[1]
|
||||
timing3 = block[3].strip().split()[1]
|
||||
if options.show_queries:
|
||||
result.append( query )
|
||||
result.append(query)
|
||||
if not options.show_first_timings:
|
||||
result += [ timing1 , timing2, timing3 ]
|
||||
result += [timing1, timing2, timing3]
|
||||
else:
|
||||
result.append(timing1)
|
||||
return result
|
||||
@ -37,12 +38,12 @@ def read_stats_file(options, fname):
|
||||
|
||||
for line in f.readlines():
|
||||
|
||||
if 'SELECT' in line:
|
||||
if "SELECT" in line:
|
||||
if len(block) > 1:
|
||||
result.append( parse_block(block, options) )
|
||||
block = [ line ]
|
||||
elif 'Time:' in line:
|
||||
block.append( line )
|
||||
result.append(parse_block(block, options))
|
||||
block = [line]
|
||||
elif "Time:" in line:
|
||||
block.append(line)
|
||||
|
||||
return result
|
||||
|
||||
@ -50,7 +51,7 @@ def read_stats_file(options, fname):
|
||||
def compare_stats_files(options, arguments):
|
||||
result = []
|
||||
file_output = []
|
||||
pyplot_colors = ['y', 'b', 'g', 'r']
|
||||
pyplot_colors = ["y", "b", "g", "r"]
|
||||
for fname in arguments[1:]:
|
||||
file_output.append((read_stats_file(options, fname)))
|
||||
if len(file_output[0]) > 0:
|
||||
@ -58,65 +59,92 @@ def compare_stats_files(options, arguments):
|
||||
for idx, data_set in enumerate(file_output):
|
||||
int_result = []
|
||||
for timing in data_set:
|
||||
int_result.append(float(timing[0])) #y values
|
||||
result.append([[x for x in range(0, len(int_result)) ], int_result,
|
||||
pyplot_colors[idx] + '^' ] )
|
||||
# result.append([x for x in range(1, len(int_result)) ]) #x values
|
||||
# result.append( pyplot_colors[idx] + '^' )
|
||||
int_result.append(float(timing[0])) # y values
|
||||
result.append(
|
||||
[
|
||||
[x for x in range(0, len(int_result))],
|
||||
int_result,
|
||||
pyplot_colors[idx] + "^",
|
||||
]
|
||||
)
|
||||
# result.append([x for x in range(1, len(int_result)) ]) #x values
|
||||
# result.append( pyplot_colors[idx] + '^' )
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def parse_args():
|
||||
from optparse import OptionParser
|
||||
parser = OptionParser(usage='usage: %prog [options] [result_file_path]..')
|
||||
parser.add_option("-q", "--show-queries", help="Show statements along with timings", action="store_true", dest="show_queries")
|
||||
parser.add_option("-f", "--show-first-timings", help="Show only first tries timings", action="store_true", dest="show_first_timings")
|
||||
parser.add_option("-c", "--compare-mode", help="Prepare output for pyplot comparing result files.", action="store", dest="compare_mode")
|
||||
|
||||
parser = OptionParser(usage="usage: %prog [options] [result_file_path]..")
|
||||
parser.add_option(
|
||||
"-q",
|
||||
"--show-queries",
|
||||
help="Show statements along with timings",
|
||||
action="store_true",
|
||||
dest="show_queries",
|
||||
)
|
||||
parser.add_option(
|
||||
"-f",
|
||||
"--show-first-timings",
|
||||
help="Show only first tries timings",
|
||||
action="store_true",
|
||||
dest="show_first_timings",
|
||||
)
|
||||
parser.add_option(
|
||||
"-c",
|
||||
"--compare-mode",
|
||||
help="Prepare output for pyplot comparing result files.",
|
||||
action="store",
|
||||
dest="compare_mode",
|
||||
)
|
||||
(options, arguments) = parser.parse_args(sys.argv)
|
||||
if len(arguments) < 2:
|
||||
parser.print_usage()
|
||||
sys.exit(1)
|
||||
return ( options, arguments )
|
||||
return (options, arguments)
|
||||
|
||||
|
||||
def gen_pyplot_code(options, arguments):
|
||||
result = ''
|
||||
result = ""
|
||||
data_sets = compare_stats_files(options, arguments)
|
||||
for idx, data_set in enumerate(data_sets, start=0):
|
||||
x_values, y_values, line_style = data_set
|
||||
result += '\nplt.plot('
|
||||
result += '%s, %s, \'%s\'' % ( x_values, y_values, line_style )
|
||||
result += ', label=\'%s try\')' % idx
|
||||
print('import matplotlib.pyplot as plt')
|
||||
result += "\nplt.plot("
|
||||
result += "%s, %s, '%s'" % (x_values, y_values, line_style)
|
||||
result += ", label='%s try')" % idx
|
||||
print("import matplotlib.pyplot as plt")
|
||||
print(result)
|
||||
print( 'plt.xlabel(\'Try number\')' )
|
||||
print( 'plt.ylabel(\'Timing\')' )
|
||||
print( 'plt.title(\'Benchmark query timings\')' )
|
||||
print('plt.legend()')
|
||||
print('plt.show()')
|
||||
print("plt.xlabel('Try number')")
|
||||
print("plt.ylabel('Timing')")
|
||||
print("plt.title('Benchmark query timings')")
|
||||
print("plt.legend()")
|
||||
print("plt.show()")
|
||||
|
||||
|
||||
def gen_html_json(options, arguments):
|
||||
tuples = read_stats_file(options, arguments[1])
|
||||
print('{')
|
||||
print("{")
|
||||
print('"system: GreenPlum(x2),')
|
||||
print(('"version": "%s",' % '4.3.9.1'))
|
||||
print(('"version": "%s",' % "4.3.9.1"))
|
||||
print('"data_size": 10000000,')
|
||||
print('"time": "",')
|
||||
print('"comments": "",')
|
||||
print('"result":')
|
||||
print('[')
|
||||
print("[")
|
||||
for s in tuples:
|
||||
print(s)
|
||||
print(']')
|
||||
print('}')
|
||||
print("]")
|
||||
print("}")
|
||||
|
||||
|
||||
def main():
|
||||
( options, arguments ) = parse_args()
|
||||
(options, arguments) = parse_args()
|
||||
if len(arguments) > 2:
|
||||
gen_pyplot_code(options, arguments)
|
||||
else:
|
||||
gen_html_json(options, arguments)
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@ -2,11 +2,11 @@
|
||||
|
||||
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
||||
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
||||
SET(VERSION_REVISION 54460)
|
||||
SET(VERSION_REVISION 54461)
|
||||
SET(VERSION_MAJOR 22)
|
||||
SET(VERSION_MINOR 3)
|
||||
SET(VERSION_MINOR 4)
|
||||
SET(VERSION_PATCH 1)
|
||||
SET(VERSION_GITHASH 75366fc95e510b7ac76759ef670702ae5f488a51)
|
||||
SET(VERSION_DESCRIBE v22.3.1.1-testing)
|
||||
SET(VERSION_STRING 22.3.1.1)
|
||||
SET(VERSION_GITHASH 92ab33f560e638d1989c5ca543021ab53d110f5c)
|
||||
SET(VERSION_DESCRIBE v22.4.1.1-testing)
|
||||
SET(VERSION_STRING 22.4.1.1)
|
||||
# end of autochange
|
||||
|
@ -1,25 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
BINARY_PATH=$1
|
||||
BINARY_NAME=$(basename $BINARY_PATH)
|
||||
DESTINATION_STRIPPED_DIR=$2
|
||||
OBJCOPY_PATH=${3:objcopy}
|
||||
READELF_PATH=${4:readelf}
|
||||
|
||||
BUILD_ID=$($READELF_PATH -n $1 | sed -n '/Build ID/ { s/.*: //p; q; }')
|
||||
BUILD_ID_PREFIX=${BUILD_ID:0:2}
|
||||
BUILD_ID_SUFFIX=${BUILD_ID:2}
|
||||
TEMP_BINARY_PATH="${BINARY_PATH}_temp"
|
||||
|
||||
DESTINATION_DEBUG_INFO_DIR="$DESTINATION_STRIPPED_DIR/lib/debug/.build-id"
|
||||
DESTINATION_STRIP_BINARY_DIR="$DESTINATION_STRIPPED_DIR/bin"
|
||||
|
||||
mkdir -p "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX"
|
||||
mkdir -p "$DESTINATION_STRIP_BINARY_DIR"
|
||||
|
||||
$OBJCOPY_PATH --only-keep-debug "$BINARY_PATH" "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug"
|
||||
|
||||
touch "$TEMP_BINARY_PATH"
|
||||
$OBJCOPY_PATH --add-gnu-debuglink "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug" "$BINARY_PATH" "$TEMP_BINARY_PATH"
|
||||
$OBJCOPY_PATH --strip-all "$TEMP_BINARY_PATH" "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME"
|
||||
rm -f "$TEMP_BINARY_PATH"
|
@ -11,16 +11,43 @@ macro(clickhouse_strip_binary)
|
||||
message(FATAL_ERROR "A binary path name must be provided for stripping binary")
|
||||
endif()
|
||||
|
||||
|
||||
if (NOT DEFINED STRIP_DESTINATION_DIR)
|
||||
message(FATAL_ERROR "Destination directory for stripped binary must be provided")
|
||||
endif()
|
||||
|
||||
add_custom_command(TARGET ${STRIP_TARGET} POST_BUILD
|
||||
COMMAND bash ${ClickHouse_SOURCE_DIR}/cmake/strip.sh ${STRIP_BINARY_PATH} ${STRIP_DESTINATION_DIR} ${OBJCOPY_PATH} ${READELF_PATH}
|
||||
COMMENT "Stripping clickhouse binary" VERBATIM
|
||||
COMMAND mkdir -p "${STRIP_DESTINATION_DIR}/lib/debug/bin"
|
||||
COMMAND mkdir -p "${STRIP_DESTINATION_DIR}/bin"
|
||||
COMMAND cp "${STRIP_BINARY_PATH}" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
||||
COMMAND "${OBJCOPY_PATH}" --only-keep-debug --compress-debug-sections "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
||||
COMMAND chmod 0644 "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
||||
COMMAND "${STRIP_PATH}" --remove-section=.comment --remove-section=.note "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
||||
COMMAND "${OBJCOPY_PATH}" --add-gnu-debuglink "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
||||
COMMENT "Stripping clickhouse binary" VERBATIM
|
||||
)
|
||||
|
||||
install(PROGRAMS ${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
|
||||
install(DIRECTORY ${STRIP_DESTINATION_DIR}/lib/debug DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
|
||||
install(FILES ${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug DESTINATION ${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}/${STRIP_TARGET}.debug COMPONENT clickhouse)
|
||||
endmacro()
|
||||
|
||||
|
||||
macro(clickhouse_make_empty_debug_info_for_nfpm)
|
||||
set(oneValueArgs TARGET DESTINATION_DIR)
|
||||
cmake_parse_arguments(EMPTY_DEBUG "" "${oneValueArgs}" "" ${ARGN})
|
||||
|
||||
if (NOT DEFINED EMPTY_DEBUG_TARGET)
|
||||
message(FATAL_ERROR "A target name must be provided for stripping binary")
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED EMPTY_DEBUG_DESTINATION_DIR)
|
||||
message(FATAL_ERROR "Destination directory for empty debug must be provided")
|
||||
endif()
|
||||
|
||||
add_custom_command(TARGET ${EMPTY_DEBUG_TARGET} POST_BUILD
|
||||
COMMAND mkdir -p "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug"
|
||||
COMMAND touch "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug"
|
||||
COMMENT "Adding empty debug info for NFPM" VERBATIM
|
||||
)
|
||||
|
||||
install(FILES "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug" DESTINATION "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}" COMPONENT clickhouse)
|
||||
endmacro()
|
||||
|
@ -170,32 +170,32 @@ else ()
|
||||
message (FATAL_ERROR "Cannot find objcopy.")
|
||||
endif ()
|
||||
|
||||
# Readelf (FIXME copypaste)
|
||||
# Strip (FIXME copypaste)
|
||||
|
||||
if (COMPILER_GCC)
|
||||
find_program (READELF_PATH NAMES "llvm-readelf" "llvm-readelf-13" "llvm-readelf-12" "llvm-readelf-11" "readelf")
|
||||
find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-13" "llvm-strip-12" "llvm-strip-11" "strip")
|
||||
else ()
|
||||
find_program (READELF_PATH NAMES "llvm-readelf-${COMPILER_VERSION_MAJOR}" "llvm-readelf" "readelf")
|
||||
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
|
||||
endif ()
|
||||
|
||||
if (NOT READELF_PATH AND OS_DARWIN)
|
||||
if (NOT STRIP_PATH AND OS_DARWIN)
|
||||
find_program (BREW_PATH NAMES "brew")
|
||||
if (BREW_PATH)
|
||||
execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
|
||||
if (LLVM_PREFIX)
|
||||
find_program (READELF_PATH NAMES "llvm-readelf" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
|
||||
find_program (STRIP_PATH NAMES "llvm-strip" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
|
||||
endif ()
|
||||
if (NOT READELF_PATH)
|
||||
if (NOT STRIP_PATH)
|
||||
execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
|
||||
if (BINUTILS_PREFIX)
|
||||
find_program (READELF_PATH NAMES "readelf" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
|
||||
find_program (STRIP_PATH NAMES "strip" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (READELF_PATH)
|
||||
message (STATUS "Using readelf: ${READELF_PATH}")
|
||||
if (STRIP_PATH)
|
||||
message (STATUS "Using strip: ${STRIP_PATH}")
|
||||
else ()
|
||||
message (FATAL_ERROR "Cannot find readelf.")
|
||||
message (FATAL_ERROR "Cannot find strip.")
|
||||
endif ()
|
||||
|
@ -18,6 +18,6 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}")
|
||||
|
||||
math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000")
|
||||
|
||||
if(YANDEX_OFFICIAL_BUILD)
|
||||
if(CLICKHOUSE_OFFICIAL_BUILD)
|
||||
set(VERSION_OFFICIAL " (official build)")
|
||||
endif()
|
||||
|
10
contrib/CMakeLists.txt
vendored
@ -119,9 +119,13 @@ add_contrib (fastops-cmake fastops)
|
||||
add_contrib (libuv-cmake libuv)
|
||||
add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv
|
||||
add_contrib (cassandra-cmake cassandra) # requires: libuv
|
||||
add_contrib (curl-cmake curl)
|
||||
add_contrib (azure-cmake azure)
|
||||
add_contrib (sentry-native-cmake sentry-native) # requires: curl
|
||||
|
||||
if (ENABLE_CURL_BUILD)
|
||||
add_contrib (curl-cmake curl)
|
||||
add_contrib (azure-cmake azure)
|
||||
add_contrib (sentry-native-cmake sentry-native) # requires: curl
|
||||
endif()
|
||||
|
||||
add_contrib (fmtlib-cmake fmtlib)
|
||||
add_contrib (krb5-cmake krb5)
|
||||
add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5
|
||||
|
2
contrib/arrow
vendored
@ -1 +1 @@
|
||||
Subproject commit 1d9cc51daa4e7e9fc6926320ef73759818bd736e
|
||||
Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
|
@ -69,9 +69,10 @@ endif ()
|
||||
target_compile_options(_avrocpp PRIVATE ${SUPPRESS_WARNINGS})
|
||||
|
||||
# create a symlink to include headers with <avro/...>
|
||||
set(AVRO_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/include")
|
||||
ADD_CUSTOM_TARGET(avro_symlink_headers ALL
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${AVROCPP_ROOT_DIR}/include"
|
||||
COMMAND ${CMAKE_COMMAND} -E create_symlink "${AVROCPP_ROOT_DIR}/api" "${AVROCPP_ROOT_DIR}/include/avro"
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${AVRO_INCLUDE_DIR}"
|
||||
COMMAND ${CMAKE_COMMAND} -E create_symlink "${AVROCPP_ROOT_DIR}/api" "${AVRO_INCLUDE_DIR}/avro"
|
||||
)
|
||||
add_dependencies(_avrocpp avro_symlink_headers)
|
||||
target_include_directories(_avrocpp SYSTEM BEFORE PUBLIC "${AVROCPP_ROOT_DIR}/include")
|
||||
target_include_directories(_avrocpp SYSTEM BEFORE PUBLIC "${AVRO_INCLUDE_DIR}")
|
||||
|
@ -27,7 +27,11 @@ target_include_directories (_boost_headers_only SYSTEM BEFORE INTERFACE ${LIBRAR
|
||||
|
||||
# asio
|
||||
|
||||
target_compile_definitions (_boost_headers_only INTERFACE BOOST_ASIO_STANDALONE=1)
|
||||
target_compile_definitions (_boost_headers_only INTERFACE
|
||||
BOOST_ASIO_STANDALONE=1
|
||||
# Avoid using of deprecated in c++ > 17 std::result_of
|
||||
BOOST_ASIO_HAS_STD_INVOKE_RESULT=1
|
||||
)
|
||||
|
||||
# iostreams
|
||||
|
||||
|
2
contrib/curl
vendored
@ -1 +1 @@
|
||||
Subproject commit 3b8bbbbd1609c638a3d3d0acb148a33dedb67be3
|
||||
Subproject commit 801bd5138ce31aa0d906fa4e2eabfc599d74e793
|
@ -32,7 +32,6 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/transfer.c"
|
||||
"${LIBRARY_DIR}/lib/strcase.c"
|
||||
"${LIBRARY_DIR}/lib/easy.c"
|
||||
"${LIBRARY_DIR}/lib/security.c"
|
||||
"${LIBRARY_DIR}/lib/curl_fnmatch.c"
|
||||
"${LIBRARY_DIR}/lib/fileinfo.c"
|
||||
"${LIBRARY_DIR}/lib/wildcard.c"
|
||||
@ -115,6 +114,12 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/curl_get_line.c"
|
||||
"${LIBRARY_DIR}/lib/altsvc.c"
|
||||
"${LIBRARY_DIR}/lib/socketpair.c"
|
||||
"${LIBRARY_DIR}/lib/bufref.c"
|
||||
"${LIBRARY_DIR}/lib/dynbuf.c"
|
||||
"${LIBRARY_DIR}/lib/hsts.c"
|
||||
"${LIBRARY_DIR}/lib/http_aws_sigv4.c"
|
||||
"${LIBRARY_DIR}/lib/mqtt.c"
|
||||
"${LIBRARY_DIR}/lib/rename.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/vauth.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/cleartext.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/cram.c"
|
||||
@ -131,8 +136,6 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/vtls/gtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/vtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/nss.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/polarssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/wolfssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/schannel.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
|
||||
@ -141,6 +144,7 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/vtls/mbedtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/mesalink.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/keylog.c"
|
||||
"${LIBRARY_DIR}/lib/vquic/ngtcp2.c"
|
||||
"${LIBRARY_DIR}/lib/vquic/quiche.c"
|
||||
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
|
||||
|
2
contrib/hyperscan
vendored
@ -1 +1 @@
|
||||
Subproject commit e9f08df0213fc637aac0a5bbde9beeaeba2fe9fa
|
||||
Subproject commit 5edc68c5ac68d2d4f876159e9ee84def6d3dc87c
|
@ -4,12 +4,21 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if !defined(__clang__)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wredundant-decls"
|
||||
#endif
|
||||
|
||||
#include <jemalloc/jemalloc_defs.h>
|
||||
#include <jemalloc/jemalloc_rename.h>
|
||||
#include <jemalloc/jemalloc_macros.h>
|
||||
#include <jemalloc/jemalloc_protos.h>
|
||||
#include <jemalloc/jemalloc_typedefs.h>
|
||||
|
||||
#if !defined(__clang__)
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -1,4 +1,4 @@
|
||||
set (ENABLE_KRB5_DEFAULT 1)
|
||||
set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES})
|
||||
if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING))
|
||||
message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments")
|
||||
set (ENABLE_KRB5_DEFAULT 0)
|
||||
@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM)
endif()

set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")
set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private")

set(ALL_SRCS
    "${KRB5_SOURCE_DIR}/util/et/et_name.c"
@ -90,7 +91,6 @@ set(ALL_SRCS
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c"
|
||||
@ -143,11 +143,12 @@ set(ALL_SRCS
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
|
||||
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c"
|
||||
@ -256,8 +257,8 @@ set(ALL_SRCS
|
||||
"${KRB5_SOURCE_DIR}/util/profile/prof_parse.c"
|
||||
"${KRB5_SOURCE_DIR}/util/profile/prof_get.c"
|
||||
"${KRB5_SOURCE_DIR}/util/profile/prof_set.c"
|
||||
"${KRB5_SOURCE_DIR}/util/profile/prof_err.c"
|
||||
"${KRB5_SOURCE_DIR}/util/profile/prof_init.c"
|
||||
"${KRB5_ET_BIN_DIR}/util/profile/prof_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c"
|
||||
@ -450,13 +451,12 @@ set(ALL_SRCS
|
||||
|
||||
|
||||
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c"
|
||||
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c"
|
||||
|
||||
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c"
|
||||
@ -473,7 +473,7 @@ set(ALL_SRCS
)

add_custom_command(
    OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et"
    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
    COMMAND /bin/sh
    ./config_script
    ./compile_et.sh
@ -481,50 +481,17 @@ add_custom_command(
|
||||
${AWK_PROGRAM}
|
||||
sed
|
||||
>
|
||||
compile_et
|
||||
${CMAKE_CURRENT_BINARY_DIR}/compile_et
|
||||
DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script"
|
||||
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et"
|
||||
)
|
||||
|
||||
file(GLOB_RECURSE ET_FILES
|
||||
"${KRB5_SOURCE_DIR}/*.et"
|
||||
)
|
||||
|
||||
function(preprocess_et out_var)
|
||||
set(result)
|
||||
foreach(in_f ${ARGN})
|
||||
string(REPLACE
|
||||
.et
|
||||
.c
|
||||
F_C
|
||||
${in_f}
|
||||
)
|
||||
string(REPLACE
|
||||
.et
|
||||
.h
|
||||
F_H
|
||||
${in_f}
|
||||
)
|
||||
|
||||
get_filename_component(ET_PATH ${in_f} DIRECTORY)
|
||||
|
||||
add_custom_command(OUTPUT ${F_C} ${F_H}
|
||||
COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
|
||||
DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et"
|
||||
WORKING_DIRECTORY ${ET_PATH}
|
||||
VERBATIM
|
||||
)
|
||||
list(APPEND result ${F_C})
|
||||
endforeach()
|
||||
set(${out_var} "${result}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
|
||||
OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h"
|
||||
COMMAND perl
|
||||
-I../../../util
|
||||
../../../util/gen-map.pl
|
||||
-oerror_map.h
|
||||
-o${KRB5_ET_BIN_DIR}/error_map.h
|
||||
NAME=gsserrmap
|
||||
KEY=OM_uint32
|
||||
VALUE=char*
|
||||
@ -536,22 +503,21 @@ add_custom_command(
|
||||
|
||||
add_custom_target(
|
||||
ERROR_MAP_H
|
||||
DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
|
||||
DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h"
|
||||
VERBATIM
|
||||
)
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
|
||||
COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
|
||||
OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h"
|
||||
COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
|
||||
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
|
||||
)
|
||||
|
||||
add_custom_target(
|
||||
ERRMAP_H
|
||||
DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
|
||||
DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h"
|
||||
VERBATIM
|
||||
)
|
||||
|
||||
add_custom_target(
|
||||
KRB_5_H
|
||||
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
|
||||
@ -567,7 +533,40 @@ add_dependencies(
|
||||
KRB_5_H
|
||||
)
|
||||
|
||||
preprocess_et(processed_et_files ${ET_FILES})
|
||||
#
|
||||
# Generate error tables
|
||||
#
|
||||
function(preprocess_et et_path)
|
||||
string(REPLACE .et .c F_C ${et_path})
|
||||
string(REPLACE .et .h F_H ${et_path})
|
||||
get_filename_component(et_dir ${et_path} DIRECTORY)
|
||||
get_filename_component(et_name ${et_path} NAME_WLE)
|
||||
|
||||
add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
|
||||
COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path}
|
||||
# for #include w/o path (via -iquote)
|
||||
COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
|
||||
DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
|
||||
WORKING_DIRECTORY ${et_dir}
|
||||
VERBATIM
|
||||
)
|
||||
endfunction()
|
||||
|
||||
function(generate_error_tables)
|
||||
file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et")
|
||||
foreach(et_path ${ET_FILES})
|
||||
string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path})
|
||||
string(REPLACE / _ et_target_name ${et_path})
|
||||
get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY)
|
||||
add_custom_command(OUTPUT ${et_bin_path}
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path}
|
||||
VERBATIM
|
||||
)
|
||||
preprocess_et(${et_bin_path})
|
||||
endforeach()
|
||||
endfunction()
|
||||
generate_error_tables()
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
||||
add_custom_command(
|
||||
@ -634,12 +633,12 @@ file(MAKE_DIRECTORY
|
||||
|
||||
SET(KRBHDEP
|
||||
"${KRB5_SOURCE_DIR}/include/krb5/krb5.hin"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h"
|
||||
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h"
|
||||
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h"
|
||||
)
|
||||
|
||||
# cmake < 3.18 does not have 'cat' command
|
||||
@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC
    "${CMAKE_CURRENT_BINARY_DIR}/include"
)

target_compile_options(_krb5 PRIVATE
    # For '#include "file.h"'
    -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private"
)

target_include_directories(_krb5 PRIVATE
    "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers.
    ${KRB5_SOURCE_DIR}
2
contrib/libcxx
vendored
2
contrib/libcxx
vendored
@ -1 +1 @@
Subproject commit 61e60294b1de01483caa9f5d00f437c99b674de6
Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239
@ -18,12 +18,14 @@ set(SRCS
    "${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp"
    "${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp"
    "${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp"
    "${LIBCXX_SOURCE_DIR}/src/format.cpp"
    "${LIBCXX_SOURCE_DIR}/src/functional.cpp"
    "${LIBCXX_SOURCE_DIR}/src/future.cpp"
    "${LIBCXX_SOURCE_DIR}/src/hash.cpp"
    "${LIBCXX_SOURCE_DIR}/src/ios.cpp"
    "${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
    "${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
    "${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp"
    "${LIBCXX_SOURCE_DIR}/src/locale.cpp"
    "${LIBCXX_SOURCE_DIR}/src/memory.cpp"
    "${LIBCXX_SOURCE_DIR}/src/mutex.cpp"
@ -33,6 +35,9 @@ set(SRCS
    "${LIBCXX_SOURCE_DIR}/src/random.cpp"
    "${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp"
    "${LIBCXX_SOURCE_DIR}/src/regex.cpp"
    "${LIBCXX_SOURCE_DIR}/src/ryu/d2fixed.cpp"
    "${LIBCXX_SOURCE_DIR}/src/ryu/d2s.cpp"
    "${LIBCXX_SOURCE_DIR}/src/ryu/f2s.cpp"
    "${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp"
    "${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp"
    "${LIBCXX_SOURCE_DIR}/src/string.cpp"
@ -49,7 +54,9 @@ set(SRCS
add_library(cxx ${SRCS})
set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")

target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC
    $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>
    $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}>/src)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)

# Enable capturing stack traces for all exceptions.
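Among the files added to the libcxx build above, the ryu/*.cpp sources carry the shortest-round-trip float formatting that libc++ uses for C++17 floating-point std::to_chars, and format.cpp backs the <format> machinery. A minimal usage sketch, assuming a standard library that already ships floating-point to_chars:

#include <charconv>
#include <cstdio>

int main() {
    char buf[64];
    // to_chars emits the shortest decimal form that round-trips back to 0.1.
    auto [end, ec] = std::to_chars(buf, buf + sizeof(buf), 0.1);
    if (ec == std::errc())
        std::printf("%.*s\n", static_cast<int>(end - buf), buf);  // prints 0.1
    return 0;
}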
2
contrib/libcxxabi
vendored
2
contrib/libcxxabi
vendored
@ -1 +1 @@
Subproject commit df8f1e727dbc9e2bedf2282096fa189dc3fe0076
Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7
@ -1,24 +1,24 @@
set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi")

set(SRCS
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp"
    "${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp"
)

add_library(cxxabi ${SRCS})
@ -30,6 +30,7 @@ target_compile_options(cxxabi PRIVATE -w)
target_include_directories(cxxabi SYSTEM BEFORE
    PUBLIC $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/include>
    PRIVATE $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/../libcxx/include>
    PRIVATE $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/../libcxx/src>
)
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
2
contrib/libxml2
vendored
2
contrib/libxml2
vendored
@ -1 +1 @@
Subproject commit 18890f471c420411aa3c989e104d090966ec9dbf
Subproject commit a075d256fd9ff15590b86d981b75a50ead124fca
@ -1,12 +1,9 @@
# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
# Possible workaround is to use llvm-tblgen from some package...
# But lets just enable LLVM for native builds
if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

if (NOT ENABLE_EMBEDDED_COMPILER)
2
contrib/poco
vendored
2
contrib/poco
vendored
@ -1 +1 @@
Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc
2
contrib/replxx
vendored
2
contrib/replxx
vendored
@ -1 +1 @@
Subproject commit 9460e5e0fc10f78f460af26a6bd928798cac864d
Subproject commit 3fd0e3c9364a589447453d9906d854ebd8d385c5
2
contrib/sysroot
vendored
2
contrib/sysroot
vendored
@ -1 +1 @@
Subproject commit bbcac834526d90d1e764164b861be426891d1743
Subproject commit e9fb375d0a1e5ebfd74c043f088f2342552103f8
2
contrib/unixodbc
vendored
2
contrib/unixodbc
vendored
@ -1 +1 @@
Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168
Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd
18
debian/.gitignore
vendored
18
debian/.gitignore
vendored
@ -1,18 +0,0 @@
control
copyright
tmp/
clickhouse-benchmark/
clickhouse-client.docs
clickhouse-client/
clickhouse-common-static-dbg/
clickhouse-common-static.docs
clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
debhelper-build-stamp
files
*.debhelper.log
*.debhelper
*.substvars
223
debian/.pbuilderrc
vendored
223
debian/.pbuilderrc
vendored
@ -1,223 +0,0 @@
|
||||
#
|
||||
# sudo apt install pbuilder fakeroot debhelper debian-archive-keyring debian-keyring
|
||||
#
|
||||
# ubuntu:
|
||||
# prepare old (trusty or earlier) host system:
|
||||
|
||||
# sudo ln -s gutsy /usr/share/debootstrap/scripts/eoan
|
||||
# sudo ln -s gutsy /usr/share/debootstrap/scripts/disco
|
||||
# sudo ln -s gutsy /usr/share/debootstrap/scripts/cosmic
|
||||
# sudo ln -s gutsy /usr/share/debootstrap/scripts/artful
|
||||
# sudo ln -s gutsy /usr/share/debootstrap/scripts/bionic
|
||||
# sudo ln -s sid /usr/share/debootstrap/scripts/buster
|
||||
# build ubuntu:
|
||||
# sudo DIST=bionic pbuilder create --configfile debian/.pbuilderrc && DIST=bionic pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=cosmic pbuilder create --configfile debian/.pbuilderrc && DIST=cosmic pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=disco pbuilder create --configfile debian/.pbuilderrc && DIST=disco pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=eoan pbuilder create --configfile debian/.pbuilderrc && DIST=eoan pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=devel pbuilder create --configfile debian/.pbuilderrc && DIST=devel pdebuild --configfile debian/.pbuilderrc
|
||||
# build debian:
|
||||
# sudo DIST=stable pbuilder create --configfile debian/.pbuilderrc && DIST=stable pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=testing pbuilder create --configfile debian/.pbuilderrc && DIST=testing pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=unstable pbuilder create --configfile debian/.pbuilderrc && DIST=unstable pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=experimental pbuilder create --configfile debian/.pbuilderrc && DIST=experimental pdebuild --configfile debian/.pbuilderrc
|
||||
# build i386 experimental:
|
||||
# sudo DIST=trusty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=trusty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=xenial ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=xenial ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=zesty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=zesty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=artful ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=artful ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=bionic ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=bionic ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=stable ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=stable ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=testing ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=testing ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# sudo DIST=experimental ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=experimental ARCH=i386 pdebuild --configfile debian/.pbuilderrc
|
||||
# test gcc-9
|
||||
# env DEB_CC=gcc-9 DEB_CXX=g++-9 EXTRAPACKAGES="g++-9 gcc-9" DIST=disco pdebuild --configfile debian/.pbuilderrc
|
||||
# use only clang:
|
||||
# env DEB_CC=clang-8 DEB_CXX=clang++-8 EXTRAPACKAGES=clang-8 DIST=disco pdebuild --configfile debian/.pbuilderrc
|
||||
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES=clang-5.0 DIST=artful pdebuild --configfile debian/.pbuilderrc
|
||||
# clang+asan:
|
||||
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DENABLE_TCMALLOC=0 -DENABLE_UNWIND=0 -DCMAKE_BUILD_TYPE=Asan" DIST=artful pdebuild --configfile debian/.pbuilderrc
|
||||
# clang+tsan:
|
||||
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Tsan" DIST=artful pdebuild --configfile debian/.pbuilderrc
|
||||
# without sse for old systems and some VM:
|
||||
# env DH_VERBOSE=1 CMAKE_FLAGS="-DHAVE_SSE41=0 -DHAVE_SSE42=0 -DHAVE_POPCNT=0 -DHAVE_SSE2_INTRIN=0 -DSSE2FLAG=' ' -DHAVE_SSE42_INTRIN=0 -DSSE4FLAG=' ' -DHAVE_PCLMULQDQ_INTRIN=0 -DPCLMULFLAG=' '" DIST=artful pdebuild --configfile debian/.pbuilderrc
|
||||
|
||||
# Note: on trusty host creating some future dists can fail (debootstrap error).
|
||||
|
||||
# Your packages built here: /var/cache/pbuilder/*-*/result
|
||||
|
||||
# from https://wiki.debian.org/PbuilderTricks :
|
||||
|
||||
# Codenames for Debian suites according to their alias. Update these when
|
||||
# needed.
|
||||
UNSTABLE_CODENAME="sid"
|
||||
TESTING_CODENAME="buster"
|
||||
STABLE_CODENAME="stretch"
|
||||
STABLE_BACKPORTS_SUITE="$STABLE_CODENAME-backports"
|
||||
|
||||
# List of Debian suites.
|
||||
DEBIAN_SUITES=($UNSTABLE_CODENAME $TESTING_CODENAME $STABLE_CODENAME $STABLE_BACKPORTS_SUITE
|
||||
"experimental" "unstable" "testing" "stable")
|
||||
|
||||
# List of Ubuntu suites. Update these when needed.
|
||||
UBUNTU_SUITES=("eoan" "disco" "cosmic" "bionic" "artful" "zesty" "xenial" "trusty" "devel")
|
||||
|
||||
# Set a default distribution if none is used. Note that you can set your own default (i.e. ${DIST:="unstable"}).
|
||||
HOST_DIST=`lsb_release --short --codename`
|
||||
: ${DIST:="$HOST_DIST"}
|
||||
|
||||
# Optionally change Debian codenames in $DIST to their aliases.
|
||||
case "$DIST" in
|
||||
$UNSTABLE_CODENAME)
|
||||
DIST="unstable"
|
||||
;;
|
||||
$TESTING_CODENAME)
|
||||
DIST="testing"
|
||||
;;
|
||||
$STABLE_CODENAME)
|
||||
DIST="stable"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Optionally set the architecture to the host architecture if none set. Note
|
||||
# that you can set your own default (i.e. ${ARCH:="i386"}).
|
||||
: ${ARCH:="$(dpkg --print-architecture)"}
|
||||
|
||||
NAME="$DIST"
|
||||
if [ -n "${ARCH}" ]; then
|
||||
NAME="$NAME-$ARCH"
|
||||
DEBOOTSTRAPOPTS=("--arch" "$ARCH" "${DEBOOTSTRAPOPTS[@]}")
|
||||
fi
|
||||
|
||||
BASETGZ=${SET_BASETGZ}
|
||||
BASETGZ=${BASETGZ:="/var/cache/pbuilder/$NAME-base.tgz"}
|
||||
DISTRIBUTION="$DIST"
|
||||
BUILDRESULT=${SET_BUILDRESULT}
|
||||
BUILDRESULT=${BUILDRESULT:="/var/cache/pbuilder/$NAME/result/"}
|
||||
APTCACHE="/var/cache/pbuilder/$NAME/aptcache/"
|
||||
BUILDPLACE="/var/cache/pbuilder/build/"
|
||||
ALLOWUNTRUSTED=${SET_ALLOWUNTRUSTED:=${ALLOWUNTRUSTED}}
|
||||
|
||||
#DEBOOTSTRAPOPTS=( '--variant=buildd' $SET_DEBOOTSTRAPOPTS )
|
||||
|
||||
|
||||
if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
|
||||
# Debian configuration
|
||||
OSNAME=debian
|
||||
MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
|
||||
COMPONENTS="main contrib non-free"
|
||||
if $(echo "$STABLE_CODENAME stable" | grep -q $DIST); then
|
||||
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $STABLE_BACKPORTS_SUITE $COMPONENTS"
|
||||
fi
|
||||
# APTKEYRINGS=/usr/share/keyrings/debian-archive-keyring.gpg
|
||||
|
||||
case "$HOST_DIST" in
|
||||
"trusty" )
|
||||
DEBOOTSTRAPOPTS+=( '--no-check-gpg' )
|
||||
;;
|
||||
*)
|
||||
DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-archive-keyring.gpg' )
|
||||
# DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-keyring.gpg' )
|
||||
esac
|
||||
elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
|
||||
# Ubuntu configuration
|
||||
OSNAME=ubuntu
|
||||
|
||||
if [[ "$ARCH" == "amd64" || "$ARCH" == "i386" ]]; then
|
||||
MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
|
||||
else
|
||||
MIRRORSITE=${SET_MIRRORSITE="http://ports.ubuntu.com/ubuntu-ports/"}
|
||||
fi
|
||||
|
||||
COMPONENTS="main restricted universe multiverse"
|
||||
|
||||
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-updates main restricted universe multiverse"
|
||||
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-security main restricted universe multiverse"
|
||||
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-proposed main restricted universe multiverse"
|
||||
|
||||
case "$DIST" in
|
||||
"trusty" | "xenial" )
|
||||
OTHERMIRROR="$OTHERMIRROR | deb http://ppa.launchpad.net/ubuntu-toolchain-r/test/$OSNAME $DIST main"
|
||||
ALLOWUNTRUSTED=yes
|
||||
;;
|
||||
esac
|
||||
|
||||
# deb http://apt.llvm.org/zesty/ llvm-toolchain-zesty-5.0 main
|
||||
else
|
||||
echo "Unknown distribution: $DIST"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "using $NAME $OSNAME $DIST $ARCH $LOGNAME $MIRRORSITE"
|
||||
|
||||
case "$DIST" in
|
||||
"trusty")
|
||||
# ccache broken
|
||||
;;
|
||||
*)
|
||||
CCACHEDIR=${SET_CCACHEDIR:="/var/cache/pbuilder/ccache"}
|
||||
;;
|
||||
esac
|
||||
|
||||
# old systems with default gcc <= 6
|
||||
case "$DIST" in
|
||||
"trusty" | "xenial" | "stable" )
|
||||
export DEB_CC=gcc-7
|
||||
export DEB_CXX=g++-7
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$ARCH" != arm64 ]; then
|
||||
case "$DIST" in
|
||||
# TODO: fix llvm-8 and use for "disco" and "eoan"
|
||||
"experimental")
|
||||
EXTRAPACKAGES+=" liblld-8-dev libclang-8-dev llvm-8-dev liblld-8 "
|
||||
export CMAKE_FLAGS="-DLLVM_VERSION=8 $CMAKE_FLAGS"
|
||||
;;
|
||||
"eoan" | "disco" | "cosmic" | "testing" | "unstable")
|
||||
EXTRAPACKAGES+=" liblld-7-dev libclang-7-dev llvm-7-dev liblld-7 "
|
||||
export CMAKE_FLAGS="-DLLVM_VERSION=7 $CMAKE_FLAGS"
|
||||
;;
|
||||
"bionic")
|
||||
EXTRAPACKAGES+=" liblld-6.0-dev libclang-6.0-dev liblld-6.0 "
|
||||
export CMAKE_FLAGS="-DLLVM_VERSION=6 $CMAKE_FLAGS"
|
||||
;;
|
||||
"artful" )
|
||||
EXTRAPACKAGES+=" liblld-5.0-dev libclang-5.0-dev liblld-5.0 "
|
||||
;;
|
||||
esac
|
||||
else
|
||||
export CMAKE_FLAGS="-DENABLE_EMBEDDED_COMPILER=0 $CMAKE_FLAGS"
|
||||
fi
|
||||
|
||||
# Will test symbols
|
||||
#EXTRAPACKAGES+=" gdb "
|
||||
|
||||
# For killall in pbuilder-hooks:
|
||||
EXTRAPACKAGES+=" psmisc "
|
||||
|
||||
[[ $CCACHE_PREFIX == 'distcc' ]] && EXTRAPACKAGES+=" $CCACHE_PREFIX " && USENETWORK=yes && export DISTCC_DIR=/var/cache/pbuilder/distcc
|
||||
|
||||
[[ $ARCH == 'i386' ]] && EXTRAPACKAGES+=" libssl-dev "
|
||||
|
||||
export DEB_BUILD_OPTIONS=parallel=`nproc`
|
||||
|
||||
# Floating bug with permissions:
|
||||
[ -n "$CCACHEDIR" ] && sudo mkdir -p $CCACHEDIR
|
||||
[ -n "$CCACHEDIR" ] && sudo chmod -R a+rwx $CCACHEDIR || true
|
||||
# chown -R $BUILDUSERID:$BUILDUSERID $CCACHEDIR
|
||||
|
||||
|
||||
# Do not create source package inside pbuilder (-b)
|
||||
# Use current dir to make package (by default should have src archive)
|
||||
# echo "3.0 (native)" > debian/source/format
|
||||
# OR
|
||||
# pdebuild -b --debbuildopts "--source-option=--format=\"3.0 (native)\""
|
||||
# OR
|
||||
DEBBUILDOPTS="-b --source-option=--format=\"3.0 (native)\""
|
||||
|
||||
HOOKDIR="debian/pbuilder-hooks"
|
||||
|
||||
#echo "DEBOOTSTRAPOPTS=${DEBOOTSTRAPOPTS[@]}"
|
||||
#echo "ALLOWUNTRUSTED=${ALLOWUNTRUSTED} OTHERMIRROR=${OTHERMIRROR}"
|
||||
#echo "EXTRAPACKAGES=${EXTRAPACKAGES}"
|
5
debian/changelog
vendored
5
debian/changelog
vendored
@ -1,5 +0,0 @@
clickhouse (22.1.1.1) unstable; urgency=low

  * Modified source code

 -- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Dec 2021 00:32:58 +0300
5
debian/changelog.in
vendored
5
debian/changelog.in
vendored
@ -1,5 +0,0 @@
clickhouse (@VERSION_STRING@) unstable; urgency=low

  * Modified source code

 -- @AUTHOR@ <@EMAIL@> @DATE@
7
debian/clickhouse-client.install
vendored
7
debian/clickhouse-client.install
vendored
@ -1,7 +0,0 @@
usr/bin/clickhouse-client
usr/bin/clickhouse-local
usr/bin/clickhouse-compressor
usr/bin/clickhouse-benchmark
usr/bin/clickhouse-format
usr/bin/clickhouse-obfuscator
etc/clickhouse-client/config.xml
5
debian/clickhouse-common-static.install
vendored
5
debian/clickhouse-common-static.install
vendored
@ -1,5 +0,0 @@
usr/bin/clickhouse
usr/bin/clickhouse-odbc-bridge
usr/bin/clickhouse-library-bridge
usr/bin/clickhouse-extract-from-config
usr/share/bash-completion/completions
1
debian/clickhouse-server.cron.d
vendored
1
debian/clickhouse-server.cron.d
vendored
@ -1 +0,0 @@
#*/10 * * * * root ((which service > /dev/null 2>&1 && (service clickhouse-server condstart ||:)) || /etc/init.d/clickhouse-server condstart) > /dev/null 2>&1
4
debian/clickhouse-server.docs
vendored
4
debian/clickhouse-server.docs
vendored
@ -1,4 +0,0 @@
LICENSE
AUTHORS
README.md
CHANGELOG.md
6
debian/clickhouse-server.install
vendored
6
debian/clickhouse-server.install
vendored
@ -1,6 +0,0 @@
usr/bin/clickhouse-server
usr/bin/clickhouse-copier
usr/bin/clickhouse-report
etc/clickhouse-server/config.xml
etc/clickhouse-server/users.xml
etc/systemd/system/clickhouse-server.service
58
debian/control
vendored
58
debian/control
vendored
@ -1,58 +0,0 @@
|
||||
Source: clickhouse
|
||||
Section: database
|
||||
Priority: optional
|
||||
Maintainer: Alexey Milovidov <milovidov@clickhouse.com>
|
||||
Build-Depends: debhelper (>= 9),
|
||||
cmake | cmake3,
|
||||
ninja-build,
|
||||
clang-13,
|
||||
llvm-13,
|
||||
lld-13,
|
||||
libc6-dev,
|
||||
tzdata
|
||||
Standards-Version: 3.9.8
|
||||
|
||||
Package: clickhouse-client
|
||||
Architecture: all
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
|
||||
Replaces: clickhouse-compressor
|
||||
Conflicts: clickhouse-compressor
|
||||
Description: Client binary for ClickHouse
|
||||
ClickHouse is a column-oriented database management system
|
||||
that allows generating analytical data reports in real time.
|
||||
.
|
||||
This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark
|
||||
|
||||
Package: clickhouse-common-static
|
||||
Architecture: any
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}
|
||||
Suggests: clickhouse-common-static-dbg
|
||||
Replaces: clickhouse-common, clickhouse-server-base
|
||||
Provides: clickhouse-common, clickhouse-server-base
|
||||
Description: Common files for ClickHouse
|
||||
ClickHouse is a column-oriented database management system
|
||||
that allows generating analytical data reports in real time.
|
||||
.
|
||||
This package provides common files for both clickhouse server and client
|
||||
|
||||
Package: clickhouse-server
|
||||
Architecture: all
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser
|
||||
Recommends: libcap2-bin
|
||||
Replaces: clickhouse-server-common, clickhouse-server-base
|
||||
Provides: clickhouse-server-common
|
||||
Description: Server binary for ClickHouse
|
||||
ClickHouse is a column-oriented database management system
|
||||
that allows generating analytical data reports in real time.
|
||||
.
|
||||
This package provides clickhouse common configuration files
|
||||
|
||||
Package: clickhouse-common-static-dbg
|
||||
Architecture: any
|
||||
Section: debug
|
||||
Priority: optional
|
||||
Depends: ${misc:Depends}
|
||||
Replaces: clickhouse-common-dbg
|
||||
Conflicts: clickhouse-common-dbg
|
||||
Description: debugging symbols for clickhouse-common-static
|
||||
This package contains the debugging symbols for clickhouse-common.
|
132
debian/rules
vendored
132
debian/rules
vendored
@ -1,132 +0,0 @@
|
||||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
export DH_VERBOSE=1
|
||||
|
||||
# -pie only for static mode
|
||||
export DEB_BUILD_MAINT_OPTIONS=hardening=-all
|
||||
|
||||
# because copy_headers.sh have hardcoded path to build/include_directories.txt
|
||||
BUILDDIR = obj-$(DEB_HOST_GNU_TYPE)
|
||||
CURDIR = $(shell pwd)
|
||||
DESTDIR = $(CURDIR)/debian/tmp
|
||||
|
||||
DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
|
||||
|
||||
ifeq ($(CCACHE_PREFIX),distcc)
|
||||
THREADS_COUNT=$(shell distcc -j)
|
||||
endif
|
||||
ifeq ($(THREADS_COUNT),)
|
||||
THREADS_COUNT=$(shell nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu || echo 4)
|
||||
endif
|
||||
DEB_BUILD_OPTIONS+=parallel=$(THREADS_COUNT)
|
||||
|
||||
ifndef ENABLE_TESTS
|
||||
CMAKE_FLAGS += -DENABLE_TESTS=0
|
||||
else
|
||||
# To export binaries and from deb build we do not strip them. No need to run tests in deb build as we run them in CI
|
||||
DEB_BUILD_OPTIONS+= nocheck
|
||||
DEB_BUILD_OPTIONS+= nostrip
|
||||
endif
|
||||
|
||||
ifndef MAKE_TARGET
|
||||
MAKE_TARGET = clickhouse-bundle
|
||||
endif
|
||||
|
||||
CMAKE_FLAGS += -DENABLE_UTILS=0
|
||||
|
||||
DEB_CC ?= $(shell which gcc-11 gcc-10 gcc-9 gcc | head -n1)
|
||||
DEB_CXX ?= $(shell which g++-11 g++-10 g++-9 g++ | head -n1)
|
||||
|
||||
ifdef DEB_CXX
|
||||
DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
|
||||
DEB_HOST_GNU_TYPE := $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
|
||||
ifeq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
|
||||
CC := $(DEB_CC)
|
||||
CXX := $(DEB_CXX)
|
||||
else ifeq (clang,$(findstring clang,$(DEB_CXX)))
|
||||
# If we crosscompile with clang, it knows what to do
|
||||
CC := $(DEB_CC)
|
||||
CXX := $(DEB_CXX)
|
||||
else
|
||||
CC := $(DEB_HOST_GNU_TYPE)-$(DEB_CC)
|
||||
CXX := $(DEB_HOST_GNU_TYPE)-$(DEB_CXX)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CXX
|
||||
CMAKE_FLAGS += -DCMAKE_CXX_COMPILER=`which $(CXX)`
|
||||
endif
|
||||
ifdef CC
|
||||
CMAKE_FLAGS += -DCMAKE_C_COMPILER=`which $(CC)`
|
||||
endif
|
||||
|
||||
ifndef DISABLE_NINJA
|
||||
NINJA=$(shell which ninja)
|
||||
ifneq ($(NINJA),)
|
||||
CMAKE_FLAGS += -GNinja
|
||||
export MAKE=$(NINJA) $(NINJA_FLAGS)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifndef DH_VERBOSE
|
||||
CMAKE_FLAGS += -DCMAKE_VERBOSE_MAKEFILE=0
|
||||
endif
|
||||
|
||||
# Useful for bulding on low memory systems
|
||||
ifndef DISABLE_PARALLEL
|
||||
DH_FLAGS += --parallel
|
||||
else
|
||||
THREADS_COUNT = 1
|
||||
endif
|
||||
|
||||
%:
|
||||
dh $@ $(DH_FLAGS) --buildsystem=cmake
|
||||
|
||||
override_dh_auto_configure:
|
||||
dh_auto_configure -- $(CMAKE_FLAGS)
|
||||
|
||||
override_dh_auto_build:
|
||||
# Fix for ninja. Do not add -O.
|
||||
$(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)
|
||||
|
||||
override_dh_auto_test:
|
||||
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
|
||||
cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
|
||||
endif
|
||||
|
||||
# Disable config.guess and config.sub update
|
||||
override_dh_update_autotools_config:
|
||||
|
||||
override_dh_clean:
|
||||
rm -rf debian/copyright debian/clickhouse-client.docs debian/clickhouse-common-static.docs
|
||||
dh_clean # -X contrib
|
||||
|
||||
override_dh_strip:
|
||||
#https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
|
||||
ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
|
||||
dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
|
||||
endif
|
||||
|
||||
override_dh_install:
|
||||
# Making docs
|
||||
cp LICENSE debian/copyright
|
||||
|
||||
ln -sf clickhouse-server.docs debian/clickhouse-client.docs
|
||||
ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs
|
||||
|
||||
# systemd compatibility
|
||||
mkdir -p $(DESTDIR)/etc/systemd/system/
|
||||
cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
|
||||
|
||||
dh_install --list-missing --sourcedir=$(DESTDIR)
|
||||
|
||||
override_dh_auto_install:
|
||||
env DESTDIR=$(DESTDIR) $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) install
|
||||
|
||||
override_dh_shlibdeps:
|
||||
true # We depend only on libc and dh_shlibdeps gives us wrong (too strict) dependency.
|
||||
|
||||
override_dh_builddeb:
|
||||
dh_builddeb -- -Z gzip # Older systems don't have "xz", so use "gzip" instead.
|
1
debian/source/format
vendored
1
debian/source/format
vendored
@ -1 +0,0 @@
3.0 (quilt)
9
debian/source/options
vendored
9
debian/source/options
vendored
@ -1,9 +0,0 @@
tar-ignore
tar-ignore="build_*/*"
tar-ignore="workspace/*"
tar-ignore="contrib/poco/openssl/*"
tar-ignore="contrib/poco/gradle/*"
tar-ignore="contrib/poco/Data/SQLite/*"
tar-ignore="contrib/poco/PDF/*"
compression-level=3
compression=gzip
6
debian/watch
vendored
6
debian/watch
vendored
@ -1,6 +0,0 @@
version=4

opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
    https://github.com/ClickHouse/ClickHouse/tags \
    (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate
@ -1,4 +1,3 @@
# rebuild in #33610
# docker build -t clickhouse/docs-builder .
FROM ubuntu:20.04

@ -1,4 +1,3 @@
# rebuild in #33610
# docker build -t clickhouse/docs-check .
ARG FROM_TAG=latest
FROM clickhouse/docs-builder:$FROM_TAG

74
docker/keeper/Dockerfile
Normal file
74
docker/keeper/Dockerfile
Normal file
@ -0,0 +1,74 @@
|
||||
FROM ubuntu:20.04 AS glibc-donor
|
||||
|
||||
ARG TARGETARCH
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) rarch=x86_64 ;; \
|
||||
arm64) rarch=aarch64 ;; \
|
||||
esac \
|
||||
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
|
||||
|
||||
|
||||
FROM alpine
|
||||
|
||||
ENV LANG=en_US.UTF-8 \
|
||||
LANGUAGE=en_US:en \
|
||||
LC_ALL=en_US.UTF-8 \
|
||||
TZ=UTC \
|
||||
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
|
||||
|
||||
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
|
||||
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
ARG TARGETARCH
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
|
||||
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
|
||||
esac
|
||||
|
||||
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
|
||||
ARG VERSION="22.4.1.917"
|
||||
ARG PACKAGES="clickhouse-keeper"
|
||||
|
||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||
# It is especially important for rootless containers: in that case entrypoint
|
||||
# can't do chown and owners of mounted volumes should be configured externally.
|
||||
# We do that in advance at the begining of Dockerfile before any packages will be
|
||||
# installed to prevent picking those uid / gid by some unrelated software.
|
||||
# The same uid / gid (101) is used both for alpine and ubuntu.
|
||||
|
||||
|
||||
ARG TARGETARCH
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& for package in ${PACKAGES}; do \
|
||||
{ \
|
||||
{ echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
|
||||
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
|
||||
&& tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
|
||||
} || \
|
||||
{ echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
|
||||
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
|
||||
&& tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
|
||||
} ; \
|
||||
} || exit 1 \
|
||||
; done \
|
||||
&& rm /tmp/*.tgz /install -r \
|
||||
&& addgroup -S -g 101 clickhouse \
|
||||
&& adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \
|
||||
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \
|
||||
&& chown clickhouse:clickhouse /var/lib/clickhouse \
|
||||
&& chown root:clickhouse /var/log/clickhouse-keeper \
|
||||
&& chmod +x /entrypoint.sh \
|
||||
&& apk add --no-cache su-exec bash tzdata \
|
||||
&& cp /usr/share/zoneinfo/UTC /etc/localtime \
|
||||
&& echo "UTC" > /etc/timezone \
|
||||
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
|
||||
|
||||
|
||||
EXPOSE 2181 10181 44444
|
||||
|
||||
VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
1
docker/keeper/Dockerfile.alpine
Symbolic link
1
docker/keeper/Dockerfile.alpine
Symbolic link
@ -0,0 +1 @@
Dockerfile
93
docker/keeper/entrypoint.sh
Normal file
93
docker/keeper/entrypoint.sh
Normal file
@ -0,0 +1,93 @@
|
||||
#!/bin/bash
|
||||
|
||||
set +x
|
||||
set -eo pipefail
|
||||
shopt -s nullglob
|
||||
|
||||
DO_CHOWN=1
|
||||
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
|
||||
DO_CHOWN=0
|
||||
fi
|
||||
|
||||
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
|
||||
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
|
||||
|
||||
# support --user
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
USER=$CLICKHOUSE_UID
|
||||
GROUP=$CLICKHOUSE_GID
|
||||
if command -v gosu &> /dev/null; then
|
||||
gosu="gosu $USER:$GROUP"
|
||||
elif command -v su-exec &> /dev/null; then
|
||||
gosu="su-exec $USER:$GROUP"
|
||||
else
|
||||
echo "No gosu/su-exec detected!"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
USER="$(id -u)"
|
||||
GROUP="$(id -g)"
|
||||
gosu=""
|
||||
DO_CHOWN=0
|
||||
fi
|
||||
|
||||
KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}"
|
||||
|
||||
if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then
|
||||
echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
|
||||
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
|
||||
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
|
||||
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
|
||||
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
|
||||
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
|
||||
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
|
||||
|
||||
for dir in "$DATA_DIR" \
|
||||
"$LOG_DIR" \
|
||||
"$TMP_DIR" \
|
||||
"$COORDINATION_LOG_DIR" \
|
||||
"$COORDINATION_SNAPSHOT_DIR"
|
||||
do
|
||||
# check if variable not empty
|
||||
[ -z "$dir" ] && continue
|
||||
# ensure directories exist
|
||||
if ! mkdir -p "$dir"; then
|
||||
echo "Couldn't create necessary directory: $dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$DO_CHOWN" = "1" ]; then
|
||||
# ensure proper directories permissions
|
||||
# but skip it for if directory already has proper premissions, cause recursive chown may be slow
|
||||
if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
|
||||
chown -R "$USER:$GROUP" "$dir"
|
||||
fi
|
||||
elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
|
||||
echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
|
||||
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
||||
# Watchdog is launched by default, but does not send SIGINT to the main process,
|
||||
# so the container can't be finished by ctrl+c
|
||||
export CLICKHOUSE_WATCHDOG_ENABLE
|
||||
|
||||
cd /var/lib/clickhouse
|
||||
|
||||
# There is a config file. It is already tested with gosu (if it is readably by keeper user)
|
||||
if [ -f "$KEEPER_CONFIG" ]; then
|
||||
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
|
||||
fi
|
||||
|
||||
# There is no config file. Will use embedded one
|
||||
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
|
||||
fi
|
||||
|
||||
# Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image
|
||||
exec "$@"
|
@ -95,6 +95,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
    && apt-get install gcc-11 g++-11 --yes \
    && apt-get clean

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.15.0

RUN arch=${TARGETARCH:-amd64} \
    && curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \
    && dpkg -i /tmp/nfpm.deb \
    && rm /tmp/nfpm.deb

COPY build.sh /
CMD ["bash", "-c", "/build.sh 2>&1 | ts"]
CMD ["bash", "-c", "/build.sh 2>&1"]
@ -1,7 +1,13 @@
#!/usr/bin/env bash

exec &> >(ts)
set -x -e

cache_status () {
    ccache --show-config ||:
    ccache --show-stats ||:
}

mkdir -p build/cmake/toolchain/darwin-x86_64
tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
@ -19,15 +25,33 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
|
||||
env
|
||||
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
|
||||
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
if [ "coverity" == "$COMBINED_OUTPUT" ]
|
||||
then
|
||||
mkdir -p /opt/cov-analysis
|
||||
|
||||
wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1
|
||||
export PATH=$PATH:/opt/cov-analysis/bin
|
||||
cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
|
||||
SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
|
||||
fi
|
||||
|
||||
cache_status
|
||||
# clear cache stats
|
||||
ccache --zero-stats ||:
|
||||
|
||||
# No quotes because I want it to expand to nothing if empty.
|
||||
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
|
||||
ninja $NINJA_FLAGS clickhouse-bundle
|
||||
$SCAN_WRAPPER ninja $NINJA_FLAGS clickhouse-bundle
|
||||
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
cache_status
|
||||
|
||||
if [ -n "$MAKE_DEB" ]; then
|
||||
rm -rf /build/packages/root
|
||||
# No quotes because I want it to expand to nothing if empty.
|
||||
# shellcheck disable=SC2086
|
||||
DESTDIR=/build/packages/root ninja $NINJA_FLAGS install
|
||||
bash -x /build/packages/build
|
||||
fi
|
||||
|
||||
mv ./programs/clickhouse* /output
|
||||
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
|
||||
@ -77,6 +101,12 @@ then
|
||||
mv "$COMBINED_OUTPUT.tgz" /output
|
||||
fi
|
||||
|
||||
if [ "coverity" == "$COMBINED_OUTPUT" ]
|
||||
then
|
||||
tar -cv -I pigz -f "coverity-scan.tgz" cov-int
|
||||
mv "coverity-scan.tgz" /output
|
||||
fi
|
||||
|
||||
# Also build fuzzers if any sanitizer specified
|
||||
# if [ -n "$SANITIZER" ]
|
||||
# then
|
||||
@ -84,8 +114,7 @@ fi
|
||||
# ../docker/packager/other/fuzzer.sh
|
||||
# fi
|
||||
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
cache_status
|
||||
|
||||
if [ "${CCACHE_DEBUG:-}" == "1" ]
|
||||
then
|
||||
|
@ -1,5 +1,5 @@
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import subprocess
import os
import argparse
@ -8,36 +8,39 @@ import sys
|
||||
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
|
||||
IMAGE_MAP = {
|
||||
"deb": "clickhouse/deb-builder",
|
||||
"binary": "clickhouse/binary-builder",
|
||||
}
|
||||
|
||||
def check_image_exists_locally(image_name):
|
||||
try:
|
||||
output = subprocess.check_output("docker images -q {} 2> /dev/null".format(image_name), shell=True)
|
||||
output = subprocess.check_output(
|
||||
f"docker images -q {image_name} 2> /dev/null", shell=True
|
||||
)
|
||||
return output != ""
|
||||
except subprocess.CalledProcessError as ex:
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
|
||||
def pull_image(image_name):
|
||||
try:
|
||||
subprocess.check_call("docker pull {}".format(image_name), shell=True)
|
||||
subprocess.check_call(f"docker pull {image_name}", shell=True)
|
||||
return True
|
||||
except subprocess.CalledProcessError as ex:
|
||||
logging.info("Cannot pull image {}".format(image_name))
|
||||
except subprocess.CalledProcessError:
|
||||
logging.info(f"Cannot pull image {image_name}".format())
|
||||
return False
|
||||
|
||||
|
||||
def build_image(image_name, filepath):
|
||||
context = os.path.dirname(filepath)
|
||||
build_cmd = "docker build --network=host -t {} -f {} {}".format(image_name, filepath, context)
|
||||
logging.info("Will build image with cmd: '{}'".format(build_cmd))
|
||||
build_cmd = f"docker build --network=host -t {image_name} -f {filepath} {context}"
|
||||
logging.info("Will build image with cmd: '%s'", build_cmd)
|
||||
subprocess.check_call(
|
||||
build_cmd,
|
||||
shell=True,
|
||||
)
|
||||
|
||||
def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache_dir, docker_image_version):
|
||||
|
||||
def run_docker_image_with_env(
|
||||
image_name, output, env_variables, ch_root, ccache_dir, docker_image_version
|
||||
):
|
||||
env_part = " -e ".join(env_variables)
|
||||
if env_part:
|
||||
env_part = " -e " + env_part
|
||||
@ -47,28 +50,52 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache
|
||||
else:
|
||||
interactive = ""
|
||||
|
||||
cmd = "docker run --network=host --rm --volume={output_path}:/output --volume={ch_root}:/build --volume={ccache_dir}:/ccache {env} {interactive} {img_name}".format(
|
||||
output_path=output,
|
||||
ch_root=ch_root,
|
||||
ccache_dir=ccache_dir,
|
||||
env=env_part,
|
||||
img_name=image_name + ":" + docker_image_version,
|
||||
interactive=interactive
|
||||
cmd = (
|
||||
f"docker run --network=host --rm --volume={output}:/output "
|
||||
f"--volume={ch_root}:/build --volume={ccache_dir}:/ccache {env_part} "
|
||||
f"{interactive} {image_name}:{docker_image_version}"
|
||||
)
|
||||
|
||||
logging.info("Will build ClickHouse pkg with cmd: '{}'".format(cmd))
|
||||
logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)
|
||||
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries):
|
||||
|
||||
def is_release_build(build_type, package_type, sanitizer, split_binary):
|
||||
return (
|
||||
build_type == ""
|
||||
and package_type == "deb"
|
||||
and sanitizer == ""
|
||||
and not split_binary
|
||||
)
|
||||
|
||||
|
||||
def parse_env_variables(
|
||||
build_type,
|
||||
compiler,
|
||||
sanitizer,
|
||||
package_type,
|
||||
image_type,
|
||||
cache,
|
||||
distcc_hosts,
|
||||
split_binary,
|
||||
clang_tidy,
|
||||
version,
|
||||
author,
|
||||
official,
|
||||
additional_pkgs,
|
||||
with_coverage,
|
||||
with_binaries,
|
||||
):
|
||||
DARWIN_SUFFIX = "-darwin"
|
||||
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
|
||||
ARM_SUFFIX = "-aarch64"
|
||||
FREEBSD_SUFFIX = "-freebsd"
|
||||
PPC_SUFFIX = '-ppc64le'
|
||||
PPC_SUFFIX = "-ppc64le"
|
||||
|
||||
result = []
|
||||
cmake_flags = ['$CMAKE_FLAGS']
|
||||
result.append("OUTPUT_DIR=/output")
|
||||
cmake_flags = ["$CMAKE_FLAGS"]
|
||||
|
||||
is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
|
||||
is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
|
||||
@ -77,61 +104,93 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
|
||||
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
|
||||
|
||||
if is_cross_darwin:
|
||||
cc = compiler[:-len(DARWIN_SUFFIX)]
|
||||
cc = compiler[: -len(DARWIN_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar")
|
||||
cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool")
|
||||
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib")
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/"
|
||||
"x86_64-apple-darwin-install_name_tool"
|
||||
)
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib"
|
||||
)
|
||||
cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld")
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake")
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake"
|
||||
)
|
||||
elif is_cross_darwin_arm:
|
||||
cc = compiler[:-len(DARWIN_ARM_SUFFIX)]
|
||||
cc = compiler[: -len(DARWIN_ARM_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar")
|
||||
cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool")
|
||||
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib")
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/"
|
||||
"aarch64-apple-darwin-install_name_tool"
|
||||
)
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib"
|
||||
)
|
||||
cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld")
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake")
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake"
|
||||
)
|
||||
elif is_cross_arm:
|
||||
cc = compiler[:-len(ARM_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")
|
||||
result.append("DEB_ARCH_FLAG=-aarm64")
|
||||
cc = compiler[: -len(ARM_SUFFIX)]
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake"
|
||||
)
|
||||
result.append("DEB_ARCH=arm64")
|
||||
elif is_cross_freebsd:
|
||||
cc = compiler[:-len(FREEBSD_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
|
||||
cc = compiler[: -len(FREEBSD_SUFFIX)]
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake"
|
||||
)
|
||||
elif is_cross_ppc:
|
||||
cc = compiler[:-len(PPC_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake")
|
||||
cc = compiler[: -len(PPC_SUFFIX)]
|
||||
cmake_flags.append(
|
||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
|
||||
)
|
||||
else:
|
||||
cc = compiler
|
||||
result.append("DEB_ARCH_FLAG=-aamd64")
|
||||
result.append("DEB_ARCH=amd64")
|
||||
|
||||
cxx = cc.replace('gcc', 'g++').replace('clang', 'clang++')
|
||||
cxx = cc.replace("gcc", "g++").replace("clang", "clang++")
|
||||
|
||||
if image_type == "deb":
result.append("DEB_CC={}".format(cc))
result.append("DEB_CXX={}".format(cxx))
# For building fuzzers
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))
elif image_type == "binary":
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))
cmake_flags.append('-DCMAKE_C_COMPILER=`which {}`'.format(cc))
cmake_flags.append('-DCMAKE_CXX_COMPILER=`which {}`'.format(cxx))
result.append("MAKE_DEB=true")
cmake_flags.append("-DENABLE_TESTS=0")
cmake_flags.append("-DENABLE_UTILS=0")
cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON")
cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON")
cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON")
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
cmake_flags.append("-DBUILD_STANDALONE_KEEPER=ON")
if is_release_build(build_type, package_type, sanitizer, split_binary):
cmake_flags.append("-DINSTALL_STRIPPED_BINARIES=ON")

result.append(f"CC={cc}")
result.append(f"CXX={cxx}")
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")

# Create combined output archive for split build and for performance tests.
if package_type == "performance":
result.append("COMBINED_OUTPUT=performance")
cmake_flags.append("-DENABLE_TESTS=0")
elif package_type == "coverity":
result.append("COMBINED_OUTPUT=coverity")
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
elif split_binary:
result.append("COMBINED_OUTPUT=shared_build")

if sanitizer:
result.append("SANITIZER={}".format(sanitizer))
result.append(f"SANITIZER={sanitizer}")
if build_type:
result.append("BUILD_TYPE={}".format(build_type))
result.append(f"BUILD_TYPE={build_type.capitalize()}")
else:
result.append("BUILD_TYPE=None")

if cache == 'distcc':
result.append("CCACHE_PREFIX={}".format(cache))
if cache == "distcc":
result.append(f"CCACHE_PREFIX={cache}")

if cache:
result.append("CCACHE_DIR=/ccache")
@ -142,109 +201,191 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
# result.append("CCACHE_UMASK=777")

if distcc_hosts:
hosts_with_params = ["{}/24,lzo".format(host) for host in distcc_hosts] + ["localhost/`nproc`"]
result.append('DISTCC_HOSTS="{}"'.format(" ".join(hosts_with_params)))
hosts_with_params = [f"{host}/24,lzo" for host in distcc_hosts] + [
"localhost/`nproc`"
]
result.append('DISTCC_HOSTS="' + " ".join(hosts_with_params) + '"')
elif cache == "distcc":
result.append('DISTCC_HOSTS="{}"'.format("localhost/`nproc`"))
result.append('DISTCC_HOSTS="localhost/`nproc`"')

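For a concrete picture of the DISTCC_HOSTS value assembled above, the new f-string version produces the following for a hypothetical two-host list (host addresses are made up for illustration):

# Hypothetical input, for illustration only.
distcc_hosts = ["10.0.0.1", "10.0.0.2"]
hosts_with_params = [f"{host}/24,lzo" for host in distcc_hosts] + ["localhost/`nproc`"]
print('DISTCC_HOSTS="' + " ".join(hosts_with_params) + '"')
# DISTCC_HOSTS="10.0.0.1/24,lzo 10.0.0.2/24,lzo localhost/`nproc`"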
if alien_pkgs:
result.append("ALIEN_PKGS='" + ' '.join(['--' + pkg for pkg in alien_pkgs]) + "'")
if additional_pkgs:
result.append("MAKE_APK=true")
result.append("MAKE_RPM=true")
result.append("MAKE_TGZ=true")

if with_binaries == "programs":
result.append('BINARY_OUTPUT=programs')
result.append("BINARY_OUTPUT=programs")
elif with_binaries == "tests":
result.append('ENABLE_TESTS=1')
result.append('BINARY_OUTPUT=tests')
cmake_flags.append('-DENABLE_TESTS=1')
result.append("ENABLE_TESTS=1")
result.append("BINARY_OUTPUT=tests")
cmake_flags.append("-DENABLE_TESTS=1")

if split_binary:
cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1')
cmake_flags.append(
"-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 "
"-DCLICKHOUSE_SPLIT_BINARY=1"
)
# We can't always build utils because it requires too much space, but
# we have to build them at least in some way in CI. The split build is
# probably the least heavy disk-wise.
cmake_flags.append('-DENABLE_UTILS=1')
cmake_flags.append("-DENABLE_UTILS=1")

if clang_tidy:
cmake_flags.append('-DENABLE_CLANG_TIDY=1')
cmake_flags.append('-DENABLE_UTILS=1')
cmake_flags.append('-DENABLE_TESTS=1')
cmake_flags.append('-DENABLE_EXAMPLES=1')
cmake_flags.append("-DENABLE_CLANG_TIDY=1")
cmake_flags.append("-DENABLE_UTILS=1")
cmake_flags.append("-DENABLE_TESTS=1")
cmake_flags.append("-DENABLE_EXAMPLES=1")
# Don't stop on first error to find more clang-tidy errors in one run.
result.append('NINJA_FLAGS=-k0')
result.append("NINJA_FLAGS=-k0")

if with_coverage:
cmake_flags.append('-DWITH_COVERAGE=1')
cmake_flags.append("-DWITH_COVERAGE=1")

if version:
result.append("VERSION_STRING='{}'".format(version))
result.append(f"VERSION_STRING='{version}'")

if author:
result.append("AUTHOR='{}'".format(author))
result.append(f"AUTHOR='{author}'")

if official:
cmake_flags.append('-DYANDEX_OFFICIAL_BUILD=1')
cmake_flags.append("-DCLICKHOUSE_OFFICIAL_BUILD=1")

result.append('CMAKE_FLAGS="' + ' '.join(cmake_flags) + '"')
result.append('CMAKE_FLAGS="' + " ".join(cmake_flags) + '"')

return result


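parse_env_variables returns a flat list of NAME=value strings; downstream they become environment variables of the build container via run_docker_image_with_env (not shown in this hunk). The snippet below is only a rough sketch of that idea, under the assumption that each entry maps to a docker "-e" flag; it is not the repository's actual implementation:

# Rough sketch, not the real run_docker_image_with_env.
import shlex

def env_to_docker_args(env_list):
    # Each entry looks like "DEB_CC=clang-13" or 'CMAKE_FLAGS="..."'.
    args = []
    for entry in env_list:
        args += ["-e", entry]
    return args

print(" ".join(shlex.quote(a) for a in env_to_docker_args(["BUILD_TYPE=None", "CCACHE_DIR=/ccache"])))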
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
|
||||
parser = argparse.ArgumentParser(description="ClickHouse building script using prebuilt Docker image")
|
||||
# 'performance' creates a combined .tgz with server and configs to be used for performance test.
|
||||
parser.add_argument("--package-type", choices=['deb', 'binary', 'performance'], required=True)
|
||||
parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
description="ClickHouse building script using prebuilt Docker image",
|
||||
)
|
||||
# 'performance' creates a combined .tgz with server
|
||||
# and configs to be used for performance test.
|
||||
parser.add_argument(
|
||||
"--package-type",
|
||||
choices=["deb", "binary", "performance", "coverity"],
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--clickhouse-repo-path",
|
||||
default=os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir
|
||||
),
|
||||
help="ClickHouse git repository",
|
||||
)
|
||||
parser.add_argument("--output-dir", required=True)
|
||||
parser.add_argument("--build-type", choices=("debug", ""), default="")
|
||||
parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
|
||||
"clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
|
||||
"clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64", "clang-13-ppc64le",
|
||||
"clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13")
|
||||
parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
|
||||
parser.add_argument(
|
||||
"--compiler",
|
||||
choices=(
|
||||
"clang-11",
|
||||
"clang-11-darwin",
|
||||
"clang-11-darwin-aarch64",
|
||||
"clang-11-aarch64",
|
||||
"clang-12",
|
||||
"clang-12-darwin",
|
||||
"clang-12-darwin-aarch64",
|
||||
"clang-12-aarch64",
|
||||
"clang-13",
|
||||
"clang-13-darwin",
|
||||
"clang-13-darwin-aarch64",
|
||||
"clang-13-aarch64",
|
||||
"clang-13-ppc64le",
|
||||
"clang-11-freebsd",
|
||||
"clang-12-freebsd",
|
||||
"clang-13-freebsd",
|
||||
"gcc-11",
|
||||
),
|
||||
default="clang-13",
|
||||
help="a compiler to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sanitizer",
|
||||
choices=("address", "thread", "memory", "undefined", ""),
|
||||
default="",
|
||||
)
|
||||
parser.add_argument("--split-binary", action="store_true")
|
||||
parser.add_argument("--clang-tidy", action="store_true")
|
||||
parser.add_argument("--cache", choices=("", "ccache", "distcc"), default="")
|
||||
parser.add_argument("--ccache_dir", default= os.getenv("HOME", "") + '/.ccache')
|
||||
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
|
||||
parser.add_argument(
|
||||
"--ccache_dir",
|
||||
default=os.getenv("HOME", "") + "/.ccache",
|
||||
help="a directory with ccache",
|
||||
)
|
||||
parser.add_argument("--distcc-hosts", nargs="+")
|
||||
parser.add_argument("--force-build-image", action="store_true")
|
||||
parser.add_argument("--version")
|
||||
parser.add_argument("--author", default="clickhouse")
|
||||
parser.add_argument("--author", default="clickhouse", help="a package author")
|
||||
parser.add_argument("--official", action="store_true")
|
||||
parser.add_argument("--alien-pkgs", nargs='+', default=[])
|
||||
parser.add_argument("--additional-pkgs", action="store_true")
|
||||
parser.add_argument("--with-coverage", action="store_true")
|
||||
parser.add_argument("--with-binaries", choices=("programs", "tests", ""), default="")
|
||||
parser.add_argument("--docker-image-version", default="latest")
|
||||
parser.add_argument(
|
||||
"--with-binaries", choices=("programs", "tests", ""), default=""
|
||||
)
|
||||
parser.add_argument(
|
||||
"--docker-image-version", default="latest", help="docker image tag to use"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
if not os.path.isabs(args.output_dir):
|
||||
args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))
|
||||
|
||||
image_type = 'binary' if args.package_type == 'performance' else args.package_type
|
||||
image_name = IMAGE_MAP[image_type]
|
||||
image_type = (
|
||||
"binary"
|
||||
if args.package_type in ("performance", "coverity")
|
||||
else args.package_type
|
||||
)
|
||||
image_name = "clickhouse/binary-builder"
|
||||
|
||||
if not os.path.isabs(args.clickhouse_repo_path):
|
||||
ch_root = os.path.abspath(os.path.join(os.getcwd(), args.clickhouse_repo_path))
|
||||
else:
|
||||
ch_root = args.clickhouse_repo_path
|
||||
|
||||
if args.alien_pkgs and not image_type == "deb":
|
||||
raise Exception("Can add alien packages only in deb build")
|
||||
if args.additional_pkgs and image_type != "deb":
|
||||
raise Exception("Can build additional packages only in deb build")
|
||||
|
||||
if args.with_binaries != "" and not image_type == "deb":
|
||||
if args.with_binaries != "" and image_type != "deb":
|
||||
raise Exception("Can add additional binaries only in deb build")
|
||||
|
||||
if args.with_binaries != "" and image_type == "deb":
|
||||
logging.info("Should place {} to output".format(args.with_binaries))
|
||||
logging.info("Should place %s to output", args.with_binaries)
|
||||
|
||||
dockerfile = os.path.join(ch_root, "docker/packager", image_type, "Dockerfile")
|
||||
image_with_version = image_name + ":" + args.docker_image_version
|
||||
if image_type != "freebsd" and not check_image_exists_locally(image_name) or args.force_build_image:
|
||||
if (
|
||||
image_type != "freebsd"
|
||||
and not check_image_exists_locally(image_name)
|
||||
or args.force_build_image
|
||||
):
|
||||
if not pull_image(image_with_version) or args.force_build_image:
|
||||
build_image(image_with_version, dockerfile)
|
||||
env_prepared = parse_env_variables(
|
||||
args.build_type, args.compiler, args.sanitizer, args.package_type, image_type,
|
||||
args.cache, args.distcc_hosts, args.split_binary, args.clang_tidy,
|
||||
args.version, args.author, args.official, args.alien_pkgs, args.with_coverage, args.with_binaries)
|
||||
args.build_type,
|
||||
args.compiler,
|
||||
args.sanitizer,
|
||||
args.package_type,
|
||||
image_type,
|
||||
args.cache,
|
||||
args.distcc_hosts,
|
||||
args.split_binary,
|
||||
args.clang_tidy,
|
||||
args.version,
|
||||
args.author,
|
||||
args.official,
|
||||
args.additional_pkgs,
|
||||
args.with_coverage,
|
||||
args.with_binaries,
|
||||
)
|
||||
|
||||
run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir, args.docker_image_version)
|
||||
logging.info("Output placed into {}".format(args.output_dir))
|
||||
run_docker_image_with_env(
|
||||
image_name,
|
||||
args.output_dir,
|
||||
env_prepared,
|
||||
ch_root,
|
||||
args.ccache_dir,
|
||||
args.docker_image_version,
|
||||
)
|
||||
logging.info("Output placed into %s", args.output_dir)
2
docker/server/.gitignore
vendored
2
docker/server/.gitignore
vendored
@ -1,2 +0,0 @@
alpine-root/*
tgz-packages/*
@ -1,122 +0,0 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ARG repository="deb https://packages.clickhouse.com/deb stable main"
|
||||
ARG version=22.1.1.*
|
||||
|
||||
# set non-empty deb_location_url url to create a docker image
|
||||
# from debs created by CI build, for example:
|
||||
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
|
||||
ARG deb_location_url=""
|
||||
|
||||
# set non-empty single_binary_location_url to create docker image
|
||||
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
|
||||
# for example (run on aarch64 server):
|
||||
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
|
||||
# note: clickhouse-odbc-bridge is not supported there.
|
||||
ARG single_binary_location_url=""
|
||||
|
||||
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||
# It is especially important for rootless containers: in that case entrypoint
|
||||
# can't do chown and owners of mounted volumes should be configured externally.
|
||||
# We do that in advance at the begining of Dockerfile before any packages will be
|
||||
# installed to prevent picking those uid / gid by some unrelated software.
|
||||
# The same uid / gid (101) is used both for alpine and ubuntu.
|
||||
|
||||
# To drop privileges, we need 'su' command, that simply changes uid and gid.
|
||||
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
|
||||
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
|
||||
# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal
|
||||
# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does)
|
||||
# and for these reasons people are using alternatives to the 'su' command in Docker,
|
||||
# that don't mess with the terminal, don't care about closing the opened files, etc...
|
||||
# but can only be safe to drop privileges inside Docker.
|
||||
# The question - what implementation of 'su' command to use.
|
||||
# It should be a simple script doing about just two syscalls.
|
||||
# Some people tend to use 'gosu' tool that is written in Go.
|
||||
# It is not used for several reasons:
|
||||
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
|
||||
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.
|
||||
|
||||
COPY su-exec.c /su-exec.c
|
||||
|
||||
RUN groupadd -r clickhouse --gid=101 \
|
||||
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
|
||||
&& apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
dirmngr \
|
||||
gnupg \
|
||||
locales \
|
||||
wget \
|
||||
tzdata \
|
||||
&& mkdir -p /etc/apt/sources.list.d \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
|
||||
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
|
||||
&& if [ -n "$deb_location_url" ]; then \
|
||||
echo "installing from custom url with deb packages: $deb_location_url" \
|
||||
rm -rf /tmp/clickhouse_debs \
|
||||
&& mkdir -p /tmp/clickhouse_debs \
|
||||
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \
|
||||
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \
|
||||
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \
|
||||
&& dpkg -i /tmp/clickhouse_debs/*.deb ; \
|
||||
elif [ -n "$single_binary_location_url" ]; then \
|
||||
echo "installing from single binary url: $single_binary_location_url" \
|
||||
&& rm -rf /tmp/clickhouse_binary \
|
||||
&& mkdir -p /tmp/clickhouse_binary \
|
||||
&& wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \
|
||||
&& chmod +x /tmp/clickhouse_binary/clickhouse \
|
||||
&& /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
|
||||
else \
|
||||
echo "installing from repository: $repository" \
|
||||
&& apt-get update \
|
||||
&& apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
|
||||
&& apt-get install --allow-unauthenticated --yes --no-install-recommends \
|
||||
clickhouse-common-static=$version \
|
||||
clickhouse-client=$version \
|
||||
clickhouse-server=$version ; \
|
||||
fi \
|
||||
&& apt-get install -y --no-install-recommends tcc libc-dev && \
|
||||
tcc /su-exec.c -o /bin/su-exec && \
|
||||
chown root:root /bin/su-exec && \
|
||||
chmod 0755 /bin/su-exec && \
|
||||
rm /su-exec.c && \
|
||||
apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
|
||||
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
|
||||
&& rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
/tmp/* \
|
||||
&& apt-get clean \
|
||||
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
|
||||
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
|
||||
|
||||
# we need to allow "others" access to clickhouse folder, because docker container
|
||||
# can be started with arbitrary uid (openshift usecase)
|
||||
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
ENV TZ UTC
|
||||
|
||||
RUN mkdir /docker-entrypoint-initdb.d
|
||||
|
||||
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
EXPOSE 9000 8123 9009
|
||||
VOLUME /var/lib/clickhouse
|
||||
|
||||
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
1
docker/server/Dockerfile
Symbolic link
1
docker/server/Dockerfile
Symbolic link
@ -0,0 +1 @@
|
||||
Dockerfile.ubuntu
|
@ -1,3 +1,14 @@
|
||||
FROM ubuntu:20.04 AS glibc-donor
|
||||
ARG TARGETARCH
|
||||
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) rarch=x86_64 ;; \
|
||||
arm64) rarch=aarch64 ;; \
|
||||
esac \
|
||||
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
|
||||
|
||||
|
||||
FROM alpine
|
||||
|
||||
ENV LANG=en_US.UTF-8 \
|
||||
@ -6,7 +17,24 @@ ENV LANG=en_US.UTF-8 \
|
||||
TZ=UTC \
|
||||
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
|
||||
|
||||
COPY alpine-root/ /
|
||||
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
|
||||
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
|
||||
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
|
||||
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
|
||||
esac
|
||||
|
||||
# lts / testing / prestable / etc
|
||||
ARG REPO_CHANNEL="stable"
|
||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||
ARG VERSION="20.9.3.45"
|
||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||
|
||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||
# It is especially important for rootless containers: in that case entrypoint
|
||||
@ -15,9 +43,23 @@ COPY alpine-root/ /
|
||||
# installed to prevent picking those uid / gid by some unrelated software.
|
||||
# The same uid / gid (101) is used both for alpine and ubuntu.
|
||||
|
||||
RUN addgroup -S -g 101 clickhouse \
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& for package in ${PACKAGES}; do \
|
||||
{ \
|
||||
{ echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
|
||||
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
|
||||
&& tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
|
||||
} || \
|
||||
{ echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
|
||||
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
|
||||
&& tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
|
||||
} ; \
|
||||
} || exit 1 \
|
||||
; done \
|
||||
&& rm /tmp/*.tgz /install -r \
|
||||
&& addgroup -S -g 101 clickhouse \
|
||||
&& adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \
|
||||
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
|
||||
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \
|
||||
&& chown clickhouse:clickhouse /var/lib/clickhouse \
|
||||
&& chown root:clickhouse /var/log/clickhouse-server \
|
||||
&& chmod +x /entrypoint.sh \
|
||||
|
129
docker/server/Dockerfile.ubuntu
Normal file
129
docker/server/Dockerfile.ubuntu
Normal file
@ -0,0 +1,129 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
COPY su-exec.c /su-exec.c
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list \
|
||||
&& groupadd -r clickhouse --gid=101 \
|
||||
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
|
||||
&& apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
dirmngr \
|
||||
gnupg \
|
||||
locales \
|
||||
wget \
|
||||
tzdata \
|
||||
&& apt-get install -y --no-install-recommends tcc libc-dev && \
|
||||
tcc /su-exec.c -o /bin/su-exec && \
|
||||
chown root:root /bin/su-exec && \
|
||||
chmod 0755 /bin/su-exec && \
|
||||
rm /su-exec.c && \
|
||||
apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
|
||||
&& apt-get clean
|
||||
|
||||
ARG REPO_CHANNEL="stable"
|
||||
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
||||
ARG VERSION=22.1.1.*
|
||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||
|
||||
# set non-empty deb_location_url url to create a docker image
|
||||
# from debs created by CI build, for example:
|
||||
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
|
||||
ARG deb_location_url=""
|
||||
|
||||
# set non-empty single_binary_location_url to create docker image
|
||||
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
|
||||
# for example (run on aarch64 server):
|
||||
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
|
||||
# note: clickhouse-odbc-bridge is not supported there.
|
||||
ARG single_binary_location_url=""
|
||||
|
||||
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.

# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal
# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does)
# and for these reasons people are using alternatives to the 'su' command in Docker,
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but can only be safe to drop privileges inside Docker.
# The question - what implementation of 'su' command to use.
# It should be a simple script doing about just two syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.
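The comment block above argues for a tiny su-exec style helper whose whole job is to change gid/uid and exec the target command. A minimal Python sketch of that "about just two syscalls" idea (illustration only; the image actually compiles the bundled su-exec.c with tcc, and the 101/101 ids below are simply the clickhouse user and group created above):

# Illustration of the idea, not the su-exec.c used by the image.
import os

def drop_privileges_and_exec(uid, gid, argv):
    os.setgid(gid)   # change the group first, while we still have the privilege to do so
    os.setuid(uid)   # then drop the user id
    os.execvp(argv[0], argv)   # replace the current process; never returns

# e.g. drop_privileges_and_exec(101, 101, ["clickhouse-server", "--config-file=/etc/clickhouse-server/config.xml"])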
|
||||
ARG TARGETARCH
|
||||
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& if [ -n "${deb_location_url}" ]; then \
|
||||
echo "installing from custom url with deb packages: ${deb_location_url}" \
|
||||
rm -rf /tmp/clickhouse_debs \
|
||||
&& mkdir -p /tmp/clickhouse_debs \
|
||||
&& for package in ${PACKAGES}; do \
|
||||
{ wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \
|
||||
wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \
|
||||
|| exit 1 \
|
||||
; done \
|
||||
&& dpkg -i /tmp/clickhouse_debs/*.deb ; \
|
||||
elif [ -n "${single_binary_location_url}" ]; then \
|
||||
echo "installing from single binary url: ${single_binary_location_url}" \
|
||||
&& rm -rf /tmp/clickhouse_binary \
|
||||
&& mkdir -p /tmp/clickhouse_binary \
|
||||
&& wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \
|
||||
&& chmod +x /tmp/clickhouse_binary/clickhouse \
|
||||
&& /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
|
||||
else \
|
||||
mkdir -p /etc/apt/sources.list.d \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
|
||||
&& echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \
|
||||
&& echo "installing from repository: ${REPOSITORY}" \
|
||||
&& apt-get update \
|
||||
&& apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
|
||||
&& for package in ${PACKAGES}; do \
|
||||
packages="${packages} ${package}=${VERSION}" \
|
||||
; done \
|
||||
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
|
||||
; fi \
|
||||
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
|
||||
&& rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
/tmp/* \
|
||||
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
|
||||
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
|
||||
|
||||
# we need to allow "others" access to clickhouse folder, because docker container
|
||||
# can be started with arbitrary uid (openshift usecase)
|
||||
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
ENV TZ UTC
|
||||
|
||||
RUN mkdir /docker-entrypoint-initdb.d
|
||||
|
||||
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
EXPOSE 9000 8123 9009
|
||||
VOLUME /var/lib/clickhouse
|
||||
|
||||
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
@ -1,63 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
|
||||
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
|
||||
VERSION="${VERSION:-20.9.3.45}"
|
||||
DOCKER_IMAGE="${DOCKER_IMAGE:-clickhouse/clickhouse-server}"
|
||||
|
||||
# where original files live
|
||||
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
|
||||
|
||||
# we will create root for our image here
|
||||
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
|
||||
|
||||
# clean up the root from old runs, it's reconstructed each time
|
||||
rm -rf "$CONTAINER_ROOT_FOLDER"
|
||||
mkdir -p "$CONTAINER_ROOT_FOLDER"
|
||||
|
||||
# where to put downloaded tgz
|
||||
TGZ_PACKAGES_FOLDER="${DOCKER_BUILD_FOLDER}/tgz-packages"
|
||||
mkdir -p "$TGZ_PACKAGES_FOLDER"
|
||||
|
||||
PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
|
||||
|
||||
# download tars from the repo
|
||||
for package in "${PACKAGES[@]}"
|
||||
do
|
||||
wget -c -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
|
||||
done
|
||||
|
||||
# unpack tars
|
||||
for package in "${PACKAGES[@]}"
|
||||
do
|
||||
tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
|
||||
done
|
||||
|
||||
# prepare few more folders
|
||||
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
|
||||
"${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
|
||||
"${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
|
||||
"${CONTAINER_ROOT_FOLDER}/lib64"
|
||||
|
||||
cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
|
||||
cp "${DOCKER_BUILD_FOLDER}/entrypoint.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
|
||||
|
||||
## get glibc components from ubuntu 20.04 and put them to expected place
|
||||
docker pull ubuntu:20.04
|
||||
ubuntu20image=$(docker create --rm ubuntu:20.04)
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_files.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
|
||||
docker cp -L "${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
|
||||
docker cp -L "${ubuntu20image}":/etc/nsswitch.conf "${CONTAINER_ROOT_FOLDER}/etc"
|
||||
|
||||
docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "${DOCKER_IMAGE}:${VERSION}-alpine" --pull
|
||||
rm -rf "$CONTAINER_ROOT_FOLDER"
|
@ -1,47 +0,0 @@
|
||||
# Since right now we can't set volumes to the docker during build, we split building container in stages:
|
||||
# 1. build base container
|
||||
# 2. run base conatiner with mounted volumes
|
||||
# 3. commit container as image
|
||||
# 4. build final container atop that image
|
||||
# Middle steps are performed by the bash script.
|
||||
|
||||
FROM ubuntu:18.04 as clickhouse-server-base
|
||||
ARG gosu_ver=1.14
|
||||
|
||||
VOLUME /packages/
|
||||
|
||||
# update to allow installing dependencies of clickhouse automatically
|
||||
RUN apt update; \
|
||||
DEBIAN_FRONTEND=noninteractive \
|
||||
apt install -y locales;
|
||||
|
||||
ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu
|
||||
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
|
||||
# installing via apt to simulate real-world scenario, where user installs deb package and all it's dependecies automatically.
|
||||
CMD DEBIAN_FRONTEND=noninteractive \
|
||||
apt install -y \
|
||||
/packages/clickhouse-common-static_*.deb \
|
||||
/packages/clickhouse-server_*.deb ;
|
||||
|
||||
FROM clickhouse-server-base:postinstall as clickhouse-server
|
||||
|
||||
RUN mkdir /docker-entrypoint-initdb.d
|
||||
|
||||
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
RUN chmod +x \
|
||||
/entrypoint.sh \
|
||||
/bin/gosu
|
||||
|
||||
EXPOSE 9000 8123 9009
|
||||
VOLUME /var/lib/clickhouse
|
||||
|
||||
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
@ -115,6 +115,7 @@ function start_server
|
||||
|
||||
function clone_root
|
||||
{
|
||||
git config --global --add safe.directory "$FASTTEST_SOURCE"
|
||||
git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt"
|
||||
|
||||
(
|
||||
@ -267,6 +268,7 @@ function run_tests
|
||||
local test_opts=(
|
||||
--hung-check
|
||||
--fast-tests-only
|
||||
--no-random-settings
|
||||
--no-long
|
||||
--testname
|
||||
--shard
|
||||
|
@ -11,7 +11,7 @@ def removesuffix(text, suffix):
https://www.python.org/dev/peps/pep-0616/
"""
if suffix and text.endswith(suffix):
return text[:-len(suffix)]
return text[: -len(suffix)]
else:
return text[:]


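The only change in this hunk is whitespace inside the slice (black reformatting); the behaviour of this PEP 616 style helper is unchanged. A quick illustrative check:

# Same result before and after the reformatting.
assert removesuffix("clickhouse-server_amd64.deb", ".deb") == "clickhouse-server_amd64"
assert removesuffix("no-match", ".tgz") == "no-match"
assert removesuffix("unchanged", "") == "unchanged"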
@ -13,7 +13,7 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
echo "$script_dir"
|
||||
repo_dir=ch
|
||||
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"}
|
||||
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
|
||||
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
|
||||
|
||||
function clone
|
||||
{
|
||||
@ -226,7 +226,6 @@ quit
|
||||
--receive_data_timeout_ms=10000 \
|
||||
--stacktrace \
|
||||
--query-fuzzer-runs=1000 \
|
||||
--testmode \
|
||||
--queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
|
||||
$NEW_TESTS_OPT \
|
||||
> >(tail -n 100000 > fuzzer.log) \
|
||||
|
@ -60,5 +60,5 @@ clientPort=2181 \n\
maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
RUN mkdir /zookeeper && chmod -R 777 /zookeeper

ENV TZ=Europe/Moscow
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

@ -3,55 +3,55 @@ import subprocess
|
||||
import datetime
|
||||
from flask import Flask, flash, request, redirect, url_for
|
||||
|
||||
|
||||
def run_command(command, wait=False):
|
||||
print("{} - execute shell command:{}".format(datetime.datetime.now(), command))
|
||||
lines = []
|
||||
p = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
shell=True)
|
||||
p = subprocess.Popen(
|
||||
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True
|
||||
)
|
||||
if wait:
|
||||
for l in iter(p.stdout.readline, b''):
|
||||
for l in iter(p.stdout.readline, b""):
|
||||
lines.append(l)
|
||||
p.poll()
|
||||
return (lines, p.returncode)
|
||||
else:
|
||||
return(iter(p.stdout.readline, b''), 0)
|
||||
return (iter(p.stdout.readline, b""), 0)
|
||||
|
||||
|
||||
UPLOAD_FOLDER = './'
|
||||
ALLOWED_EXTENSIONS = {'txt', 'sh'}
|
||||
UPLOAD_FOLDER = "./"
|
||||
ALLOWED_EXTENSIONS = {"txt", "sh"}
|
||||
app = Flask(__name__)
|
||||
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
|
||||
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
|
||||
|
||||
@app.route('/')
|
||||
|
||||
@app.route("/")
|
||||
def hello_world():
|
||||
return 'Hello World'
|
||||
return "Hello World"
|
||||
|
||||
|
||||
def allowed_file(filename):
|
||||
return '.' in filename and \
|
||||
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
|
||||
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
|
||||
|
||||
|
||||
@app.route('/upload', methods=['GET', 'POST'])
|
||||
@app.route("/upload", methods=["GET", "POST"])
|
||||
def upload_file():
|
||||
if request.method == 'POST':
|
||||
if request.method == "POST":
|
||||
# check if the post request has the file part
|
||||
if 'file' not in request.files:
|
||||
flash('No file part')
|
||||
if "file" not in request.files:
|
||||
flash("No file part")
|
||||
return redirect(request.url)
|
||||
file = request.files['file']
|
||||
file = request.files["file"]
|
||||
# If the user does not select a file, the browser submits an
|
||||
# empty file without a filename.
|
||||
if file.filename == '':
|
||||
flash('No selected file')
|
||||
if file.filename == "":
|
||||
flash("No selected file")
|
||||
return redirect(request.url)
|
||||
if file and allowed_file(file.filename):
|
||||
filename = file.filename
|
||||
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
|
||||
return redirect(url_for('upload_file', name=filename))
|
||||
return '''
|
||||
file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
|
||||
return redirect(url_for("upload_file", name=filename))
|
||||
return """
|
||||
<!doctype html>
|
||||
<title>Upload new File</title>
|
||||
<h1>Upload new File</h1>
|
||||
@ -59,12 +59,15 @@ def upload_file():
|
||||
<input type=file name=file>
|
||||
<input type=submit value=Upload>
|
||||
</form>
|
||||
'''
|
||||
@app.route('/run', methods=['GET', 'POST'])
|
||||
"""
|
||||
|
||||
|
||||
@app.route("/run", methods=["GET", "POST"])
|
||||
def parse_request():
|
||||
data = request.data # data is empty
|
||||
run_command(data, wait=True)
|
||||
return 'Ok'
|
||||
return "Ok"
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(port=5011)
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(port=5011)
|
||||
|
@ -1,8 +1,10 @@
|
||||
# docker build -t clickhouse/mysql-js-client .
|
||||
# MySQL JavaScript client docker container
|
||||
|
||||
FROM node:8
|
||||
FROM node:16.14.2
|
||||
|
||||
WORKDIR /usr/app
|
||||
|
||||
RUN npm install mysql
|
||||
|
||||
COPY ./test.js test.js
|
||||
COPY ./test.js ./test.js
|
||||
|
@ -40,7 +40,7 @@ RUN apt-get update \
|
||||
/tmp/* \
|
||||
&& apt-get clean
|
||||
|
||||
ENV TZ=Europe/Moscow
|
||||
ENV TZ=Etc/UTC
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
|
@ -2,7 +2,7 @@
|
||||
set -euo pipefail
|
||||
|
||||
|
||||
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
|
||||
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
|
||||
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}
|
||||
|
||||
|
||||
@ -10,7 +10,7 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
|
||||
CLICKHOUSE_REPO_PATH=ch
|
||||
rm -rf ch ||:
|
||||
mkdir ch ||:
|
||||
wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
|
||||
wget -nv -nd -c "https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
|
||||
tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
|
||||
ls -lath ||:
|
||||
fi
|
||||
|
@ -362,19 +362,6 @@ function get_profiles
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
|
||||
}
|
||||
|
||||
function build_log_column_definitions
|
||||
{
|
||||
# FIXME This loop builds column definitons from TSVWithNamesAndTypes in an
|
||||
# absolutely atrocious way. This should be done by the file() function itself.
|
||||
for x in {right,left}-{addresses,{query,query-thread,trace,{async-,}metric}-log}.tsv
|
||||
do
|
||||
paste -d' ' \
|
||||
<(sed -n '1{s/\t/\n/g;p;q}' "$x" | sed 's/\(^.*$\)/"\1"/') \
|
||||
<(sed -n '2{s/\t/\n/g;p;q}' "$x" ) \
|
||||
| tr '\n' ', ' | sed 's/,$//' > "$x.columns"
|
||||
done
|
||||
}
|
||||
|
||||
# Build and analyze randomization distribution for all queries.
|
||||
function analyze_queries
|
||||
{
|
||||
@ -382,8 +369,6 @@ rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.t
|
||||
rm -rf analyze ||:
|
||||
mkdir analyze analyze/tmp ||:
|
||||
|
||||
build_log_column_definitions
|
||||
|
||||
# Split the raw test output into files suitable for analysis.
|
||||
# To debug calculations only for a particular test, substitute a suitable
|
||||
# wildcard here, e.g. `for test_file in modulo-raw.tsv`.
|
||||
@ -422,12 +407,10 @@ create table partial_query_times engine File(TSVWithNamesAndTypes,
|
||||
|
||||
-- Process queries that were run normally, on both servers.
|
||||
create view left_query_log as select *
|
||||
from file('left-query-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat "left-query-log.tsv.columns")');
|
||||
from file('left-query-log.tsv', TSVWithNamesAndTypes);
|
||||
|
||||
create view right_query_log as select *
|
||||
from file('right-query-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat "right-query-log.tsv.columns")');
|
||||
from file('right-query-log.tsv', TSVWithNamesAndTypes);
|
||||
|
||||
create view query_logs as
|
||||
select 0 version, query_id, ProfileEvents,
|
||||
@ -645,8 +628,6 @@ mkdir report report/tmp ||:
|
||||
|
||||
rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:
|
||||
|
||||
build_log_column_definitions
|
||||
|
||||
cat analyze/errors.log >> report/errors.log ||:
|
||||
cat profile-errors.log >> report/errors.log ||:
|
||||
|
||||
@ -1028,8 +1009,7 @@ create table unstable_query_runs engine File(TSVWithNamesAndTypes,
|
||||
;
|
||||
|
||||
create view query_log as select *
|
||||
from file('$version-query-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat "$version-query-log.tsv.columns")');
|
||||
from file('$version-query-log.tsv', TSVWithNamesAndTypes);
|
||||
|
||||
create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
|
||||
'unstable-run-metrics.$version.rep') as
|
||||
@ -1057,8 +1037,7 @@ create table unstable_run_metrics_2 engine File(TSVWithNamesAndTypes,
|
||||
array join v, n;
|
||||
|
||||
create view trace_log as select *
|
||||
from file('$version-trace-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat "$version-trace-log.tsv.columns")');
|
||||
from file('$version-trace-log.tsv', TSVWithNamesAndTypes);
|
||||
|
||||
create view addresses_src as select addr,
|
||||
-- Some functions change name between builds, e.g. '__clone' or 'clone' or
|
||||
@ -1067,8 +1046,7 @@ create view addresses_src as select addr,
|
||||
[name, 'clone.S (filtered by script)', 'pthread_cond_timedwait (filtered by script)']
|
||||
-- this line is a subscript operator of the above array
|
||||
[1 + multiSearchFirstIndex(name, ['clone.S', 'pthread_cond_timedwait'])] name
|
||||
from file('$version-addresses.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat "$version-addresses.tsv.columns")');
|
||||
from file('$version-addresses.tsv', TSVWithNamesAndTypes);
|
||||
|
||||
create table addresses_join_$version engine Join(any, left, address) as
|
||||
select addr address, name from addresses_src;
|
||||
@ -1195,15 +1173,12 @@ done
|
||||
|
||||
function report_metrics
|
||||
{
|
||||
build_log_column_definitions
|
||||
|
||||
rm -rf metrics ||:
|
||||
mkdir metrics
|
||||
|
||||
clickhouse-local --query "
|
||||
create view right_async_metric_log as
|
||||
select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat right-async-metric-log.tsv.columns)')
|
||||
select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
|
||||
;
|
||||
|
||||
-- Use the right log as time reference because it may have higher precision.
|
||||
@ -1211,8 +1186,7 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
|
||||
with (select min(event_time) from right_async_metric_log) as min_time
|
||||
select metric, r.event_time - min_time event_time, l.value as left, r.value as right
|
||||
from right_async_metric_log r
|
||||
asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
|
||||
'$(cat left-async-metric-log.tsv.columns)') l
|
||||
asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes) l
|
||||
on l.metric = r.metric and r.event_time <= l.event_time
|
||||
order by metric, event_time
|
||||
;
|
||||
@ -1294,15 +1268,15 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
|
||||
select '' test_name,
|
||||
'$(sed -n 's/.*<!--message: \(.*\)-->/\1/p' report.html)' test_status,
|
||||
0 test_duration_ms,
|
||||
'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#fail1' report_url
|
||||
'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#fail1' report_url
|
||||
union all
|
||||
select test || ' #' || toString(query_index), 'slower' test_status, 0 test_duration_ms,
|
||||
'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#changes-in-performance.'
|
||||
'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#changes-in-performance.'
|
||||
|| test || '.' || toString(query_index) report_url
|
||||
from queries where changed_fail != 0 and diff > 0
|
||||
union all
|
||||
select test || ' #' || toString(query_index), 'unstable' test_status, 0 test_duration_ms,
|
||||
'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#unstable-queries.'
|
||||
'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#unstable-queries.'
|
||||
|| test || '.' || toString(query_index) report_url
|
||||
from queries where unstable_fail != 0
|
||||
)
|
||||
@ -1378,7 +1352,7 @@ $REF_SHA $SHA_TO_TEST $(numactl --hardware | sed -n 's/^available:[[:space:]]\+/
|
||||
EOF
|
||||
|
||||
# Also insert some data about the check into the CI checks table.
|
||||
"${client[@]}" --query "INSERT INTO "'"'"gh-data"'"'".checks FORMAT TSVWithNamesAndTypes" \
|
||||
"${client[@]}" --query "INSERT INTO "'"'"default"'"'".checks FORMAT TSVWithNamesAndTypes" \
|
||||
< ci-checks.tsv
|
||||
|
||||
set -x
|
||||
|
@ -16,26 +16,17 @@ right_sha=$4
|
||||
datasets=${CHPC_DATASETS-"hits1 hits10 hits100 values"}
|
||||
|
||||
declare -A dataset_paths
|
||||
if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then
|
||||
dataset_paths["hits10"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.amazonaws.com/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.amazonaws.com/values_with_expressions/partitions/test_values.tar"
|
||||
else
|
||||
dataset_paths["hits10"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_expressions/partitions/test_values.tar"
|
||||
fi
|
||||
dataset_paths["hits10"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.amazonaws.com/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.amazonaws.com/values_with_expressions/partitions/test_values.tar"
|
||||
|
||||
|
||||
function download
|
||||
{
|
||||
# Historically there were various paths for the performance test package.
|
||||
# Test all of them.
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz"
|
||||
"https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/clickhouse_build_check/performance/performance.tgz"
|
||||
)
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz")
|
||||
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
|
@ -4,7 +4,7 @@ set -ex
|
||||
CHPC_CHECK_START_TIMESTAMP="$(date +%s)"
|
||||
export CHPC_CHECK_START_TIMESTAMP
|
||||
|
||||
S3_URL=${S3_URL:="https://clickhouse-builds.s3.yandex.net"}
|
||||
S3_URL=${S3_URL:="https://clickhouse-builds.s3.amazonaws.com"}
|
||||
|
||||
COMMON_BUILD_PREFIX="/clickhouse_build_check"
|
||||
if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then
|
||||
@ -64,9 +64,7 @@ function find_reference_sha
|
||||
# Historically there were various path for the performance test package,
|
||||
# test all of them.
|
||||
unset found
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz"
|
||||
"https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/clickhouse_build_check/performance/performance.tgz"
|
||||
)
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz")
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
if curl_with_retry "$path"
|
||||
|
@ -19,58 +19,126 @@ import xml.etree.ElementTree as et
|
||||
from threading import Thread
|
||||
from scipy import stats
|
||||
|
||||
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING')
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s: %(levelname)s: %(module)s: %(message)s", level="WARNING"
|
||||
)
|
||||
|
||||
total_start_seconds = time.perf_counter()
|
||||
stage_start_seconds = total_start_seconds
|
||||
|
||||
|
||||
def reportStageEnd(stage):
|
||||
global stage_start_seconds, total_start_seconds
|
||||
|
||||
current = time.perf_counter()
|
||||
print(f'stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}')
|
||||
print(
|
||||
f"stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}"
|
||||
)
|
||||
stage_start_seconds = current
|
||||
|
||||
|
||||
def tsv_escape(s):
|
||||
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
|
||||
return (
|
||||
s.replace("\\", "\\\\")
|
||||
.replace("\t", "\\t")
|
||||
.replace("\n", "\\n")
|
||||
.replace("\r", "")
|
||||
)
|
||||
|
||||
|
||||
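tsv_escape is only reformatted here (the chained replace calls are split across lines); the escaping rules stay the same. A small illustrative check:

# Same behaviour before and after the black reformatting.
assert tsv_escape("a\tb") == "a\\tb"
assert tsv_escape("line1\nline2") == "line1\\nline2"
assert tsv_escape("back\\slash\r") == "back\\\\slash"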
parser = argparse.ArgumentParser(description='Run performance test.')
|
||||
parser = argparse.ArgumentParser(description="Run performance test.")
|
||||
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
|
||||
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
|
||||
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
|
||||
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
|
||||
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
|
||||
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
|
||||
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
|
||||
parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--prewarm-max-query-seconds', type=int, default=180, help='For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
|
||||
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
|
||||
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
|
||||
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
|
||||
parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.")
|
||||
parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.")
|
||||
parser.add_argument(
|
||||
"file",
|
||||
metavar="FILE",
|
||||
type=argparse.FileType("r", encoding="utf-8"),
|
||||
nargs=1,
|
||||
help="test description file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--host",
|
||||
nargs="*",
|
||||
default=["localhost"],
|
||||
help="Space-separated list of server hostname(s). Corresponds to '--port' options.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
nargs="*",
|
||||
default=[9000],
|
||||
help="Space-separated list of server port(s). Corresponds to '--host' options.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--runs", type=int, default=1, help="Number of query runs per server."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-queries",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Test no more than this number of queries, chosen at random.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--queries-to-run",
|
||||
nargs="*",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Space-separated list of indexes of queries to test.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-query-seconds",
|
||||
type=int,
|
||||
default=15,
|
||||
help="For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--prewarm-max-query-seconds",
|
||||
type=int,
|
||||
default=180,
|
||||
help="For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--profile-seconds",
|
||||
type=int,
|
||||
default=0,
|
||||
help="For how many seconds to profile a query for which the performance has changed.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--long", action="store_true", help="Do not skip the tests tagged as long."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--print-queries", action="store_true", help="Print test queries and exit."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--print-settings", action="store_true", help="Print test settings and exit."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--keep-created-tables",
|
||||
action="store_true",
|
||||
help="Don't drop the created tables after the test.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use-existing-tables",
|
||||
action="store_true",
|
||||
help="Don't create or drop the tables, use the existing ones instead.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
reportStageEnd('start')
|
||||
reportStageEnd("start")
|
||||
|
||||
test_name = os.path.splitext(os.path.basename(args.file[0].name))[0]
|
||||
|
||||
tree = et.parse(args.file[0])
|
||||
root = tree.getroot()
|
||||
|
||||
reportStageEnd('parse')
|
||||
reportStageEnd("parse")
|
||||
|
||||
# Process query parameters
|
||||
subst_elems = root.findall('substitutions/substitution')
|
||||
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
|
||||
subst_elems = root.findall("substitutions/substitution")
|
||||
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
|
||||
for e in subst_elems:
|
||||
name = e.find('name').text
|
||||
values = [v.text for v in e.findall('values/value')]
|
||||
name = e.find("name").text
|
||||
values = [v.text for v in e.findall("values/value")]
|
||||
if not values:
|
||||
raise Exception(f'No values given for substitution {{{name}}}')
|
||||
raise Exception(f"No values given for substitution {{{name}}}")
|
||||
|
||||
available_parameters[name] = values
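As an aside, a minimal sketch of how such substitutions can be expanded is shown below; it assumes plain {name} placeholders and is not the script's actual substitute_parameters implementation, whose body is elided from this diff.

# Hypothetical sketch: expand every {name} placeholder over the cartesian
# product of its values. Parameter names and values here are examples only.
import itertools

def expand(template, parameters):
    names = list(parameters.keys())
    for combo in itertools.product(*(parameters[n] for n in names)):
        yield template.format(**dict(zip(names, combo)))

params = {"table": ["hits_10m", "hits_100m"]}
print(list(expand("SELECT count() FROM {table}", params)))
# ['SELECT count() FROM hits_10m', 'SELECT count() FROM hits_100m']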
|
||||
|
||||
@ -78,7 +146,7 @@ for e in subst_elems:
|
||||
# parameters. The set of parameters is determined based on the first list.
|
||||
# Note: keep the order of queries -- sometimes we have DROP IF EXISTS
|
||||
# followed by CREATE in create queries section, so the order matters.
|
||||
def substitute_parameters(query_templates, other_templates = []):
|
||||
def substitute_parameters(query_templates, other_templates=[]):
|
||||
query_results = []
|
||||
other_results = [[]] * (len(other_templates))
|
||||
for i, q in enumerate(query_templates):
|
||||
@ -103,17 +171,21 @@ def substitute_parameters(query_templates, other_templates = []):
|
||||
# and reporting the queries marked as short.
|
||||
test_queries = []
|
||||
is_short = []
|
||||
for e in root.findall('query'):
|
||||
new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]])
|
||||
for e in root.findall("query"):
|
||||
new_queries, [new_is_short] = substitute_parameters(
|
||||
[e.text], [[e.attrib.get("short", "0")]]
|
||||
)
|
||||
test_queries += new_queries
|
||||
is_short += [eval(s) for s in new_is_short]
|
||||
|
||||
assert(len(test_queries) == len(is_short))
|
||||
assert len(test_queries) == len(is_short)
|
||||
|
||||
# If we're given a list of queries to run, check that it makes sense.
|
||||
for i in args.queries_to_run or []:
|
||||
if i < 0 or i >= len(test_queries):
|
||||
print(f'There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present')
|
||||
print(
|
||||
f"There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
# If we're only asked to print the queries, do that and exit.
|
||||
@ -125,60 +197,65 @@ if args.print_queries:
|
||||
# Print short queries
|
||||
for i, s in enumerate(is_short):
|
||||
if s:
|
||||
print(f'short\t{i}')
|
||||
print(f"short\t{i}")
|
||||
|
||||
# If we're only asked to print the settings, do that and exit. These are settings
|
||||
# for clickhouse-benchmark, so we print them as command line arguments, e.g.
|
||||
# '--max_memory_usage=10000000'.
|
||||
if args.print_settings:
|
||||
for s in root.findall('settings/*'):
|
||||
print(f'--{s.tag}={s.text}')
|
||||
for s in root.findall("settings/*"):
|
||||
print(f"--{s.tag}={s.text}")
|
||||
|
||||
exit(0)
|
||||
|
||||
# Skip long tests
|
||||
if not args.long:
|
||||
for tag in root.findall('.//tag'):
|
||||
if tag.text == 'long':
|
||||
print('skipped\tTest is tagged as long.')
|
||||
for tag in root.findall(".//tag"):
|
||||
if tag.text == "long":
|
||||
print("skipped\tTest is tagged as long.")
|
||||
sys.exit(0)
|
||||
|
||||
# Print report threshold for the test if it is set.
|
||||
ignored_relative_change = 0.05
|
||||
if 'max_ignored_relative_change' in root.attrib:
|
||||
if "max_ignored_relative_change" in root.attrib:
|
||||
ignored_relative_change = float(root.attrib["max_ignored_relative_change"])
|
||||
print(f'report-threshold\t{ignored_relative_change}')
|
||||
print(f"report-threshold\t{ignored_relative_change}")
|
||||
|
||||
reportStageEnd('before-connect')
|
||||
reportStageEnd("before-connect")
|
||||
|
||||
# Open connections
|
||||
servers = [{'host': host or args.host[0], 'port': port or args.port[0]} for (host, port) in itertools.zip_longest(args.host, args.port)]
|
||||
servers = [
|
||||
{"host": host or args.host[0], "port": port or args.port[0]}
|
||||
for (host, port) in itertools.zip_longest(args.host, args.port)
|
||||
]
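For illustration, a small self-contained sketch of this host/port pairing follows; the host and port values are made up. itertools.zip_longest pads the shorter list with None, and the `or` fallback then reuses the first host or the first port.

# Hypothetical example of pairing uneven --host/--port lists.
import itertools

hosts, ports = ["localhost", "example-host"], [9000]
servers_example = [
    {"host": h or hosts[0], "port": p or ports[0]}
    for (h, p) in itertools.zip_longest(hosts, ports)
]
print(servers_example)
# [{'host': 'localhost', 'port': 9000}, {'host': 'example-host', 'port': 9000}]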
|
||||
# Force settings_is_important to fail queries on unknown settings.
|
||||
all_connections = [clickhouse_driver.Client(**server, settings_is_important=True) for server in servers]
|
||||
all_connections = [
|
||||
clickhouse_driver.Client(**server, settings_is_important=True) for server in servers
|
||||
]
|
||||
|
||||
for i, s in enumerate(servers):
|
||||
print(f'server\t{i}\t{s["host"]}\t{s["port"]}')
|
||||
|
||||
reportStageEnd('connect')
|
||||
reportStageEnd("connect")
|
||||
|
||||
if not args.use_existing_tables:
|
||||
# Run drop queries, ignoring errors. Do this before all other activity,
|
||||
# because clickhouse_driver disconnects on error (this is not configurable),
|
||||
# and the new connection loses the changes in settings.
|
||||
drop_query_templates = [q.text for q in root.findall('drop_query')]
|
||||
drop_query_templates = [q.text for q in root.findall("drop_query")]
|
||||
drop_queries = substitute_parameters(drop_query_templates)
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for q in drop_queries:
|
||||
try:
|
||||
c.execute(q)
|
||||
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
except:
|
||||
pass
|
||||
|
||||
reportStageEnd('drop-1')
|
||||
reportStageEnd("drop-1")
|
||||
|
||||
# Apply settings.
|
||||
settings = root.findall('settings/*')
|
||||
settings = root.findall("settings/*")
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for s in settings:
|
||||
# requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings
|
||||
@ -189,48 +266,52 @@ for conn_index, c in enumerate(all_connections):
|
||||
# the test, which is wrong.
|
||||
c.execute("select 1")
|
||||
|
||||
reportStageEnd('settings')
|
||||
reportStageEnd("settings")
|
||||
|
||||
# Check tables that should exist. If they don't exist, just skip this test.
|
||||
tables = [e.text for e in root.findall('preconditions/table_exists')]
|
||||
tables = [e.text for e in root.findall("preconditions/table_exists")]
|
||||
for t in tables:
|
||||
for c in all_connections:
|
||||
try:
|
||||
res = c.execute("select 1 from {} limit 1".format(t))
|
||||
except:
|
||||
exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
|
||||
skipped_message = ' '.join(exception_message.split('\n')[:2])
|
||||
print(f'skipped\t{tsv_escape(skipped_message)}')
|
||||
skipped_message = " ".join(exception_message.split("\n")[:2])
|
||||
print(f"skipped\t{tsv_escape(skipped_message)}")
|
||||
sys.exit(0)
|
||||
|
||||
reportStageEnd('preconditions')
|
||||
reportStageEnd("preconditions")
|
||||
|
||||
if not args.use_existing_tables:
|
||||
# Run create and fill queries. We will run them simultaneously for both
|
||||
# servers, to save time. The weird XML search + filter is because we want to
|
||||
# keep the relative order of elements, and etree doesn't support the
|
||||
# appropriate xpath query.
|
||||
create_query_templates = [q.text for q in root.findall('./*')
|
||||
if q.tag in ('create_query', 'fill_query')]
|
||||
create_query_templates = [
|
||||
q.text for q in root.findall("./*") if q.tag in ("create_query", "fill_query")
|
||||
]
|
||||
create_queries = substitute_parameters(create_query_templates)
|
||||
|
||||
# Disallow temporary tables, because the clickhouse_driver reconnects on
|
||||
# errors, and temporary tables are destroyed. We want to be able to continue
|
||||
# after some errors.
|
||||
for q in create_queries:
|
||||
if re.search('create temporary table', q, flags=re.IGNORECASE):
|
||||
print(f"Temporary tables are not allowed in performance tests: '{q}'",
|
||||
file = sys.stderr)
|
||||
if re.search("create temporary table", q, flags=re.IGNORECASE):
|
||||
print(
|
||||
f"Temporary tables are not allowed in performance tests: '{q}'",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
def do_create(connection, index, queries):
|
||||
for q in queries:
|
||||
connection.execute(q)
|
||||
print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
|
||||
threads = [
|
||||
Thread(target = do_create, args = (connection, index, create_queries))
|
||||
for index, connection in enumerate(all_connections)]
|
||||
Thread(target=do_create, args=(connection, index, create_queries))
|
||||
for index, connection in enumerate(all_connections)
|
||||
]
|
||||
|
||||
for t in threads:
|
||||
t.start()
|
||||
@ -238,14 +319,16 @@ if not args.use_existing_tables:
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
reportStageEnd('create')
|
||||
reportStageEnd("create")
|
||||
|
||||
# By default, test all queries.
|
||||
queries_to_run = range(0, len(test_queries))
|
||||
|
||||
if args.max_queries:
|
||||
# If specified, test a limited number of queries chosen at random.
|
||||
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))
|
||||
queries_to_run = random.sample(
|
||||
range(0, len(test_queries)), min(len(test_queries), args.max_queries)
|
||||
)
|
||||
|
||||
if args.queries_to_run:
|
||||
# Run the specified queries.
|
||||
@ -255,16 +338,16 @@ if args.queries_to_run:
|
||||
profile_total_seconds = 0
|
||||
for query_index in queries_to_run:
|
||||
q = test_queries[query_index]
|
||||
query_prefix = f'{test_name}.query{query_index}'
|
||||
query_prefix = f"{test_name}.query{query_index}"
|
||||
|
||||
# We have some crazy long queries (about 100kB), so trim them to a sane
|
||||
# length. This means we can't use query text as an identifier and have to
|
||||
# use the test name + the test-wide query index.
|
||||
query_display_name = q
|
||||
if len(query_display_name) > 1000:
|
||||
query_display_name = f'{query_display_name[:1000]}...({query_index})'
|
||||
query_display_name = f"{query_display_name[:1000]}...({query_index})"
|
||||
|
||||
print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}')
|
||||
print(f"display-name\t{query_index}\t{tsv_escape(query_display_name)}")
|
||||
|
||||
# Prewarm: run once on both servers. Helps to bring the data into memory,
|
||||
# precompile the queries, etc.
|
||||
@ -272,10 +355,10 @@ for query_index in queries_to_run:
|
||||
# new one. We want to run them on the new server only, so that the PR author
|
||||
# can ensure that the test works properly. Remember the errors we had on
|
||||
# each server.
|
||||
query_error_on_connection = [None] * len(all_connections);
|
||||
query_error_on_connection = [None] * len(all_connections)
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
try:
|
||||
prewarm_id = f'{query_prefix}.prewarm0'
|
||||
prewarm_id = f"{query_prefix}.prewarm0"
|
||||
|
||||
try:
|
||||
# During the warmup runs, we will also:
|
||||
@ -283,25 +366,30 @@ for query_index in queries_to_run:
|
||||
# * collect profiler traces, which might be helpful for analyzing
|
||||
# test coverage. We disable profiler for normal runs because
|
||||
# it makes the results unstable.
|
||||
res = c.execute(q, query_id = prewarm_id,
|
||||
settings = {
|
||||
'max_execution_time': args.prewarm_max_query_seconds,
|
||||
'query_profiler_real_time_period_ns': 10000000,
|
||||
'memory_profiler_step': '4Mi',
|
||||
})
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=prewarm_id,
|
||||
settings={
|
||||
"max_execution_time": args.prewarm_max_query_seconds,
|
||||
"query_profiler_real_time_period_ns": 10000000,
|
||||
"memory_profiler_step": "4Mi",
|
||||
},
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (prewarm_id, *e.args)
|
||||
e.message = prewarm_id + ': ' + e.message
|
||||
e.message = prewarm_id + ": " + e.message
|
||||
raise
|
||||
|
||||
print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
|
||||
print(
|
||||
f"prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}"
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except:
|
||||
# FIXME the driver reconnects on error and we lose settings, so this
|
||||
# might lead to further errors or unexpected behavior.
|
||||
query_error_on_connection[conn_index] = traceback.format_exc();
|
||||
query_error_on_connection[conn_index] = traceback.format_exc()
|
||||
continue
|
||||
|
||||
# Report all errors that occurred during prewarm and decide what to do next.
|
||||
@ -311,14 +399,14 @@ for query_index in queries_to_run:
|
||||
no_errors = []
|
||||
for i, e in enumerate(query_error_on_connection):
|
||||
if e:
|
||||
print(e, file = sys.stderr)
|
||||
print(e, file=sys.stderr)
|
||||
else:
|
||||
no_errors.append(i)
|
||||
|
||||
if len(no_errors) == 0:
|
||||
continue
|
||||
elif len(no_errors) < len(all_connections):
|
||||
print(f'partial\t{query_index}\t{no_errors}')
|
||||
print(f"partial\t{query_index}\t{no_errors}")
|
||||
|
||||
this_query_connections = [all_connections[index] for index in no_errors]
|
||||
|
||||
@ -337,27 +425,34 @@ for query_index in queries_to_run:
|
||||
all_server_times.append([])
|
||||
|
||||
while True:
|
||||
run_id = f'{query_prefix}.run{run}'
|
||||
run_id = f"{query_prefix}.run{run}"
|
||||
|
||||
for conn_index, c in enumerate(this_query_connections):
|
||||
try:
|
||||
res = c.execute(q, query_id = run_id, settings = {'max_execution_time': args.max_query_seconds})
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=run_id,
|
||||
settings={"max_execution_time": args.max_query_seconds},
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (run_id, *e.args)
|
||||
e.message = run_id + ': ' + e.message
|
||||
e.message = run_id + ": " + e.message
|
||||
raise
|
||||
|
||||
elapsed = c.last_query.elapsed
|
||||
all_server_times[conn_index].append(elapsed)
|
||||
|
||||
server_seconds += elapsed
|
||||
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
||||
print(f"query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}")
|
||||
|
||||
if elapsed > args.max_query_seconds:
|
||||
# Do not stop processing pathologically slow queries,
|
||||
# since this may hide errors in other queries.
|
||||
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
||||
print(
|
||||
f"The query no. {query_index} is taking too long to run ({elapsed} s)",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
# Be careful with the counter, after this line it's the next iteration
|
||||
# already.
|
||||
@ -386,7 +481,7 @@ for query_index in queries_to_run:
|
||||
break
|
||||
|
||||
client_seconds = time.perf_counter() - start_seconds
|
||||
print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}')
|
||||
print(f"client-time\t{query_index}\t{client_seconds}\t{server_seconds}")
|
||||
|
||||
# Run additional profiling queries to collect profile data, but only if test times appeared to be different.
|
||||
# We have to do it after normal runs because otherwise it will affect test statistics too much
|
||||
@ -397,13 +492,15 @@ for query_index in queries_to_run:
|
||||
# Don't fail if for some reason there are not enough measurements.
|
||||
continue
|
||||
|
||||
pvalue = stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue
|
||||
pvalue = stats.ttest_ind(
|
||||
all_server_times[0], all_server_times[1], equal_var=False
|
||||
).pvalue
|
||||
median = [statistics.median(t) for t in all_server_times]
|
||||
# Keep this consistent with the value used in report. Should eventually move
|
||||
# to (median[1] - median[0]) / min(median), which is compatible with "times"
|
||||
# difference we use in report (max(median) / min(median)).
|
||||
relative_diff = (median[1] - median[0]) / median[0]
|
||||
print(f'diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}')
|
||||
print(f"diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}")
|
||||
if abs(relative_diff) < ignored_relative_change or pvalue > 0.05:
|
||||
continue
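A self-contained sketch of this decision rule, with made-up timings, may help: a query is treated as changed only when the medians differ by more than the threshold and Welch's t-test finds the difference significant.

# Illustrative only: the timings below are invented, the thresholds mirror the defaults above.
import statistics
from scipy import stats

old_times = [0.101, 0.103, 0.099, 0.102, 0.100, 0.104, 0.101]
new_times = [0.121, 0.119, 0.123, 0.120, 0.122, 0.118, 0.121]

pvalue = stats.ttest_ind(old_times, new_times, equal_var=False).pvalue  # Welch's t-test
medians = [statistics.median(old_times), statistics.median(new_times)]
relative_diff = (medians[1] - medians[0]) / medians[0]

ignored_relative_change = 0.05  # default threshold, can be overridden per test
changed = abs(relative_diff) >= ignored_relative_change and pvalue <= 0.05
print(relative_diff, pvalue, changed)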
|
||||
|
||||
@ -412,25 +509,31 @@ for query_index in queries_to_run:
|
||||
profile_start_seconds = time.perf_counter()
|
||||
run = 0
|
||||
while time.perf_counter() - profile_start_seconds < args.profile_seconds:
|
||||
run_id = f'{query_prefix}.profile{run}'
|
||||
run_id = f"{query_prefix}.profile{run}"
|
||||
|
||||
for conn_index, c in enumerate(this_query_connections):
|
||||
try:
|
||||
res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000})
|
||||
print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=run_id,
|
||||
settings={"query_profiler_real_time_period_ns": 10000000},
|
||||
)
|
||||
print(
|
||||
f"profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}"
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (run_id, *e.args)
|
||||
e.message = run_id + ': ' + e.message
|
||||
e.message = run_id + ": " + e.message
|
||||
raise
|
||||
|
||||
run += 1
|
||||
|
||||
profile_total_seconds += time.perf_counter() - profile_start_seconds
|
||||
|
||||
print(f'profile-total\t{profile_total_seconds}')
|
||||
print(f"profile-total\t{profile_total_seconds}")
|
||||
|
||||
reportStageEnd('run')
|
||||
reportStageEnd("run")
|
||||
|
||||
# Run drop queries
|
||||
if not args.keep_created_tables and not args.use_existing_tables:
|
||||
@ -438,6 +541,6 @@ if not args.keep_created_tables and not args.use_existing_tables:
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for q in drop_queries:
|
||||
c.execute(q)
|
||||
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
|
||||
reportStageEnd('drop-2')
|
||||
reportStageEnd("drop-2")
|
||||
|
@ -12,9 +12,13 @@ import pprint
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
parser = argparse.ArgumentParser(description='Create performance test report')
|
||||
parser.add_argument('--report', default='main', choices=['main', 'all-queries'],
|
||||
help='Which report to build')
|
||||
parser = argparse.ArgumentParser(description="Create performance test report")
|
||||
parser.add_argument(
|
||||
"--report",
|
||||
default="main",
|
||||
choices=["main", "all-queries"],
|
||||
help="Which report to build",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
tables = []
|
||||
@ -31,8 +35,8 @@ unstable_partial_queries = 0
|
||||
# max seconds to run one query by itself, not counting preparation
|
||||
allowed_single_run_time = 2
|
||||
|
||||
color_bad='#ffb0c0'
|
||||
color_good='#b0d050'
|
||||
color_bad = "#ffb0c0"
|
||||
color_good = "#b0d050"
|
||||
|
||||
header_template = """
|
||||
<!DOCTYPE html>
|
||||
@ -151,24 +155,29 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
|
||||
table_anchor = 0
|
||||
row_anchor = 0
|
||||
|
||||
|
||||
def currentTableAnchor():
|
||||
global table_anchor
|
||||
return f'{table_anchor}'
|
||||
return f"{table_anchor}"
|
||||
|
||||
|
||||
def newTableAnchor():
|
||||
global table_anchor
|
||||
table_anchor += 1
|
||||
return currentTableAnchor()
|
||||
|
||||
|
||||
def currentRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
return f'{table_anchor}.{row_anchor}'
|
||||
return f"{table_anchor}.{row_anchor}"
|
||||
|
||||
|
||||
def nextRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
return f'{table_anchor}.{row_anchor + 1}'
|
||||
return f"{table_anchor}.{row_anchor + 1}"
|
||||
|
||||
|
||||
def advanceRowAnchor():
|
||||
global row_anchor
|
||||
@ -178,43 +187,58 @@ def advanceRowAnchor():
|
||||
|
||||
|
||||
def tr(x, anchor=None):
|
||||
#return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
# return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
anchor = anchor if anchor else advanceRowAnchor()
|
||||
return f'<tr id={anchor}>{x}</tr>'
|
||||
return f"<tr id={anchor}>{x}</tr>"
|
||||
|
||||
def td(value, cell_attributes = ''):
|
||||
return '<td {cell_attributes}>{value}</td>'.format(
|
||||
cell_attributes = cell_attributes,
|
||||
value = value)
|
||||
|
||||
def th(value, cell_attributes = ''):
|
||||
return '<th {cell_attributes}>{value}</th>'.format(
|
||||
cell_attributes = cell_attributes,
|
||||
value = value)
|
||||
def td(value, cell_attributes=""):
|
||||
return "<td {cell_attributes}>{value}</td>".format(
|
||||
cell_attributes=cell_attributes, value=value
|
||||
)
|
||||
|
||||
def tableRow(cell_values, cell_attributes = [], anchor=None):
|
||||
|
||||
def th(value, cell_attributes=""):
|
||||
return "<th {cell_attributes}>{value}</th>".format(
|
||||
cell_attributes=cell_attributes, value=value
|
||||
)
|
||||
|
||||
|
||||
def tableRow(cell_values, cell_attributes=[], anchor=None):
|
||||
return tr(
|
||||
''.join([td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]),
|
||||
anchor)
|
||||
"".join(
|
||||
[
|
||||
td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes, fillvalue=""
|
||||
)
|
||||
if a is not None and v is not None
|
||||
]
|
||||
),
|
||||
anchor,
|
||||
)
|
||||
|
||||
def tableHeader(cell_values, cell_attributes = []):
|
||||
|
||||
def tableHeader(cell_values, cell_attributes=[]):
|
||||
return tr(
|
||||
''.join([th(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]))
|
||||
"".join(
|
||||
[
|
||||
th(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes, fillvalue=""
|
||||
)
|
||||
if a is not None and v is not None
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def tableStart(title):
|
||||
cls = '-'.join(title.lower().split(' ')[:3]);
|
||||
cls = "-".join(title.lower().split(" ")[:3])
|
||||
global table_anchor
|
||||
table_anchor = cls
|
||||
anchor = currentTableAnchor()
|
||||
help_anchor = '-'.join(title.lower().split(' '));
|
||||
help_anchor = "-".join(title.lower().split(" "))
|
||||
return f"""
|
||||
<h2 id="{anchor}">
|
||||
<a class="cancela" href="#{anchor}">{title}</a>
|
||||
@ -223,12 +247,14 @@ def tableStart(title):
|
||||
<table class="{cls}">
|
||||
"""
|
||||
|
||||
|
||||
def tableEnd():
|
||||
return '</table>'
|
||||
return "</table>"
|
||||
|
||||
|
||||
def tsvRows(n):
|
||||
try:
|
||||
with open(n, encoding='utf-8') as fd:
|
||||
with open(n, encoding="utf-8") as fd:
|
||||
result = []
|
||||
for row in csv.reader(fd, delimiter="\t", quoting=csv.QUOTE_NONE):
|
||||
new_row = []
|
||||
@ -237,27 +263,32 @@ def tsvRows(n):
|
||||
# The second one (encode('latin1').decode('utf-8')) fixes the changes with unicode vs utf-8 chars, so
|
||||
# 'Чем Ð·Ð°Ð½Ð¸Ð¼Ð°ÐµÑˆÑŒÑÑ' is transformed back into 'Чем занимаешься'.
|
||||
|
||||
new_row.append(e.encode('utf-8').decode('unicode-escape').encode('latin1').decode('utf-8'))
|
||||
new_row.append(
|
||||
e.encode("utf-8")
|
||||
.decode("unicode-escape")
|
||||
.encode("latin1")
|
||||
.decode("utf-8")
|
||||
)
|
||||
result.append(new_row)
|
||||
return result
|
||||
|
||||
except:
|
||||
report_errors.append(
|
||||
traceback.format_exception_only(
|
||||
*sys.exc_info()[:2])[-1])
|
||||
report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
return []
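The double decode above can be seen on a tiny example; the input below is a made-up TSV cell containing backslash-escaped UTF-8 bytes.

# Worked example of the unescaping round-trip used in tsvRows.
cell = "\\xd0\\xa7\\xd0\\xb5\\xd0\\xbc"                 # literal text: \xd0\xa7\xd0\xb5\xd0\xbc
step1 = cell.encode("utf-8").decode("unicode-escape")   # \xNN escapes -> latin-1 characters
step2 = step1.encode("latin1").decode("utf-8")          # reinterpret those bytes as UTF-8
print(step2)  # -> Чем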
|
||||
|
||||
|
||||
def htmlRows(n):
|
||||
rawRows = tsvRows(n)
|
||||
result = ''
|
||||
result = ""
|
||||
for row in rawRows:
|
||||
result += tableRow(row)
|
||||
return result
|
||||
|
||||
|
||||
def addSimpleTable(caption, columns, rows, pos=None):
|
||||
global tables
|
||||
text = ''
|
||||
text = ""
|
||||
if not rows:
|
||||
return
|
||||
|
||||
@ -268,51 +299,63 @@ def addSimpleTable(caption, columns, rows, pos=None):
|
||||
text += tableEnd()
|
||||
tables.insert(pos if pos else len(tables), text)
|
||||
|
||||
|
||||
def add_tested_commits():
|
||||
global report_errors
|
||||
try:
|
||||
addSimpleTable('Tested Commits', ['Old', 'New'],
|
||||
[['<pre>{}</pre>'.format(x) for x in
|
||||
[open('left-commit.txt').read(),
|
||||
open('right-commit.txt').read()]]])
|
||||
addSimpleTable(
|
||||
"Tested Commits",
|
||||
["Old", "New"],
|
||||
[
|
||||
[
|
||||
"<pre>{}</pre>".format(x)
|
||||
for x in [
|
||||
open("left-commit.txt").read(),
|
||||
open("right-commit.txt").read(),
|
||||
]
|
||||
]
|
||||
],
|
||||
)
|
||||
except:
|
||||
# Don't fail if no commit info -- maybe it's a manual run.
|
||||
report_errors.append(
|
||||
traceback.format_exception_only(
|
||||
*sys.exc_info()[:2])[-1])
|
||||
report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
|
||||
|
||||
def add_report_errors():
|
||||
global tables
|
||||
global report_errors
|
||||
# Add the errors reported by various steps of comparison script
|
||||
try:
|
||||
report_errors += [l.strip() for l in open('report/errors.log')]
|
||||
report_errors += [l.strip() for l in open("report/errors.log")]
|
||||
except:
|
||||
report_errors.append(
|
||||
traceback.format_exception_only(
|
||||
*sys.exc_info()[:2])[-1])
|
||||
report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
|
||||
if not report_errors:
|
||||
return
|
||||
|
||||
text = tableStart('Errors while Building the Report')
|
||||
text += tableHeader(['Error'])
|
||||
text = tableStart("Errors while Building the Report")
|
||||
text += tableHeader(["Error"])
|
||||
for x in report_errors:
|
||||
text += tableRow([x])
|
||||
text += tableEnd()
|
||||
# Insert after Tested Commits
|
||||
tables.insert(1, text)
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>']);
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>'
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def add_errors_explained():
|
||||
if not errors_explained:
|
||||
return
|
||||
|
||||
text = '<a name="fail1"/>'
|
||||
text += tableStart('Error Summary')
|
||||
text += tableHeader(['Description'])
|
||||
text += tableStart("Error Summary")
|
||||
text += tableHeader(["Description"])
|
||||
for row in errors_explained:
|
||||
text += tableRow(row)
|
||||
text += tableEnd()
|
||||
@ -321,59 +364,81 @@ def add_errors_explained():
|
||||
tables.insert(1, text)
|
||||
|
||||
|
||||
if args.report == 'main':
|
||||
if args.report == "main":
|
||||
print((header_template.format()))
|
||||
|
||||
add_tested_commits()
|
||||
|
||||
|
||||
run_error_rows = tsvRows('run-errors.tsv')
|
||||
run_error_rows = tsvRows("run-errors.tsv")
|
||||
error_tests += len(run_error_rows)
|
||||
addSimpleTable('Run Errors', ['Test', 'Error'], run_error_rows)
|
||||
addSimpleTable("Run Errors", ["Test", "Error"], run_error_rows)
|
||||
if run_error_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>']);
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>'
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
|
||||
slow_on_client_rows = tsvRows("report/slow-on-client.tsv")
|
||||
error_tests += len(slow_on_client_rows)
|
||||
addSimpleTable('Slow on Client',
|
||||
['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
|
||||
slow_on_client_rows)
|
||||
addSimpleTable(
|
||||
"Slow on Client",
|
||||
["Client time, s", "Server time, s", "Ratio", "Test", "Query"],
|
||||
slow_on_client_rows,
|
||||
)
|
||||
if slow_on_client_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>']);
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>'
|
||||
]
|
||||
)
|
||||
|
||||
unmarked_short_rows = tsvRows('report/unexpected-query-duration.tsv')
|
||||
unmarked_short_rows = tsvRows("report/unexpected-query-duration.tsv")
|
||||
error_tests += len(unmarked_short_rows)
|
||||
addSimpleTable('Unexpected Query Duration',
|
||||
['Problem', 'Marked as "short"?', 'Run time, s', 'Test', '#', 'Query'],
|
||||
unmarked_short_rows)
|
||||
addSimpleTable(
|
||||
"Unexpected Query Duration",
|
||||
["Problem", 'Marked as "short"?', "Run time, s", "Test", "#", "Query"],
|
||||
unmarked_short_rows,
|
||||
)
|
||||
if unmarked_short_rows:
|
||||
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have unexpected duration</a>']);
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{currentTableAnchor()}">Some queries have unexpected duration</a>'
|
||||
]
|
||||
)
|
||||
|
||||
def add_partial():
|
||||
rows = tsvRows('report/partial-queries-report.tsv')
|
||||
rows = tsvRows("report/partial-queries-report.tsv")
|
||||
if not rows:
|
||||
return
|
||||
|
||||
global unstable_partial_queries, slow_average_tests, tables
|
||||
text = tableStart('Partial Queries')
|
||||
columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query']
|
||||
text = tableStart("Partial Queries")
|
||||
columns = ["Median time, s", "Relative time variance", "Test", "#", "Query"]
|
||||
text += tableHeader(columns)
|
||||
attrs = ['' for c in columns]
|
||||
attrs = ["" for c in columns]
|
||||
for row in rows:
|
||||
anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
|
||||
anchor = f"{currentTableAnchor()}.{row[2]}.{row[3]}"
|
||||
if float(row[1]) > 0.10:
|
||||
attrs[1] = f'style="background: {color_bad}"'
|
||||
unstable_partial_queries += 1
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
|
||||
errors_explained.append(
|
||||
[
|
||||
f"<a href=\"#{anchor}\">The query no. {row[3]} of test '{row[2]}' has excessive variance of run time. Keep it below 10%</a>"
|
||||
]
|
||||
)
|
||||
else:
|
||||
attrs[1] = ''
|
||||
attrs[1] = ""
|
||||
if float(row[0]) > allowed_single_run_time:
|
||||
attrs[0] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"</a>'])
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"</a>'
|
||||
]
|
||||
)
|
||||
slow_average_tests += 1
|
||||
else:
|
||||
attrs[0] = ''
|
||||
attrs[0] = ""
|
||||
text += tableRow(row, attrs, anchor)
|
||||
text += tableEnd()
|
||||
tables.append(text)
|
||||
@ -381,41 +446,45 @@ if args.report == 'main':
|
||||
add_partial()
|
||||
|
||||
def add_changes():
|
||||
rows = tsvRows('report/changed-perf.tsv')
|
||||
rows = tsvRows("report/changed-perf.tsv")
|
||||
if not rows:
|
||||
return
|
||||
|
||||
global faster_queries, slower_queries, tables
|
||||
|
||||
text = tableStart('Changes in Performance')
|
||||
text = tableStart("Changes in Performance")
|
||||
columns = [
|
||||
'Old, s', # 0
|
||||
'New, s', # 1
|
||||
'Ratio of speedup (-) or slowdown (+)', # 2
|
||||
'Relative difference (new − old) / old', # 3
|
||||
'p < 0.01 threshold', # 4
|
||||
'', # Failed # 5
|
||||
'Test', # 6
|
||||
'#', # 7
|
||||
'Query', # 8
|
||||
]
|
||||
attrs = ['' for c in columns]
|
||||
"Old, s", # 0
|
||||
"New, s", # 1
|
||||
"Ratio of speedup (-) or slowdown (+)", # 2
|
||||
"Relative difference (new − old) / old", # 3
|
||||
"p < 0.01 threshold", # 4
|
||||
"", # Failed # 5
|
||||
"Test", # 6
|
||||
"#", # 7
|
||||
"Query", # 8
|
||||
]
|
||||
attrs = ["" for c in columns]
|
||||
attrs[5] = None
|
||||
|
||||
text += tableHeader(columns, attrs)
|
||||
|
||||
for row in rows:
|
||||
anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
|
||||
anchor = f"{currentTableAnchor()}.{row[6]}.{row[7]}"
|
||||
if int(row[5]):
|
||||
if float(row[3]) < 0.:
|
||||
if float(row[3]) < 0.0:
|
||||
faster_queries += 1
|
||||
attrs[2] = attrs[3] = f'style="background: {color_good}"'
|
||||
else:
|
||||
slower_queries += 1
|
||||
attrs[2] = attrs[3] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
|
||||
errors_explained.append(
|
||||
[
|
||||
f"<a href=\"#{anchor}\">The query no. {row[7]} of test '{row[6]}' has slowed down</a>"
|
||||
]
|
||||
)
|
||||
else:
|
||||
attrs[2] = attrs[3] = ''
|
||||
attrs[2] = attrs[3] = ""
|
||||
|
||||
text += tableRow(row, attrs, anchor)
|
||||
|
||||
@ -427,35 +496,35 @@ if args.report == 'main':
|
||||
def add_unstable_queries():
|
||||
global unstable_queries, very_unstable_queries, tables
|
||||
|
||||
unstable_rows = tsvRows('report/unstable-queries.tsv')
|
||||
unstable_rows = tsvRows("report/unstable-queries.tsv")
|
||||
if not unstable_rows:
|
||||
return
|
||||
|
||||
unstable_queries += len(unstable_rows)
|
||||
|
||||
columns = [
|
||||
'Old, s', #0
|
||||
'New, s', #1
|
||||
'Relative difference (new - old)/old', #2
|
||||
'p < 0.01 threshold', #3
|
||||
'', # Failed #4
|
||||
'Test', #5
|
||||
'#', #6
|
||||
'Query' #7
|
||||
"Old, s", # 0
|
||||
"New, s", # 1
|
||||
"Relative difference (new - old)/old", # 2
|
||||
"p < 0.01 threshold", # 3
|
||||
"", # Failed #4
|
||||
"Test", # 5
|
||||
"#", # 6
|
||||
"Query", # 7
|
||||
]
|
||||
attrs = ['' for c in columns]
|
||||
attrs = ["" for c in columns]
|
||||
attrs[4] = None
|
||||
|
||||
text = tableStart('Unstable Queries')
|
||||
text = tableStart("Unstable Queries")
|
||||
text += tableHeader(columns, attrs)
|
||||
|
||||
for r in unstable_rows:
|
||||
anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
|
||||
anchor = f"{currentTableAnchor()}.{r[5]}.{r[6]}"
|
||||
if int(r[4]):
|
||||
very_unstable_queries += 1
|
||||
attrs[3] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
attrs[3] = ''
|
||||
attrs[3] = ""
|
||||
# Just don't add the slightly unstable queries we don't consider
|
||||
# errors. It's not clear what the user should do with them.
|
||||
continue
|
||||
@ -470,53 +539,70 @@ if args.report == 'main':
|
||||
|
||||
add_unstable_queries()
|
||||
|
||||
skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
|
||||
addSimpleTable('Skipped Tests', ['Test', 'Reason'], skipped_tests_rows)
|
||||
skipped_tests_rows = tsvRows("analyze/skipped-tests.tsv")
|
||||
addSimpleTable("Skipped Tests", ["Test", "Reason"], skipped_tests_rows)
|
||||
|
||||
addSimpleTable('Test Performance Changes',
|
||||
['Test', 'Ratio of speedup (-) or slowdown (+)', 'Queries', 'Total not OK', 'Changed perf', 'Unstable'],
|
||||
tsvRows('report/test-perf-changes.tsv'))
|
||||
addSimpleTable(
|
||||
"Test Performance Changes",
|
||||
[
|
||||
"Test",
|
||||
"Ratio of speedup (-) or slowdown (+)",
|
||||
"Queries",
|
||||
"Total not OK",
|
||||
"Changed perf",
|
||||
"Unstable",
|
||||
],
|
||||
tsvRows("report/test-perf-changes.tsv"),
|
||||
)
|
||||
|
||||
def add_test_times():
|
||||
global slow_average_tests, tables
|
||||
rows = tsvRows('report/test-times.tsv')
|
||||
rows = tsvRows("report/test-times.tsv")
|
||||
if not rows:
|
||||
return
|
||||
|
||||
columns = [
|
||||
'Test', #0
|
||||
'Wall clock time, entire test, s', #1
|
||||
'Total client time for measured query runs, s', #2
|
||||
'Queries', #3
|
||||
'Longest query, total for measured runs, s', #4
|
||||
'Wall clock time per query, s', #5
|
||||
'Shortest query, total for measured runs, s', #6
|
||||
'', # Runs #7
|
||||
]
|
||||
attrs = ['' for c in columns]
|
||||
"Test", # 0
|
||||
"Wall clock time, entire test, s", # 1
|
||||
"Total client time for measured query runs, s", # 2
|
||||
"Queries", # 3
|
||||
"Longest query, total for measured runs, s", # 4
|
||||
"Wall clock time per query, s", # 5
|
||||
"Shortest query, total for measured runs, s", # 6
|
||||
"", # Runs #7
|
||||
]
|
||||
attrs = ["" for c in columns]
|
||||
attrs[7] = None
|
||||
|
||||
text = tableStart('Test Times')
|
||||
text = tableStart("Test Times")
|
||||
text += tableHeader(columns, attrs)
|
||||
|
||||
allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs
|
||||
allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs
|
||||
for r in rows:
|
||||
anchor = f'{currentTableAnchor()}.{r[0]}'
|
||||
anchor = f"{currentTableAnchor()}.{r[0]}"
|
||||
total_runs = (int(r[7]) + 1) * 2 # one prewarm run, two servers
|
||||
if r[0] != 'Total' and float(r[5]) > allowed_average_run_time * total_runs:
|
||||
if r[0] != "Total" and float(r[5]) > allowed_average_run_time * total_runs:
|
||||
# FIXME should be 15s max -- investigate parallel_insert
|
||||
slow_average_tests += 1
|
||||
attrs[5] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="#{anchor}">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
|
||||
errors_explained.append(
|
||||
[
|
||||
f"<a href=\"#{anchor}\">The test '{r[0]}' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up"
|
||||
]
|
||||
)
|
||||
else:
|
||||
attrs[5] = ''
|
||||
attrs[5] = ""
|
||||
|
||||
if r[0] != 'Total' and float(r[4]) > allowed_single_run_time * total_runs:
|
||||
if r[0] != "Total" and float(r[4]) > allowed_single_run_time * total_runs:
|
||||
slow_average_tests += 1
|
||||
attrs[4] = f'style="background: {color_bad}"'
|
||||
errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
|
||||
errors_explained.append(
|
||||
[
|
||||
f"<a href=\"./all-queries.html#all-query-times.{r[0]}.0\">Some query of the test '{r[0]}' is too slow to run. See the all queries report"
|
||||
]
|
||||
)
|
||||
else:
|
||||
attrs[4] = ''
|
||||
attrs[4] = ""
|
||||
|
||||
text += tableRow(r, attrs, anchor)
|
||||
|
||||
@ -525,10 +611,17 @@ if args.report == 'main':
|
||||
|
||||
add_test_times()
|
||||
|
||||
addSimpleTable('Metric Changes',
|
||||
['Metric', 'Old median value', 'New median value',
|
||||
'Relative difference', 'Times difference'],
|
||||
tsvRows('metrics/changes.tsv'))
|
||||
addSimpleTable(
|
||||
"Metric Changes",
|
||||
[
|
||||
"Metric",
|
||||
"Old median value",
|
||||
"New median value",
|
||||
"Relative difference",
|
||||
"Times difference",
|
||||
],
|
||||
tsvRows("metrics/changes.tsv"),
|
||||
)
|
||||
|
||||
add_report_errors()
|
||||
add_errors_explained()
|
||||
@ -536,7 +629,8 @@ if args.report == 'main':
|
||||
for t in tables:
|
||||
print(t)
|
||||
|
||||
print(f"""
|
||||
print(
|
||||
f"""
|
||||
</div>
|
||||
<p class="links">
|
||||
<a href="all-queries.html">All queries</a>
|
||||
@ -546,104 +640,111 @@ if args.report == 'main':
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
status = 'success'
|
||||
message = 'See the report'
|
||||
status = "success"
|
||||
message = "See the report"
|
||||
message_array = []
|
||||
|
||||
if slow_average_tests:
|
||||
status = 'failure'
|
||||
message_array.append(str(slow_average_tests) + ' too long')
|
||||
status = "failure"
|
||||
message_array.append(str(slow_average_tests) + " too long")
|
||||
|
||||
if faster_queries:
|
||||
message_array.append(str(faster_queries) + ' faster')
|
||||
message_array.append(str(faster_queries) + " faster")
|
||||
|
||||
if slower_queries:
|
||||
if slower_queries > 3:
|
||||
status = 'failure'
|
||||
message_array.append(str(slower_queries) + ' slower')
|
||||
status = "failure"
|
||||
message_array.append(str(slower_queries) + " slower")
|
||||
|
||||
if unstable_partial_queries:
|
||||
very_unstable_queries += unstable_partial_queries
|
||||
status = 'failure'
|
||||
status = "failure"
|
||||
|
||||
# Don't show mildly unstable queries, only the very unstable ones we
|
||||
# treat as errors.
|
||||
if very_unstable_queries:
|
||||
if very_unstable_queries > 5:
|
||||
error_tests += very_unstable_queries
|
||||
status = 'failure'
|
||||
message_array.append(str(very_unstable_queries) + ' unstable')
|
||||
status = "failure"
|
||||
message_array.append(str(very_unstable_queries) + " unstable")
|
||||
|
||||
error_tests += slow_average_tests
|
||||
if error_tests:
|
||||
status = 'failure'
|
||||
message_array.insert(0, str(error_tests) + ' errors')
|
||||
status = "failure"
|
||||
message_array.insert(0, str(error_tests) + " errors")
|
||||
|
||||
if message_array:
|
||||
message = ', '.join(message_array)
|
||||
message = ", ".join(message_array)
|
||||
|
||||
if report_errors:
|
||||
status = 'failure'
|
||||
message = 'Errors while building the report.'
|
||||
status = "failure"
|
||||
message = "Errors while building the report."
|
||||
|
||||
print(("""
|
||||
print(
|
||||
(
|
||||
"""
|
||||
<!--status: {status}-->
|
||||
<!--message: {message}-->
|
||||
""".format(status=status, message=message)))
|
||||
""".format(
|
||||
status=status, message=message
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
elif args.report == 'all-queries':
|
||||
elif args.report == "all-queries":
|
||||
|
||||
print((header_template.format()))
|
||||
|
||||
add_tested_commits()
|
||||
|
||||
def add_all_queries():
|
||||
rows = tsvRows('report/all-queries.tsv')
|
||||
rows = tsvRows("report/all-queries.tsv")
|
||||
if not rows:
|
||||
return
|
||||
|
||||
columns = [
|
||||
'', # Changed #0
|
||||
'', # Unstable #1
|
||||
'Old, s', #2
|
||||
'New, s', #3
|
||||
'Ratio of speedup (-) or slowdown (+)', #4
|
||||
'Relative difference (new − old) / old', #5
|
||||
'p < 0.01 threshold', #6
|
||||
'Test', #7
|
||||
'#', #8
|
||||
'Query', #9
|
||||
]
|
||||
attrs = ['' for c in columns]
|
||||
"", # Changed #0
|
||||
"", # Unstable #1
|
||||
"Old, s", # 2
|
||||
"New, s", # 3
|
||||
"Ratio of speedup (-) or slowdown (+)", # 4
|
||||
"Relative difference (new − old) / old", # 5
|
||||
"p < 0.01 threshold", # 6
|
||||
"Test", # 7
|
||||
"#", # 8
|
||||
"Query", # 9
|
||||
]
|
||||
attrs = ["" for c in columns]
|
||||
attrs[0] = None
|
||||
attrs[1] = None
|
||||
|
||||
text = tableStart('All Query Times')
|
||||
text = tableStart("All Query Times")
|
||||
text += tableHeader(columns, attrs)
|
||||
|
||||
for r in rows:
|
||||
anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
|
||||
anchor = f"{currentTableAnchor()}.{r[7]}.{r[8]}"
|
||||
if int(r[1]):
|
||||
attrs[6] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
attrs[6] = ''
|
||||
attrs[6] = ""
|
||||
|
||||
if int(r[0]):
|
||||
if float(r[5]) > 0.:
|
||||
if float(r[5]) > 0.0:
|
||||
attrs[4] = attrs[5] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
attrs[4] = attrs[5] = f'style="background: {color_good}"'
|
||||
else:
|
||||
attrs[4] = attrs[5] = ''
|
||||
attrs[4] = attrs[5] = ""
|
||||
|
||||
if (float(r[2]) + float(r[3])) / 2 > allowed_single_run_time:
|
||||
attrs[2] = f'style="background: {color_bad}"'
|
||||
attrs[3] = f'style="background: {color_bad}"'
|
||||
else:
|
||||
attrs[2] = ''
|
||||
attrs[3] = ''
|
||||
attrs[2] = ""
|
||||
attrs[3] = ""
|
||||
|
||||
text += tableRow(r, attrs, anchor)
|
||||
|
||||
@ -655,7 +756,8 @@ elif args.report == 'all-queries':
|
||||
for t in tables:
|
||||
print(t)
|
||||
|
||||
print(f"""
|
||||
print(
|
||||
f"""
|
||||
</div>
|
||||
<p class="links">
|
||||
<a href="report.html">Main report</a>
|
||||
@ -665,4 +767,5 @@ elif args.report == 'all-queries':
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
@ -7,18 +7,19 @@ import csv
|
||||
|
||||
RESULT_LOG_NAME = "run.log"
|
||||
|
||||
|
||||
def process_result(result_folder):
|
||||
|
||||
status = "success"
|
||||
description = 'Server started and responded'
|
||||
description = "Server started and responded"
|
||||
summary = [("Smoke test", "OK")]
|
||||
with open(os.path.join(result_folder, RESULT_LOG_NAME), 'r') as run_log:
|
||||
lines = run_log.read().split('\n')
|
||||
if not lines or lines[0].strip() != 'OK':
|
||||
with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
|
||||
lines = run_log.read().split("\n")
|
||||
if not lines or lines[0].strip() != "OK":
|
||||
status = "failure"
|
||||
logging.info("Lines is not ok: %s", str('\n'.join(lines)))
|
||||
logging.info("Lines is not ok: %s", str("\n".join(lines)))
|
||||
summary = [("Smoke test", "FAIL")]
|
||||
description = 'Server failed to respond, see result in logs'
|
||||
description = "Server failed to respond, see result in logs"
|
||||
|
||||
result_logs = []
|
||||
server_log_path = os.path.join(result_folder, "clickhouse-server.log")
|
||||
@ -38,20 +39,22 @@ def process_result(result_folder):
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, 'w') as f:
|
||||
out = csv.writer(f, delimiter='\t')
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, 'w') as f:
|
||||
out = csv.writer(f, delimiter='\t')
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
|
||||
parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of split build smoke test")
|
||||
parser.add_argument("--in-results-dir", default='/test_output/')
|
||||
parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
|
||||
parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of split build smoke test"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="/test_output/")
|
||||
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
|
@ -10,11 +10,18 @@ def process_result(result_folder):
|
||||
status = "success"
|
||||
summary = []
|
||||
paths = []
|
||||
tests = ["TLPWhere", "TLPGroupBy", "TLPHaving", "TLPWhereGroupBy", "TLPDistinct", "TLPAggregate"]
|
||||
tests = [
|
||||
"TLPWhere",
|
||||
"TLPGroupBy",
|
||||
"TLPHaving",
|
||||
"TLPWhereGroupBy",
|
||||
"TLPDistinct",
|
||||
"TLPAggregate",
|
||||
]
|
||||
|
||||
for test in tests:
|
||||
err_path = '{}/{}.err'.format(result_folder, test)
|
||||
out_path = '{}/{}.out'.format(result_folder, test)
|
||||
err_path = "{}/{}.err".format(result_folder, test)
|
||||
out_path = "{}/{}.out".format(result_folder, test)
|
||||
if not os.path.exists(err_path):
|
||||
logging.info("No output err on path %s", err_path)
|
||||
summary.append((test, "SKIPPED"))
|
||||
@ -23,24 +30,24 @@ def process_result(result_folder):
|
||||
else:
|
||||
paths.append(err_path)
|
||||
paths.append(out_path)
|
||||
with open(err_path, 'r') as f:
|
||||
if 'AssertionError' in f.read():
|
||||
with open(err_path, "r") as f:
|
||||
if "AssertionError" in f.read():
|
||||
summary.append((test, "FAIL"))
|
||||
status = 'failure'
|
||||
status = "failure"
|
||||
else:
|
||||
summary.append((test, "OK"))
|
||||
|
||||
logs_path = '{}/logs.tar.gz'.format(result_folder)
|
||||
logs_path = "{}/logs.tar.gz".format(result_folder)
|
||||
if not os.path.exists(logs_path):
|
||||
logging.info("No logs tar on path %s", logs_path)
|
||||
else:
|
||||
paths.append(logs_path)
|
||||
stdout_path = '{}/stdout.log'.format(result_folder)
|
||||
stdout_path = "{}/stdout.log".format(result_folder)
|
||||
if not os.path.exists(stdout_path):
|
||||
logging.info("No stdout log on path %s", stdout_path)
|
||||
else:
|
||||
paths.append(stdout_path)
|
||||
stderr_path = '{}/stderr.log'.format(result_folder)
|
||||
stderr_path = "{}/stderr.log".format(result_folder)
|
||||
if not os.path.exists(stderr_path):
|
||||
logging.info("No stderr log on path %s", stderr_path)
|
||||
else:
|
||||
@ -52,20 +59,22 @@ def process_result(result_folder):
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, 'w') as f:
|
||||
out = csv.writer(f, delimiter='\t')
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, 'w') as f:
|
||||
out = csv.writer(f, delimiter='\t')
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
|
||||
parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of sqlancer test")
|
||||
parser.add_argument("--in-results-dir", default='/test_output/')
|
||||
parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
|
||||
parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of sqlancer test"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="/test_output/")
|
||||
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
|
@ -11,7 +11,7 @@ RUN apt-get update -y \
|
||||
|
||||
COPY s3downloader /s3downloader
|
||||
|
||||
ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
|
||||
ENV S3_URL="https://clickhouse-datasets.s3.amazonaws.com"
|
||||
ENV DATASETS="hits visits"
|
||||
ENV EXPORT_S3_STORAGE_POLICIES=1
|
||||
|
||||
|