diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml
new file mode 100644
index 00000000000..03e1007b841
--- /dev/null
+++ b/.github/workflows/cancel.yml
@@ -0,0 +1,13 @@
+name: Cancel
+on: # yamllint disable-line rule:truthy
+ workflow_run:
+ workflows: ["CIGithubActions"]
+ types:
+ - requested
+jobs:
+ cancel:
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - uses: styfle/cancel-workflow-action@0.9.1
+ with:
+ workflow_id: ${{ github.event.workflow.id }}
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 7f20206a7b3..0a4cd1fcefe 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -1,4 +1,4 @@
-name: Ligthweight GithubActions
+name: CIGithubActions
on: # yamllint disable-line rule:truthy
pull_request:
types:
@@ -11,20 +11,25 @@ on: # yamllint disable-line rule:truthy
- master
jobs:
CheckLabels:
- runs-on: [self-hosted]
+ runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: actions/checkout@v2
- name: Labels check
- run: cd $GITHUB_WORKSPACE/tests/ci && python3 run_check.py
+ run: |
+ cd $GITHUB_WORKSPACE/tests/ci
+ python3 run_check.py
DockerHubPush:
needs: CheckLabels
- runs-on: [self-hosted]
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+ runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
- run: cd $GITHUB_WORKSPACE/tests/ci && python3 docker_images_check.py
+ run: |
+ cd $GITHUB_WORKSPACE/tests/ci
+ python3 docker_images_check.py
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
@@ -32,7 +37,7 @@ jobs:
path: ${{ runner.temp }}/docker_images_check/changed_images.json
StyleCheck:
needs: DockerHubPush
- runs-on: [self-hosted]
+ runs-on: [self-hosted, style-checker]
steps:
- name: Download changed images
uses: actions/download-artifact@v2
@@ -42,12 +47,82 @@ jobs:
- name: Check out repository code
uses: actions/checkout@v2
- name: Style Check
- run: cd $GITHUB_WORKSPACE/tests/ci && python3 style_check.py
+ run: |
+ cd $GITHUB_WORKSPACE/tests/ci
+ python3 style_check.py
+ BuilderDebDebug:
+ needs: DockerHubPush
+ runs-on: [self-hosted, builder]
+ steps:
+ - name: Download changed images
+ uses: actions/download-artifact@v2
+ with:
+ name: changed_images
+ path: ${{ runner.temp }}/build_check
+ - name: Check out repository code
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+ - name: Build
+ env:
+ TEMP_PATH: ${{runner.temp}}/build_check
+ REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
+ CACHES_PATH: ${{runner.temp}}/../ccaches
+ CHECK_NAME: 'ClickHouse build check (actions)'
+ BUILD_NUMBER: 7
+ run: |
+ sudo rm -fr $TEMP_PATH
+ mkdir -p $TEMP_PATH
+ cp -r $GITHUB_WORKSPACE $TEMP_PATH
+ cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
+ - name: Upload build URLs to artifacts
+ uses: actions/upload-artifact@v2
+ with:
+ name: ${{ env.BUILD_NAME }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ BuilderReport:
+ needs: [BuilderDebDebug]
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Download json reports
+ uses: actions/download-artifact@v2
+ with:
+ path: ${{runner.temp}}/reports_dir
+ - name: Check out repository code
+ uses: actions/checkout@v2
+ - name: Report Builder
+ env:
+ TEMP_PATH: ${{runner.temp}}/report_check
+ REPORTS_PATH: ${{runner.temp}}/reports_dir
+ CHECK_NAME: 'ClickHouse build check (actions)'
+ run: |
+ sudo rm -fr $TEMP_PATH
+ mkdir -p $TEMP_PATH
+ cd $GITHUB_WORKSPACE/tests/ci
+ python3 build_report_check.py "$CHECK_NAME"
+ FastTest:
+ needs: DockerHubPush
+ runs-on: [self-hosted, builder]
+ steps:
+ - name: Check out repository code
+ uses: actions/checkout@v2
+ - name: Fast Test
+ env:
+ TEMP_PATH: ${{runner.temp}}/fasttest
+ REPO_COPY: ${{runner.temp}}/fasttest/ClickHouse
+ CACHES_PATH: ${{runner.temp}}/../ccaches
+ run: |
+ sudo rm -fr $TEMP_PATH
+ mkdir -p $TEMP_PATH
+ cp -r $GITHUB_WORKSPACE $TEMP_PATH
+ cd $REPO_COPY/tests/ci && python3 fast_test_check.py
FinishCheck:
- needs: [StyleCheck, DockerHubPush, CheckLabels]
- runs-on: [self-hosted]
+ needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest]
+ runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: actions/checkout@v2
- name: Finish label
- run: cd $GITHUB_WORKSPACE/tests/ci && python3 finish_check.py
+ run: |
+ cd $GITHUB_WORKSPACE/tests/ci
+ python3 finish_check.py
diff --git a/docs/zh/development/continuous-integration.md b/docs/zh/development/continuous-integration.md
deleted file mode 120000
index f68058a436e..00000000000
--- a/docs/zh/development/continuous-integration.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/continuous-integration.md
\ No newline at end of file
diff --git a/docs/zh/development/continuous-integration.md b/docs/zh/development/continuous-integration.md
new file mode 100644
index 00000000000..6cff83067de
--- /dev/null
+++ b/docs/zh/development/continuous-integration.md
@@ -0,0 +1,155 @@
+# Continuous Integration Checks {#continuous-integration-checks}
+When you submit a pull request, some automated checks are run for your code by the ClickHouse [continuous integration (CI) system](https://clickhouse.com/docs/en/development/tests/#test-automation).
+
+This happens after a repository maintainer (someone from the ClickHouse team) has screened your code and added the "can be tested" label to your pull request.
+
+The results of the checks are listed on the GitHub pull request page as described in the [GitHub checks documentation](https://docs.github.com/en/github/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks).
+
+If a check is failing, you may be required to fix it. This page gives an overview of the checks you may encounter and what you can do to fix them.
+
+If the check failure looks unrelated to your changes, it may be some transient failure or an infrastructure problem. Push an empty commit to the pull request to restart the CI checks:
+
+```
+git reset
+git commit --allow-empty
+git push
+```
+
+If you are not sure what to do, ask a maintainer for help.
+
+## Merge With Master {#merge-with-master}
+Verifies that the PR can be merged to master. If not, it will fail with the message 'Cannot fetch mergecommit'. To fix this check, resolve the conflict as described in the [GitHub documentation](https://docs.github.com/en/github/collaborating-with-pull-requests/addressing-merge-conflicts/resolving-a-merge-conflict-on-github), or merge the `master` branch into your pull request branch using git, as sketched below.
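+
+For example, a minimal sketch of merging `master` locally (assuming the remote `origin` points at the main ClickHouse repository and your PR branch is checked out):
+
+```
+git fetch origin master
+git merge origin/master
+# resolve any conflicts, then:
+git push
+```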
+
+## Docs Check {#docs-check}
+Tries to build the ClickHouse documentation website. It can fail if you changed something in the documentation. The most probable reason is that some cross-link in the documentation is wrong. Go to the check report and look for `ERROR` and `WARNING` messages.
+
+### Report Details {#report-details}
+- [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/eabcc293eb02214caa6826b7c15f101643f67a6b/docs_check.html)
+- `docs_output.txt` contains the build log. [Successful result example](https://clickhouse-test-reports.s3.yandex.net/12550/eabcc293eb02214caa6826b7c15f101643f67a6b/docs_check/docs_output.txt)
+
+## Description Check {#description-check}
+Checks that the description of your pull request conforms to the template [PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md).
+
+You have to specify a changelog category for your change (e.g., Bug Fix), and write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/).
+
+## Push To DockerHub {#push-to-dockerhub}
+Builds the docker images used for builds and tests, then pushes them to DockerHub.
+
+## Marker Check {#marker-check}
+This check means that the CI system has started processing the pull request. While it has 'pending' status, not all checks have been started yet. After all checks have been started, the status changes to 'success'.
+
+## Style Check {#style-check}
+Performs some simple regex-based checks of code style, using the `utils/check-style/check-style` binary (note that it can be run locally, as sketched below).
+If it fails, fix the style errors following the [code style guide](./style.md).
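+
+A minimal sketch of running the check locally (assuming a full repository checkout):
+
+```
+cd ClickHouse
+./utils/check-style/check-style
+```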
+
+### Report Details {#report-details}
+- [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check.html)
+- `output.txt` contains the check's resulting errors (invalid tabulation etc); a blank page means no errors. [Successful result example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check/output.txt)
+
+## PVS Check {#pvs-check}
+Checks the code with [PVS-studio](https://www.viva64.com/en/pvs-studio/), a static analysis tool. Look at the report to see the exact errors. Fix them if you can; if not, ask a ClickHouse maintainer for help.
+
+### Report Details {#report-details}
+- [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/67d716b5cc3987801996c31a67b31bf141bc3486/pvs_check.html)
+- `test_run.txt.out.log` contains the building and analyzing log file. It includes only parsing or not-found errors.
+- `HTML report` contains the analysis results. For its description visit the PVS [official site](https://www.viva64.com/en/m/0036/#ID14E9A2B2CD).
+
+## Fast Test {#fast-test}
+Normally this is the first check that is run for a PR. It builds ClickHouse and runs most of the stateless functional tests, omitting some. If it fails, further checks are not started until it is fixed. Look at the report to see which tests fail, then reproduce the failure locally as described [here](./tests.md#functional-test-locally).
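+
+For instance, a hedged sketch of reproducing one failing test locally (it assumes a server built from your branch is already running, and uses a stateless test name purely as an example):
+
+```
+cd tests
+./clickhouse-test 00001_select_1
+```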
+
+### Report Details {#report-details}
+[Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/67d716b5cc3987801996c31a67b31bf141bc3486/fast_test.html)
+
+#### Status Page Files {#status-page-files}
+- `runlog.out.log` is the general log that includes all other logs.
+- `test_log.txt`
+- `submodule_log.txt` contains the messages about cloning and checking out the needed submodules.
+- `stderr.log`
+- `stdout.log`
+- `clickhouse-server.log`
+- `clone_log.txt`
+- `install_log.txt`
+- `clickhouse-server.err.log`
+- `build_log.txt`
+- `cmake_log.txt` contains messages about the C/C++ and Linux flags checks.
+
+#### Status Page Columns {#status-page-columns}
+- Test name -- contains the name of the test (without the path, e.g., all types of tests will be stripped to the name).
+- Test status -- one of Skipped, Success, or Fail.
+- Test time, sec. -- empty on this test.
+
+## Build Check {#build-check}
+Builds ClickHouse in various configurations for use in further steps. You have to fix the builds that fail. Build logs often have enough information to fix the error, but you might have to reproduce the failure locally. The `cmake` options can be found in the build log by grepping for `cmake`, as sketched below. Use these options and follow the [general build process](./build.md).
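+
+For example, an illustrative way to extract the `cmake` invocation (assuming the build log was saved locally as `build_log.txt`):
+
+```
+grep -o 'cmake .*' build_log.txt | head -n 1
+```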
+
+### Report Details {#report-details}
+[Status page example](https://clickhouse-builds.s3.yandex.net/12550/67d716b5cc3987801996c31a67b31bf141bc3486/clickhouse_build_check/report.html)
+- **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures, e.g. `clang-10-freebsd`).
+- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
+- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
+- **Bundled**: a `bundled` build uses libraries from `contrib`, while an `unbundled` build uses system libraries.
+- **Splitted**: `splitted` is a [split build](https://clickhouse.com/docs/en/development/build/#split-build).
+- **Status**: `success` or `fail`.
+- **Build log**: link to the building and files copying log, useful when the build failed.
+- **Build time**.
+- **Artifacts**: build result files (with `XXX` being the server version, e.g. `20.8.1.4344`).
+    - `clickhouse-client_XXX_all.deb`
+    - `clickhouse-common-static-dbg_XXX[+asan, +msan, +ubsan, +tsan]_amd64.deb`
+    - `clickhouse-common-static_XXX_amd64.deb`
+    - `clickhouse-server_XXX_all.deb`
+    - `clickhouse-test_XXX_all.deb`
+    - `clickhouse_XXX_amd64.buildinfo`
+    - `clickhouse_XXX_amd64.changes`
+    - `clickhouse`: Main built binary.
+    - `clickhouse-odbc-bridge`
+    - `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
+    - `shared_build.tgz`: build with shared libraries.
+    - `performance.tgz`: Special package for performance tests.
+
+## Special Build Check {#special-buildcheck}
+Performs static analysis and code style checks using `clang-tidy`. The report is similar to the build check. Fix the errors found in the build log.
+
+## Functional Stateless Tests {#functional-stateless-tests}
+Runs [stateless functional tests](./tests.md#functional-tests) for ClickHouse binaries built in various configurations: release, debug, with sanitizers, etc. Look at the report to see which tests fail, then reproduce the failure locally as described [here](./tests.md#functional-test-locally). Note that you have to use the correct build configuration to reproduce: a test might fail under AddressSanitizer but pass in Debug. Download the binary from the [CI build checks page](./build.md#you-dont-have-to-build-clickhouse), or build it locally.
+
+## Functional Stateful Tests {#functional-stateful-tests}
+Runs [stateful functional tests](./tests.md#functional-tests). Treat them in the same way as the stateless functional tests. The difference is that they require the `hits` and `visits` tables from the [Yandex.Metrica dataset](https://clickhouse.com/docs/en/getting-started/example-datasets/metrica/) to run.
+
+## Integration Tests {#integration-tests}
+Runs [integration tests](./tests.md#integration-tests).
+
+## Testflows Check {#testflows-check}
+Runs some tests using the Testflows test system. See [here](https://github.com/ClickHouse/ClickHouse/tree/master/tests/testflows#running-tests-locally) how to run them locally.
+
+## Stress Test {#stress-test}
+Runs stateless functional tests concurrently from several clients to detect concurrency-related errors. If it fails:
+
+* Fix all other test failures first;
+* Look at the report to find the server logs and check them for possible causes of error.
+
+## Split Build Smoke Test {#split-build-smoke-test}
+Checks that the server built in the [split build](./build.md#split-build) configuration can start and run simple queries. If it fails:
+
+* Fix other test errors first;
+* Build the server in the [split build](./build.md#split-build) configuration locally and check whether it can start and run `select 1`.
+
+## Compatibility Check {#compatibility-check}
+Checks that the `clickhouse` binary can run on distributions with old libc versions. If it fails, ask a maintainer for help.
+
+## AST Fuzzer {#ast-fuzzer}
+Runs randomly generated queries to catch program errors. If it fails, ask a maintainer for help.
+
+## Performance Tests {#performance-tests}
+Measures changes in query performance. This is the longest check, taking just under 6 hours to run. The performance test report is described in detail [here](https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#how-to-read-the-report).
+
+## QA {#qa}
+What is a "Task (private network)" item on the status pages?
+
+It's a link to the Yandex internal job system. Yandex employees can see the check's start time and its more detailed status.
+
+Where are the tests run?
+
+Somewhere in Yandex's internal infrastructure.
diff --git a/docs/zh/operations/external-authenticators/kerberos.md b/docs/zh/operations/external-authenticators/kerberos.md
deleted file mode 120000
index b5a4d557de4..00000000000
--- a/docs/zh/operations/external-authenticators/kerberos.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/external-authenticators/kerberos.md
\ No newline at end of file
diff --git a/docs/zh/operations/external-authenticators/kerberos.md b/docs/zh/operations/external-authenticators/kerberos.md
new file mode 100644
index 00000000000..5d2122c5fea
--- /dev/null
+++ b/docs/zh/operations/external-authenticators/kerberos.md
@@ -0,0 +1,105 @@
+# Kerberos Authentication {#external-authenticators-kerberos}
+Existing and properly configured ClickHouse users can be authenticated via the Kerberos authentication protocol.
+
+Currently, Kerberos can only be used as an external authenticator for existing users, which are defined in `users.xml` or in local access control paths.
+Those users may only use HTTP requests and must be able to authenticate using the GSS-SPNEGO mechanism.
+
+For this approach, Kerberos must be configured in the system and must be enabled in the ClickHouse config.
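+
+As an illustration only (the host name is hypothetical, and `curl` must be built with GSS-API/SPNEGO support), a Kerberos-authenticated HTTP request could look like this:
+
+```
+kinit my_user
+curl --negotiate -u : 'http://clickhouse.example.com:8123/?query=SELECT%201'
+```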
+
+## Enabling Kerberos in ClickHouse {#enabling-kerberos-in-clickHouse}
+To enable Kerberos, one should include the `kerberos` section in `config.xml`. This section may contain additional parameters.
+
+#### Parameters: {#parameters}
+- `principal` - canonical service principal name that will be acquired and used when accepting security contexts.
+    - This parameter is optional; if omitted, the default principal will be used.
+
+- `realm` - a realm, used to restrict authentication to only those requests whose initiator's realm matches it.
+    - This parameter is optional; if omitted, no additional filtering by realm will be applied.
+
+Example (goes into `config.xml`):
+```xml
+<clickhouse>
+    <!-- ... -->
+    <kerberos />
+</clickhouse>
+```
+
+Principal specification:
+```xml
+<clickhouse>
+    <!-- ... -->
+    <kerberos>
+        <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
+    </kerberos>
+</clickhouse>
+```
+
+Filtering by realm:
+```xml
+<clickhouse>
+    <!-- ... -->
+    <kerberos>
+        <realm>EXAMPLE.COM</realm>
+    </kerberos>
+</clickhouse>
+```
+
+!!! warning "注意"
+
+您只能定义一个 `kerberos` 部分. 多个 `kerberos` 部分的存在将强制 ClickHouse 禁用 Kerberos 身份验证.
+
+!!! warning "注意"
+
+`主体`和`领域`部分不能同时指定. `主体`和`领域`的出现将迫使ClickHouse禁用Kerberos身份验证.
+
+## Kerberos as an External Authenticator for Existing Users {#kerberos-as-an-external-authenticator-for-existing-users}
+Kerberos can be used as a method for verifying the identity of locally defined users (users defined in `users.xml` or in local access control paths). Currently, **only** requests over the HTTP interface can be authenticated (via the GSS-SPNEGO mechanism).
+
+The Kerberos principal name format usually follows this pattern:
+- *primary/instance@REALM*
+
+The */instance* part may occur zero or more times. **The primary part of the canonical principal name of the initiator is expected to match the authenticated user name for authentication to succeed.** For example, the principal *my_user/clickhouse.example.com@EXAMPLE.COM* would authenticate as the ClickHouse user `my_user`.
+
+### Enabling Kerberos in `users.xml` {#enabling-kerberos-in-users-xml}
+To enable Kerberos authentication for a user, specify the `kerberos` section instead of the `password` or similar sections in the user definition.
+
+Parameters:
+- `realm` - a realm, used to restrict authentication to only those requests whose initiator's realm matches it.
+    - This parameter is optional; if omitted, no additional filtering by realm will be applied.
+
+Example (goes into `users.xml`):
+```xml
+<clickhouse>
+    <!-- ... -->
+    <users>
+        <!-- ... -->
+        <my_user>
+            <!-- ... -->
+            <kerberos>
+                <realm>EXAMPLE.COM</realm>
+            </kerberos>
+        </my_user>
+    </users>
+</clickhouse>
+```
+
+!!! warning "警告"
+
+注意, Kerberos身份验证不能与任何其他身份验证机制一起使用. 任何其他部分(如`密码`和`kerberos`)的出现都会迫使ClickHouse关闭.
+
+!!! info "提醒"
+
+请注意, 现在, 一旦用户 `my_user` 使用 `kerberos`, 必须在主 `config.xml` 文件中启用 Kerberos,如前所述.
+
+### Enabling Kerberos using SQL {#enabling-kerberos-using-sql}
+When [SQL-driven Access Control and Account Management](https://clickhouse.com/docs/en/operations/access-rights/#access-control) is enabled in ClickHouse, users identified by Kerberos can also be created using SQL statements.
+
+```sql
+CREATE USER my_user IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'
+```
+
+...or, without filtering by realm:
+```sql
+CREATE USER my_user IDENTIFIED WITH kerberos
+```
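+
+To verify the result (an illustrative aside, assuming the user created above and a local `clickhouse-client`), you can inspect the stored authentication method:
+
+```
+clickhouse-client --query "SHOW CREATE USER my_user"
+```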
diff --git a/docs/zh/operations/system-tables/asynchronous_metric_log.md b/docs/zh/operations/system-tables/asynchronous_metric_log.md
index ff7593768d3..592fb99c5ef 100644
--- a/docs/zh/operations/system-tables/asynchronous_metric_log.md
+++ b/docs/zh/operations/system-tables/asynchronous_metric_log.md
@@ -5,4 +5,34 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
## system.asynchronous_metric_log {#system-tables-async-log}
-Contains the historical values of `system.asynchronous_log` (see [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))
+Contains the historical values of `system.asynchronous_metrics`, which are saved once per minute. Enabled by default.
+
+Columns:
+- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
+- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
+- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
+- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
+
+**Example**
+``` sql
+SELECT * FROM system.asynchronous_metric_log LIMIT 10
+```
+``` text
+┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0 │ 2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy │ 743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty │ 26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained │ 60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata │ 12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated │ 133756128 │
+└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+```
+
+**See Also**
+- [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains metrics that are calculated periodically in the background.
+- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of the metric values from the tables `system.metrics` and `system.events`, periodically flushed to disk.
diff --git a/src/Common/RemoteHostFilter.cpp b/src/Common/RemoteHostFilter.cpp
index 73c84364f3c..ba7f163fd16 100644
--- a/src/Common/RemoteHostFilter.cpp
+++ b/src/Common/RemoteHostFilter.cpp
@@ -18,14 +18,14 @@ void RemoteHostFilter::checkURL(const Poco::URI & uri) const
{
if (!checkForDirectEntry(uri.getHost()) &&
!checkForDirectEntry(uri.getHost() + ":" + toString(uri.getPort())))
- throw Exception("URL \"" + uri.toString() + "\" is not allowed in config.xml", ErrorCodes::UNACCEPTABLE_URL);
+ throw Exception("URL \"" + uri.toString() + "\" is not allowed in configuration file, see ", ErrorCodes::UNACCEPTABLE_URL);
}
void RemoteHostFilter::checkHostAndPort(const std::string & host, const std::string & port) const
{
if (!checkForDirectEntry(host) &&
!checkForDirectEntry(host + ":" + port))
- throw Exception("URL \"" + host + ":" + port + "\" is not allowed in config.xml", ErrorCodes::UNACCEPTABLE_URL);
+ throw Exception("URL \"" + host + ":" + port + "\" is not allowed in configuration file, see ", ErrorCodes::UNACCEPTABLE_URL);
}
void RemoteHostFilter::setValuesFromConfig(const Poco::Util::AbstractConfiguration & config)
diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py
new file mode 100644
index 00000000000..ed1e3b534a9
--- /dev/null
+++ b/tests/ci/build_check.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python3
+
+import json
+import logging
+import os
+import subprocess
+import sys
+import time
+
+from github import Github
+
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from s3_helper import S3Helper
+from version_helper import get_version_from_repo, update_version_local
+
+
+def get_build_config(build_check_name, build_number, repo_path):
+ if build_check_name == 'ClickHouse build check (actions)':
+ build_config_name = 'build_config'
+ elif build_check_name == 'ClickHouse special build check (actions)':
+ build_config_name = 'special_build_config'
+ else:
+ raise Exception(f"Unknown build check name {build_check_name}")
+
+ ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json")
+ with open(ci_config_path, 'r') as ci_config:
+ config_dict = json.load(ci_config)
+ return config_dict[build_config_name][build_number]
+
+
+def _can_export_binaries(build_config):
+ if build_config['package-type'] != 'deb':
+ return False
+ if build_config['bundled'] != "bundled":
+ return False
+ if build_config['splitted'] == 'splitted':
+ return False
+ if build_config['sanitizer'] != '':
+ return True
+ if build_config['build-type'] != '':
+ return True
+ return False
+
+
+def get_packager_cmd(build_config, packager_path, output_path, build_version, image_version, ccache_path):
+ package_type = build_config['package-type']
+ comp = build_config['compiler']
+ cmd = f"cd {packager_path} && ./packager --output-dir={output_path} --package-type={package_type} --compiler={comp}"
+
+ if build_config['build-type']:
+ cmd += ' --build-type={}'.format(build_config['build-type'])
+ if build_config['sanitizer']:
+ cmd += ' --sanitizer={}'.format(build_config['sanitizer'])
+ if build_config['bundled'] == 'unbundled':
+ cmd += ' --unbundled'
+ if build_config['splitted'] == 'splitted':
+ cmd += ' --split-binary'
+ if build_config['tidy'] == 'enable':
+ cmd += ' --clang-tidy'
+
+ cmd += ' --cache=ccache'
+ cmd += ' --ccache_dir={}'.format(ccache_path)
+
+ if 'alien_pkgs' in build_config and build_config['alien_pkgs']:
+ cmd += ' --alien-pkgs'
+
+ cmd += ' --docker-image-version={}'.format(image_version)
+ cmd += ' --version={}'.format(build_version)
+
+ if _can_export_binaries(build_config):
+ cmd += ' --with-binaries=tests'
+
+ return cmd
+
+def get_image_name(build_config):
+ if build_config['bundled'] != 'bundled':
+ return 'clickhouse/unbundled-builder'
+ elif build_config['package-type'] != 'deb':
+ return 'clickhouse/binary-builder'
+ else:
+ return 'clickhouse/deb-builder'
+
+
+def build_clickhouse(packager_cmd, logs_path):
+ build_log_path = os.path.join(logs_path, 'build_log.log')
+ with open(build_log_path, 'w') as log_file:
+ retcode = subprocess.Popen(packager_cmd, shell=True, stderr=log_file, stdout=log_file).wait()
+ if retcode == 0:
+ logging.info("Built successfully")
+ else:
+ logging.info("Build failed")
+ return build_log_path, retcode == 0
+
+def build_config_to_string(build_config):
+ if build_config["package-type"] == "performance":
+ return "performance"
+
+ return "_".join([
+ build_config['compiler'],
+ build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo",
+ build_config['sanitizer'] if build_config['sanitizer'] else "none",
+ build_config['bundled'],
+ build_config['splitted'],
+ "tidy" if build_config['tidy'] == "enable" else "notidy",
+ "with_coverage" if build_config['with_coverage'] else "without_coverage",
+ build_config['package-type'],
+ ])
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.INFO)
+ repo_path = os.getenv("REPO_COPY", os.path.abspath("../../"))
+ temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
+ caches_path = os.getenv("CACHES_PATH", temp_path)
+
+ build_check_name = sys.argv[1]
+ build_number = int(sys.argv[2])
+
+ build_config = get_build_config(build_check_name, build_number, repo_path)
+
+ if not os.path.exists(temp_path):
+ os.makedirs(temp_path)
+
+ with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
+ event = json.load(event_file)
+
+ pr_info = PRInfo(event)
+
+ logging.info("Repo copy path %s", repo_path)
+
+ gh = Github(get_best_robot_token())
+
+ images_path = os.path.join(temp_path, 'changed_images.json')
+ image_name = get_image_name(build_config)
+ image_version = 'latest'
+ if os.path.exists(images_path):
+ logging.info("Images file exists")
+ with open(images_path, 'r') as images_fd:
+ images = json.load(images_fd)
+ logging.info("Got images %s", images)
+ if image_name in images:
+ image_version = images[image_name]
+
+ for i in range(10):
+ try:
+ logging.info(f"Pulling image {image_name}:{image_version}")
+ subprocess.check_output(f"docker pull {image_name}:{image_version}", stderr=subprocess.STDOUT, shell=True)
+ break
+        except Exception as ex:
+            # Back off a bit longer on each failed attempt before retrying.
+            time.sleep(i * 3)
+            logging.info("Got exception pulling docker image: %s", ex)
+    else:
+        raise Exception(f"Cannot pull image {image_name}:{image_version} from DockerHub")
+
+ version = get_version_from_repo(repo_path)
+ version.tweak_update()
+ update_version_local(repo_path, pr_info.sha, version)
+
+ build_name = build_config_to_string(build_config)
+ logging.info(f"Build short name {build_name}")
+ subprocess.check_call(f"echo 'BUILD_NAME=build_urls_{build_name}' >> $GITHUB_ENV", shell=True)
+
+ build_output_path = os.path.join(temp_path, build_name)
+ if not os.path.exists(build_output_path):
+ os.makedirs(build_output_path)
+
+ ccache_path = os.path.join(caches_path, build_name + '_ccache')
+ if not os.path.exists(ccache_path):
+ os.makedirs(ccache_path)
+
+ packager_cmd = get_packager_cmd(build_config, os.path.join(repo_path, "docker/packager"), build_output_path, version.get_version_string(), image_version, ccache_path)
+ logging.info("Going to run packager with %s", packager_cmd)
+
+ build_clickhouse_log = os.path.join(temp_path, "build_log")
+ if not os.path.exists(build_clickhouse_log):
+ os.makedirs(build_clickhouse_log)
+
+ start = time.time()
+ log_path, success = build_clickhouse(packager_cmd, build_clickhouse_log)
+ elapsed = int(time.time() - start)
+ subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {build_output_path}", shell=True)
+ subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {ccache_path}", shell=True)
+ logging.info("Build finished with %s, log path %s", success, log_path)
+
+ s3_helper = S3Helper('https://s3.amazonaws.com')
+ s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + build_check_name.lower().replace(' ', '_') + "/" + build_name
+ if os.path.exists(log_path):
+ log_url = s3_helper.upload_build_file_to_s3(log_path, s3_path_prefix + "/" + os.path.basename(log_path))
+ logging.info("Log url %s", log_url)
+    else:
+        log_url = ''  # avoid an unbound name below when the build log is missing
+        logging.info("Build log doesn't exist")
+
+ build_urls = s3_helper.upload_build_folder_to_s3(build_output_path, s3_path_prefix, keep_dirs_in_s3_path=False, upload_symlinks=False)
+ logging.info("Got build URLs %s", build_urls)
+
+ print("::notice ::Build URLs: {}".format('\n'.join(build_urls)))
+
+ result = {
+ "log_url": log_url,
+ "build_urls": build_urls,
+ "build_config": build_config,
+ "elapsed_seconds": elapsed,
+ "status": success,
+ }
+
+ print("::notice ::Log URL: {}".format(log_url))
+
+ with open(os.path.join(temp_path, "build_urls_" + build_name + '.json'), 'w') as build_links:
+ json.dump(result, build_links)
diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py
new file mode 100644
index 00000000000..4639365ded0
--- /dev/null
+++ b/tests/ci/build_report_check.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+
+import json
+from github import Github
+from report import create_build_html_report
+from s3_helper import S3Helper
+import logging
+import os
+from get_robot_token import get_best_robot_token
+import sys
+from pr_info import PRInfo
+
+class BuildResult(object):
+ def __init__(self, compiler, build_type, sanitizer, bundled, splitted, status, elapsed_seconds, with_coverage):
+ self.compiler = compiler
+ self.build_type = build_type
+ self.sanitizer = sanitizer
+ self.bundled = bundled
+ self.splitted = splitted
+ self.status = status
+ self.elapsed_seconds = elapsed_seconds
+ self.with_coverage = with_coverage
+
+def group_by_artifacts(build_urls):
+    groups = {'deb': [], 'binary': [], 'tgz': [], 'rpm': [], 'performance': []}
+ for url in build_urls:
+ if url.endswith('performance.tgz'):
+ groups['performance'].append(url)
+ elif url.endswith('.deb') or url.endswith('.buildinfo') or url.endswith('.changes') or url.endswith('.tar.gz'):
+ groups['deb'].append(url)
+ elif url.endswith('.rpm'):
+ groups['rpm'].append(url)
+ elif url.endswith('.tgz'):
+ groups['tgz'].append(url)
+ else:
+ groups['binary'].append(url)
+ return groups
+
+def get_commit(gh, commit_sha):
+ repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse"))
+ commit = repo.get_commit(commit_sha)
+ return commit
+
+def process_report(build_report):
+ build_config = build_report['build_config']
+ build_result = BuildResult(
+ compiler=build_config['compiler'],
+ build_type=build_config['build-type'],
+ sanitizer=build_config['sanitizer'],
+ bundled=build_config['bundled'],
+ splitted=build_config['splitted'],
+ status="success" if build_report['status'] else "failure",
+ elapsed_seconds=build_report['elapsed_seconds'],
+ with_coverage=False
+ )
+ build_results = []
+ build_urls = []
+ build_logs_urls = []
+ urls_groups = group_by_artifacts(build_report['build_urls'])
+ found_group = False
+ for _, group_urls in urls_groups.items():
+ if group_urls:
+ build_results.append(build_result)
+ build_urls.append(group_urls)
+ build_logs_urls.append(build_report['log_url'])
+ found_group = True
+
+ if not found_group:
+ build_results.append(build_result)
+ build_urls.append([""])
+ build_logs_urls.append(build_report['log_url'])
+
+ return build_results, build_urls, build_logs_urls
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.INFO)
+ reports_path = os.getenv("REPORTS_PATH", "./reports")
+ temp_path = os.path.join(os.getenv("TEMP_PATH", "."))
+ logging.info("Reports path %s", reports_path)
+
+ if not os.path.exists(temp_path):
+ os.makedirs(temp_path)
+
+ build_check_name = sys.argv[1]
+
+ build_reports = []
+    for root, _, files in os.walk(reports_path):
+ for f in files:
+ if f.startswith("build_urls_") and f.endswith('.json'):
+ logging.info("Found build report json %s", f)
+ with open(os.path.join(root, f), 'r') as file_handler:
+ build_report = json.load(file_handler)
+ build_reports.append(build_report)
+
+
+ build_results = []
+ build_artifacts = []
+ build_logs = []
+
+ for build_report in build_reports:
+ build_result, build_artifacts_url, build_logs_url = process_report(build_report)
+ logging.info("Got %s result for report", len(build_result))
+ build_results += build_result
+ build_artifacts += build_artifacts_url
+ build_logs += build_logs_url
+
+ logging.info("Totally got %s results", len(build_results))
+
+ gh = Github(get_best_robot_token())
+ s3_helper = S3Helper('https://s3.amazonaws.com')
+ with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
+ event = json.load(event_file)
+
+ pr_info = PRInfo(event)
+
+ branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
+ branch_name = "master"
+ if pr_info.number != 0:
+ branch_name = "PR #{}".format(pr_info.number)
+ branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_info.number)
+ commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{pr_info.sha}"
+ task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID', 0)}"
+ report = create_build_html_report(
+ build_check_name,
+ build_results,
+ build_logs,
+ build_artifacts,
+ task_url,
+ branch_url,
+ branch_name,
+ commit_url
+ )
+
+ report_path = os.path.join(temp_path, 'report.html')
+ with open(report_path, 'w') as f:
+ f.write(report)
+
+ logging.info("Going to upload prepared report")
+ context_name_for_path = build_check_name.lower().replace(' ', '_')
+ s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + context_name_for_path
+
+ url = s3_helper.upload_build_file_to_s3(report_path, s3_path_prefix + "/report.html")
+ logging.info("Report url %s", url)
+
+ total_builds = len(build_results)
+ ok_builds = 0
+ summary_status = "success"
+ for build_result in build_results:
+ if build_result.status == "failure" and summary_status != "error":
+ summary_status = "failure"
+ if build_result.status == "error" or not build_result.status:
+ summary_status = "error"
+
+ if build_result.status == "success":
+ ok_builds += 1
+
+ description = "{}/{} builds are OK".format(ok_builds, total_builds)
+
+ print("::notice ::Report url: {}".format(url))
+
+ commit = get_commit(gh, pr_info.sha)
+ commit.create_status(context=build_check_name, description=description, state=summary_status, target_url=url)
diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py
new file mode 100644
index 00000000000..5405187e2e5
--- /dev/null
+++ b/tests/ci/fast_test_check.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+
+import csv
+import json
+import logging
+import os
+import subprocess
+import time
+
+from github import Github
+
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from report import create_test_html_report
+from s3_helper import S3Helper
+
+NAME = 'Fast test (actions)'
+
+def get_fasttest_cmd(workspace, output_path, ccache_path, repo_path, pr_number, commit_sha, image):
+ return f"docker run --cap-add=SYS_PTRACE " \
+ f"-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output " \
+ f"-e FASTTEST_SOURCE=/ClickHouse --cap-add=SYS_PTRACE " \
+ f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} -e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1 " \
+ f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse --volume={output_path}:/test_output "\
+ f"--volume={ccache_path}:/fasttest-workspace/ccache {image}"
+
+
+def process_results(result_folder):
+ test_results = []
+ additional_files = []
+ # Just upload all files from result_folder.
+ # If task provides processed results, then it's responsible for content of result_folder.
+ if os.path.exists(result_folder):
+ test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))]
+ additional_files = [os.path.join(result_folder, f) for f in test_files]
+
+ status_path = os.path.join(result_folder, "check_status.tsv")
+ logging.info("Found test_results.tsv")
+ status = list(csv.reader(open(status_path, 'r'), delimiter='\t'))
+ if len(status) != 1 or len(status[0]) != 2:
+ return "error", "Invalid check_status.tsv", test_results, additional_files
+ state, description = status[0][0], status[0][1]
+
+ results_path = os.path.join(result_folder, "test_results.tsv")
+ test_results = list(csv.reader(open(results_path, 'r'), delimiter='\t'))
+ if len(test_results) == 0:
+ raise Exception("Empty results")
+
+ return state, description, test_results, additional_files
+
+
+def process_logs(s3_client, additional_logs, s3_path_prefix):
+ additional_urls = []
+ for log_path in additional_logs:
+ if log_path:
+ additional_urls.append(
+ s3_client.upload_test_report_to_s3(
+ log_path,
+ s3_path_prefix + "/" + os.path.basename(log_path)))
+
+ return additional_urls
+
+
+def upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files):
+ additional_files = [raw_log] + additional_files
+ s3_path_prefix = f"{pr_number}/{commit_sha}/fasttest"
+ additional_urls = process_logs(s3_client, additional_files, s3_path_prefix)
+
+ branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
+ branch_name = "master"
+ if pr_number != 0:
+ branch_name = "PR #{}".format(pr_number)
+ branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number)
+ commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}"
+
+ task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
+
+ raw_log_url = additional_urls[0]
+ additional_urls.pop(0)
+
+ html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, True)
+ with open('report.html', 'w') as f:
+ f.write(html_report)
+
+ url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html")
+ logging.info("Search result in url %s", url)
+ return url
+
+def get_commit(gh, commit_sha):
+ repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse"))
+ commit = repo.get_commit(commit_sha)
+ return commit
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.INFO)
+ temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
+ caches_path = os.getenv("CACHES_PATH", temp_path)
+
+ if not os.path.exists(temp_path):
+ os.makedirs(temp_path)
+
+ with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
+ event = json.load(event_file)
+
+ pr_info = PRInfo(event)
+
+ gh = Github(get_best_robot_token())
+
+ images_path = os.path.join(temp_path, 'changed_images.json')
+ docker_image = 'clickhouse/fasttest'
+ if os.path.exists(images_path):
+ logging.info("Images file exists")
+ with open(images_path, 'r') as images_fd:
+ images = json.load(images_fd)
+ logging.info("Got images %s", images)
+            if 'clickhouse/fasttest' in images:
+                docker_image += ':' + images['clickhouse/fasttest']
+
+ logging.info("Got docker image %s", docker_image)
+ for i in range(10):
+ try:
+ subprocess.check_output(f"docker pull {docker_image}", shell=True)
+ break
+ except Exception as ex:
+ time.sleep(i * 3)
+ logging.info("Got execption pulling docker %s", ex)
+ else:
+ raise Exception(f"Cannot pull dockerhub for image {docker_image}")
+
+
+ s3_helper = S3Helper('https://s3.amazonaws.com')
+
+ workspace = os.path.join(temp_path, "fasttest-workspace")
+ if not os.path.exists(workspace):
+ os.makedirs(workspace)
+
+ output_path = os.path.join(temp_path, "fasttest-output")
+ if not os.path.exists(output_path):
+ os.makedirs(output_path)
+
+ cache_path = os.path.join(caches_path, "fasttest")
+ if not os.path.exists(cache_path):
+ os.makedirs(cache_path)
+
+ repo_path = os.path.join(temp_path, "fasttest-repo")
+ if not os.path.exists(repo_path):
+ os.makedirs(repo_path)
+
+ run_cmd = get_fasttest_cmd(workspace, output_path, cache_path, repo_path, pr_info.number, pr_info.sha, docker_image)
+ logging.info("Going to run fasttest with cmd %s", run_cmd)
+
+ logs_path = os.path.join(temp_path, "fasttest-logs")
+ if not os.path.exists(logs_path):
+ os.makedirs(logs_path)
+
+ run_log_path = os.path.join(logs_path, 'runlog.log')
+ with open(run_log_path, 'w') as log:
+ retcode = subprocess.Popen(run_cmd, shell=True, stderr=log, stdout=log).wait()
+ if retcode == 0:
+ logging.info("Run successfully")
+ else:
+ logging.info("Run failed")
+
+ subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
+ subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {cache_path}", shell=True)
+
+ test_output_files = os.listdir(output_path)
+ additional_logs = []
+ for f in test_output_files:
+ additional_logs.append(os.path.join(output_path, f))
+
+ test_log_exists = 'test_log.txt' in test_output_files or 'test_result.txt' in test_output_files
+ test_result_exists = 'test_results.tsv' in test_output_files
+ test_results = []
+ if 'submodule_log.txt' not in test_output_files:
+ description = "Cannot clone repository"
+ state = "failure"
+ elif 'cmake_log.txt' not in test_output_files:
+ description = "Cannot fetch submodules"
+ state = "failure"
+ elif 'build_log.txt' not in test_output_files:
+ description = "Cannot finish cmake"
+ state = "failure"
+ elif 'install_log.txt' not in test_output_files:
+ description = "Cannot build ClickHouse"
+ state = "failure"
+ elif not test_log_exists and not test_result_exists:
+ description = "Cannot install or start ClickHouse"
+ state = "failure"
+ else:
+ state, description, test_results, additional_logs = process_results(output_path)
+
+ report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, run_log_path, additional_logs)
+ print("::notice ::Report url: {}".format(report_url))
+ commit = get_commit(gh, pr_info.sha)
+ commit.create_status(context=NAME, description=description, state=state, target_url=report_url)
diff --git a/tests/ci/metrics_lambda/app.py b/tests/ci/metrics_lambda/app.py
index d2fb048638b..2a3c7670ac4 100644
--- a/tests/ci/metrics_lambda/app.py
+++ b/tests/ci/metrics_lambda/app.py
@@ -67,6 +67,24 @@ def list_runners(access_token):
result.append(desc)
return result
+def group_runners_by_tag(listed_runners):
+ result = {}
+
+ RUNNER_TYPE_LABELS = ['style-checker', 'builder']
+ for runner in listed_runners:
+ for tag in runner.tags:
+ if tag in RUNNER_TYPE_LABELS:
+ if tag not in result:
+ result[tag] = []
+ result[tag].append(runner)
+ break
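+        # for/else: the else branch runs only when the inner loop finishes
+        # without hitting `break`, i.e. none of the runner's tags is a known type label.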
+ else:
+ if 'unlabeled' not in result:
+ result['unlabeled'] = []
+ result['unlabeled'].append(runner)
+ return result
+
+
def push_metrics_to_cloudwatch(listed_runners, namespace):
import boto3
client = boto3.client('cloudwatch')
@@ -100,7 +118,7 @@ def push_metrics_to_cloudwatch(listed_runners, namespace):
'Unit': 'Percent',
})
- client.put_metric_data(Namespace='RunnersMetrics', MetricData=metrics_data)
+ client.put_metric_data(Namespace=namespace, MetricData=metrics_data)
def main(github_secret_key, github_app_id, push_to_cloudwatch):
payload = {
@@ -113,10 +131,12 @@ def main(github_secret_key, github_app_id, push_to_cloudwatch):
installation_id = get_installation_id(encoded_jwt)
access_token = get_access_token(encoded_jwt, installation_id)
runners = list_runners(access_token)
- if push_to_cloudwatch:
- push_metrics_to_cloudwatch(runners, 'RunnersMetrics')
- else:
- print(runners)
+ grouped_runners = group_runners_by_tag(runners)
+ for group, group_runners in grouped_runners.items():
+ if push_to_cloudwatch:
+ push_metrics_to_cloudwatch(group_runners, 'RunnersMetrics/' + group)
+ else:
+ print(group, group_runners)
if __name__ == "__main__":
diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py
index 8feedb2d4d7..5eb4cd6ad3e 100644
--- a/tests/ci/pr_info.py
+++ b/tests/ci/pr_info.py
@@ -39,3 +39,9 @@ class PRInfo:
'user_login': self.user_login,
'user_orgs': self.user_orgs,
}
+
+
+class FakePRInfo:
+ def __init__(self):
+ self.number = 11111
+ self.sha = "xxxxxxxxxxxxxxxxxx"
diff --git a/tests/ci/pvs_check.py b/tests/ci/pvs_check.py
index f8b1b58f307..03600476d92 100644
--- a/tests/ci/pvs_check.py
+++ b/tests/ci/pvs_check.py
@@ -9,7 +9,7 @@ from s3_helper import S3Helper
from pr_info import PRInfo
import shutil
import sys
-from get_robot_token import get_best_robot_token
+from get_robot_token import get_best_robot_token, get_parameter_from_ssm
NAME = 'PVS Studio (actions)'
LICENCE_NAME = 'Free license: ClickHouse, Yandex'
@@ -97,7 +97,7 @@ if __name__ == "__main__":
s3_helper = S3Helper('https://s3.amazonaws.com')
- licence_key = os.getenv('PVS_STUDIO_KEY')
+ licence_key = get_parameter_from_ssm('pvs_studio_key')
cmd = f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --volume={repo_path}:/repo_folder --volume={temp_path}:/test_output -e LICENCE_NAME='{LICENCE_NAME}' -e LICENCE_KEY='{licence_key}' {docker_image}"
commit = get_commit(gh, pr_info.sha)
diff --git a/tests/ci/report.py b/tests/ci/report.py
index 5c9b174599d..a102a005d9b 100644
--- a/tests/ci/report.py
+++ b/tests/ci/report.py
@@ -32,6 +32,9 @@ table {{ border: 0; }}
.main {{ margin-left: 10%; }}
p.links a {{ padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-space: nowrap; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }}
th {{ cursor: pointer; }}
+.failed {{ cursor: pointer; }}
+.failed-content.open {{}}
+.failed-content {{ display: none; }}
</style>
<title>{title}</title>
</head>
@@ -51,7 +54,13 @@ th {{ cursor: pointer; }}