ClickHouse/tests/ci/workflow_approve_rerun_lambda/app.py

501 lines
15 KiB
Python
Raw Normal View History

2021-11-02 10:59:25 +00:00
#!/usr/bin/env python3
2022-05-06 18:50:44 +00:00
from collections import namedtuple
import fnmatch
2021-11-02 10:59:25 +00:00
import json
import time
2022-05-06 18:50:44 +00:00
import jwt
2022-01-19 16:00:41 +00:00
import requests # type: ignore
import boto3 # type: ignore
2021-11-02 10:59:25 +00:00
# A PR touching at least this many files is flagged for manual approval.
SUSPICIOUS_CHANGED_FILES_NUMBER = 200

# fnmatch glob patterns: paths that can change CI behaviour or the release
# process itself; a PR touching any of them is never approved automatically.
SUSPICIOUS_PATTERNS = [
    "tests/ci/*",
    "docs/tools/*",
    ".github/*",
    "utils/release/*",
    "docker/*",
    "release",
]

# Number of retries for API calls.
MAX_RETRY = 5

# Number of times a check can re-run as a whole.
# It is needed, because we are using AWS "spot" instances, that are terminated often
MAX_WORKFLOW_RERUN = 20
2021-11-02 10:59:25 +00:00
# Immutable record of one workflow_run webhook event; populated from the raw
# payload by get_workflow_description_from_event.
WorkflowDescription = namedtuple(
    "WorkflowDescription",
    "name action run_id event workflow_id conclusion status api_url "
    "fork_owner_login fork_branch rerun_url jobs_url attempt repo_url url",
)
2021-11-08 10:05:59 +00:00
# See https://api.github.com/orgs/{name}
# GitHub organization ids; membership in any of them makes a PR author trusted
# (see is_trusted_contributor).
TRUSTED_ORG_IDS = {
    7409213,  # yandex
    28471076,  # altinity
    54801242,  # clickhouse
}

# See https://api.github.com/repos/ClickHouse/ClickHouse/actions/workflows
# Use ID to not inject a malicious workflow
TRUSTED_WORKFLOW_IDS = {
    14586616,  # Cancel workflows, always trusted
}
2021-12-09 13:28:15 +00:00
# Workflow names eligible for automatic restart after a failure
# (consulted in check_workflow_completed).
NEED_RERUN_WORKFLOWS = {
    "BackportPR",
    "Docs",
    "DocsRelease",
    "MasterCI",
    "PullRequestCI",
    "ReleaseCI",
}
2021-11-02 10:59:25 +00:00
# Individual trusted contributors who are not in any trusted organization.
# Can be changed in runtime: we will append users that we learned to be in
# a trusted org, to save GitHub API calls.
# Logins are stored lowercased, since is_trusted_contributor lowercases the
# PR author's login before the lookup.
TRUSTED_CONTRIBUTORS = {
    e.lower()
    for e in [
        "achimbab",
        "adevyatova",  # DOCSUP (fixed: used to contain a trailing space and could never match)
        "Algunenano",  # Raúl Marín, Tinybird
        "amosbird",
        "AnaUvarova",  # DOCSUP
        "anauvarova",  # technical writer, Yandex
        "annvsh",  # technical writer, Yandex
        "atereh",  # DOCSUP
        "azat",
        "bharatnc",  # Newbie, but already with many contributions.
        "bobrik",  # Seasoned contributor, CloudFlare
        "BohuTANG",
        "codyrobert",  # Flickerbox engineer
        "cwurm",  # Employee
        "damozhaeva",  # DOCSUP
        "den-crane",
        "flickerbox-tom",  # Flickerbox
        "gyuton",  # DOCSUP
        "hagen1778",  # Roman Khavronenko, seasoned contributor
        "hczhcz",
        "hexiaoting",  # Seasoned contributor
        "ildus",  # adjust, ex-pgpro
        "javisantana",  # a Spanish ClickHouse enthusiast, ex-Carto
        "ka1bi4",  # DOCSUP
        "kirillikoff",  # DOCSUP
        "kreuzerkrieg",
        "lehasm",  # DOCSUP
        "michon470",  # DOCSUP
        "MyroTk",  # Tester in Altinity
        "myrrc",  # Michael Kot, Altinity
        "nikvas0",
        "nvartolomei",
        "olgarev",  # DOCSUP
        "otrazhenia",  # Yandex docs contractor
        "pdv-ru",  # DOCSUP
        "podshumok",  # cmake expert from QRator Labs
        "s-mx",  # Maxim Sabyanin, former employee, present contributor
        "sevirov",  # technical writer, Yandex
        "spongedu",  # Seasoned contributor
        "taiyang-li",
        "ucasFL",  # Amos Bird's friend
        "vdimir",  # Employee
        "vzakaznikov",
        "YiuRULE",
        "zlobober",  # Developer of YT
        "ilejn",  # Arenadata, responsible for Kerberized Kafka
        "thomoco",  # ClickHouse
        "BoloniniD",  # Seasoned contributor, HSE
        "tonickkozlov",  # Cloudflare
        "tylerhannan",  # ClickHouse Employee
    ]
}
2021-11-02 10:59:25 +00:00
2021-11-04 15:47:35 +00:00
def get_installation_id(jwt_token):
    """Return the GitHub App installation id for the ClickHouse account.

    :param jwt_token: app-level JWT signed with the app's private key
    :raises requests.HTTPError: on an unsuccessful API response
    :raises Exception: when no installation belongs to "ClickHouse"
    """
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.get("https://api.github.com/app/installations", headers=headers)
    response.raise_for_status()
    data = response.json()
    for installation in data:
        if installation["account"]["login"] == "ClickHouse":
            return installation["id"]
    # Previously the function fell through and returned None here, which only
    # surfaced later as an opaque failure in get_access_token; fail fast instead.
    raise Exception("Installation for the ClickHouse account is not found")
2021-11-04 15:47:35 +00:00
def get_access_token(jwt_token, installation_id):
    """Exchange an app-level JWT for an installation access token."""
    url = f"https://api.github.com/app/installations/{installation_id}/access_tokens"
    response = requests.post(
        url,
        headers={
            "Authorization": f"Bearer {jwt_token}",
            "Accept": "application/vnd.github.v3+json",
        },
    )
    response.raise_for_status()
    return response.json()["token"]
2021-11-04 15:47:35 +00:00
def get_key_and_app_from_aws():
    """Read the GitHub App private key and app id from AWS Secrets Manager.

    Returns a (private_key, app_id) tuple.
    """
    client = boto3.session.Session().client(service_name="secretsmanager")
    secret = client.get_secret_value(SecretId="clickhouse_github_secret_key")
    parsed = json.loads(secret["SecretString"])
    return parsed["clickhouse-app-key"], int(parsed["clickhouse-app-id"])
2021-11-04 15:47:35 +00:00
2021-11-25 16:25:29 +00:00
def is_trusted_contributor(pr_user_login, pr_user_orgs):
    """Return True when the PR author is allow-listed directly or belongs to a trusted org.

    :param pr_user_login: GitHub login of the PR author
    :param pr_user_orgs: iterable of GitHub organization ids the author is in
    """
    if pr_user_login.lower() in TRUSTED_CONTRIBUTORS:
        print(f"User '{pr_user_login}' is trusted")
        return True

    print(f"User '{pr_user_login}' is not trusted")

    for org_id in pr_user_orgs:
        if org_id in TRUSTED_ORG_IDS:
            print(
                f"Org '{org_id}' is trusted; will mark user {pr_user_login} as trusted"
            )
            return True
        # Logged per organization so the decision trail is visible in CloudWatch.
        print(f"Org '{org_id}' is not trusted")

    return False
2022-08-03 16:35:57 +00:00
def _exec_get_with_retry(url, token):
    """GET `url` with token auth, retrying up to MAX_RETRY times.

    Returns the parsed JSON body. Raises when every attempt fails, chaining
    the last underlying exception for debuggability.
    """
    headers = {"Authorization": f"token {token}"}
    last_exc = None
    for i in range(MAX_RETRY):
        try:
            response = requests.get(url, headers=headers)
            response.raise_for_status()
            return response.json()
        except Exception as ex:
            print("Got exception executing request", ex)
            last_exc = ex
            # Linear back-off; previously we also slept after the final failed
            # attempt, wasting lambda time right before raising.
            if i + 1 < MAX_RETRY:
                time.sleep(i + 1)
    raise Exception("Cannot execute GET request with retries") from last_exc
2021-11-02 10:59:25 +00:00
def _exec_post_with_retry(url, token, data=None):
    """POST to `url` with retries; returns the parsed JSON response.

    A 403 answer saying "This workflow run is not waiting for approval" is
    treated as success: the desired state is already reached.

    :param data: optional JSON payload to send with each attempt
    """
    headers = {"Authorization": f"token {token}"}
    last_exc = None
    for i in range(MAX_RETRY):
        try:
            if data:
                response = requests.post(url, headers=headers, json=data)
            else:
                response = requests.post(url, headers=headers)
            if response.status_code == 403:
                # BUGFIX: this used to be assigned to `data`, clobbering the
                # payload parameter — a retry after a non-matching 403 would
                # then POST the error body instead of the original payload.
                answer = response.json()
                if (
                    "message" in answer
                    and answer["message"]
                    == "This workflow run is not waiting for approval"
                ):
                    print("Workflow doesn't need approval")
                    return answer
            response.raise_for_status()
            return response.json()
        except Exception as ex:
            print("Got exception executing request", ex)
            last_exc = ex
            if i + 1 < MAX_RETRY:
                time.sleep(i + 1)
    raise Exception("Cannot execute POST request with retry") from last_exc
2022-08-03 16:35:57 +00:00
def _get_pull_requests_from(repo_url, owner, branch, token):
    """List open PRs whose head branch is `owner`:`branch` in the given repo."""
    query_url = f"{repo_url}/pulls?head={owner}:{branch}"
    return _exec_get_with_retry(query_url, token)
2021-11-02 10:59:25 +00:00
2021-11-02 10:59:25 +00:00
def get_workflow_description_from_event(event):
    """Build a WorkflowDescription from a workflow_run webhook payload."""
    run = event["workflow_run"]
    return WorkflowDescription(
        name=run["name"],
        action=event["action"],
        run_id=run["id"],
        event=run["event"],
        fork_owner_login=run["head_repository"]["owner"]["login"],
        fork_branch=run["head_branch"],
        workflow_id=run["workflow_id"],
        conclusion=run["conclusion"],
        attempt=run["run_attempt"],
        status=run["status"],
        jobs_url=run["jobs_url"],
        rerun_url=run["rerun_url"],
        url=run["html_url"],
        repo_url=event["repository"]["url"],
        api_url=run["url"],
    )
2022-08-03 16:35:57 +00:00
def get_pr_author_and_orgs(pull_request, token):
    """Return (author_login, [org_id, ...]) for the PR's author."""
    user = pull_request["user"]
    org_records = _exec_get_with_retry(user["organizations_url"], token)
    return user["login"], [org["id"] for org in org_records]
2021-11-02 10:59:25 +00:00
2022-08-03 16:35:57 +00:00
def get_changed_files_for_pull_request(pull_request, token):
    """Collect the set of file paths changed by a PR, fetching page by page.

    Fetching stops early once SUSPICIOUS_CHANGED_FILES_NUMBER paths are seen —
    that is already enough for check_suspicious_changed_files to flag the PR.
    """
    base_url = pull_request["url"]
    changed_files = set()
    # GitHub caps list-files pagination; 30 pages * 100 = up to 3000 entries.
    for page in range(1, 31):
        print("Requesting changed files page", page)
        data = _exec_get_with_retry(f"{base_url}/files?page={page}&per_page=100", token)
        print(f"Got {len(data)} changed files")
        if not data:
            print("No more changed files")
            break

        changed_files.update(change["filename"] for change in data)

        if len(changed_files) >= SUSPICIOUS_CHANGED_FILES_NUMBER:
            print(
                f"More than {len(changed_files)} changed files. "
                "Will stop fetching new files."
            )
            break
    return changed_files
2021-11-02 10:59:25 +00:00
def check_suspicious_changed_files(changed_files):
    """Return True when the change set requires manual approval.

    Either the PR is too large, or some path matches one of the
    SUSPICIOUS_PATTERNS globs.
    """
    if len(changed_files) >= SUSPICIOUS_CHANGED_FILES_NUMBER:
        print(f"Too many files changed {len(changed_files)}, need manual approve")
        return True

    for path in changed_files:
        matched = next(
            (p for p in SUSPICIOUS_PATTERNS if fnmatch.fnmatch(path, p)), None
        )
        if matched is not None:
            print(
                f"File {path} match suspicious pattern {matched}, "
                "will not approve automatically"
            )
            return True

    print("No changed files match suspicious patterns, run will be approved")
    return False
def approve_run(workflow_description: WorkflowDescription, token):
    """Approve the pending workflow run via the GitHub Actions API."""
    _exec_post_with_retry(f"{workflow_description.api_url}/approve", token)
2021-11-02 10:59:25 +00:00
def label_manual_approve(pull_request, token):
    """Attach the "manual approve" label to the pull request.

    The GitHub "add labels to an issue" endpoint requires the "labels" field
    to be an array of label names; the previous bare-string payload did not
    match the documented schema.
    """
    url = f"{pull_request['url']}/labels"
    data = {"labels": ["manual approve"]}
    _exec_post_with_retry(url, token, data)
2021-11-02 10:59:25 +00:00
def get_token_from_aws():
    """Create an installation access token from the app key stored in AWS."""
    private_key, app_id = get_key_and_app_from_aws()
    now = int(time.time())
    payload = {
        "iat": now - 60,  # backdated to tolerate clock drift
        "exp": now + 10 * 60,
        "iss": app_id,
    }
    encoded_jwt = jwt.encode(payload, private_key, algorithm="RS256")
    installation_id = get_installation_id(encoded_jwt)
    return get_access_token(encoded_jwt, installation_id)
2021-11-02 10:59:25 +00:00
2022-08-03 16:35:57 +00:00
def get_workflow_jobs(workflow_description, token):
    """Fetch every job of the current run attempt, following pagination."""
    jobs_url = (
        workflow_description.api_url + f"/attempts/{workflow_description.attempt}/jobs"
    )
    jobs = []
    page = 1
    while True:
        batch = _exec_get_with_retry(f"{jobs_url}?page={page}", token)["jobs"]
        if not batch:
            break
        jobs.extend(batch)
        page += 1
    return jobs
2022-08-03 16:35:57 +00:00
def check_need_to_rerun(workflow_description, token):
    """Decide whether a failed workflow run should be restarted.

    A rerun is wanted when a failed job never reached its final
    "Complete job" step — the signature of a terminated spot instance
    rather than a genuine test failure.
    """
    if workflow_description.attempt >= MAX_WORKFLOW_RERUN:
        # The old message said "more than two times", contradicting the real
        # limit (MAX_WORKFLOW_RERUN); report the actual value instead.
        print(
            "Not going to rerun workflow because it's already tried "
            f"more than {MAX_WORKFLOW_RERUN} times"
        )
        return False
    print("Going to check jobs")

    jobs = get_workflow_jobs(workflow_description, token)
    print("Got jobs", len(jobs))
    for job in jobs:
        if job["conclusion"] not in ("success", "skipped"):
            print("Job", job["name"], "failed, checking steps")
            for step in job["steps"]:
                # always the last job
                if step["name"] == "Complete job":
                    print("Found Complete job step for job", job["name"])
                    break
            else:
                print(
                    "Checked all steps and doesn't found Complete job, going to rerun"
                )
                return True
    return False
2021-12-09 13:28:15 +00:00
def rerun_workflow(workflow_description, token):
    """Restart the workflow: try rerunning only failed jobs, else the whole run."""
    print("Going to rerun workflow")
    failed_jobs_url = f"{workflow_description.rerun_url}-failed-jobs"
    try:
        _exec_post_with_retry(failed_jobs_url, token)
    except Exception:
        # Partial rerun was refused — fall back to restarting everything.
        _exec_post_with_retry(workflow_description.rerun_url, token)
2021-12-09 13:28:15 +00:00
def check_workflow_completed(
    event_data, workflow_description: WorkflowDescription, token: str
) -> bool:
    """Handle a "completed" workflow event; return True when handling ends here.

    Returns False either for non-"completed" actions, or for a failed run of a
    rerunable workflow that was not restarted — the caller continues then.
    """
    if workflow_description.action != "completed":
        return False

    # Nice and reliable GH API sends from time to time such events, e.g:
    # action='completed', conclusion=None, status='in_progress',
    # So let's try receiving a real workflow data
    attempt = 0
    while workflow_description.conclusion is None and attempt < MAX_RETRY:
        # Progressive back-off: 0, 3, 9, 18, 30 seconds.
        time.sleep(3 * attempt * (attempt + 1) // 2)
        event_data["workflow_run"] = _exec_get_with_retry(
            workflow_description.api_url, token
        )
        workflow_description = get_workflow_description_from_event(event_data)
        attempt += 1

    if workflow_description.conclusion != "failure":
        print(
            "Workflow finished with status "
            f"{workflow_description.conclusion}, exiting"
        )
        return True

    print(
        "Workflow",
        workflow_description.url,
        "completed and failed, let's check for rerun",
    )

    if workflow_description.name not in NEED_RERUN_WORKFLOWS:
        print(
            "Workflow",
            workflow_description.name,
            "not in list of rerunable workflows",
        )
        return True

    if check_need_to_rerun(workflow_description, token):
        rerun_workflow(workflow_description, token)
        return True

    return False
def main(event):
    """Process one workflow_run webhook delivery.

    "completed" events may schedule a rerun (check_workflow_completed);
    "requested" events are auto-approved for trusted workflows, trusted
    contributors, or harmless change sets — otherwise the PR is labelled
    for manual approval.
    """
    token = get_token_from_aws()
    event_data = json.loads(event["body"])
    print("The body received:", event["body"])
    workflow_description = get_workflow_description_from_event(event_data)
    print("Got workflow description", workflow_description)

    # "completed" events are fully handled inside; nothing more to do here.
    if check_workflow_completed(event_data, workflow_description, token):
        return

    if workflow_description.action != "requested":
        print("Exiting, event action is", workflow_description.action)
        return

    # Trusted workflows are approved regardless of who triggered them.
    if workflow_description.workflow_id in TRUSTED_WORKFLOW_IDS:
        print("Workflow in trusted list, approving run")
        approve_run(workflow_description, token)
        return

    pull_requests = _get_pull_requests_from(
        workflow_description.repo_url,
        workflow_description.fork_owner_login,
        workflow_description.fork_branch,
        token,
    )

    print("Got pull requests for workflow", len(pull_requests))
    # Ambiguity (0 or >1 PRs with the same head) is left for a human to sort out.
    if len(pull_requests) != 1:
        print(f"Can't continue with non-uniq PRs: {pull_requests}")
        return

    pull_request = pull_requests[0]
    print("Pull request for workflow number", pull_request["number"])

    author, author_orgs = get_pr_author_and_orgs(pull_request, token)
    if is_trusted_contributor(author, author_orgs):
        print("Contributor is trusted, approving run")
        approve_run(workflow_description, token)
        return

    changed_files = get_changed_files_for_pull_request(pull_request, token)
    print(f"Totally have {len(changed_files)} changed files in PR:", changed_files)
    if check_suspicious_changed_files(changed_files):
        print(
            f"Pull Request {pull_request['number']} has suspicious changes, "
            "label it for manuall approve"
        )
        label_manual_approve(pull_request, token)
    else:
        print(f"Pull Request {pull_request['number']} has no suspicious changes")
        approve_run(workflow_description, token)
2021-11-02 10:59:25 +00:00
2021-11-02 10:59:25 +00:00
def handler(event, _):
    """AWS Lambda entry point."""
    try:
        main(event)
    except Exception:
        # Dump the offending event before re-raising so the failure is debuggable.
        print("Received event: ", event)
        raise