Merge branch 'master' into master

Merged by mergify[bot] on 2022-02-11 23:36:34 +00:00 (committed by GitHub)
commit 0fa28a2648
15 changed files with 304 additions and 117 deletions

.github/workflows/nightly.yml (new file)

@@ -0,0 +1,73 @@
name: NightlyBuilds
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
"on":
schedule:
- cron: '0 0 * * *'
jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
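Note: the two per-arch jobs above each upload their own changed_images_<suffix>.json artifact, which DockerHubPush downloads and feeds to docker_manifests_merge.py so the per-arch tags can be stitched into multi-arch manifests. A minimal Python sketch of that merge step; the merge_changed_images helper and the artifact shape (image name mapped to one tag per architecture) are assumptions for illustration, not the real tests/ci code:

import json
import os

RUNNER_TEMP = os.environ.get("RUNNER_TEMP", ".")

def merge_changed_images(suffixes):
    # Collect the per-arch tag lists that a later `docker manifest create`
    # (as performed by docker_manifests_merge.py) would combine per image.
    merged = {}
    for suffix in suffixes:
        path = os.path.join(RUNNER_TEMP, f"changed_images_{suffix}.json")
        if not os.path.exists(path):  # nothing was built for this arch
            continue
        with open(path) as f:
            for name, tag in json.load(f).items():
                merged.setdefault(name, []).append(tag)
    return merged

print(merge_changed_images(["amd64", "aarch64"]))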

docker/images.json

@@ -32,6 +32,7 @@
        "dependent": []
    },
    "docker/test/pvs": {
+       "only_amd64": true,
        "name": "clickhouse/pvs-test",
        "dependent": []
    },
@@ -72,6 +73,7 @@
        "dependent": []
    },
    "docker/test/integration/runner": {
+       "only_amd64": true,
        "name": "clickhouse/integration-tests-runner",
        "dependent": []
    },
@@ -124,6 +126,7 @@
        "dependent": []
    },
    "docker/test/integration/kerberos_kdc": {
+       "only_amd64": true,
        "name": "clickhouse/kerberos-kdc",
        "dependent": []
    },
@@ -137,6 +140,7 @@
        ]
    },
    "docker/test/integration/kerberized_hadoop": {
+       "only_amd64": true,
        "name": "clickhouse/kerberized-hadoop",
        "dependent": []
    },
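Note: the new "only_amd64" flag marks images whose toolchain only exists for x86_64 (PVS-Studio, the kerberos/hadoop stack, the integration runner with its psycopg2-binary dependency). A minimal sketch of how CI consumes the flag; the platform.machine() test is lifted from the docker_images_check.py change further down, while the sample dict and helper name are illustrative:

import platform

# Illustrative subset of docker/images.json after this change.
IMAGES = {
    "docker/test/pvs": {"only_amd64": True, "name": "clickhouse/pvs-test"},
    "docker/test/stateless": {"name": "clickhouse/stateless-test"},
}

def builds_natively(description: dict) -> bool:
    # Unflagged images build everywhere; flagged ones build for real only on
    # x86_64 hosts and are replaced by a dummy tag on other architectures.
    if not description.get("only_amd64", False):
        return True
    return platform.machine() in ("amd64", "x86_64")

for path, description in IMAGES.items():
    action = "native build" if builds_natively(description) else "dummy image"
    print(f"{description['name']}: {action}")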

docker/test/integration/kerberized_hadoop/Dockerfile

@@ -20,4 +20,4 @@ RUN cd /tmp && \
    cd commons-daemon-1.0.15-src/src/native/unix && \
    ./configure && \
    make && \
-   cp ./jsvc /usr/local/hadoop/sbin
+   cp ./jsvc /usr/local/hadoop-2.7.0/sbin

docker/test/integration/runner/Dockerfile

@@ -58,9 +58,7 @@ RUN apt-get update \

RUN dockerd --version; docker --version

-ARG TARGETARCH
-# FIXME: psycopg2-binary is not available for aarch64, we skip it for now
-RUN test x$TARGETARCH = xarm64 || ( python3 -m pip install \
+RUN python3 -m pip install \
    PyMySQL \
    aerospike==4.0.0 \
    avro==1.10.2 \
@@ -90,7 +88,7 @@ RUN test x$TARGETARCH = xarm64 || ( python3 -m pip install \
    urllib3 \
    requests-kerberos \
    pyhdfs \
-   azure-storage-blob )
+   azure-storage-blob

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/

docker/test/performance-comparison/Dockerfile

@@ -1,5 +1,5 @@
# docker build -t clickhouse/performance-comparison .
-FROM ubuntu:18.04
+FROM ubuntu:20.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"

docker/test/pvs/Dockerfile

@@ -4,11 +4,7 @@
ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG

-# PVS studio doesn't support aarch64/arm64, so there is a check for it everywhere
-# We'll produce an empty image for arm64
-ARG TARGETARCH
-RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
+RUN apt-get update --yes \
    && apt-get install \
        bash \
        wget \
@@ -21,7 +17,7 @@ RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
        libprotoc-dev \
        libgrpc++-dev \
        libc-ares-dev \
-       --yes --no-install-recommends )
+       --yes --no-install-recommends

#RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
#RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list
@@ -33,7 +29,7 @@ RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
ENV PKG_VERSION="pvs-studio-latest"

-RUN test x$TARGETARCH = xarm64 || ( set -x \
+RUN set -x \
    && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
    && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
    && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
@@ -41,7 +37,7 @@ RUN test x$TARGETARCH = xarm64 || ( set -x \
    && wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \
    && { debsig-verify ${PKG_VERSION}.deb \
    || echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \
-   && dpkg -i "${PKG_VERSION}.deb" )
+   && dpkg -i "${PKG_VERSION}.deb"

ENV CCACHE_DIR=/test_output/ccache

docker/test/testflows/runner/Dockerfile

@@ -43,24 +43,27 @@ RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 20.10.6

-RUN set -eux; \
-    \
-# this "case" statement is generated via "update.sh"
-    \
-    if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/x86_64/docker-${DOCKER_VERSION}.tgz"; then \
-        echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${x86_64}'"; \
-        exit 1; \
-    fi; \
-    \
-    tar --extract \
+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH
+
+# Install docker
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && set -eux \
+    && if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${rarch}/docker-${DOCKER_VERSION}.tgz"; then \
+        echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${rarch}'" \
+        && exit 1; \
+    fi \
+    && tar --extract \
        --file docker.tgz \
        --strip-components 1 \
        --directory /usr/local/bin/ \
-    ; \
-    rm docker.tgz; \
-    \
-    dockerd --version; \
-    docker --version
+    && rm docker.tgz \
+    && dockerd --version \
+    && docker --version

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
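Note: with BuildKit/buildx, TARGETARCH is filled in automatically from --platform (for example, docker buildx build --platform linux/arm64 sets it to arm64), while Docker's static-binary site names its directories after uname -m, hence the case statement. The same mapping as a small Python sketch; the constants come from the Dockerfile above, the helper name is ours:

DOCKER_CHANNEL = "stable"
DOCKER_VERSION = "20.10.6"
RELEASE_ARCH = {"amd64": "x86_64", "arm64": "aarch64"}  # TARGETARCH -> uname -m

def docker_tgz_url(targetarch: str = "amd64") -> str:
    # Build the download URL exactly as the shell `case` above does.
    rarch = RELEASE_ARCH[targetarch]
    return (
        "https://download.docker.com/linux/static/"
        f"{DOCKER_CHANNEL}/{rarch}/docker-{DOCKER_VERSION}.tgz"
    )

print(docker_tgz_url("arm64"))
# https://download.docker.com/linux/static/stable/aarch64/docker-20.10.6.tgz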

docs/en/operations/settings/settings.md

@@ -2304,7 +2304,7 @@ Possible values:

- 1 — Enabled.
- 0 — Disabled.

-Default value: `0`.
+Default value: `1`.

## output_format_parallel_formatting {#output-format-parallel-formatting}

@@ -2315,7 +2315,7 @@

- 1 — Enabled.
- 0 — Disabled.

-Default value: `0`.
+Default value: `1`.

## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}

docs/ru/operations/settings/settings.md

@@ -2119,7 +2119,7 @@ ClickHouse генерирует исключение:

- 1 — включен режим параллельного разбора.
- 0 — отключен режим параллельного разбора.

-Значение по умолчанию: `0`.
+Значение по умолчанию: `1`.

## output_format_parallel_formatting {#output-format-parallel-formatting}

@@ -2130,7 +2130,7 @@ ClickHouse генерирует исключение:

- 1 — включен режим параллельного форматирования.
- 0 — отключен режим параллельного форматирования.

-Значение по умолчанию: `0`.
+Значение по умолчанию: `1`.

## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}

src/Storages/HDFS/StorageHDFS.cpp

@@ -372,44 +372,47 @@ String HDFSSource::getName() const

Chunk HDFSSource::generate()
{
-    if (!reader)
-        return {};
-
-    Chunk chunk;
-    if (reader->pull(chunk))
+    while (true)
     {
-        Columns columns = chunk.getColumns();
-        UInt64 num_rows = chunk.getNumRows();
+        if (!reader || isCancelled())
+            break;

-        /// Enrich with virtual columns.
-        if (need_path_column)
+        Chunk chunk;
+        if (reader->pull(chunk))
         {
-            auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, current_path);
-            columns.push_back(column->convertToFullColumnIfConst());
+            Columns columns = chunk.getColumns();
+            UInt64 num_rows = chunk.getNumRows();
+
+            /// Enrich with virtual columns.
+            if (need_path_column)
+            {
+                auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, current_path);
+                columns.push_back(column->convertToFullColumnIfConst());
+            }
+
+            if (need_file_column)
+            {
+                size_t last_slash_pos = current_path.find_last_of('/');
+                auto file_name = current_path.substr(last_slash_pos + 1);
+                auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, std::move(file_name));
+                columns.push_back(column->convertToFullColumnIfConst());
+            }
+            return Chunk(std::move(columns), num_rows);
         }

-        if (need_file_column)
         {
-            size_t last_slash_pos = current_path.find_last_of('/');
-            auto file_name = current_path.substr(last_slash_pos + 1);
+            std::lock_guard lock(reader_mutex);
+            reader.reset();
+            pipeline.reset();
+            read_buf.reset();

-            auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, std::move(file_name));
-            columns.push_back(column->convertToFullColumnIfConst());
+            if (!initialize())
+                break;
         }
-
-        return Chunk(std::move(columns), num_rows);
     }
-
-    {
-        std::lock_guard lock(reader_mutex);
-        reader.reset();
-        pipeline.reset();
-        read_buf.reset();
-
-        if (!initialize())
-            return {};
-    }
-    return generate();
+    return {};
}
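Note: the rewrite turns tail recursion into a loop. The old generate() called itself once for every file that produced no chunk, so a glob matching many empty files grew the call stack without bound, and there was no cancellation check between files. A language-neutral sketch of the control-flow change (Python stand-in, not the ClickHouse API):

import sys

def generate_recursive(files):
    # Old shape: one extra stack frame for every exhausted file.
    if not files:
        return None
    if files[0]:
        return files[0].pop(0)   # reader->pull() succeeded
    files.pop(0)                 # reset the reader, initialize() the next file
    return generate_recursive(files)

def generate_loop(files):
    # New shape: same logic, flat stack, natural spot for a cancellation check.
    while files:
        if files[0]:
            return files[0].pop(0)
        files.pop(0)
    return None

empty_files = [[] for _ in range(sys.getrecursionlimit() + 100)]
try:
    generate_recursive(list(empty_files))
except RecursionError:
    print("recursive version overflows on many empty files")
print(generate_loop(list(empty_files)))  # None, and no error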

src/Storages/StorageS3.cpp

@@ -302,40 +302,42 @@ String StorageS3Source::getName() const

Chunk StorageS3Source::generate()
{
-    if (!reader)
-        return {};
-
-    Chunk chunk;
-    if (reader->pull(chunk))
+    while (true)
     {
-        UInt64 num_rows = chunk.getNumRows();
+        if (!reader || isCancelled())
+            break;

-        if (with_path_column)
-            chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
-                                .createColumnConst(num_rows, file_path)
-                                ->convertToFullColumnIfConst());
-        if (with_file_column)
+        Chunk chunk;
+        if (reader->pull(chunk))
         {
-            size_t last_slash_pos = file_path.find_last_of('/');
-            chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
-                                .createColumnConst(num_rows, file_path.substr(last_slash_pos + 1))
-                                ->convertToFullColumnIfConst());
+            UInt64 num_rows = chunk.getNumRows();
+
+            if (with_path_column)
+                chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
+                                    .createColumnConst(num_rows, file_path)
+                                    ->convertToFullColumnIfConst());
+            if (with_file_column)
+            {
+                size_t last_slash_pos = file_path.find_last_of('/');
+                chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
+                                    .createColumnConst(num_rows, file_path.substr(last_slash_pos + 1))
+                                    ->convertToFullColumnIfConst());
+            }
+            return chunk;
         }

-        return chunk;
+        {
+            std::lock_guard lock(reader_mutex);
+            reader.reset();
+            pipeline.reset();
+            read_buf.reset();
+            if (!initialize())
+                break;
+        }
     }
-
-    {
-        std::lock_guard lock(reader_mutex);
-        reader.reset();
-        pipeline.reset();
-        read_buf.reset();
-
-        if (!initialize())
-            return {};
-    }
-    return generate();
+    return {};
}

static bool checkIfObjectExists(const std::shared_ptr<Aws::S3::S3Client> & client, const String & bucket, const String & key)
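Note: StorageS3Source gets the same restructuring, and the `if (!reader || isCancelled()) break;` at the top of the loop is presumably what lets a cancelled query stop between objects: cancellation merely flips a flag, and each iteration now observes it before opening the next file. A minimal Python stand-in for that check (not the ClickHouse API):

import threading

cancelled = threading.Event()  # stand-in for the source's cancellation flag

def generate(files, is_cancelled):
    while files:
        if is_cancelled():     # mirrors `if (!reader || isCancelled()) break;`
            return None
        batch = files.pop(0)
        if batch:
            return batch
    return None

cancelled.set()                                # e.g. a KILL QUERY arrived
print(generate([[1], [2]], cancelled.is_set))  # None: we stop before reading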

tests/ci/docker_images_check.py

@@ -3,10 +3,11 @@ import argparse
import json
import logging
import os
+import platform
import shutil
import subprocess
import time
-from typing import List, Optional, Set, Tuple, Union
+from typing import Dict, List, Optional, Set, Tuple, Union

from github import Github
@@ -23,24 +24,32 @@ NAME = "Push to Dockerhub (actions)"
TEMP_PATH = os.path.join(RUNNER_TEMP, "docker_images_check")

+ImagesDict = Dict[str, dict]
+

class DockerImage:
    def __init__(
        self,
        path: str,
        repo: str,
+        only_amd64: bool,
        parent: Optional["DockerImage"] = None,
        gh_repo_path: str = GITHUB_WORKSPACE,
    ):
        self.path = path
        self.full_path = os.path.join(gh_repo_path, path)
        self.repo = repo
+        self.only_amd64 = only_amd64
        self.parent = parent
        self.built = False

    def __eq__(self, other) -> bool:  # type: ignore
        """Is used to check if DockerImage is in a set or not"""
-        return self.path == other.path and self.repo == self.repo
+        return (
+            self.path == other.path
+            and self.repo == self.repo
+            and self.only_amd64 == other.only_amd64
+        )

    def __lt__(self, other) -> bool:
        if not isinstance(other, DockerImage):
@@ -65,9 +74,8 @@ class DockerImage:
        return f"DockerImage(path={self.path},repo={self.repo},parent={self.parent})"

-def get_changed_docker_images(
-    pr_info: PRInfo, repo_path: str, image_file_path: str
-) -> Set[DockerImage]:
+def get_images_dict(repo_path: str, image_file_path: str) -> ImagesDict:
+    """Return images suppose to build on the current architecture host"""
    images_dict = {}
    path_to_images_file = os.path.join(repo_path, image_file_path)
    if os.path.exists(path_to_images_file):
@@ -78,6 +86,13 @@ def get_changed_docker_images(
            "Image file %s doesnt exists in repo %s", image_file_path, repo_path
        )

+    return images_dict
+
+
+def get_changed_docker_images(
+    pr_info: PRInfo, images_dict: ImagesDict
+) -> Set[DockerImage]:
+
    if not images_dict:
        return set()
@@ -96,6 +111,7 @@
        for f in files_changed:
            if f.startswith(dockerfile_dir):
                name = image_description["name"]
+                only_amd64 = image_description.get("only_amd64", False)
                logging.info(
                    "Found changed file '%s' which affects "
                    "docker image '%s' with path '%s'",
@@ -103,7 +119,7 @@
                    name,
                    dockerfile_dir,
                )
-                changed_images.append(DockerImage(dockerfile_dir, name))
+                changed_images.append(DockerImage(dockerfile_dir, name, only_amd64))
                break

    # The order is important: dependents should go later than bases, so that
@@ -118,9 +134,9 @@
                dependent,
                image,
            )
-            changed_images.append(
-                DockerImage(dependent, images_dict[dependent]["name"], image)
-            )
+            name = images_dict[dependent]["name"]
+            only_amd64 = images_dict[dependent].get("only_amd64", False)
+            changed_images.append(DockerImage(dependent, name, only_amd64, image))
            index += 1
        if index > 5 * len(images_dict):
            # Sanity check to prevent infinite loop.
@@ -161,12 +177,43 @@ def gen_versions(
    return versions, result_version

+def build_and_push_dummy_image(
+    image: DockerImage,
+    version_string: str,
+    push: bool,
+) -> Tuple[bool, str]:
+    dummy_source = "ubuntu:20.04"
+    logging.info("Building docker image %s as %s", image.repo, dummy_source)
+    build_log = os.path.join(
+        TEMP_PATH, f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}"
+    )
+    with open(build_log, "wb") as bl:
+        cmd = (
+            f"docker pull {dummy_source}; "
+            f"docker tag {dummy_source} {image.repo}:{version_string}; "
+        )
+        if push:
+            cmd += f"docker push {image.repo}:{version_string}"
+
+        logging.info("Docker command to run: %s", cmd)
+        with subprocess.Popen(cmd, shell=True, stderr=bl, stdout=bl) as proc:
+            retcode = proc.wait()
+
+            if retcode != 0:
+                return False, build_log
+
+    logging.info("Processing of %s successfully finished", image.repo)
+    return True, build_log
+
+
def build_and_push_one_image(
    image: DockerImage,
    version_string: str,
    push: bool,
    child: bool,
) -> Tuple[bool, str]:
+    if image.only_amd64 and platform.machine() not in ["amd64", "x86_64"]:
+        return build_and_push_dummy_image(image, version_string, push)
+
    logging.info(
        "Building docker image %s with version %s from path %s",
        image.repo,
@@ -290,10 +337,15 @@ def parse_args() -> argparse.Namespace:
        default="clickhouse",
        help="docker hub repository prefix",
    )
+    parser.add_argument(
+        "--all",
+        action="store_true",
+        help="rebuild all images",
+    )
    parser.add_argument(
        "--image-path",
        type=str,
        action="append",
        nargs="*",
        help="list of image paths to build instead of using pr_info + diff URL, "
        "e.g. 'docker/packager/binary'",
    )
@@ -336,15 +388,18 @@ def main():
        shutil.rmtree(TEMP_PATH)
    os.makedirs(TEMP_PATH)

-    if args.image_path:
+    images_dict = get_images_dict(GITHUB_WORKSPACE, "docker/images.json")
+
+    if args.all:
+        pr_info = PRInfo()
+        pr_info.changed_files = set(images_dict.keys())
+    elif args.image_path:
        pr_info = PRInfo()
        pr_info.changed_files = set(i for i in args.image_path)
    else:
        pr_info = PRInfo(need_changed_files=True)

-    changed_images = get_changed_docker_images(
-        pr_info, GITHUB_WORKSPACE, "docker/images.json"
-    )
+    changed_images = get_changed_docker_images(pr_info, images_dict)
    logging.info("Has changed images %s", ", ".join([im.path for im in changed_images]))

    image_versions, result_version = gen_versions(pr_info, args.suffix)

tests/ci/docker_manifests_merge.py

@@ -57,7 +57,7 @@ def parse_args() -> argparse.Namespace:
    args = parser.parse_args()
    if len(args.suffixes) < 2:
-        raise parser.error("more than two --suffix should be given")
+        parser.error("more than two --suffix should be given")

    return args
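Note: dropping `raise` here is not just style: argparse's parser.error() prints the usage message and exits by itself (it raises SystemExit with status 2), so there was never a return value to raise. A runnable illustration:

import argparse

parser = argparse.ArgumentParser()
try:
    parser.error("more than two --suffix should be given")
except SystemExit as e:
    print(f"parser.error() already exited with status {e.code}")  # prints 2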
@@ -81,6 +81,7 @@ def strip_suffix(suffix: str, images: Images) -> Images:
def check_sources(to_merge: Dict[str, Images]) -> Images:
    """get a dict {arch1: Images, arch2: Images}"""
    result = {}  # type: Images
+    first_suffix = ""
    for suffix, images in to_merge.items():

tests/ci/docker_test.py

@@ -23,54 +23,69 @@ class TestDockerImageCheck(unittest.TestCase):
            "docker/docs/builder",
        }
        images = sorted(
-            list(di.get_changed_docker_images(pr_info, "/", self.docker_images_path))
+            list(
+                di.get_changed_docker_images(
+                    pr_info, di.get_images_dict("/", self.docker_images_path)
+                )
+            )
        )
self.maxDiff = None
        expected = sorted(
            [
-                di.DockerImage("docker/test/base", "clickhouse/test-base"),
-                di.DockerImage("docker/docs/builder", "clickhouse/docs-builder"),
+                di.DockerImage("docker/test/base", "clickhouse/test-base", False),
+                di.DockerImage("docker/docs/builder", "clickhouse/docs-builder", True),
                di.DockerImage(
                    "docker/test/stateless",
                    "clickhouse/stateless-test",
+                    False,
                    "clickhouse/test-base",
                ),
                di.DockerImage(
                    "docker/test/integration/base",
                    "clickhouse/integration-test",
+                    False,
                    "clickhouse/test-base",
                ),
                di.DockerImage(
-                    "docker/test/fuzzer", "clickhouse/fuzzer", "clickhouse/test-base"
+                    "docker/test/fuzzer",
+                    "clickhouse/fuzzer",
+                    False,
+                    "clickhouse/test-base",
                ),
                di.DockerImage(
                    "docker/test/keeper-jepsen",
                    "clickhouse/keeper-jepsen-test",
+                    False,
                    "clickhouse/test-base",
                ),
                di.DockerImage(
                    "docker/docs/check",
                    "clickhouse/docs-check",
+                    False,
                    "clickhouse/docs-builder",
                ),
                di.DockerImage(
                    "docker/docs/release",
                    "clickhouse/docs-release",
+                    False,
                    "clickhouse/docs-builder",
                ),
                di.DockerImage(
                    "docker/test/stateful",
                    "clickhouse/stateful-test",
+                    False,
                    "clickhouse/stateless-test",
                ),
                di.DockerImage(
                    "docker/test/unit",
                    "clickhouse/unit-test",
+                    False,
                    "clickhouse/stateless-test",
                ),
                di.DockerImage(
                    "docker/test/stress",
                    "clickhouse/stress-test",
+                    False,
                    "clickhouse/stateful-test",
                ),
            ]
@@ -92,13 +107,15 @@ class TestDockerImageCheck(unittest.TestCase):
    @patch("builtins.open")
    @patch("subprocess.Popen")
-    def test_build_and_push_one_image(self, mock_popen, mock_open):
+    @patch("platform.machine")
+    def test_build_and_push_one_image(self, mock_machine, mock_popen, mock_open):
        mock_popen.return_value.__enter__.return_value.wait.return_value = 0
-        image = di.DockerImage("path", "name", gh_repo_path="")
+        image = di.DockerImage("path", "name", False, gh_repo_path="")

        result, _ = di.build_and_push_one_image(image, "version", True, True)
        mock_open.assert_called_once()
        mock_popen.assert_called_once()
+        mock_machine.assert_not_called()
        self.assertIn(
"docker buildx build --builder default --build-arg FROM_TAG=version "
"--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version --cache-from "
@@ -106,11 +123,15 @@ class TestDockerImageCheck(unittest.TestCase):
            mock_popen.call_args.args,
        )
        self.assertTrue(result)

-        mock_open.reset()
-        mock_popen.reset()
+        mock_open.reset_mock()
+        mock_popen.reset_mock()
+        mock_machine.reset_mock()
        mock_popen.return_value.__enter__.return_value.wait.return_value = 0
        result, _ = di.build_and_push_one_image(image, "version2", False, True)
        mock_open.assert_called_once()
        mock_popen.assert_called_once()
+        mock_machine.assert_not_called()
        self.assertIn(
"docker buildx build --builder default --build-arg FROM_TAG=version2 "
"--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version2 --cache-from "
@@ -119,8 +140,14 @@ class TestDockerImageCheck(unittest.TestCase):
            mock_popen.call_args.args,
        )
        self.assertTrue(result)

+        mock_open.reset_mock()
+        mock_popen.reset_mock()
+        mock_machine.reset_mock()
        mock_popen.return_value.__enter__.return_value.wait.return_value = 1
        result, _ = di.build_and_push_one_image(image, "version2", False, False)
+        mock_open.assert_called_once()
+        mock_popen.assert_called_once()
+        mock_machine.assert_not_called()
        self.assertIn(
"docker buildx build --builder default "
"--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version2 --cache-from "
@@ -129,13 +156,37 @@ class TestDockerImageCheck(unittest.TestCase):
            mock_popen.call_args.args,
        )
        self.assertFalse(result)

+        mock_open.reset_mock()
+        mock_popen.reset_mock()
+        mock_machine.reset_mock()
+
+        only_amd64_image = di.DockerImage("path", "name", True)
+        mock_popen.return_value.__enter__.return_value.wait.return_value = 0
+
+        result, _ = di.build_and_push_one_image(only_amd64_image, "version", True, True)
+        mock_open.assert_called_once()
+        mock_popen.assert_called_once()
+        mock_machine.assert_called_once()
+        self.assertIn(
+            "docker pull ubuntu:20.04; docker tag ubuntu:20.04 name:version; "
+            "docker push name:version",
+            mock_popen.call_args.args,
+        )
+        self.assertTrue(result)
+
+        result, _ = di.build_and_push_one_image(
+            only_amd64_image, "version", False, True
+        )
+        self.assertIn(
+            "docker pull ubuntu:20.04; docker tag ubuntu:20.04 name:version; ",
+            mock_popen.call_args.args,
+        )

    @patch("docker_images_check.build_and_push_one_image")
    def test_process_image_with_parents(self, mock_build):
        mock_build.side_effect = lambda w, x, y, z: (True, f"{w.repo}_{x}.log")
-        im1 = di.DockerImage("path1", "repo1")
-        im2 = di.DockerImage("path2", "repo2", im1)
-        im3 = di.DockerImage("path3", "repo3", im2)
-        im4 = di.DockerImage("path4", "repo4", im1)
+        im1 = di.DockerImage("path1", "repo1", False)
+        im2 = di.DockerImage("path2", "repo2", False, im1)
+        im3 = di.DockerImage("path3", "repo3", False, im2)
+        im4 = di.DockerImage("path4", "repo4", False, im1)
        # We use list to have determined order of image builgings
        images = [im4, im1, im3, im2, im1]
        results = [
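Note: the reset_mock() changes in this file fix a silent test bug: Mock has no real reset() method, so the old mock_open.reset() merely created and called an auto-generated child mock and cleared nothing, leaving call counts from the previous assertion in place. A runnable illustration:

from unittest.mock import Mock

m = Mock()
m("first call")
m.reset()            # auto-created attribute: records a call, resets nothing
print(m.call_count)  # 1 -- the earlier call is still counted
m.reset_mock()
print(m.call_count)  # 0 -- state actually cleared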

docker/images.json

@@ -150,6 +150,7 @@
    },
    "docker/docs/builder": {
        "name": "clickhouse/docs-builder",
+       "only_amd64": true,
        "dependent": [
            "docker/docs/check",
            "docker/docs/release"