Merge pull request #44848 from qoega/green-ci-2

Pre-pulling images for integration tests to detect broken infrastructure earlier
This commit is contained in:
Ilya Yatsishin 2023-01-05 17:29:39 +01:00 committed by GitHub
commit a28d6fb490
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 84 additions and 53 deletions

View File

@ -0,0 +1,5 @@
version: '2.3'
# Used to pre-pull images with docker-compose
services:
  clickhouse1:
    image: clickhouse/integration-test

View File

@ -5,8 +5,8 @@ services:
hostname: hdfs1 hostname: hdfs1
restart: always restart: always
expose: expose:
- ${HDFS_NAME_PORT} - ${HDFS_NAME_PORT:-50070}
- ${HDFS_DATA_PORT} - ${HDFS_DATA_PORT:-50075}
entrypoint: /etc/bootstrap.sh -d entrypoint: /etc/bootstrap.sh -d
volumes: volumes:
- type: ${HDFS_FS:-tmpfs} - type: ${HDFS_FS:-tmpfs}

View File

@ -15,7 +15,7 @@ services:
image: confluentinc/cp-kafka:5.2.0 image: confluentinc/cp-kafka:5.2.0
hostname: kafka1 hostname: kafka1
ports: ports:
- ${KAFKA_EXTERNAL_PORT}:${KAFKA_EXTERNAL_PORT} - ${KAFKA_EXTERNAL_PORT:-8081}:${KAFKA_EXTERNAL_PORT:-8081}
environment: environment:
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092 KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092
KAFKA_ADVERTISED_HOST_NAME: kafka1 KAFKA_ADVERTISED_HOST_NAME: kafka1
@ -35,7 +35,7 @@ services:
image: confluentinc/cp-schema-registry:5.2.0 image: confluentinc/cp-schema-registry:5.2.0
hostname: schema-registry hostname: schema-registry
ports: ports:
- ${SCHEMA_REGISTRY_EXTERNAL_PORT}:${SCHEMA_REGISTRY_INTERNAL_PORT} - ${SCHEMA_REGISTRY_EXTERNAL_PORT:-12313}:${SCHEMA_REGISTRY_INTERNAL_PORT:-12313}
environment: environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT

View File

@ -15,8 +15,8 @@ services:
source: ${KERBERIZED_HDFS_LOGS:-} source: ${KERBERIZED_HDFS_LOGS:-}
target: /var/log/hadoop-hdfs target: /var/log/hadoop-hdfs
expose: expose:
- ${KERBERIZED_HDFS_NAME_PORT} - ${KERBERIZED_HDFS_NAME_PORT:-50070}
- ${KERBERIZED_HDFS_DATA_PORT} - ${KERBERIZED_HDFS_DATA_PORT:-1006}
depends_on: depends_on:
- hdfskerberos - hdfskerberos
entrypoint: /etc/bootstrap.sh -d entrypoint: /etc/bootstrap.sh -d

View File

@ -23,7 +23,7 @@ services:
# restart: always # restart: always
hostname: kerberized_kafka1 hostname: kerberized_kafka1
ports: ports:
- ${KERBERIZED_KAFKA_EXTERNAL_PORT}:${KERBERIZED_KAFKA_EXTERNAL_PORT} - ${KERBERIZED_KAFKA_EXTERNAL_PORT:-19092}:${KERBERIZED_KAFKA_EXTERNAL_PORT:-19092}
environment: environment:
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://0.0.0.0:${KERBERIZED_KAFKA_EXTERNAL_PORT} KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://0.0.0.0:${KERBERIZED_KAFKA_EXTERNAL_PORT}
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:${KERBERIZED_KAFKA_EXTERNAL_PORT} KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:${KERBERIZED_KAFKA_EXTERNAL_PORT}
@ -41,7 +41,7 @@ services:
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true" KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true"
volumes: volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets - ${KERBERIZED_KAFKA_DIR:-}/secrets:/etc/kafka/secrets
- /dev/urandom:/dev/random - /dev/urandom:/dev/random
depends_on: depends_on:
- kafka_kerberized_zookeeper - kafka_kerberized_zookeeper

View File

@ -4,13 +4,13 @@ services:
image: getmeili/meilisearch:v0.27.0 image: getmeili/meilisearch:v0.27.0
restart: always restart: always
ports: ports:
- ${MEILI_EXTERNAL_PORT}:${MEILI_INTERNAL_PORT} - ${MEILI_EXTERNAL_PORT:-7700}:${MEILI_INTERNAL_PORT:-7700}
meili_secure: meili_secure:
image: getmeili/meilisearch:v0.27.0 image: getmeili/meilisearch:v0.27.0
restart: always restart: always
ports: ports:
- ${MEILI_SECURE_EXTERNAL_PORT}:${MEILI_SECURE_INTERNAL_PORT} - ${MEILI_SECURE_EXTERNAL_PORT:-7700}:${MEILI_SECURE_INTERNAL_PORT:-7700}
environment: environment:
MEILI_MASTER_KEY: "password" MEILI_MASTER_KEY: "password"

View File

@ -9,7 +9,7 @@ services:
- data1-1:/data1 - data1-1:/data1
- ${MINIO_CERTS_DIR:-}:/certs - ${MINIO_CERTS_DIR:-}:/certs
expose: expose:
- ${MINIO_PORT} - ${MINIO_PORT:-9001}
environment: environment:
MINIO_ACCESS_KEY: minio MINIO_ACCESS_KEY: minio
MINIO_SECRET_KEY: minio123 MINIO_SECRET_KEY: minio123

View File

@ -7,11 +7,11 @@ services:
MONGO_INITDB_ROOT_USERNAME: root MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: clickhouse MONGO_INITDB_ROOT_PASSWORD: clickhouse
ports: ports:
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT} - ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017}
command: --profile=2 --verbose command: --profile=2 --verbose
mongo2: mongo2:
image: mongo:5.0 image: mongo:5.0
restart: always restart: always
ports: ports:
- ${MONGO_NO_CRED_EXTERNAL_PORT}:${MONGO_NO_CRED_INTERNAL_PORT} - ${MONGO_NO_CRED_EXTERNAL_PORT:-27017}:${MONGO_NO_CRED_INTERNAL_PORT:-27017}

View File

@ -7,7 +7,7 @@ services:
MONGO_INITDB_ROOT_USERNAME: root MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: clickhouse MONGO_INITDB_ROOT_PASSWORD: clickhouse
volumes: volumes:
- ${MONGO_CONFIG_PATH}:/mongo/ - ${MONGO_CONFIG_PATH:-}:/mongo/
ports: ports:
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT} - ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017}
command: --config /mongo/mongo_secure.conf --profile=2 --verbose command: --config /mongo/mongo_secure.conf --profile=2 --verbose

View File

@ -8,7 +8,7 @@ services:
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST} MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
DATADIR: /mysql/ DATADIR: /mysql/
expose: expose:
- ${MYSQL_PORT} - ${MYSQL_PORT:-3306}
command: --server_id=100 command: --server_id=100
--log-bin='mysql-bin-1.log' --log-bin='mysql-bin-1.log'
--default-time-zone='+3:00' --default-time-zone='+3:00'

View File

@ -1,21 +0,0 @@
version: '2.3'
services:
mysql1:
image: mysql:5.7
restart: 'no'
environment:
MYSQL_ROOT_PASSWORD: clickhouse
ports:
- 3308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log'
--default-time-zone='+3:00'
--gtid-mode="ON"
--enforce-gtid-consistency
--log-error-verbosity=3
--log-error=/var/log/mysqld/error.log
--general-log=ON
--general-log-file=/var/log/mysqld/general.log
volumes:
- type: ${MYSQL_LOGS_FS:-tmpfs}
source: ${MYSQL_LOGS:-}
target: /var/log/mysqld/

View File

@ -8,7 +8,7 @@ services:
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST} MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
DATADIR: /mysql/ DATADIR: /mysql/
expose: expose:
- ${MYSQL8_PORT} - ${MYSQL8_PORT:-3306}
command: --server_id=100 --log-bin='mysql-bin-1.log' command: --server_id=100 --log-bin='mysql-bin-1.log'
--default_authentication_plugin='mysql_native_password' --default_authentication_plugin='mysql_native_password'
--default-time-zone='+3:00' --gtid-mode="ON" --default-time-zone='+3:00' --gtid-mode="ON"

View File

@ -8,7 +8,7 @@ services:
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST} MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/ DATADIR: /mysql/
expose: expose:
- ${MYSQL_CLUSTER_PORT} - ${MYSQL_CLUSTER_PORT:-3306}
command: --server_id=100 command: --server_id=100
--log-bin='mysql-bin-2.log' --log-bin='mysql-bin-2.log'
--default-time-zone='+3:00' --default-time-zone='+3:00'
@ -30,7 +30,7 @@ services:
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST} MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/ DATADIR: /mysql/
expose: expose:
- ${MYSQL_CLUSTER_PORT} - ${MYSQL_CLUSTER_PORT:-3306}
command: --server_id=100 command: --server_id=100
--log-bin='mysql-bin-3.log' --log-bin='mysql-bin-3.log'
--default-time-zone='+3:00' --default-time-zone='+3:00'
@ -52,7 +52,7 @@ services:
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST} MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/ DATADIR: /mysql/
expose: expose:
- ${MYSQL_CLUSTER_PORT} - ${MYSQL_CLUSTER_PORT:-3306}
command: --server_id=100 command: --server_id=100
--log-bin='mysql-bin-4.log' --log-bin='mysql-bin-4.log'
--default-time-zone='+3:00' --default-time-zone='+3:00'

View File

@ -3,9 +3,9 @@ services:
nats1: nats1:
image: nats image: nats
ports: ports:
- "${NATS_EXTERNAL_PORT}:${NATS_INTERNAL_PORT}" - "${NATS_EXTERNAL_PORT:-4444}:${NATS_INTERNAL_PORT:-4444}"
command: "-p 4444 --user click --pass house --tls --tlscert=/etc/certs/server-cert.pem --tlskey=/etc/certs/server-key.pem" command: "-p 4444 --user click --pass house --tls --tlscert=/etc/certs/server-cert.pem --tlskey=/etc/certs/server-key.pem"
volumes: volumes:
- type: bind - type: bind
source: "${NATS_CERT_DIR}/nats" source: "${NATS_CERT_DIR:-}/nats"
target: /etc/certs target: /etc/certs

View File

@ -5,7 +5,7 @@ services:
command: ["postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"] command: ["postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
restart: always restart: always
expose: expose:
- ${POSTGRES_PORT} - ${POSTGRES_PORT:-5432}
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"] test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s interval: 10s

View File

@ -9,7 +9,7 @@ services:
POSTGRES_PASSWORD: mysecretpassword POSTGRES_PASSWORD: mysecretpassword
PGDATA: /postgres/data PGDATA: /postgres/data
expose: expose:
- ${POSTGRES_PORT} - ${POSTGRES_PORT:-5432}
volumes: volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs} - type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES2_DIR:-} source: ${POSTGRES2_DIR:-}
@ -23,7 +23,7 @@ services:
POSTGRES_PASSWORD: mysecretpassword POSTGRES_PASSWORD: mysecretpassword
PGDATA: /postgres/data PGDATA: /postgres/data
expose: expose:
- ${POSTGRES_PORT} - ${POSTGRES_PORT:-5432}
volumes: volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs} - type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES3_DIR:-} source: ${POSTGRES3_DIR:-}
@ -37,7 +37,7 @@ services:
POSTGRES_PASSWORD: mysecretpassword POSTGRES_PASSWORD: mysecretpassword
PGDATA: /postgres/data PGDATA: /postgres/data
expose: expose:
- ${POSTGRES_PORT} - ${POSTGRES_PORT:-5432}
volumes: volumes:
- type: ${POSTGRES_LOGS_FS:-tmpfs} - type: ${POSTGRES_LOGS_FS:-tmpfs}
source: ${POSTGRES4_DIR:-} source: ${POSTGRES4_DIR:-}

View File

@ -5,7 +5,7 @@ services:
image: rabbitmq:3.8-management-alpine image: rabbitmq:3.8-management-alpine
hostname: rabbitmq1 hostname: rabbitmq1
expose: expose:
- ${RABBITMQ_PORT} - ${RABBITMQ_PORT:-5672}
environment: environment:
RABBITMQ_DEFAULT_USER: "root" RABBITMQ_DEFAULT_USER: "root"
RABBITMQ_DEFAULT_PASS: "clickhouse" RABBITMQ_DEFAULT_PASS: "clickhouse"

View File

@ -4,5 +4,5 @@ services:
image: redis image: redis
restart: always restart: always
ports: ports:
- ${REDIS_EXTERNAL_PORT}:${REDIS_INTERNAL_PORT} - ${REDIS_EXTERNAL_PORT:-6379}:${REDIS_INTERNAL_PORT:-6379}
command: redis-server --requirepass "clickhouse" --databases 32 command: redis-server --requirepass "clickhouse" --databases 32

View File

@ -247,6 +247,11 @@ if __name__ == "__main__":
retcode = process.wait() retcode = process.wait()
if retcode == 0: if retcode == 0:
logging.info("Run tests successfully") logging.info("Run tests successfully")
elif retcode == 13:
logging.warning(
"There were issues with infrastructure. Not writing status report to restart job."
)
sys.exit(1)
else: else:
logging.info("Some tests failed") logging.info("Some tests failed")

View File

@ -292,6 +292,34 @@ class ClickhouseIntegrationTestsRunner:
"clickhouse/postgresql-java-client", "clickhouse/postgresql-java-client",
] ]
def _pre_pull_images(self, repo_path):
    """Pre-pull the docker images used by integration tests.

    Runs ``./runner --pre-pull`` (under a 1h hard timeout) up to 5 times.
    Returns as soon as one attempt succeeds.  If all attempts fail, the
    process is terminated with exit code 13 so the calling CI script can
    treat it as broken infrastructure and restart the job instead of
    reporting test failures.

    :param repo_path: path to the checked-out repository root
    :raises SystemExit: with code 13 after 5 failed pull attempts
    """
    image_cmd = self._get_runner_image_cmd(repo_path)
    cmd = (
        "cd {repo_path}/tests/integration && "
        "timeout -s 9 1h ./runner {runner_opts} {image_cmd} --pre-pull --command '{command}' ".format(
            repo_path=repo_path,
            runner_opts=self._get_runner_opts(),
            image_cmd=image_cmd,
            command=r""" echo Pre Pull finished """,
        )
    )

    for attempt in range(5):
        logging.info("Pulling images before running tests. Attempt %s", attempt)
        try:
            subprocess.check_output(
                cmd,
                shell=True,
            )
            return
        except subprocess.CalledProcessError as err:
            # Pulls are flaky under registry load; log (lazily) and retry.
            logging.info("docker-compose pull failed: %s", err)

    logging.error("Pulling images failed for 5 attempts. Will fail the worker.")
    # We pass a specific retcode to ci/integration_test_check.py to skip
    # status reporting and restart the job.
    raise SystemExit(13)
def _can_run_with(self, path, opt): def _can_run_with(self, path, opt):
with open(path, "r") as script: with open(path, "r") as script:
for line in script: for line in script:
@ -806,6 +834,10 @@ class ClickhouseIntegrationTestsRunner:
) )
self._install_clickhouse(build_path) self._install_clickhouse(build_path)
logging.info("Pulling images")
runner._pre_pull_images(repo_path)
logging.info( logging.info(
"Dump iptables before run %s", "Dump iptables before run %s",
subprocess.check_output("sudo iptables -nvL", shell=True), subprocess.check_output("sudo iptables -nvL", shell=True),

View File

@ -246,6 +246,10 @@ if __name__ == "__main__":
"--no-random", action="store", dest="no_random", help="Disable tests order randomization" "--no-random", action="store", dest="no_random", help="Disable tests order randomization"
) )
parser.add_argument(
"--pre-pull", action="store_true", default=False, dest="pre_pull", help="Pull images for docker_compose before all other actions"
)
parser.add_argument( parser.add_argument(
"-t", "-t",
"--tests_list", "--tests_list",
@ -383,7 +387,7 @@ if __name__ == "__main__":
if args.keyword_expression: if args.keyword_expression:
args.pytest_args += ["-k", args.keyword_expression] args.pytest_args += ["-k", args.keyword_expression]
cmd = "docker run {net} {tty} --rm --name {name} --privileged \ cmd_base = "docker run {net} {tty} --rm --name {name} --privileged \
--volume={odbc_bridge_bin}:/clickhouse-odbc-bridge --volume={bin}:/clickhouse \ --volume={odbc_bridge_bin}:/clickhouse-odbc-bridge --volume={bin}:/clickhouse \
--volume={library_bridge_bin}:/clickhouse-library-bridge \ --volume={library_bridge_bin}:/clickhouse-library-bridge \
--volume={base_cfg}:/clickhouse-config --volume={cases_dir}:/ClickHouse/tests/integration \ --volume={base_cfg}:/clickhouse-config --volume={cases_dir}:/ClickHouse/tests/integration \
@ -392,7 +396,7 @@ if __name__ == "__main__":
{dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \ {dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \
-e XTABLES_LOCKFILE=/run/host/xtables.lock \ -e XTABLES_LOCKFILE=/run/host/xtables.lock \
-e PYTHONUNBUFFERED=1 \ -e PYTHONUNBUFFERED=1 \
{env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} {rand} -vvv' {img} {command}".format( {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} {rand} -vvv' {img}".format(
net=net, net=net,
tty=tty, tty=tty,
bin=args.binary, bin=args.binary,
@ -410,9 +414,11 @@ if __name__ == "__main__":
dockerd_internal_volume=dockerd_internal_volume, dockerd_internal_volume=dockerd_internal_volume,
img=DIND_INTEGRATION_TESTS_IMAGE_NAME + ":" + args.docker_image_version, img=DIND_INTEGRATION_TESTS_IMAGE_NAME + ":" + args.docker_image_version,
name=CONTAINER_NAME, name=CONTAINER_NAME,
command=args.command,
) )
cmd = cmd_base + " " + args.command
cmd_pre_pull = cmd_base + " find /compose -name docker_compose_*.yml -exec docker-compose -f '{}' pull \;"
containers = subprocess.check_output( containers = subprocess.check_output(
f"docker ps --all --quiet --filter name={CONTAINER_NAME} --format={{{{.ID}}}}", f"docker ps --all --quiet --filter name={CONTAINER_NAME} --format={{{{.ID}}}}",
shell=True, shell=True,
@ -423,5 +429,9 @@ if __name__ == "__main__":
subprocess.check_call(f"docker kill {' '.join(containers)}", shell=True) subprocess.check_call(f"docker kill {' '.join(containers)}", shell=True)
print(f"Containers {containers} killed") print(f"Containers {containers} killed")
if args.pre_pull:
print(("Running pre pull as: '" + cmd_pre_pull + "'."))
subprocess.check_call(cmd_pre_pull, shell=True)
print(("Running pytest container as: '" + cmd + "'.")) print(("Running pytest container as: '" + cmd + "'."))
subprocess.check_call(cmd, shell=True) subprocess.check_call(cmd, shell=True)