diff --git a/docker/reqgenerator.py b/docker/reqgenerator.py
index 6c1d89ac0ac..58417b80fa8 100644
--- a/docker/reqgenerator.py
+++ b/docker/reqgenerator.py
@@ -7,14 +7,24 @@ import os
 import sys
 
 
-def build_docker_deps(image_name, imagedir):
-    cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
+def build_docker_deps(image_name: str, imagedir: str) -> None:
+    print("Fetch the newest manifest for", image_name)
+    pip_cmd = (
+        "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze "
+        "--warn silence --exclude pipdeptree"
+    )
+    # /==/!d - remove dependencies without pin
+    # ubuntu - ignore system packages
+    # \s - remove spaces
+    sed = r"sed '/==/!d; /==.*+ubuntu/d; s/\s//g'"
+    cmd = rf"""docker run --rm --entrypoint "/bin/bash" {image_name} -c "{pip_cmd} | {sed} | sort -u" > {imagedir}/requirements.txt"""
+    print("Running the command:", cmd)
     subprocess.check_call(cmd, shell=True)
 
 
 def check_docker_file_install_with_pip(filepath):
     image_name = None
-    with open(filepath, "r") as f:
+    with open(filepath, "r", encoding="utf-8") as f:
         for line in f:
             if "docker build" in line:
                 arr = line.split(" ")
@@ -25,7 +35,7 @@ def check_docker_file_install_with_pip(filepath):
     return image_name, False
 
 
-def process_affected_images(images_dir):
+def process_affected_images(images_dir: str) -> None:
     for root, _dirs, files in os.walk(images_dir):
         for f in files:
             if f == "Dockerfile":
diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile
index d62009f1be3..403409072f4 100644
--- a/docker/test/integration/runner/Dockerfile
+++ b/docker/test/integration/runner/Dockerfile
@@ -48,7 +48,7 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
     && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
     && apt-get update \
     && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
-        docker-ce='5:23.*' \
+        docker-ce='5:23.*' docker-compose-plugin='2.29.*' \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
diff --git a/docker/test/integration/runner/requirements.txt b/docker/test/integration/runner/requirements.txt
index 2c9df73ccca..4802623abd6 100644
--- a/docker/test/integration/runner/requirements.txt
+++ b/docker/test/integration/runner/requirements.txt
@@ -1,15 +1,13 @@
 PyHDFS==0.3.1
-PyJWT==2.3.0
-PyMySQL==1.1.0
+PyJWT==2.4.0
+PyMySQL==1.1.1
 PyNaCl==1.5.0
-PyYAML==5.3.1
 SecretStorage==3.3.1
 argon2-cffi-bindings==21.2.0
 argon2-cffi==23.1.0
 async-timeout==4.0.3
 asyncio==3.4.3
-attrs==23.2.0
-avro==1.10.2
+avro==1.11.3
 azure-core==1.30.1
 azure-storage-blob==12.19.0
 bcrypt==4.1.3
@@ -24,18 +22,13 @@ cffi==1.16.0
 charset-normalizer==3.3.2
 click==8.1.7
 confluent-kafka==2.3.0
-cryptography==3.4.8
+cryptography==42.0.0
 dbus-python==1.2.18
-decorator==5.1.1
 delta-spark==2.3.0
+deltalake==0.16.0
 dict2xml==1.7.4
 dicttoxml==1.7.16
-distro-info==1.1+ubuntu0.2
-distro==1.7.0
-docker-compose==1.29.2
 docker==6.1.3
-dockerpty==0.4.1
-docopt==0.6.2
 exceptiongroup==1.2.1
 execnet==2.1.1
 geomet==0.2.1.post1
@@ -49,7 +42,6 @@ iniconfig==2.0.0
 isodate==0.6.1
 jeepney==0.7.1
 jmespath==1.0.1
-jsonschema==3.2.0
 jwcrypto==1.5.6
 kafka-python==2.0.2
 kazoo==2.9.0
@@ -63,23 +55,22 @@ lz4==4.3.3
 minio==7.2.3
 more-itertools==8.10.0
 nats-py==2.6.0
+numpy==2.1.0
 oauthlib==3.2.0
 packaging==24.0
 paramiko==3.4.0
 pika==1.2.0
 pip==24.1.1
-pipdeptree==2.23.0
 pluggy==1.5.0
 protobuf==4.25.2
 psycopg2-binary==2.9.6
 py4j==0.10.9.5
-py==1.11.0
+pyarrow-hotfix==0.6
 pyarrow==17.0.0
 pycparser==2.22
 pycryptodome==3.20.0
 pymongo==3.11.0
 pyparsing==2.4.7
-pyrsistent==0.20.0
 pyspark==3.3.2
 pyspnego==0.10.2
 pytest-order==1.0.0
@@ -89,28 +80,22 @@ pytest-reportlog==0.4.0
 pytest-timeout==2.2.0
 pytest-xdist==3.5.0
 pytest==7.4.4
-python-apt==2.4.0+ubuntu3
 python-dateutil==2.9.0.post0
-python-dotenv==0.21.1
 pytz==2023.3.post1
 redis==5.0.1
 requests-kerberos==0.14.0
 requests==2.31.0
-retry==0.9.2
 s3transfer==0.10.1
-setuptools==59.6.0
+setuptools==70.0.0
 simplejson==3.19.2
 six==1.16.0
 soupsieve==2.5
-texttable==1.7.0
 tomli==2.0.1
 typing_extensions==4.11.0
 tzlocal==2.1
 unattended-upgrades==0.1
 urllib3==2.0.7
 wadllib==1.3.6
-websocket-client==0.59.0
-wheel==0.37.1
+websocket-client==1.8.0
+wheel==0.38.1
 zipp==1.0.0
-deltalake==0.16.0
-
diff --git a/tests/integration/README.md b/tests/integration/README.md
index a149a4d60ef..b246eeb0674 100644
--- a/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -14,7 +14,7 @@ Don't use Docker from your system repository.
 
 * [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev libkrb5-dev python3-dev`
 * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
-* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install:
+* [docker compose](https://docs.docker.com/compose/) and additional python libraries. To install:
 
 ```bash
 sudo -H pip install \
@@ -24,7 +24,6 @@ sudo -H pip install \
     confluent-kafka \
     dicttoxml \
     docker \
-    docker-compose \
     grpcio \
     grpcio-tools \
     kafka-python \
@@ -48,7 +47,7 @@ sudo -H pip install \
     nats-py
 ```
 
-(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-protobuf python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`
+(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose-v2 python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-protobuf python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`
 
 Some tests have other dependencies, e.g. spark. See docker/test/integration/runner/Dockerfile for how to install those.
 See docker/test/integration/runner/dockerd-entrypoint.sh for environment variables that need to be set (e.g. JAVA_PATH).
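Note on the regenerated requirements files above: `build_docker_deps` now freezes dependencies by running `pipdeptree` inside the image and filtering its output through `sed '/==/!d; /==.*+ubuntu/d; s/\s//g' | sort -u`. A minimal Python sketch of that filter stage, assuming `pipdeptree --freeze` output on stdin (the helper name `filter_frozen_deps` is hypothetical, not part of the patch):

```python
import re
import sys


def filter_frozen_deps(raw: str) -> str:
    # Mirrors the sed stage in build_docker_deps:
    #   /==/!d          keep only pinned "name==version" lines
    #   /==.*+ubuntu/d  drop distro builds such as distro-info==1.1+ubuntu0.2
    #   s/\s//g         strip whitespace; `sort -u` deduplicates and sorts
    kept = set()
    for line in raw.splitlines():
        line = re.sub(r"\s", "", line)
        if "==" not in line or re.search(r"==.*\+ubuntu", line):
            continue
        kept.add(line)
    return "\n".join(sorted(kept))


if __name__ == "__main__":
    print(filter_frozen_deps(sys.stdin.read()))
```

This is why unpinned lines (and `pipdeptree` itself, excluded explicitly) and `+ubuntu`-suffixed system packages such as `python-apt==2.4.0+ubuntu3` disappear from requirements.txt in this patch.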
diff --git a/tests/integration/compose/docker_compose_azurite.yml b/tests/integration/compose/docker_compose_azurite.yml index 7c379a971ea..8ae9f7b85ff 100644 --- a/tests/integration/compose/docker_compose_azurite.yml +++ b/tests/integration/compose/docker_compose_azurite.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: azurite1: image: mcr.microsoft.com/azure-storage/azurite diff --git a/tests/integration/compose/docker_compose_cassandra.yml b/tests/integration/compose/docker_compose_cassandra.yml index b6190a11d73..98bc5a22fb3 100644 --- a/tests/integration/compose/docker_compose_cassandra.yml +++ b/tests/integration/compose/docker_compose_cassandra.yml @@ -1,4 +1,3 @@ -version: '2.3' services: cassandra1: image: cassandra:4.0 diff --git a/tests/integration/compose/docker_compose_clickhouse.yml b/tests/integration/compose/docker_compose_clickhouse.yml index fdd124ede91..8b73db02903 100644 --- a/tests/integration/compose/docker_compose_clickhouse.yml +++ b/tests/integration/compose/docker_compose_clickhouse.yml @@ -1,5 +1,4 @@ -version: '2.3' -# Used to pre-pull images with docker-compose +# Used to pre-pull images with docker compose services: clickhouse1: image: clickhouse/integration-test diff --git a/tests/integration/compose/docker_compose_coredns.yml b/tests/integration/compose/docker_compose_coredns.yml index e4736e04846..90b714f5c2c 100644 --- a/tests/integration/compose/docker_compose_coredns.yml +++ b/tests/integration/compose/docker_compose_coredns.yml @@ -1,5 +1,3 @@ -version: "2.3" - services: coredns: image: coredns/coredns:1.9.3 # :latest broke this test diff --git a/tests/integration/compose/docker_compose_dotnet_client.yml b/tests/integration/compose/docker_compose_dotnet_client.yml index b63dac51522..b44a47da5b1 100644 --- a/tests/integration/compose/docker_compose_dotnet_client.yml +++ b/tests/integration/compose/docker_compose_dotnet_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: dotnet1: image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_hdfs.yml b/tests/integration/compose/docker_compose_hdfs.yml index 40a10df01f7..1635219e333 100644 --- a/tests/integration/compose/docker_compose_hdfs.yml +++ b/tests/integration/compose/docker_compose_hdfs.yml @@ -1,4 +1,3 @@ -version: '2.3' services: hdfs1: image: prasanthj/docker-hadoop:2.6.0 diff --git a/tests/integration/compose/docker_compose_hive.yml b/tests/integration/compose/docker_compose_hive.yml index 459e8481d0b..16253e50f4c 100644 --- a/tests/integration/compose/docker_compose_hive.yml +++ b/tests/integration/compose/docker_compose_hive.yml @@ -1,4 +1,3 @@ -version: '2.3' services: hdfs1: image: lgboustc/hive_test:v2.0 diff --git a/tests/integration/compose/docker_compose_jdbc_bridge.yml b/tests/integration/compose/docker_compose_jdbc_bridge.yml index b3686adc21c..26f575923a2 100644 --- a/tests/integration/compose/docker_compose_jdbc_bridge.yml +++ b/tests/integration/compose/docker_compose_jdbc_bridge.yml @@ -1,4 +1,3 @@ -version: '2.3' services: bridge1: image: clickhouse/jdbc-bridge @@ -24,4 +23,4 @@ services: volumes: - type: ${JDBC_BRIDGE_FS:-tmpfs} source: ${JDBC_BRIDGE_LOGS:-} - target: /app/logs \ No newline at end of file + target: /app/logs diff --git a/tests/integration/compose/docker_compose_kafka.yml b/tests/integration/compose/docker_compose_kafka.yml index 4ae3de3cbc7..e4ee9fbc0b9 100644 --- a/tests/integration/compose/docker_compose_kafka.yml +++ b/tests/integration/compose/docker_compose_kafka.yml @@ -1,5 +1,3 @@ -version: '2.3' 
- services: kafka_zookeeper: image: zookeeper:3.4.9 diff --git a/tests/integration/compose/docker_compose_keeper.yml b/tests/integration/compose/docker_compose_keeper.yml index 91010c4aa83..4b2fe1e637a 100644 --- a/tests/integration/compose/docker_compose_keeper.yml +++ b/tests/integration/compose/docker_compose_keeper.yml @@ -1,4 +1,3 @@ -version: '2.3' services: zoo1: image: ${image:-clickhouse/integration-test} diff --git a/tests/integration/compose/docker_compose_kerberized_hdfs.yml b/tests/integration/compose/docker_compose_kerberized_hdfs.yml index e955a14eb3d..4354f4aba01 100644 --- a/tests/integration/compose/docker_compose_kerberized_hdfs.yml +++ b/tests/integration/compose/docker_compose_kerberized_hdfs.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: kerberizedhdfs1: cap_add: diff --git a/tests/integration/compose/docker_compose_kerberized_kafka.yml b/tests/integration/compose/docker_compose_kerberized_kafka.yml index 49d4c1db90f..90bcf11a50f 100644 --- a/tests/integration/compose/docker_compose_kerberized_kafka.yml +++ b/tests/integration/compose/docker_compose_kerberized_kafka.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: kafka_kerberized_zookeeper: image: confluentinc/cp-zookeeper:5.2.0 diff --git a/tests/integration/compose/docker_compose_kerberos_kdc.yml b/tests/integration/compose/docker_compose_kerberos_kdc.yml index 3ce9a6df1fb..8cdac4118b9 100644 --- a/tests/integration/compose/docker_compose_kerberos_kdc.yml +++ b/tests/integration/compose/docker_compose_kerberos_kdc.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: kerberoskdc: image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_ldap.yml b/tests/integration/compose/docker_compose_ldap.yml index f49e00400a2..440a271272b 100644 --- a/tests/integration/compose/docker_compose_ldap.yml +++ b/tests/integration/compose/docker_compose_ldap.yml @@ -1,4 +1,3 @@ -version: '2.3' services: openldap: image: bitnami/openldap:2.6.6 diff --git a/tests/integration/compose/docker_compose_minio.yml b/tests/integration/compose/docker_compose_minio.yml index 40098d05b04..44a07e97843 100644 --- a/tests/integration/compose/docker_compose_minio.yml +++ b/tests/integration/compose/docker_compose_minio.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: minio1: image: minio/minio:RELEASE.2024-07-31T05-46-26Z diff --git a/tests/integration/compose/docker_compose_mongo.yml b/tests/integration/compose/docker_compose_mongo.yml index 9a6eae6ca8c..ac56525d904 100644 --- a/tests/integration/compose/docker_compose_mongo.yml +++ b/tests/integration/compose/docker_compose_mongo.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mongo1: image: mongo:5.0 diff --git a/tests/integration/compose/docker_compose_mysql.yml b/tests/integration/compose/docker_compose_mysql.yml index 69f7f02fb4d..f45410bde78 100644 --- a/tests/integration/compose/docker_compose_mysql.yml +++ b/tests/integration/compose/docker_compose_mysql.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mysql57: image: mysql:5.7 diff --git a/tests/integration/compose/docker_compose_mysql_8_0.yml b/tests/integration/compose/docker_compose_mysql_8_0.yml index 1e0ded6c6bd..e1ff1633bc7 100644 --- a/tests/integration/compose/docker_compose_mysql_8_0.yml +++ b/tests/integration/compose/docker_compose_mysql_8_0.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mysql80: image: mysql:8.0 diff --git a/tests/integration/compose/docker_compose_mysql_client.yml b/tests/integration/compose/docker_compose_mysql_client.yml index ee590118d4f..74262d61d9a 100644 
--- a/tests/integration/compose/docker_compose_mysql_client.yml +++ b/tests/integration/compose/docker_compose_mysql_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mysql_client: image: mysql:8.0 diff --git a/tests/integration/compose/docker_compose_mysql_cluster.yml b/tests/integration/compose/docker_compose_mysql_cluster.yml index 3f7d21b733f..e065cea9d5d 100644 --- a/tests/integration/compose/docker_compose_mysql_cluster.yml +++ b/tests/integration/compose/docker_compose_mysql_cluster.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mysql2: image: mysql:8.0 diff --git a/tests/integration/compose/docker_compose_mysql_golang_client.yml b/tests/integration/compose/docker_compose_mysql_golang_client.yml index 56cc0410574..5268978b0fe 100644 --- a/tests/integration/compose/docker_compose_mysql_golang_client.yml +++ b/tests/integration/compose/docker_compose_mysql_golang_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: golang1: image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_mysql_java_client.yml b/tests/integration/compose/docker_compose_mysql_java_client.yml index 529974dd4bf..20c95a7d51e 100644 --- a/tests/integration/compose/docker_compose_mysql_java_client.yml +++ b/tests/integration/compose/docker_compose_mysql_java_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: java1: image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_mysql_js_client.yml b/tests/integration/compose/docker_compose_mysql_js_client.yml index 90939449c5f..be4edaead4a 100644 --- a/tests/integration/compose/docker_compose_mysql_js_client.yml +++ b/tests/integration/compose/docker_compose_mysql_js_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: mysqljs1: image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_mysql_php_client.yml b/tests/integration/compose/docker_compose_mysql_php_client.yml index 408b8ff089a..0b00dedf152 100644 --- a/tests/integration/compose/docker_compose_mysql_php_client.yml +++ b/tests/integration/compose/docker_compose_mysql_php_client.yml @@ -1,4 +1,3 @@ -version: '2.3' services: php1: image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_nats.yml b/tests/integration/compose/docker_compose_nats.yml index b17ac62fa93..059b538218b 100644 --- a/tests/integration/compose/docker_compose_nats.yml +++ b/tests/integration/compose/docker_compose_nats.yml @@ -1,4 +1,3 @@ -version: '2.3' services: nats1: image: nats diff --git a/tests/integration/compose/docker_compose_net.yml b/tests/integration/compose/docker_compose_net.yml index eff43681f2e..7d0335aa1e8 100644 --- a/tests/integration/compose/docker_compose_net.yml +++ b/tests/integration/compose/docker_compose_net.yml @@ -1,4 +1,3 @@ -version: '2.3' networks: default: driver: bridge diff --git a/tests/integration/compose/docker_compose_nginx.yml b/tests/integration/compose/docker_compose_nginx.yml index 38d2a6d84c8..2767a3c6f81 100644 --- a/tests/integration/compose/docker_compose_nginx.yml +++ b/tests/integration/compose/docker_compose_nginx.yml @@ -1,4 +1,3 @@ -version: '2.3' services: # nginx server to host static files. # Accepts only PUT data by test.com/path and GET already existing data on test.com/path. 
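For context on the compose-file churn here and below: Compose v2 treats the top-level `version:` attribute as obsolete and warns about it on every invocation, so each file simply drops it. A hypothetical one-off checker to confirm no file was missed, assuming PyYAML is available (the function name is illustrative, not part of the patch):

```python
from pathlib import Path

import yaml  # assumption: PyYAML is installed in the environment


def compose_files_with_version_key(compose_dir: str) -> list:
    # Reports compose files that still carry the obsolete top-level
    # `version:` key that this patch removes.
    offenders = []
    for path in sorted(Path(compose_dir).glob("docker_compose_*.yml")):
        data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
        if "version" in data:
            offenders.append(path.name)
    return offenders


if __name__ == "__main__":
    print(compose_files_with_version_key("tests/integration/compose"))
```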
diff --git a/tests/integration/compose/docker_compose_postgres.yml b/tests/integration/compose/docker_compose_postgres.yml index c55cd8a31cf..cd1debce771 100644 --- a/tests/integration/compose/docker_compose_postgres.yml +++ b/tests/integration/compose/docker_compose_postgres.yml @@ -1,4 +1,3 @@ -version: '2.3' services: postgres1: image: postgres diff --git a/tests/integration/compose/docker_compose_postgres_cluster.yml b/tests/integration/compose/docker_compose_postgres_cluster.yml index 5af13ca3e0f..0458dee6320 100644 --- a/tests/integration/compose/docker_compose_postgres_cluster.yml +++ b/tests/integration/compose/docker_compose_postgres_cluster.yml @@ -1,4 +1,3 @@ -version: '2.3' services: postgres2: image: postgres @@ -41,4 +40,4 @@ services: volumes: - type: ${POSTGRES_LOGS_FS:-tmpfs} source: ${POSTGRES4_DIR:-} - target: /postgres/ \ No newline at end of file + target: /postgres/ diff --git a/tests/integration/compose/docker_compose_postgresql.yml b/tests/integration/compose/docker_compose_postgresql.yml index 90764188ddd..79c10bf175e 100644 --- a/tests/integration/compose/docker_compose_postgresql.yml +++ b/tests/integration/compose/docker_compose_postgresql.yml @@ -1,4 +1,3 @@ -version: '2.2' services: psql: image: postgres:12.2-alpine diff --git a/tests/integration/compose/docker_compose_postgresql_java_client.yml b/tests/integration/compose/docker_compose_postgresql_java_client.yml index 904bfffdfd5..133dccd569e 100644 --- a/tests/integration/compose/docker_compose_postgresql_java_client.yml +++ b/tests/integration/compose/docker_compose_postgresql_java_client.yml @@ -1,4 +1,3 @@ -version: '2.2' services: java: image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest} diff --git a/tests/integration/compose/docker_compose_prometheus.yml b/tests/integration/compose/docker_compose_prometheus.yml index 0a1db2138ba..24710b971a8 100644 --- a/tests/integration/compose/docker_compose_prometheus.yml +++ b/tests/integration/compose/docker_compose_prometheus.yml @@ -1,4 +1,3 @@ -version: '2.3' services: prometheus_writer: image: prom/prometheus:v2.50.1 diff --git a/tests/integration/compose/docker_compose_rabbitmq.yml b/tests/integration/compose/docker_compose_rabbitmq.yml index 94c7f0111c4..4aae2427596 100644 --- a/tests/integration/compose/docker_compose_rabbitmq.yml +++ b/tests/integration/compose/docker_compose_rabbitmq.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: rabbitmq1: image: rabbitmq:3.12.6-alpine diff --git a/tests/integration/compose/docker_compose_redis.yml b/tests/integration/compose/docker_compose_redis.yml index e2aa836ae46..21f303669ed 100644 --- a/tests/integration/compose/docker_compose_redis.yml +++ b/tests/integration/compose/docker_compose_redis.yml @@ -1,4 +1,3 @@ -version: '2.3' services: redis1: image: redis diff --git a/tests/integration/compose/docker_compose_zookeeper.yml b/tests/integration/compose/docker_compose_zookeeper.yml index 1601d217a25..708d2379360 100644 --- a/tests/integration/compose/docker_compose_zookeeper.yml +++ b/tests/integration/compose/docker_compose_zookeeper.yml @@ -1,4 +1,3 @@ -version: '2.3' services: zoo1: image: zookeeper:3.6.2 diff --git a/tests/integration/compose/docker_compose_zookeeper_secure.yml b/tests/integration/compose/docker_compose_zookeeper_secure.yml index b5dbae423b2..40b22717942 100644 --- a/tests/integration/compose/docker_compose_zookeeper_secure.yml +++ b/tests/integration/compose/docker_compose_zookeeper_secure.yml @@ -1,4 +1,3 @@ -version: '2.3' services: zoo1: image: 
zookeeper:3.6.2 diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index aa235118aed..9f80c58b879 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -89,7 +89,7 @@ def cleanup_environment(): nothrow=True, ) logging.debug("Unstopped containers killed") - r = run_and_check(["docker-compose", "ps", "--services", "--all"]) + r = run_and_check(["docker", "compose", "ps", "--services", "--all"]) logging.debug("Docker ps before start:%s", r.stdout) else: logging.debug("No running containers") diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 215718463e8..0f4697eb3f6 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1,60 +1,62 @@ import base64 import errno -from functools import cache import http.client import logging import os import platform -import stat -import os.path as p import pprint import pwd import re +import shlex import shutil import socket +import stat import subprocess import time import traceback import urllib.parse -import shlex -import urllib3 +from functools import cache +from os import path as p +from pathlib import Path +from typing import List, Sequence, Tuple, Union + import requests +import urllib3 try: # Please, add modules that required for specific tests only here. # So contributors will be able to run most tests locally # without installing tons of unneeded packages that may be not so easy to install. import asyncio - from cassandra.policies import RoundRobinPolicy + import ssl + import cassandra.cluster + import nats import psycopg2 - from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT import pymongo import pymysql - import nats - import ssl + from cassandra.policies import RoundRobinPolicy from confluent_kafka.avro.cached_schema_registry_client import ( CachedSchemaRegistryClient, ) + from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT + from .hdfs_api import HDFSApi # imports requests_kerberos except Exception as e: logging.warning(f"Cannot import some modules, some tests may not work: {e}") +import docker from dict2xml import dict2xml +from helpers import pytest_xdist_logging_to_separate_files +from helpers.client import QueryRuntimeException +from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry from kazoo.client import KazooClient from kazoo.exceptions import KazooException from minio import Minio -from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry -from helpers import pytest_xdist_logging_to_separate_files -from helpers.client import QueryRuntimeException - -import docker - from .client import Client -from .retry_decorator import retry - from .config_cluster import * +from .retry_decorator import retry HELPERS_DIR = p.dirname(__file__) CLICKHOUSE_ROOT_DIR = p.join(p.dirname(__file__), "../../..") @@ -79,7 +81,7 @@ CLICKHOUSE_CI_MIN_TESTED_VERSION = "23.3" # to create docker-compose env file def _create_env_file(path, variables): - logging.debug(f"Env {variables} stored in {path}") + logging.debug("Env %s stored in %s", variables, path) with open(path, "w") as f: for var, value in list(variables.items()): f.write("=".join([var, value]) + "\n") @@ -87,7 +89,7 @@ def _create_env_file(path, variables): def run_and_check( - args, + args: Union[Sequence[str], str], env=None, shell=False, stdout=subprocess.PIPE, @@ -95,7 +97,16 @@ def run_and_check( timeout=300, nothrow=False, detach=False, -): +) -> str: + if shell: + if isinstance(args, str): + shell_args = args + 
else: + shell_args = next(a for a in args) + else: + shell_args = " ".join(args) + + logging.debug("Command:[%s]", shell_args) if detach: subprocess.Popen( args, @@ -104,26 +115,31 @@ def run_and_check( env=env, shell=shell, ) - return + return "" - logging.debug(f"Command:{args}") res = subprocess.run( - args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout + args, + stdout=stdout, + stderr=stderr, + env=env, + shell=shell, + timeout=timeout, + check=False, ) out = res.stdout.decode("utf-8", "ignore") err = res.stderr.decode("utf-8", "ignore") # check_call(...) from subprocess does not print stderr, so we do it manually for outline in out.splitlines(): - logging.debug(f"Stdout:{outline}") + logging.debug("Stdout:%s", outline) for errline in err.splitlines(): - logging.debug(f"Stderr:{errline}") + logging.debug("Stderr:%s", errline) if res.returncode != 0: - logging.debug(f"Exitcode:{res.returncode}") + logging.debug("Exitcode:%s", res.returncode) if env: - logging.debug(f"Env:{env}") + logging.debug("Env:%s", env) if not nothrow: raise Exception( - f"Command {args} return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}" + f"Command [{shell_args}] return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}" ) return out @@ -181,6 +197,11 @@ class PortPoolManager: self.used_ports.clear() +def docker_exec(*args: str) -> Tuple[str, ...]: + "Function to ease the `docker exec -i...`" + return ("docker", "exec", "-i", *args) + + def retry_exception(num, delay, func, exception=Exception, *args, **kwargs): """ Retry if `func()` throws, `num` times. @@ -238,10 +259,7 @@ def get_docker_compose_path(): def check_kafka_is_available(kafka_id, kafka_port): p = subprocess.Popen( - ( - "docker", - "exec", - "-i", + docker_exec( kafka_id, "/usr/bin/kafka-broker-api-versions", "--bootstrap-server", @@ -256,14 +274,7 @@ def check_kafka_is_available(kafka_id, kafka_port): def check_kerberos_kdc_is_available(kerberos_kdc_id): p = subprocess.Popen( - ( - "docker", - "exec", - "-i", - kerberos_kdc_id, - "/etc/rc.d/init.d/krb5kdc", - "status", - ), + docker_exec(kerberos_kdc_id, "/etc/rc.d/init.d/krb5kdc", "status"), stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) @@ -273,7 +284,7 @@ def check_kerberos_kdc_is_available(kerberos_kdc_id): def check_postgresql_java_client_is_available(postgresql_java_client_id): p = subprocess.Popen( - ("docker", "exec", "-i", postgresql_java_client_id, "java", "-version"), + docker_exec(postgresql_java_client_id, "java", "-version"), stdout=subprocess.PIPE, ) p.communicate() @@ -282,12 +293,9 @@ def check_postgresql_java_client_is_available(postgresql_java_client_id): def check_rabbitmq_is_available(rabbitmq_id, cookie): p = subprocess.Popen( - ( - "docker", - "exec", + docker_exec( "-e", f"RABBITMQ_ERLANG_COOKIE={cookie}", - "-i", rabbitmq_id, "rabbitmqctl", "await_startup", @@ -300,12 +308,9 @@ def check_rabbitmq_is_available(rabbitmq_id, cookie): def rabbitmq_debuginfo(rabbitmq_id, cookie): p = subprocess.Popen( - ( - "docker", - "exec", + docker_exec( "-e", f"RABBITMQ_ERLANG_COOKIE={cookie}", - "-i", rabbitmq_id, "rabbitmq-diagnostics", "status", @@ -315,12 +320,9 @@ def rabbitmq_debuginfo(rabbitmq_id, cookie): p.communicate() p = subprocess.Popen( - ( - "docker", - "exec", + docker_exec( "-e", f"RABBITMQ_ERLANG_COOKIE={cookie}", - "-i", rabbitmq_id, "rabbitmq-diagnostics", "listeners", @@ -330,12 +332,9 @@ def rabbitmq_debuginfo(rabbitmq_id, cookie): p.communicate() p = subprocess.Popen( - ( - "docker", - "exec", + docker_exec( "-e", 
f"RABBITMQ_ERLANG_COOKIE={cookie}", - "-i", rabbitmq_id, "rabbitmq-diagnostics", "environment", @@ -370,12 +369,9 @@ async def nats_connect_ssl(nats_port, user, password, ssl_ctx=None): def enable_consistent_hash_plugin(rabbitmq_id, cookie): p = subprocess.Popen( - ( - "docker", - "exec", + docker_exec( "-e", f"RABBITMQ_ERLANG_COOKIE={cookie}", - "-i", rabbitmq_id, "rabbitmq-plugins", "enable", @@ -393,10 +389,10 @@ def get_instances_dir(name): run_id = os.environ.get("INTEGRATION_TESTS_RUN_ID", "") if name: - instances_dir_name += "_" + name + instances_dir_name += "-" + name if run_id: - instances_dir_name += "_" + shlex.quote(run_id) + instances_dir_name += "-" + shlex.quote(run_id) return instances_dir_name @@ -483,8 +479,8 @@ class ClickHouseCluster: self.instances_dir_name = get_instances_dir(self.name) xdist_worker = os.getenv("PYTEST_XDIST_WORKER") if xdist_worker: - self.project_name += f"_{xdist_worker}" - self.instances_dir_name += f"_{xdist_worker}" + self.project_name += f"-{xdist_worker}" + self.instances_dir_name += f"-{xdist_worker}" self.instances_dir = p.join(self.base_dir, self.instances_dir_name) self.docker_logs_path = p.join(self.instances_dir, "docker.log") @@ -505,7 +501,7 @@ class ClickHouseCluster: self.docker_api_version = os.environ.get("DOCKER_API_VERSION") self.docker_base_tag = os.environ.get("DOCKER_BASE_TAG", "latest") - self.base_cmd = ["docker-compose"] + self.base_cmd = ["docker", "compose"] if custom_dockerd_host: self.base_cmd += ["--host", custom_dockerd_host] self.base_cmd += ["--env-file", self.env_file] @@ -783,6 +779,9 @@ class ClickHouseCluster: self.port_pool = PortPoolManager() + def compose_cmd(self, *args: str) -> List[str]: + return ["docker", "compose", "--project-name", self.project_name, *args] + @property def kafka_port(self): if self._kafka_port: @@ -931,7 +930,7 @@ class ClickHouseCluster: logging.debug("Trying to prune unused volumes...") result = run_and_check(["docker volume ls | wc -l"], shell=True) - if int(result > 0): + if int(result) > 1: run_and_check(["docker", "volume", "prune", "-f"]) logging.debug(f"Volumes pruned: {result}") except: @@ -957,10 +956,10 @@ class ClickHouseCluster: # Returns the list of currently running docker containers corresponding to this ClickHouseCluster. def get_running_containers(self): # docker-compose names containers using the following formula: - # container_name = project_name + '_' + instance_name + '_1' + # container_name = project_name + '-' + instance_name + '-1' # We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see # https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name - filter_name = f"^/{self.project_name}_.*_1$" + filter_name = f"^/{self.project_name}-.*-1$" # We want the command "docker container list" to show only containers' ID and their names, separated by colon. 
format = "{{.ID}}:{{.Names}}" containers = run_and_check( @@ -1006,15 +1005,12 @@ class ClickHouseCluster: self.with_zookeeper_secure = True self.base_cmd.extend(["--file", zookeeper_docker_compose_path]) - self.base_zookeeper_cmd = [ - "docker-compose", + self.base_zookeeper_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", zookeeper_docker_compose_path, - ] + ) return self.base_zookeeper_cmd def setup_zookeeper_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1038,15 +1034,12 @@ class ClickHouseCluster: self.with_zookeeper = True self.base_cmd.extend(["--file", zookeeper_docker_compose_path]) - self.base_zookeeper_cmd = [ - "docker-compose", + self.base_zookeeper_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", zookeeper_docker_compose_path, - ] + ) return self.base_zookeeper_cmd def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1086,15 +1079,12 @@ class ClickHouseCluster: self.with_zookeeper = True self.base_cmd.extend(["--file", keeper_docker_compose_path]) - self.base_zookeeper_cmd = [ - "docker-compose", + self.base_zookeeper_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", keeper_docker_compose_path, - ] + ) return self.base_zookeeper_cmd def setup_mysql_client_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1105,15 +1095,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"), ] ) - self.base_mysql_client_cmd = [ - "docker-compose", + self.base_mysql_client_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"), - ] + ) return self.base_mysql_client_cmd @@ -1129,15 +1116,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql.yml")] ) - self.base_mysql57_cmd = [ - "docker-compose", + self.base_mysql57_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_mysql.yml"), - ] + ) return self.base_mysql57_cmd @@ -1153,15 +1137,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml")] ) - self.base_mysql8_cmd = [ - "docker-compose", + self.base_mysql8_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml"), - ] + ) return self.base_mysql8_cmd @@ -1179,15 +1160,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"), ] ) - self.base_mysql_cluster_cmd = [ - "docker-compose", + self.base_mysql_cluster_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"), - ] + ) return self.base_mysql_cluster_cmd @@ -1200,15 +1178,12 @@ class ClickHouseCluster: env_variables["POSTGRES_LOGS_FS"] = "bind" self.with_postgres = True - self.base_postgres_cmd = [ - "docker-compose", + self.base_postgres_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_postgres.yml"), - ] + ) return self.base_postgres_cmd def 
setup_postgres_cluster_cmd( @@ -1226,15 +1201,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"), ] ) - self.base_postgres_cluster_cmd = [ - "docker-compose", + self.base_postgres_cluster_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"), - ] + ) def setup_postgresql_java_client_cmd( self, instance, env_variables, docker_compose_yml_dir @@ -1248,15 +1220,12 @@ class ClickHouseCluster: ), ] ) - self.base_postgresql_java_client_cmd = [ - "docker-compose", + self.base_postgresql_java_client_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_postgresql_java_client.yml"), - ] + ) def setup_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_hdfs = True @@ -1268,15 +1237,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml")] ) - self.base_hdfs_cmd = [ - "docker-compose", + self.base_hdfs_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml"), - ] + ) logging.debug("HDFS BASE CMD:{self.base_hdfs_cmd)}") return self.base_hdfs_cmd @@ -1296,15 +1262,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"), ] ) - self.base_kerberized_hdfs_cmd = [ - "docker-compose", + self.base_kerberized_hdfs_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"), - ] + ) return self.base_kerberized_hdfs_cmd def setup_kafka_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1319,15 +1282,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_kafka.yml")] ) - self.base_kafka_cmd = [ - "docker-compose", + self.base_kafka_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_kafka.yml"), - ] + ) return self.base_kafka_cmd def setup_kerberized_kafka_cmd( @@ -1345,15 +1305,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"), ] ) - self.base_kerberized_kafka_cmd = [ - "docker-compose", + self.base_kerberized_kafka_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"), - ] + ) return self.base_kerberized_kafka_cmd def setup_kerberos_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1366,15 +1323,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_kerberos_kdc.yml"), ] ) - self.base_kerberos_kdc_cmd = [ - "docker-compose", + self.base_kerberos_kdc_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_kerberos_kdc.yml"), - ] + ) return self.base_kerberos_kdc_cmd def setup_redis_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1386,15 +1340,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_redis.yml")] ) - self.base_redis_cmd = [ - 
"docker-compose", + self.base_redis_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_redis.yml"), - ] + ) return self.base_redis_cmd def setup_rabbitmq_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1410,15 +1361,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml")] ) - self.base_rabbitmq_cmd = [ - "docker-compose", + self.base_rabbitmq_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml"), - ] + ) return self.base_rabbitmq_cmd def setup_nats_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1431,15 +1379,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_nats.yml")] ) - self.base_nats_cmd = [ - "docker-compose", + self.base_nats_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_nats.yml"), - ] + ) return self.base_nats_cmd def setup_mongo_secure_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1475,15 +1420,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml")] ) - self.base_mongo_cmd = [ - "docker-compose", + self.base_mongo_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml"), - ] + ) return self.base_mongo_cmd def setup_coredns_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1493,15 +1435,12 @@ class ClickHouseCluster: ["--file", p.join(docker_compose_yml_dir, "docker_compose_coredns.yml")] ) - self.base_coredns_cmd = [ - "docker-compose", + self.base_coredns_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_coredns.yml"), - ] + ) return self.base_coredns_cmd @@ -1516,15 +1455,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_minio.yml")] ) - self.base_minio_cmd = [ - "docker-compose", + self.base_minio_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_minio.yml"), - ] + ) return self.base_minio_cmd def setup_azurite_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1542,15 +1478,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_azurite.yml")] ) - self.base_azurite_cmd = [ - "docker-compose", + self.base_azurite_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_azurite.yml"), - ] + ) return self.base_azurite_cmd def setup_cassandra_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1559,15 +1492,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_cassandra.yml")] ) - self.base_cassandra_cmd = [ - "docker-compose", + self.base_cassandra_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, 
"docker_compose_cassandra.yml"), - ] + ) return self.base_cassandra_cmd def setup_ldap_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1576,15 +1506,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_ldap.yml")] ) - self.base_ldap_cmd = [ - "docker-compose", + self.base_ldap_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_ldap.yml"), - ] + ) return self.base_ldap_cmd def setup_jdbc_bridge_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1594,15 +1521,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml")] ) - self.base_jdbc_bridge_cmd = [ - "docker-compose", + self.base_jdbc_bridge_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml"), - ] + ) return self.base_jdbc_bridge_cmd def setup_nginx_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1611,15 +1535,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_nginx.yml")] ) - self.base_nginx_cmd = [ - "docker-compose", + self.base_nginx_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_nginx.yml"), - ] + ) return self.base_nginx_cmd def setup_hive(self, instance, env_variables, docker_compose_yml_dir): @@ -1627,15 +1548,12 @@ class ClickHouseCluster: self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_hive.yml")] ) - self.base_hive_cmd = [ - "docker-compose", + self.base_hive_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_hive.yml"), - ] + ) return self.base_hive_cmd def setup_prometheus_cmd(self, instance, env_variables, docker_compose_yml_dir): @@ -1663,15 +1581,12 @@ class ClickHouseCluster: p.join(docker_compose_yml_dir, "docker_compose_prometheus.yml"), ] ) - self.base_prometheus_cmd = [ - "docker-compose", + self.base_prometheus_cmd = self.compose_cmd( "--env-file", instance.env_file, - "--project-name", - self.project_name, "--file", p.join(docker_compose_yml_dir, "docker_compose_prometheus.yml"), - ] + ) return self.base_prometheus_cmd def add_instance( @@ -1846,13 +1761,15 @@ class ClickHouseCluster: ) docker_compose_yml_dir = get_docker_compose_path() + docker_compose_net = p.join(docker_compose_yml_dir, "docker_compose_net.yml") self.instances[name] = instance - if ipv4_address is not None or ipv6_address is not None: + if not self.with_net_trics and ( + ipv4_address is not None or ipv6_address is not None + ): + # docker compose v2 does not accept more than one argument `-f net.yml` self.with_net_trics = True - self.base_cmd.extend( - ["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")] - ) + self.base_cmd.extend(["--file", docker_compose_net]) self.base_cmd.extend(["--file", instance.docker_compose_path]) @@ -2002,12 +1919,6 @@ class ClickHouseCluster: self.setup_coredns_cmd(instance, env_variables, docker_compose_yml_dir) ) - if self.with_net_trics: - for cmd in cmds: - cmd.extend( - ["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")] - ) - if with_redis and not self.with_redis: cmds.append( 
self.setup_redis_cmd(instance, env_variables, docker_compose_yml_dir) @@ -2070,6 +1981,13 @@ class ClickHouseCluster: ) ) + ### !!!! This is the last step after combining all cmds, don't put anything after + if self.with_net_trics: + for cmd in cmds: + # Again, adding it only once + if docker_compose_net not in cmd: + cmd.extend(["--file", docker_compose_net]) + logging.debug( "Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}".format( self.name, @@ -2084,7 +2002,7 @@ class ClickHouseCluster: def get_instance_docker_id(self, instance_name): # According to how docker-compose names containers. - return self.project_name + "_" + instance_name + "_1" + return self.project_name + "-" + instance_name + "-1" def _replace(self, path, what, to): with open(path, "r") as p: @@ -2823,7 +2741,7 @@ class ClickHouseCluster: "Got exception pulling images: %s", kwargs["exception"] ) - retry(log_function=logging_pulling_images)(run_and_check)(images_pull_cmd) + retry(log_function=logging_pulling_images)(run_and_check, images_pull_cmd) if self.with_zookeeper_secure and self.base_zookeeper_cmd: logging.debug("Setup ZooKeeper Secure") @@ -3104,9 +3022,7 @@ class ClickHouseCluster: retry( log_function=logging_azurite_initialization, - )( - run_and_check - )(azurite_start_cmd) + )(run_and_check, azurite_start_cmd) self.up_called = True logging.info("Trying to connect to Azurite") self.wait_azurite_to_start() @@ -3237,7 +3153,7 @@ class ClickHouseCluster: ) else: logging.warning( - "docker-compose up was not called. Trying to export docker.log for running containers" + "docker compose up was not called. Trying to export docker.log for running containers" ) self.cleanup() @@ -3324,8 +3240,7 @@ class ClickHouseCluster: subprocess_check_call(self.base_zookeeper_cmd + ["start", n]) -DOCKER_COMPOSE_TEMPLATE = """ -version: '2.3' +DOCKER_COMPOSE_TEMPLATE = """--- services: {name}: image: {image}:{tag} diff --git a/tests/integration/helpers/keeper_utils.py b/tests/integration/helpers/keeper_utils.py index be710db37d1..af5bdd57398 100644 --- a/tests/integration/helpers/keeper_utils.py +++ b/tests/integration/helpers/keeper_utils.py @@ -1,13 +1,46 @@ +import contextlib import io -import subprocess +import re +import select import socket +import subprocess import time import typing as tp -import contextlib -import select -from kazoo.client import KazooClient -from helpers.cluster import ClickHouseCluster, ClickHouseInstance + from helpers.client import CommandRequest +from helpers.cluster import ClickHouseCluster, ClickHouseInstance +from kazoo.client import KazooClient + +ss_established = [ + "ss", + "--resolve", + "--tcp", + "--no-header", + "state", + "ESTABLISHED", + "( dport = 2181 or sport = 2181 )", +] + + +def get_active_zk_connections(node: ClickHouseInstance) -> tp.List[str]: + return ( + str(node.exec_in_container(ss_established, privileged=True, user="root")) + .strip() + .split("\n") + ) + + +def get_zookeeper_which_node_connected_to(node: ClickHouseInstance) -> str: + line = str( + node.exec_in_container(ss_established, privileged=True, user="root") + ).strip() + + pattern = re.compile(r"zoo[0-9]+", re.IGNORECASE) + result = pattern.findall(line) + assert ( + len(result) == 1 + ), "ClickHouse must be connected only to one Zookeeper at a time" + return result[0] def execute_keeper_client_query( diff --git a/tests/integration/helpers/retry_decorator.py b/tests/integration/helpers/retry_decorator.py index e7bafbe29c1..0608fc164b3 100644 --- 
a/tests/integration/helpers/retry_decorator.py +++ b/tests/integration/helpers/retry_decorator.py @@ -4,36 +4,31 @@ from typing import Type, List def retry( + *exceptions: Type[BaseException], retries: int = 5, delay: float = 1, backoff: float = 1.5, jitter: float = 2, log_function=None, # should take **kwargs or arguments: `retry_number`, `exception` and `sleep_time` - retriable_expections_list: List[Type[BaseException]] = [Exception], ): - def inner(func): - def wrapper(*args, **kwargs): - current_delay = delay - for retry in range(retries): - try: - func(*args, **kwargs) - break - except Exception as e: - should_retry = False - for retriable_exception in retriable_expections_list: - if isinstance(e, retriable_exception): - should_retry = True - break - if not should_retry or (retry == retries - 1): - raise e - sleep_time = current_delay + random.uniform(0, jitter) - if log_function is not None: - log_function( - retry_number=retry, exception=e, sleep_time=sleep_time - ) - time.sleep(sleep_time) - current_delay *= backoff + exceptions = exceptions or (Exception,) - return wrapper + def inner(func, *args, **kwargs): + current_delay = delay + for retry in range(retries): + try: + func(*args, **kwargs) + break + except Exception as e: + should_retry = (retry < retries - 1) and any( + isinstance(e, re) for re in exceptions + ) + if not should_retry: + raise e + sleep_time = current_delay + random.uniform(0, jitter) + if log_function is not None: + log_function(retry_number=retry, exception=e, sleep_time=sleep_time) + time.sleep(sleep_time) + current_delay *= backoff return inner diff --git a/tests/integration/runner b/tests/integration/runner index 0667541b196..2c605577f80 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -424,7 +424,7 @@ if __name__ == "__main__": cmd = cmd_base + " " + args.command cmd_pre_pull = ( f"{cmd_base} find /ClickHouse/tests/integration/compose -name docker_compose_*.yml " - r"-exec docker-compose -f '{}' pull \;" + r"-exec docker compose -f '{}' pull \;" ) containers = subprocess.check_output( diff --git a/tests/integration/test_dotnet_client/test.py b/tests/integration/test_dotnet_client/test.py index 2af9b80f720..52055d2876e 100644 --- a/tests/integration/test_dotnet_client/test.py +++ b/tests/integration/test_dotnet_client/test.py @@ -37,19 +37,16 @@ def dotnet_container(): DOCKER_COMPOSE_PATH, "docker_compose_dotnet_client.yml" ) run_and_check( - [ - "docker-compose", - "-p", - cluster.project_name, + cluster.compose_cmd( "-f", docker_compose, "up", "--force-recreate", "-d", "--no-build", - ] + ) ) - yield docker.from_env().containers.get(cluster.project_name + "_dotnet1_1") + yield docker.from_env().containers.get(cluster.get_instance_docker_id("dotnet1")) def test_dotnet_client(started_cluster, dotnet_container): diff --git a/tests/integration/test_keeper_s3_snapshot/test.py b/tests/integration/test_keeper_s3_snapshot/test.py index b6c25305aef..cdd2ed8ae0c 100644 --- a/tests/integration/test_keeper_s3_snapshot/test.py +++ b/tests/integration/test_keeper_s3_snapshot/test.py @@ -1,12 +1,14 @@ -import pytest -from helpers.cluster import ClickHouseCluster -from time import sleep -from retry import retry from multiprocessing.dummy import Pool -import helpers.keeper_utils as keeper_utils -from minio.deleteobjects import DeleteObject +from time import sleep +import helpers.keeper_utils as keeper_utils +import pytest +from helpers import keeper_utils +from helpers.cluster import ClickHouseCluster +from helpers.retry_decorator import retry from 
kazoo.client import KazooClient +from minio.deleteobjects import DeleteObject +from retry import retry # from kazoo.protocol.serialization import Connect, read_buffer, write_buffer @@ -109,7 +111,6 @@ def test_s3_upload(started_cluster): cluster.minio_client.remove_object("snapshots", s.object_name) # Keeper sends snapshots asynchornously, hence we need to retry. - @retry(AssertionError, tries=10, delay=2) def _check_snapshots(): assert set(get_saved_snapshots()) == set( [ @@ -120,7 +121,7 @@ def test_s3_upload(started_cluster): ] ) - _check_snapshots() + retry(AssertionError, retries=10, delay=2, jitter=0, backoff=1)(_check_snapshots) destroy_zk_client(node1_zk) node1.stop_clickhouse(kill=True) @@ -132,10 +133,13 @@ def test_s3_upload(started_cluster): for _ in range(200): node2_zk.create("/test", sequence=True) - @retry(AssertionError, tries=10, delay=2) def _check_snapshots_without_quorum(): assert len(get_saved_snapshots()) > 4 + retry(AssertionError, retries=10, delay=2, jitter=0, backoff=1)( + _check_snapshots_without_quorum + ) + _check_snapshots_without_quorum() success_upload_message = "Successfully uploaded" diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 094ae7b9fd0..2bb84f2756a 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -55,8 +55,7 @@ def golang_container(): DOCKER_COMPOSE_PATH, "docker_compose_mysql_golang_client.yml" ) run_and_check( - [ - "docker-compose", + cluster.compose_cmd( "-p", cluster.project_name, "-f", @@ -65,13 +64,13 @@ def golang_container(): "--force-recreate", "-d", "--no-build", - ] + ) ) yield docker.DockerClient( base_url="unix:///var/run/docker.sock", version=cluster.docker_api_version, timeout=600, - ).containers.get(cluster.project_name + "_golang1_1") + ).containers.get(cluster.get_instance_docker_id("golang1")) @pytest.fixture(scope="module") @@ -80,25 +79,22 @@ def php_container(): DOCKER_COMPOSE_PATH, "docker_compose_mysql_php_client.yml" ) run_and_check( - [ - "docker-compose", + cluster.compose_cmd( "--env-file", cluster.instances["node"].env_file, - "-p", - cluster.project_name, "-f", docker_compose, "up", "--force-recreate", "-d", "--no-build", - ] + ) ) yield docker.DockerClient( base_url="unix:///var/run/docker.sock", version=cluster.docker_api_version, timeout=600, - ).containers.get(cluster.project_name + "_php1_1") + ).containers.get(cluster.get_instance_docker_id("php1")) @pytest.fixture(scope="module") @@ -107,25 +103,22 @@ def nodejs_container(): DOCKER_COMPOSE_PATH, "docker_compose_mysql_js_client.yml" ) run_and_check( - [ - "docker-compose", + cluster.compose_cmd( "--env-file", cluster.instances["node"].env_file, - "-p", - cluster.project_name, "-f", docker_compose, "up", "--force-recreate", "-d", "--no-build", - ] + ) ) yield docker.DockerClient( base_url="unix:///var/run/docker.sock", version=cluster.docker_api_version, timeout=600, - ).containers.get(cluster.project_name + "_mysqljs1_1") + ).containers.get(cluster.get_instance_docker_id("mysqljs1")) @pytest.fixture(scope="module") @@ -134,25 +127,22 @@ def java_container(): DOCKER_COMPOSE_PATH, "docker_compose_mysql_java_client.yml" ) run_and_check( - [ - "docker-compose", + cluster.compose_cmd( "--env-file", cluster.instances["node"].env_file, - "-p", - cluster.project_name, "-f", docker_compose, "up", "--force-recreate", "-d", "--no-build", - ] + ) ) yield docker.DockerClient( base_url="unix:///var/run/docker.sock", 
         version=cluster.docker_api_version,
         timeout=600,
-    ).containers.get(cluster.project_name + "_java1_1")
+    ).containers.get(cluster.get_instance_docker_id("java1"))


 def test_mysql_client(started_cluster):
diff --git a/tests/integration/test_read_only_table/test.py b/tests/integration/test_read_only_table/test.py
index df084f9dbbd..72f30591f56 100644
--- a/tests/integration/test_read_only_table/test.py
+++ b/tests/integration/test_read_only_table/test.py
@@ -1,9 +1,10 @@
-import time
-import re
 import logging
+import re
+import time

 import pytest
 from helpers.cluster import ClickHouseCluster
+from helpers.keeper_utils import get_zookeeper_which_node_connected_to
 from helpers.test_tools import assert_eq_with_retry

 NUM_TABLES = 10
@@ -56,26 +57,6 @@ def test_restart_zookeeper(start_cluster):

     logging.info("Inserted test data and initialized all tables")

-    def get_zookeeper_which_node_connected_to(node):
-        line = str(
-            node.exec_in_container(
-                [
-                    "bash",
-                    "-c",
-                    "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED",
-                ],
-                privileged=True,
-                user="root",
-            )
-        ).strip()
-
-        pattern = re.compile(r"zoo[0-9]+", re.IGNORECASE)
-        result = pattern.findall(line)
-        assert (
-            len(result) == 1
-        ), "ClickHouse must be connected only to one Zookeeper at a time"
-        return result[0]
-
     node1_zk = get_zookeeper_which_node_connected_to(node1)

     # ClickHouse should +- immediately reconnect to another zookeeper node
diff --git a/tests/integration/test_reload_zookeeper/test.py b/tests/integration/test_reload_zookeeper/test.py
index 8924376d6fd..6603c1bab4b 100644
--- a/tests/integration/test_reload_zookeeper/test.py
+++ b/tests/integration/test_reload_zookeeper/test.py
@@ -1,12 +1,12 @@
-import time
-import pytest
 import os
+import time

-from helpers.cluster import ClickHouseCluster
+import pytest
 from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+from helpers.keeper_utils import get_active_zk_connections
 from helpers.test_tools import assert_eq_with_retry

-
 cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml")
 node = cluster.add_instance("node", with_zookeeper=True)

@@ -85,19 +85,6 @@ def test_reload_zookeeper(start_cluster):
             settings={"select_sequential_consistency": 1},
         )

-    def get_active_zk_connections():
-        return str(
-            node.exec_in_container(
-                [
-                    "bash",
-                    "-c",
-                    "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED | wc -l",
-                ],
-                privileged=True,
-                user="root",
-            )
-        ).strip()
-
     ## set config to zoo2, server will be normal
     new_config = """
 <clickhouse>
@@ -113,16 +100,16 @@ def test_reload_zookeeper(start_cluster):
     node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config)
     node.query("SYSTEM RELOAD CONFIG")

-    active_zk_connections = get_active_zk_connections()
+    active_zk_connections = get_active_zk_connections(node)
     assert (
-        active_zk_connections == "1"
+        len(active_zk_connections) == 1
     ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections)

     assert_eq_with_retry(
         node, "SELECT COUNT() FROM test_table", "1000", retry_count=120, sleep_time=0.5
     )

-    active_zk_connections = get_active_zk_connections()
+    active_zk_connections = get_active_zk_connections(node)
     assert (
-        active_zk_connections == "1"
+        len(active_zk_connections) == 1
     ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections)
diff --git a/tests/integration/test_replicated_user_defined_functions/test.py b/tests/integration/test_replicated_user_defined_functions/test.py
index 92d86a8fd2c..afb8496d718 100644
--- a/tests/integration/test_replicated_user_defined_functions/test.py
+++ b/tests/integration/test_replicated_user_defined_functions/test.py
@@ -1,13 +1,13 @@
 import inspect
+import os.path
+import time
 from contextlib import nullcontext as does_not_raise

 import pytest
-import time
-import os.path
-
-from helpers.cluster import ClickHouseCluster
 from helpers.client import QueryRuntimeException
-from helpers.test_tools import assert_eq_with_retry, TSV
+from helpers.cluster import ClickHouseCluster
+from helpers.keeper_utils import get_active_zk_connections
+from helpers.test_tools import TSV, assert_eq_with_retry

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

@@ -65,20 +65,6 @@ def revert_zookeeper_config():
         replace_zookeeper_config(f.read())


-def get_active_zk_connections():
-    return str(
-        node1.exec_in_container(
-            [
-                "bash",
-                "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED | wc -l",
-            ],
-            privileged=True,
-            user="root",
-        )
-    ).strip()
-
-
 def test_create_and_drop():
     node1.query("CREATE FUNCTION f1 AS (x, y) -> x + y")
     assert node1.query("SELECT f1(12, 3)") == "15\n"
@@ -266,9 +252,9 @@ def test_reload_zookeeper():
         )
     )

-    active_zk_connections = get_active_zk_connections()
+    active_zk_connections = get_active_zk_connections(node1)
     assert (
-        active_zk_connections == "1"
+        len(active_zk_connections) == 1
     ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections)

     node1.query("CREATE FUNCTION f3 AS (x, y) -> x / y")
@@ -280,9 +266,9 @@ def test_reload_zookeeper():

     assert node2.query("SELECT f1(12, 3), f2(), f3(12, 3)") == TSV([[15, 2, 4]])

-    active_zk_connections = get_active_zk_connections()
+    active_zk_connections = get_active_zk_connections(node1)
     assert (
-        active_zk_connections == "1"
+        len(active_zk_connections) == 1
     ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections)

     node1.query("DROP FUNCTION f1")
diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py
index e34495a0071..7f13eff59a3 100644
--- a/tests/integration/test_replicated_users/test.py
+++ b/tests/integration/test_replicated_users/test.py
@@ -1,10 +1,11 @@
 import inspect
-import pytest
 import time
-
 from dataclasses import dataclass
+
+import pytest
 from helpers.cluster import ClickHouseCluster
-from helpers.test_tools import assert_eq_with_retry, TSV
+from helpers.keeper_utils import get_active_zk_connections
+from helpers.test_tools import TSV, assert_eq_with_retry

 cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml")

@@ -188,19 +189,6 @@ def test_reload_zookeeper(started_cluster):
     node1.query("SYSTEM RELOAD CONFIG")
     node2.query("SYSTEM RELOAD CONFIG")

-    def get_active_zk_connections():
-        return str(
-            node1.exec_in_container(
-                [
-                    "bash",
-                    "-c",
-                    "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED | wc -l",
-                ],
-                privileged=True,
-                user="root",
-            )
-        ).strip()
-
     node1.query("CREATE USER u1")
     assert_eq_with_retry(
         node2, "SELECT name FROM system.users WHERE name ='u1'", "u1\n"
@@ -259,9 +247,9 @@ def test_reload_zookeeper(started_cluster):
         """
     )

-    active_zk_connections = get_active_zk_connections()
+    active_zk_connections = get_active_zk_connections(node1)
     assert (
-        active_zk_connections == "1"
+        len(active_zk_connections) == 1
     ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections)

     node1.query("CREATE USER u3")
@@ -271,7 +259,7 @@ def test_reload_zookeeper(started_cluster):
TSV(["u1", "u2", "u3"]), ) - active_zk_connections = get_active_zk_connections() + active_zk_connections = get_active_zk_connections(node1) assert ( - active_zk_connections == "1" + len(active_zk_connections) == 1 ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) diff --git a/tests/integration/test_zookeeper_config_load_balancing/test.py b/tests/integration/test_zookeeper_config_load_balancing/test.py index cc0a9022674..21c731502b3 100644 --- a/tests/integration/test_zookeeper_config_load_balancing/test.py +++ b/tests/integration/test_zookeeper_config_load_balancing/test.py @@ -1,6 +1,9 @@ +import logging +import re import time import pytest from helpers.cluster import ClickHouseCluster +from helpers.keeper_utils import ss_established from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry @@ -18,11 +21,23 @@ node2 = cluster.add_instance( node3 = cluster.add_instance( "nod3", with_zookeeper=True, main_configs=["configs/zookeeper_load_balancing.xml"] ) - node4 = cluster.add_instance( "nod4", with_zookeeper=True, main_configs=["configs/zookeeper_load_balancing2.xml"] ) +zk1_re = re.compile( + r"testzookeeperconfigloadbalancing-(gw\d+-)?zoo1-1" + r".*testzookeeperconfigloadbalancing(-gw\d+)?_default:2181" +) +zk2_re = re.compile( + r"testzookeeperconfigloadbalancing-(gw\d+-)?zoo2-1" + r".*testzookeeperconfigloadbalancing(-gw\d+)?_default:2181" +) +zk3_re = re.compile( + r"testzookeeperconfigloadbalancing-(gw\d+-)?zoo3-1" + r".*testzookeeperconfigloadbalancing(-gw\d+)?_default:2181" +) + def change_balancing(old, new, reload=True): line = "{}<" @@ -51,89 +66,15 @@ def started_cluster(): def test_first_or_random(started_cluster): try: change_balancing("random", "first_or_random") - print( - str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) + for node in (node1, node2, node3): + connections = ( + node.exec_in_container(ss_established, privileged=True, user="root") + .strip() + .split("\n") ) - ) - assert ( - "1" - == str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) + logging.debug("Established connections for 2181:\n%s", connections) + assert len(connections) == 1 + assert zk1_re.search(connections[0]) 
finally: change_balancing("first_or_random", "random", reload=False) @@ -141,89 +82,15 @@ def test_first_or_random(started_cluster): def test_in_order(started_cluster): try: change_balancing("random", "in_order") - print( - str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) + for node in (node1, node2, node3): + connections = ( + node.exec_in_container(ss_established, privileged=True, user="root") + .strip() + .split("\n") ) - ) - assert ( - "1" - == str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) + logging.debug("Established connections for 2181:\n%s", connections) + assert len(connections) == 1 + assert zk1_re.search(connections[0]) finally: change_balancing("in_order", "random", reload=False) @@ -231,89 +98,15 @@ def test_in_order(started_cluster): def test_nearest_hostname(started_cluster): try: change_balancing("random", "nearest_hostname") - print( - str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) + for node, regexp in ((node1, zk1_re), (node2, zk2_re), (node3, zk3_re)): + connections = ( + node.exec_in_container(ss_established, privileged=True, user="root") + .strip() + .split("\n") ) - ) - assert ( - "1" - == str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - 
assert ( - "1" - == str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) + logging.debug("Established connections for 2181:\n%s", connections) + assert len(connections) == 1 + assert regexp.search(connections[0]) finally: change_balancing("nearest_hostname", "random", reload=False) @@ -321,89 +114,15 @@ def test_nearest_hostname(started_cluster): def test_hostname_levenshtein_distance(started_cluster): try: change_balancing("random", "hostname_levenshtein_distance") - print( - str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) + for node, regexp in ((node1, zk1_re), (node2, zk2_re), (node3, zk3_re)): + connections = ( + node.exec_in_container(ss_established, privileged=True, user="root") + .strip() + .split("\n") ) - ) - assert ( - "1" - == str( - node1.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node2.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) - - print( - str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", - ], - privileged=True, - user="root", - ) - ) - ) - assert ( - "1" - == str( - node3.exec_in_container( - [ - "bash", - "-c", - "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", - ], - privileged=True, - user="root", - ) - ).strip() - ) + logging.debug("Established connections for 2181:\n%s", connections) + assert len(connections) == 1 + assert regexp.search(connections[0]) finally: change_balancing("hostname_levenshtein_distance", "random", reload=False)
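
A note on the test_keeper_s3_snapshot hunk above: `retry` from the `retry` PyPI package is now applied at the call site instead of as a `@retry(...)` decorator, which keeps the plain check functions reusable undecorated. Below is a minimal, self-contained sketch of the pattern; `_probe` is a hypothetical stand-in for `_check_snapshots`, and note that the package's keyword is `tries` (it has no `retries` parameter) and that applying the decorator still leaves a callable to be invoked:

```python
import random

from retry import retry


def _probe() -> None:
    # Hypothetical flaky check standing in for _check_snapshots(): it raises
    # AssertionError most of the time and succeeds occasionally.
    assert random.random() < 0.3


# retry(...) builds a decorator; applying it to _probe returns a wrapped
# callable that still has to be invoked, hence the trailing ().
# With tries=10 and delay=2 this retries AssertionError up to 10 times,
# sleeping 2 seconds between attempts (backoff=1 and jitter=0 disable
# exponential backoff and randomized delays).
retry(AssertionError, tries=10, delay=2, jitter=0, backoff=1)(_probe)()
```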
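The test_mysql_protocol fixtures now build their command lines with `cluster.compose_cmd(...)` instead of a hard-coded `["docker-compose", ...]` list. The helper itself lives in helpers/cluster.py and is not part of this diff; the sketch below shows what it plausibly does, under the assumption that it prepends the Compose v2 plugin invocation plus the project name (which would explain why the explicit `-p`/`cluster.project_name` arguments disappear from most fixtures). The class and project name here are illustrative stubs, not the real API:

```python
from typing import List


class ClickHouseClusterStub:
    """Illustrative stub only; the real class is helpers.cluster.ClickHouseCluster."""

    # Assumed example value; the real project name is derived from the test.
    project_name = "roottestmysqlprotocol"

    def compose_cmd(self, *args: str) -> List[str]:
        # Compose v2 is a docker CLI plugin, so the command is the two tokens
        # "docker compose" rather than the old standalone "docker-compose".
        return ["docker", "compose", "--project-name", self.project_name, *args]
```

Compose v2 also switched container naming from `<project>_<service>_<index>` to `<project>-<service>-<index>`, which is why the hard-coded `cluster.project_name + "_golang1_1"` lookups give way to `cluster.get_instance_docker_id("golang1")`, and why the new `zk*_re` patterns in test_zookeeper_config_load_balancing match hyphenated `-gw\d+-` runner suffixes.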
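Several files above now import `ss_established`, `get_active_zk_connections`, and `get_zookeeper_which_node_connected_to` from helpers/keeper_utils.py instead of carrying their own `lsof | grep | wc -l` one-liners. That module is not shown in this diff; the following is a sketch of what the shared helpers could look like, assuming the `lsof` pipeline was replaced by an equivalent `ss` filter and that `get_active_zk_connections` returns the matching lines themselves (hence the `len(active_zk_connections) == 1` assertions above):

```python
import re

# ss filter listing established TCP connections to/from the ZooKeeper port.
# Unlike the old "lsof | grep 2181 | wc -l" pipeline, it yields the matching
# lines themselves, one per connection.
ss_established = [
    "ss",
    "--resolve",
    "--tcp",
    "--no-header",
    "state",
    "established",
    "( dport = :2181 or sport = :2181 )",
]


def get_active_zk_connections(node) -> list:
    # node is a ClickHouseInstance; returns one line per established
    # connection, so callers assert on len() instead of a "wc -l" string.
    return (
        str(node.exec_in_container(ss_established, privileged=True, user="root"))
        .strip()
        .split("\n")
    )


def get_zookeeper_which_node_connected_to(node) -> str:
    line = str(
        node.exec_in_container(ss_established, privileged=True, user="root")
    ).strip()
    zk_hosts = re.findall(r"zoo[0-9]+", line, re.IGNORECASE)
    assert (
        len(zk_hosts) == 1
    ), "ClickHouse must be connected only to one Zookeeper at a time"
    return zk_hosts[0]
```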