#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import getpass
import logging
import os
import signal
import subprocess
import sys

CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../"))
CURRENT_WORK_DIR = os.getcwd()
CONTAINER_NAME = "clickhouse_integration_tests"

CONFIG_DIR_IN_REPO = "programs/server"
INTEGRATION_DIR_IN_REPO = "tests/integration"
SRC_DIR_IN_REPO = "src"

DIND_INTEGRATION_TESTS_IMAGE_NAME = "yandex/clickhouse-integration-tests-runner"


def check_args_and_update_paths(args):
    if args.clickhouse_root:
        if not os.path.isabs(args.clickhouse_root):
            CLICKHOUSE_ROOT = os.path.abspath(args.clickhouse_root)
        else:
            CLICKHOUSE_ROOT = args.clickhouse_root
    else:
        logging.info("ClickHouse root is not set. Will use %s" % (DEFAULT_CLICKHOUSE_ROOT))
        CLICKHOUSE_ROOT = DEFAULT_CLICKHOUSE_ROOT

    if not os.path.isabs(args.binary):
        args.binary = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.binary))

    if not args.odbc_bridge_binary:
        args.odbc_bridge_binary = os.path.join(os.path.dirname(args.binary), 'clickhouse-odbc-bridge')
    elif not os.path.isabs(args.odbc_bridge_binary):
        args.odbc_bridge_binary = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.odbc_bridge_binary))

    if not args.library_bridge_binary:
        args.library_bridge_binary = os.path.join(os.path.dirname(args.binary), 'clickhouse-library-bridge')
    elif not os.path.isabs(args.library_bridge_binary):
        args.library_bridge_binary = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.library_bridge_binary))

    if args.base_configs_dir:
        if not os.path.isabs(args.base_configs_dir):
            args.base_configs_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.base_configs_dir))
    else:
        args.base_configs_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, CONFIG_DIR_IN_REPO))
        logging.info("Base configs dir is not set. Will use %s" % (args.base_configs_dir))

    if args.cases_dir:
        if not os.path.isabs(args.cases_dir):
            args.cases_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.cases_dir))
    else:
        args.cases_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, INTEGRATION_DIR_IN_REPO))
        logging.info("Cases dir is not set. Will use %s" % (args.cases_dir))

    if args.src_dir:
        if not os.path.isabs(args.src_dir):
            args.src_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.src_dir))
    else:
        args.src_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, SRC_DIR_IN_REPO))
        logging.info("src dir is not set. Will use %s" % (args.src_dir))

    logging.info("base_configs_dir: {}, binary: {}, cases_dir: {}".format(args.base_configs_dir, args.binary, args.cases_dir))

    for path in [args.binary, args.odbc_bridge_binary, args.library_bridge_binary, args.base_configs_dir, args.cases_dir, CLICKHOUSE_ROOT]:
        if not os.path.exists(path):
            raise Exception("Path {} doesn't exist".format(path))

    if args.dockerd_volume:
        if not os.path.isabs(args.dockerd_volume):
            args.dockerd_volume = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.dockerd_volume))

    if (not os.path.exists(os.path.join(args.base_configs_dir, "config.xml"))) and (not os.path.exists(os.path.join(args.base_configs_dir, "config.yaml"))):
        raise Exception("No config.xml or config.yaml in {}".format(args.base_configs_dir))

    if (not os.path.exists(os.path.join(args.base_configs_dir, "users.xml"))) and (not os.path.exists(os.path.join(args.base_configs_dir, "users.yaml"))):
        raise Exception("No users.xml or users.yaml in {}".format(args.base_configs_dir))


def docker_kill_handler_handler(signum, frame):
    # Kill the runner container (if it is still up) before propagating Ctrl+C.
    subprocess.check_call('docker kill $(docker ps -a -q --filter name={name} --format="{{{{.ID}}}}")'.format(name=CONTAINER_NAME), shell=True)
    raise KeyboardInterrupt("Killed by Ctrl+C")


signal.signal(signal.SIGINT, docker_kill_handler_handler)


# The integration tests runner should allow running tests on several versions of ClickHouse.
# Integration tests should be portable.
# To run integration tests the following artifacts should be sufficient:
# - clickhouse binary (env CLICKHOUSE_TESTS_SERVER_BIN_PATH or --binary arg)
# - clickhouse default configs (config.xml, users.xml) from the same version as the binary (env CLICKHOUSE_TESTS_BASE_CONFIG_DIR or --base-configs-dir arg)
# - odbc bridge binary (env CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH or --odbc-bridge-binary arg)
# - library bridge binary (env CLICKHOUSE_TESTS_LIBRARY_BRIDGE_BIN_PATH or --library-bridge-binary arg)
# - tests/integration directory with all test cases and configs (env CLICKHOUSE_TESTS_INTEGRATION_PATH or --cases-dir arg)
#
# 1) --clickhouse-root is only used to derive the other paths from their default places in the repository
# 2) the path of the runner script itself is used to derive paths in the trivial case, when we run it from the repository
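#
# An illustrative invocation (the paths and the test name are examples, not requirements),
# assuming the script is run from a repository checkout with locally built binaries:
#
#   ./tests/integration/runner --binary /path/to/build/programs/clickhouse \
#       --base-configs-dir /path/to/repo/programs/server 'test_storage_mongodb'
#
# Trailing positional arguments such as 'test_storage_mongodb' are forwarded to pytest
# via PYTEST_OPTS inside the container.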

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse integration tests runner")

    parser.add_argument(
        "--binary",
        default=os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH", os.environ.get("CLICKHOUSE_TESTS_CLIENT_BIN_PATH", "/usr/bin/clickhouse")),
        help="Path to clickhouse binary. For example /usr/bin/clickhouse")

    parser.add_argument(
        "--odbc-bridge-binary",
        default=os.environ.get("CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH", ""),
        help="Path to clickhouse-odbc-bridge binary. Defaults to clickhouse-odbc-bridge in the same dir as clickhouse.")

    parser.add_argument(
        "--library-bridge-binary",
        default=os.environ.get("CLICKHOUSE_TESTS_LIBRARY_BRIDGE_BIN_PATH", ""),
        help="Path to clickhouse-library-bridge binary. Defaults to clickhouse-library-bridge in the same dir as clickhouse.")

    parser.add_argument(
        "--base-configs-dir",
        default=os.environ.get("CLICKHOUSE_TESTS_BASE_CONFIG_DIR"),
        help="Path to clickhouse base configs directory with config.xml/users.xml")

    parser.add_argument(
        "--cases-dir",
        default=os.environ.get("CLICKHOUSE_TESTS_INTEGRATION_PATH"),
        help="Path to integration tests cases and configs directory. For example tests/integration in the repository")

    parser.add_argument(
        "--src-dir",
        default=os.environ.get("CLICKHOUSE_SRC_DIR"),
        help="Path to the 'src' directory in the repository. Used to provide schemas (e.g. *.proto) for tests whose schemas are located in the 'src' directory")

    parser.add_argument(
        "--clickhouse-root",
        help="Path to the repository root folder. Used to take configuration from repository default paths.")

    parser.add_argument(
        "--command",
        default='',
        help="Run some other command in the container (for example bash)")

    parser.add_argument(
        "--disable-net-host",
        action='store_true',
        default=False,
        help="Don't use host networking in the parent docker container")

    parser.add_argument(
        "--network",
        default='host',
        help="Set the network driver for the runner container")

    parser.add_argument(
        "--docker-image-version",
        default="latest",
        help="Version of the docker image which the runner will use to run tests")

    parser.add_argument(
        "--docker-compose-images-tags",
        action="append",
        help="Set non-default tags for images used in docker compose recipes (yandex/my_container:my_tag)")

    parser.add_argument(
        "-n", "--parallel",
        action="store",
        dest="parallel",
        help="Parallelism")

    parser.add_argument(
        "-t", "--tests_list",
        action="store",
        nargs='+',
        default=[],
        dest="tests_list",
        help="List of tests to run")

    parser.add_argument(
        "--tmpfs",
        action='store_true',
        default=False,
        dest="tmpfs",
        help="Use tmpfs for dockerd files")

    parser.add_argument(
        "--cleanup-containers",
        action='store_true',
        default=False,
        dest="cleanup_containers",
        help="Remove all running containers on test session start")

    parser.add_argument(
        "--dockerd-volume-dir",
        action='store',
        dest="dockerd_volume",
        help="Bind a volume to this dir to use for dockerd files")

    parser.add_argument('pytest_args', nargs='*', help="args for the pytest command")

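    # Most of the path options above can also be supplied through the environment instead
    # of the command line, e.g. (values are illustrative):
    #   export CLICKHOUSE_TESTS_SERVER_BIN_PATH=/usr/bin/clickhouse
    #   export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/etc/clickhouse-server
    #   export CLICKHOUSE_TESTS_INTEGRATION_PATH=$HOME/ClickHouse/tests/integration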
    args = parser.parse_args()

    check_args_and_update_paths(args)

    parallel_args = ""
    if args.parallel:
        # --dist=loadfile and -n are pytest-xdist options: run tests in args.parallel
        # worker processes, grouping tests from the same file on the same worker.
        parallel_args += "--dist=loadfile"
        parallel_args += " -n {}".format(args.parallel)

    net = ""
    if args.network:
        net = "--net={}".format(args.network)
    elif not args.disable_net_host:
        net = "--net=host"

    env_tags = ""

    if args.docker_compose_images_tags is not None:
        for img_tag in args.docker_compose_images_tags:
            [image, tag] = img_tag.split(":")
            if image == "yandex/clickhouse-mysql-golang-client":
                env_tags += "-e {}={} ".format("DOCKER_MYSQL_GOLANG_CLIENT_TAG", tag)
            elif image == "yandex/clickhouse-mysql-java-client":
                env_tags += "-e {}={} ".format("DOCKER_MYSQL_JAVA_CLIENT_TAG", tag)
            elif image == "yandex/clickhouse-mysql-js-client":
                env_tags += "-e {}={} ".format("DOCKER_MYSQL_JS_CLIENT_TAG", tag)
            elif image == "yandex/clickhouse-mysql-php-client":
                env_tags += "-e {}={} ".format("DOCKER_MYSQL_PHP_CLIENT_TAG", tag)
            elif image == "yandex/clickhouse-postgresql-java-client":
                env_tags += "-e {}={} ".format("DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", tag)
            elif image == "yandex/clickhouse-integration-test":
                env_tags += "-e {}={} ".format("DOCKER_BASE_TAG", tag)
            elif image == "yandex/clickhouse-kerberos-kdc":
                env_tags += "-e {}={} ".format("DOCKER_KERBEROS_KDC_TAG", tag)
            else:
                logging.info("Unknown image %s" % (image))

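    # For example (the tag value is illustrative), passing
    #   --docker-compose-images-tags yandex/clickhouse-integration-test:21.6
    # adds "-e DOCKER_BASE_TAG=21.6" to the docker run command assembled below.
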
    # Create a named volume which will be used inside the runner to store images and other
    # docker-related files, to avoid re-downloading them every time.
    #
    # It should be removed manually when no longer needed.
    dockerd_internal_volume = ""
    if args.tmpfs:
        dockerd_internal_volume = "--tmpfs /var/lib/docker -e DOCKER_RAMDISK=true"
    elif args.dockerd_volume:
        dockerd_internal_volume = "--mount type=bind,source={},target=/var/lib/docker".format(args.dockerd_volume)
    else:
        subprocess.check_call('docker volume create {name}_volume'.format(name=CONTAINER_NAME), shell=True)
        dockerd_internal_volume = "--volume={}_volume:/var/lib/docker".format(CONTAINER_NAME)

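    # The named volume can be dropped later with, for example:
    #   docker volume rm clickhouse_integration_tests_volume
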
    # If enabled, kill and remove containers before the pytest session runs.
    env_cleanup = ""
    if args.cleanup_containers:
        env_cleanup = "-e PYTEST_CLEANUP_CONTAINERS=1"

    # Enable tty and interactive mode for docker if we have a real tty.
    tty = ""
    if sys.stdout.isatty() and sys.stdin.isatty():
        tty = "-it"

    cmd = "docker run {net} {tty} --rm --name {name} --privileged \
        --volume={bin}:/clickhouse \
        --volume={odbc_bridge_bin}:/clickhouse-odbc-bridge \
        --volume={library_bridge_bin}:/clickhouse-library-bridge \
        --volume={base_cfg}:/clickhouse-config --volume={cases_dir}:/ClickHouse/tests/integration \
        --volume={src_dir}/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos \
        {dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \
        {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list}' {img} {command}".format(
        net=net,
        tty=tty,
        bin=args.binary,
        odbc_bridge_bin=args.odbc_bridge_binary,
        library_bridge_bin=args.library_bridge_binary,
        base_cfg=args.base_configs_dir,
        cases_dir=args.cases_dir,
        src_dir=args.src_dir,
        env_tags=env_tags,
        env_cleanup=env_cleanup,
        parallel=parallel_args,
        opts=' '.join(args.pytest_args),
        tests_list=' '.join(args.tests_list),
        dockerd_internal_volume=dockerd_internal_volume,
        img=DIND_INTEGRATION_TESTS_IMAGE_NAME + ":" + args.docker_image_version,
        name=CONTAINER_NAME,
        command=args.command
    )

    print("Running pytest container as: '{}'.".format(cmd))
    subprocess.check_call(cmd, shell=True)