Updating example, rbac and ldap testflows suites.
Adding LDAP tests for cluster with secret.
parent edc8568b2f
commit a35690344b
@@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
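The `${VAR}` and `${VAR:-default}` references in the volumes section are resolved by docker-compose from the environment that the test harness exports (`Cluster.up()` sets them further down in this commit). A minimal sketch of that resolution, with hypothetical paths:

import os
import subprocess

# Hypothetical values; in the suites these are exported by Cluster.up().
env = dict(os.environ)
env["CLICKHOUSE_TESTS_DIR"] = "/home/user/ClickHouse/tests/testflows/example"  # assumed path
env["CLICKHOUSE_TESTS_SERVER_BIN_PATH"] = "/usr/bin/clickhouse"  # matches the ${...:-/usr/bin/clickhouse} fallback

# docker-compose substitutes ${VAR:-default} from this environment.
subprocess.run(["docker-compose", "up", "-d"], env=env, check=True)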
tests/testflows/example/example_env_arm64/docker-compose.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
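The `all_services_ready` service exists only so that a single compose invocation blocks until every dependency's healthcheck passes. A sketch of how a harness might use it:

import subprocess

# Starting the dummy service forces docker-compose to first wait on the
# service_healthy conditions of clickhouse1 and zookeeper.
subprocess.run(["docker-compose", "up", "-d", "all_services_ready"], check=True)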
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.6.2
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -7,22 +7,31 @@ append_path(sys.path, "..")

 from helpers.cluster import Cluster
 from helpers.argparser import argparser
+from platform import processor as current_cpu

 @TestFeature
 @Name("example")
 @ArgumentParser(argparser)
-def regression(self, local, clickhouse_binary_path, stress=None):
+def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
     """Simple example of how you can use TestFlows to test ClickHouse.
     """
     nodes = {
         "clickhouse": ("clickhouse1",),
     }

+    self.context.clickhouse_version = clickhouse_version
+
     if stress is not None:
         self.context.stress = stress

+    folder_name = os.path.basename(current_dir())
+    if current_cpu() == 'aarch64':
+        env = f"{folder_name}_env_arm64"
+    else:
+        env = f"{folder_name}_env"
+
     with Cluster(local, clickhouse_binary_path, nodes=nodes,
-            docker_compose_project_dir=os.path.join(current_dir(), "example_env")) as cluster:
+            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
         self.context.cluster = cluster

         Scenario(run=load("example.tests.example", "scenario"))
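The architecture switch above picks the `_env_arm64` compose project on ARM hosts. A standalone sketch of the same selection logic, with `current_dir()` replaced by `os.getcwd()` for illustration:

import os
from platform import processor as current_cpu

folder_name = os.path.basename(os.getcwd())  # stand-in for testflows' current_dir()
env = f"{folder_name}_env_arm64" if current_cpu() == "aarch64" else f"{folder_name}_env"
print(env)  # e.g. "example_env" on x86_64, "example_env_arm64" on aarch64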
@@ -7,6 +7,10 @@ def argparser(parser):
         action="store_true",
         help="run regression in local mode", default=False)

+    parser.add_argument("--clickhouse-version", type=str, dest="clickhouse_version",
+        help="clickhouse server version", metavar="version",
+        default=os.getenv("CLICKHOUSE_TESTS_SERVER_VERSION", None))
+
     parser.add_argument("--clickhouse-binary-path",
         type=str, dest="clickhouse_binary_path",
         help="path to ClickHouse binary, default: /usr/bin/clickhouse", metavar="path",
@@ -1,9 +1,12 @@
 import os
+import uuid
 import time
 import inspect
 import threading
 import tempfile

+from testflows._core.cli.arg.common import description
+
 import testflows.settings as settings

 from testflows.core import *
@@ -12,6 +15,21 @@ from testflows.connect import Shell as ShellBase
 from testflows.uexpect import ExpectTimeoutError
 from testflows._core.testtype import TestSubType

+MESSAGES_TO_RETRY = [
+    "DB::Exception: ZooKeeper session has been expired",
+    "DB::Exception: Connection loss",
+    "Coordination::Exception: Session expired",
+    "Coordination::Exception: Connection loss",
+    "Coordination::Exception: Operation timeout",
+    "DB::Exception: Operation timeout",
+    "Operation timed out",
+    "ConnectionPoolWithFailover: Connection failed at try",
+    "DB::Exception: New table appeared in database being dropped or detached. Try again",
+    "is already started to be removing by another replica right now",
+    "Shutdown is called for table", # happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized.
+    "is executing longer than distributed_ddl_task_timeout" # distributed TTL timeout message
+]
+
 class Shell(ShellBase):
     def __exit__(self, type, value, traceback):
         # send exit and Ctrl-D repeatedly
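MESSAGES_TO_RETRY drives a simple substring scan over query output: if any known transient error appears, the query is re-run (see the retry logic added to query() further down). A minimal sketch of the check, using a two-entry excerpt of the list:

# Excerpt of MESSAGES_TO_RETRY; the full list is defined above.
messages_to_retry = [
    "DB::Exception: ZooKeeper session has been expired",
    "Coordination::Exception: Connection loss",
]

output = "Code: 999. DB::Exception: ZooKeeper session has been expired (version 21.12)"
if any(msg in output for msg in messages_to_retry):
    print("transient error detected, query will be retried")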
@@ -56,33 +74,36 @@ class Node(object):
             shell = self.cluster._bash.pop(key)
             shell.__exit__(None, None, None)

-    def restart(self, timeout=300, retries=5, safe=True):
+    def wait_healthy(self, timeout=300):
+        with By(f"waiting until container {self.name} is healthy"):
+            for attempt in retries(timeout=timeout, delay=1):
+                with attempt:
+                    if self.command("echo 1", no_checks=1, steps=False).exitcode != 0:
+                        fail("container is not healthy")
+
+    def restart(self, timeout=300, retry_count=5, safe=True):
         """Restart node.
         """
         self.close_bashes()
-
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} restart {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
+        retry(self.cluster.command, retry_count)(
+            None, f'{self.cluster.docker_compose} restart {self.name}',
+            timeout=timeout, exitcode=0, steps=False)

-    def start(self, timeout=300, retries=5):
+    def start(self, timeout=300, retry_count=5):
         """Start node.
         """
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
+        retry(self.cluster.command, retry_count)(
+            None, f'{self.cluster.docker_compose} start {self.name}',
+            timeout=timeout, exitcode=0, steps=False)

-    def stop(self, timeout=300, retries=5, safe=True):
+    def stop(self, timeout=300, retry_count=5, safe=True):
         """Stop node.
         """
         self.close_bashes()
-
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
+        retry(self.cluster.command, retry_count)(
+            None, f'{self.cluster.docker_compose} stop {self.name}',
+            timeout=timeout, exitcode=0, steps=False)

     def command(self, *args, **kwargs):
         return self.cluster.command(self.name, *args, **kwargs)
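The hand-rolled `for retry in range(retries)` loops are replaced with testflows' `retry(fn, count)(args...)` wrapper, which keeps calling `fn` until it succeeds. An illustrative stand-in (not the real testflows.core implementation) that shows the same call shape:

import time

def retry(fn, retry_count=5, delay=1):
    # Illustrative only: call fn until it stops raising, at most retry_count times.
    def wrapper(*args, **kwargs):
        last_exc = None
        for _ in range(retry_count):
            try:
                return fn(*args, **kwargs)
            except Exception as exc:
                last_exc = exc
                time.sleep(delay)
        raise last_exc
    return wrapper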
@@ -96,7 +117,7 @@ class Node(object):
         """
         command = f"{cmd}"
-        with Step("executing command", description=command, format_description=False) if steps else NullStep():
+        with step("executing command", description=command, format_description=False) if steps else NullStep():
             try:
                 r = self.cluster.bash(self.name, command=shell_command)(command, *args, **kwargs)
             except ExpectTimeoutError:
@@ -114,124 +135,326 @@ class Node(object):
             with Then(f"output should contain message", description=message) if steps else NullStep():
                 assert message in r.output, error(r.output)

         if message is None or "Exception:" not in message:
             with Then("check if output has exception") if steps else NullStep():
                 if "Exception:" in r.output:
                     if raise_on_exception:
                         raise QueryRuntimeException(r.output)
                     assert False, error(r.output)

         return r


 class ClickHouseNode(Node):
     """Node with ClickHouse server.
     """
-    def wait_healthy(self, timeout=300):
-        with By(f"waiting until container {self.name} is healthy"):
-            start_time = time.time()
-            while True:
-                if self.query("select 1", no_checks=1, timeout=300, steps=False).exitcode == 0:
-                    break
-                if time.time() - start_time < timeout:
-                    time.sleep(2)
-                    continue
-                assert False, "container is not healthy"
+    def thread_fuzzer(self):
+        with Given("exporting THREAD_FUZZER"):
+            self.command("export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000")
+            self.command("export THREAD_FUZZER_SLEEP_PROBABILITY=0.1")
+            self.command("export THREAD_FUZZER_SLEEP_TIME_US=100000")
+
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1")
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1")
+
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001")
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001")
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000")
+            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000")
+            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000")
+
+    def wait_clickhouse_healthy(self, timeout=300):
+        with By(f"waiting until ClickHouse server on {self.name} is healthy"):
+            for attempt in retries(timeout=timeout, delay=1):
+                with attempt:
+                    if self.query("SELECT version()", no_checks=1, steps=False).exitcode != 0:
+                        fail("ClickHouse server is not healthy")
+        node_version = self.query("SELECT version()", no_checks=1, steps=False).output
+        if current().context.clickhouse_version is None:
+            current().context.clickhouse_version = node_version
+        else:
+            assert current().context.clickhouse_version == node_version, error()
+
+    def clickhouse_pid(self):
+        """Return ClickHouse server pid if present
+        otherwise return None.
+        """
+        if self.command("ls /tmp/clickhouse-server.pid").exitcode == 0:
+            return self.command("cat /tmp/clickhouse-server.pid").output.strip()
+        return None
+
+    def stop_clickhouse(self, timeout=300, safe=True):
+        """Stop ClickHouse server.
+        """
+        if safe:
+            self.query("SYSTEM STOP MOVES")
+            self.query("SYSTEM STOP MERGES")
+            self.query("SYSTEM FLUSH LOGS")
+            with By("waiting for 5 sec for moves and merges to stop"):
+                time.sleep(5)
+            with And("forcing to sync everything to disk"):
+                self.command("sync", timeout=300, exitcode=0)
+
+        with By(f"sending kill -TERM to ClickHouse server process on {self.name}"):
+            pid = self.clickhouse_pid()
+            self.command(f"kill -TERM {pid}", exitcode=0, steps=False)
+
+        with And("checking pid does not exist"):
+            for attempt in retries(timeout=100, delay=1):
+                with attempt:
+                    if self.command(f"ps {pid}", steps=False, no_checks=True).exitcode != 1:
+                        fail("pid still alive")
+
+        with And("deleting ClickHouse server pid file"):
+            self.command("rm -rf /tmp/clickhouse-server.pid", exitcode=0, steps=False)
+
+    def start_clickhouse(self, timeout=300, wait_healthy=True, retry_count=5, user=None, thread_fuzzer=False):
+        """Start ClickHouse server.
+        """
+        pid = self.clickhouse_pid()
+        if pid:
+            raise RuntimeError(f"ClickHouse server already running with pid {pid}")
+
+        if thread_fuzzer:
+            self.thread_fuzzer()
+
+        if user is None:
+            with By("starting ClickHouse server process"):
+                self.command(
+                    "clickhouse server --config-file=/etc/clickhouse-server/config.xml"
+                    " --log-file=/var/log/clickhouse-server/clickhouse-server.log"
+                    " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
+                    " --pidfile=/tmp/clickhouse-server.pid --daemon",
+                    exitcode=0, steps=False)
+        else:
+            with By(f"starting ClickHouse server process from {user}"):
+                self.command(f'su {user} -c'
+                    '"clickhouse server --config-file=/etc/clickhouse-server/config.xml'
+                    ' --log-file=/var/log/clickhouse-server/clickhouse-server.log'
+                    ' --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log'
+                    ' --pidfile=/tmp/clickhouse-server.pid --daemon"',
+                    exitcode=0, steps=False)
+
+        with And("checking that ClickHouse server pid file was created"):
+            for attempt in retries(timeout=timeout, delay=1):
+                with attempt:
+                    if self.command("ls /tmp/clickhouse-server.pid", steps=False, no_checks=True).exitcode != 0:
+                        fail("no pid file yet")
+
+        if wait_healthy:
+            self.wait_clickhouse_healthy(timeout=timeout)
+
+    def restart_clickhouse(self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None):
+        """Restart ClickHouse server.
+        """
+        if self.clickhouse_pid():
+            self.stop_clickhouse(timeout=timeout, safe=safe)
+
+        self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user)

-    def stop(self, timeout=300, safe=True, retries=5):
+    def stop(self, timeout=300, safe=True, retry_count=5):
         """Stop node.
         """
-        if safe:
-            self.query("SYSTEM STOP MOVES")
-            self.query("SYSTEM STOP MERGES")
-            self.query("SYSTEM FLUSH LOGS")
-            with By("waiting for 5 sec for moves and merges to stop"):
-                time.sleep(5)
-            with And("forcing to sync everything to disk"):
-                self.command("sync", timeout=300)
+        if self.clickhouse_pid():
+            self.stop_clickhouse(timeout=timeout, safe=safe)

         self.close_bashes()
-
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
+        return super(ClickHouseNode, self).stop(timeout=timeout, retry_count=retry_count)

-    def start(self, timeout=300, wait_healthy=True, retries=5):
+    def start(self, timeout=300, start_clickhouse=True, wait_healthy=True, retry_count=5, user=None):
         """Start node.
         """
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
-
-        if wait_healthy:
-            self.wait_healthy(timeout)
+        super(ClickHouseNode, self).start(timeout=timeout, retry_count=retry_count)
+
+        if start_clickhouse:
+            self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user,)

-    def restart(self, timeout=300, safe=True, wait_healthy=True, retries=5):
+    def restart(self, timeout=300, safe=True, start_clickhouse=True,
+                wait_healthy=True, retry_count=5, user=None):
         """Restart node.
         """
-        if safe:
-            self.query("SYSTEM STOP MOVES")
-            self.query("SYSTEM STOP MERGES")
-            self.query("SYSTEM FLUSH LOGS")
-            with By("waiting for 5 sec for moves and merges to stop"):
-                time.sleep(5)
-            with And("forcing to sync everything to disk"):
-                self.command("sync", timeout=300)
+        if self.clickhouse_pid():
+            self.stop_clickhouse(timeout=timeout, safe=safe)

         self.close_bashes()
-
-        for retry in range(retries):
-            r = self.cluster.command(None, f'{self.cluster.docker_compose} restart {self.name}', timeout=timeout)
-            if r.exitcode == 0:
-                break
-
-        if wait_healthy:
-            self.wait_healthy(timeout)
+        super(ClickHouseNode, self).restart(timeout=timeout, retry_count=retry_count)
+
+        if start_clickhouse:
+            self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user)
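Taken together, clickhouse_pid()/stop_clickhouse()/start_clickhouse() let tests bounce the server process without touching the container. A hypothetical TestFlows scenario using them (node name and user account are assumptions):

from testflows.core import TestScenario, When, Then

@TestScenario
def restart_server_as_user(self):
    node = self.context.cluster.node("clickhouse1")
    with When("I restart the server under a hypothetical 'testuser' account"):
        node.restart_clickhouse(safe=True, user="testuser")
    with Then("the server should answer queries"):
        node.query("SELECT 1", exitcode=0)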
+    def hash_query(self, sql, hash_utility="sha1sum", steps=True, step=By,
+                   settings=None, secure=False, *args, **kwargs):
+        """Execute sql query inside the container and return the hash of the output.
-    def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False,
-              raise_on_exception=False, step=By, settings=None, *args, **kwargs):
-        """Execute and check query.
+
         :param sql: sql query
-        :param message: expected message that should be in the output, default: None
-        :param exitcode: expected exitcode, default: None
+        :param hash_utility: hash function which used to compute hash
         """
         settings = list(settings or [])
+        query_settings = list(settings)

         if hasattr(current().context, "default_query_settings"):
-            settings += current().context.default_query_settings
+            query_settings += current().context.default_query_settings

+        client = "clickhouse client -n"
+        if secure:
+            client += " -s"
+
         if len(sql) > 1024:
             with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
                 query.write(sql)
                 query.flush()
-                command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} clickhouse client -n"
-                for setting in settings:
+                command = f"set -o pipefail && cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client} | {hash_utility}"
+                for setting in query_settings:
                     name, value = setting
                     command += f" --{name} \"{value}\""
                 description = f"""
                     echo -e \"{sql[:100]}...\" > {query.name}
                     {command}
                 """
                 with step("executing command", description=description,
                           format_description=False) if steps else NullStep():
                     try:
                         r = self.cluster.bash(None)(command, *args, **kwargs)
                     except ExpectTimeoutError:
                         self.cluster.close_bash(None)
         else:
+            command = f"set -o pipefail && echo -e \"{sql}\" | {client} | {hash_utility}"
+            for setting in query_settings:
                 name, value = setting
                 command += f" --{name} \"{value}\""
             with step("executing command", description=command,
                       format_description=False) if steps else NullStep():
                 try:
                     r = self.cluster.bash(self.name)(command, *args, **kwargs)
                 except ExpectTimeoutError:
                     self.cluster.close_bash(self.name)

         with Then(f"exitcode should be 0") if steps else NullStep():
             assert r.exitcode == 0, error(r.output)

         return r.output

+    def diff_query(self, sql, expected_output, steps=True, step=By,
+                   settings=None, secure=False, *args, **kwargs):
+        """Execute inside the container but from the host and compare its output
+        to file that is located on the host.
+
+        For example:
+            diff <(echo "SELECT * FROM myints FORMAT CSVWithNames" | clickhouse-client -mn) select.out
+
+        :param sql: sql query
+        :param expected_output: path to the expected output
+        """
+        settings = list(settings or [])
+        query_settings = list(settings)
+
+        if hasattr(current().context, "default_query_settings"):
+            query_settings += current().context.default_query_settings
+
+        client = "clickhouse client -n"
+        if secure:
+            client += " -s"
+
+        if len(sql) > 1024:
+            with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
+                query.write(sql)
+                query.flush()
+                command = f"diff <(cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}"
+                for setting in query_settings:
+                    name, value = setting
+                    command += f" --{name} \"{value}\""
+                description = f"""
+                    echo -e \"{sql[:100]}...\" > {query.name}
+                    {command}
+                """
-                with Step("executing command", description=description, format_description=False) if steps else NullStep():
+                with step("executing command", description=description, format_description=False) if steps else NullStep():
+                    try:
+                        r = self.cluster.bash(None)(command, *args, **kwargs)
+                    except ExpectTimeoutError:
+                        self.cluster.close_bash(None)
+        else:
-            command = f"echo -e \"{sql}\" | clickhouse client -n"
-            for setting in settings:
+            command = f"diff <(echo -e \"{sql}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}"
+            for setting in query_settings:
+                name, value = setting
+                command += f" --{name} \"{value}\""
-            with Step("executing command", description=command, format_description=False) if steps else NullStep():
+            with step("executing command", description=command,
+                      format_description=False) if steps else NullStep():
+                try:
+                    r = self.cluster.bash(None)(command, *args, **kwargs)
+                except ExpectTimeoutError:
+                    self.cluster.close_bash(None)
+
+        with Then(f"exitcode should be 0") if steps else NullStep():
+            assert r.exitcode == 0, error(r.output)
+
+    def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False,
+              raise_on_exception=False, step=By, settings=None,
+              retry_count=5, messages_to_retry=None, retry_delay=5, secure=False,
+              *args, **kwargs):
+        """Execute and check query.
+
+        :param sql: sql query
+        :param message: expected message that should be in the output, default: None
+        :param exitcode: expected exitcode, default: None
+        :param steps: wrap query execution in a step, default: True
+        :param no_check: disable exitcode and message checks, default: False
+        :param step: wrapping step class, default: By
+        :param settings: list of settings to be used for the query in the form [(name, value),...], default: None
+        :param retry_count: number of retries, default: 5
+        :param messages_to_retry: list of messages in the query output for
+               which retry should be triggered, default: MESSAGES_TO_RETRY
+        :param retry_delay: number of seconds to sleep before retry, default: 5
+        :param secure: use secure connection, default: False
+        """
+        retry_count = max(0, int(retry_count))
+        retry_delay = max(0, float(retry_delay))
+        settings = list(settings or [])
+        query_settings = list(settings)
+
+        if messages_to_retry is None:
+            messages_to_retry = MESSAGES_TO_RETRY
+
+        if hasattr(current().context, "default_query_settings"):
+            query_settings += current().context.default_query_settings
+
+        client = "clickhouse client -n"
+        if secure:
+            client += " -s"
+
+        if len(sql) > 1024:
+            with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
+                query.write(sql)
+                query.flush()
+                command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}"
+                for setting in query_settings:
+                    name, value = setting
+                    command += f" --{name} \"{value}\""
+                description = f"""
+                    echo -e \"{sql[:100]}...\" > {query.name}
+                    {command}
+                """
+                with step("executing command", description=description, format_description=False) if steps else NullStep():
+                    try:
+                        r = self.cluster.bash(None)(command, *args, **kwargs)
+                    except ExpectTimeoutError:
+                        self.cluster.close_bash(None)
+                        raise
+        else:
+            command = f"echo -e \"{sql}\" | {client}"
+            for setting in query_settings:
+                name, value = setting
+                command += f" --{name} \"{value}\""
+            with step("executing command", description=command, format_description=False) if steps else NullStep():
+                try:
+                    r = self.cluster.bash(self.name)(command, *args, **kwargs)
+                except ExpectTimeoutError:
+                    self.cluster.close_bash(self.name)
+                    raise
+
+        if retry_count and retry_count > 0:
+            if any(msg in r.output for msg in messages_to_retry):
+                time.sleep(retry_delay)
+                return self.query(sql=sql, message=message, exitcode=exitcode,
+                                  steps=steps, no_checks=no_checks,
+                                  raise_on_exception=raise_on_exception, step=step, settings=settings,
+                                  retry_count=retry_count-1, messages_to_retry=messages_to_retry,
+                                  *args, **kwargs)
+
+        if no_checks:
+            return r
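hash_query() makes it cheap to compare large result sets across nodes, since only the sha1sum of the output travels back through the shell. A hypothetical scenario sketch using the methods added above:

from testflows.core import TestScenario

@TestScenario
def outputs_match_across_replicas(self):
    sql = "SELECT number FROM numbers(1000000) ORDER BY number FORMAT TSV"
    hash1 = self.context.cluster.node("clickhouse1").hash_query(sql)
    hash2 = self.context.cluster.node("clickhouse2").hash_query(sql)
    assert hash1 == hash2, f"replica outputs differ: {hash1} != {hash2}"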
@@ -252,19 +475,24 @@ class ClickHouseNode(Node):

         return r


 class Cluster(object):
     """Simple object around docker-compose cluster.
     """
     def __init__(self, local=False,
-            clickhouse_binary_path=None, configs_dir=None,
+            clickhouse_binary_path=None,
+            clickhouse_odbc_bridge_binary_path=None,
+            configs_dir=None,
             nodes=None,
             docker_compose="docker-compose", docker_compose_project_dir=None,
-            docker_compose_file="docker-compose.yml"):
+            docker_compose_file="docker-compose.yml",
+            environ=None):

         self._bash = {}
         self._control_shell = None
-        self.environ = {}
+        self.environ = {} if (environ is None) else environ
         self.clickhouse_binary_path = clickhouse_binary_path
+        self.clickhouse_odbc_bridge_binary_path = clickhouse_odbc_bridge_binary_path
         self.configs_dir = configs_dir
         self.local = local
         self.nodes = nodes or {}
@@ -282,20 +510,54 @@ class Cluster(object):
         if not os.path.exists(self.configs_dir):
             raise TypeError(f"configs directory '{self.configs_dir}' does not exist")

-        # auto set docker-compose project directory
         if docker_compose_project_dir is None:
-            caller_project_dir = os.path.join(caller_dir, "docker-compose")
-            if os.path.exists(caller_project_dir):
-                docker_compose_project_dir = caller_project_dir
+            raise TypeError("docker compose directory must be specified.")

         docker_compose_file_path = os.path.join(docker_compose_project_dir or "", docker_compose_file)

         if not os.path.exists(docker_compose_file_path):
-            raise TypeError("docker compose file '{docker_compose_file_path}' does not exist")
+            raise TypeError(f"docker compose file '{docker_compose_file_path}' does not exist")

+        if self.clickhouse_binary_path and self.clickhouse_binary_path.startswith("docker://"):
+            if current().context.clickhouse_version is None:
+                try:
+                    current().context.clickhouse_version = self.clickhouse_binary_path.split(":")[2]
+                    debug(f"auto setting clickhouse version to {current().context.clickhouse_version}")
+                except IndexError:
+                    current().context.clickhouse_version = None
+            self.clickhouse_binary_path, self.clickhouse_odbc_bridge_binary_path = self.get_clickhouse_binary_from_docker_container(
+                self.clickhouse_binary_path)

         self.docker_compose += f" --ansi never --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\""
         self.lock = threading.Lock()

+    def get_clickhouse_binary_from_docker_container(self, docker_image,
+            container_clickhouse_binary_path="/usr/bin/clickhouse",
+            container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge",
+            host_clickhouse_binary_path=None,
+            host_clickhouse_odbc_bridge_binary_path=None):
+        """Get clickhouse-server and clickhouse-odbc-bridge binaries
+        from some Docker container.
+        """
+        docker_image = docker_image.split("docker://", 1)[-1]
+        docker_container_name = str(uuid.uuid1())
+
+        if host_clickhouse_binary_path is None:
+            host_clickhouse_binary_path = os.path.join(tempfile.gettempdir(), f"{docker_image.rsplit('/',1)[-1].replace(':','_')}")
+
+        if host_clickhouse_odbc_bridge_binary_path is None:
+            host_clickhouse_odbc_bridge_binary_path = host_clickhouse_binary_path + "_odbc_bridge"
+
+        with Given("I get ClickHouse server binary from docker container", description=f"{docker_image}"):
+            with Shell() as bash:
+                bash.timeout = 300
+                bash(f"docker run -d --name \"{docker_container_name}\" {docker_image} | tee")
+                bash(f"docker cp \"{docker_container_name}:{container_clickhouse_binary_path}\" \"{host_clickhouse_binary_path}\"")
+                bash(f"docker cp \"{docker_container_name}:{container_clickhouse_odbc_bridge_binary_path}\" \"{host_clickhouse_odbc_bridge_binary_path}\"")
+                bash(f"docker stop \"{docker_container_name}\"")
+
+        return host_clickhouse_binary_path, host_clickhouse_odbc_bridge_binary_path

     @property
     def control_shell(self, timeout=300):
         """Must be called with self.lock.acquired.
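With this change, `--clickhouse-binary-path docker://<image>:<tag>` extracts the binaries out of an image and auto-derives the server version from the tag, which is the third ":"-separated field of the whole argument. A sketch of that parsing:

binary_path = "docker://clickhouse/clickhouse-server:21.12"  # hypothetical image
try:
    version = binary_path.split(":")[2]  # "21.12"
except IndexError:
    version = None  # untagged image: version stays unknown
print(version)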
@@ -310,25 +572,42 @@ class Cluster(object):
                 shell.timeout = 30
                 shell("echo 1")
                 break
-            except:
+            except IOError:
+                raise
+            except Exception as exc:
                 shell.__exit__(None, None, None)
                 if time.time() - time_start > timeout:
                     raise RuntimeError(f"failed to open control shell")
         self._control_shell = shell
         return self._control_shell

+    def close_control_shell(self):
+        """Must be called with self.lock.acquired.
+        """
+        if self._control_shell is None:
+            return
+        shell = self._control_shell
+        self._control_shell = None
+        shell.__exit__(None, None, None)
+
     def node_container_id(self, node, timeout=300):
         """Must be called with self.lock acquired.
         """
         container_id = None
         time_start = time.time()
         while True:
-            c = self.control_shell(f"{self.docker_compose} ps -q {node}")
-            container_id = c.output.strip()
-            if c.exitcode == 0 and len(container_id) > 1:
-                break
-            if time.time() - time_start > timeout:
-                raise RuntimeError(f"failed to get docker container id for the {node} service")
+            try:
+                c = self.control_shell(f"{self.docker_compose} ps -q {node}", timeout=timeout)
+                container_id = c.output.strip()
+                if c.exitcode == 0 and len(container_id) > 1:
+                    break
+            except IOError:
+                raise
+            except ExpectTimeoutError:
+                self.close_control_shell()
+                timeout = timeout - (time.time() - time_start)
+                if timeout <= 0:
+                    raise RuntimeError(f"failed to get docker container id for the {node} service")
         return container_id

     def shell(self, node, timeout=300):
@@ -352,7 +631,9 @@ class Cluster(object):
                 shell.timeout = 30
                 shell("echo 1")
                 break
-            except:
+            except IOError:
+                raise
+            except Exception as exc:
                 shell.__exit__(None, None, None)
                 if time.time() - time_start > timeout:
                     raise RuntimeError(f"failed to open bash to node {node}")
@@ -387,7 +668,9 @@ class Cluster(object):
                 self._bash[id].timeout = 30
                 self._bash[id]("echo 1")
                 break
-            except:
+            except IOError:
+                raise
+            except Exception as exc:
                 self._bash[id].__exit__(None, None, None)
                 if time.time() - time_start > timeout:
                     raise RuntimeError(f"failed to open bash to node {node}")
@@ -459,13 +742,26 @@ class Cluster(object):
                 else:
                     self._bash[id] = shell
         finally:
-            cmd = self.command(None, f"{self.docker_compose} down --timeout 60", bash=bash, timeout=timeout)
+            cmd = self.command(None, f"{self.docker_compose} down -v --remove-orphans --timeout 60", bash=bash, timeout=timeout)
             with self.lock:
                 if self._control_shell:
                     self._control_shell.__exit__(None, None, None)
                     self._control_shell = None
             return cmd

+    def temp_path(self):
+        """Return temporary folder path.
+        """
+        p = f"{self.environ['CLICKHOUSE_TESTS_DIR']}/_temp"
+        if not os.path.exists(p):
+            os.mkdir(p)
+        return p
+
+    def temp_file(self, name):
+        """Return absolute temporary file path.
+        """
+        return f"{os.path.join(self.temp_path(), name)}"
+
     def up(self, timeout=30*60):
         if self.local:
             with Given("I am running in local mode"):
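temp_path()/temp_file() give tests a scratch area under `${CLICKHOUSE_TESTS_DIR}/_temp` on the host. Hypothetical usage, assuming `cluster` is a Cluster instance whose environ carries CLICKHOUSE_TESTS_DIR:

path = cluster.temp_file("users.ldif")  # -> <CLICKHOUSE_TESTS_DIR>/_temp/users.ldif
with open(path, "w") as f:
    f.write("dn: cn=testuser,dc=company,dc=com\n")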
@@ -477,7 +773,7 @@ class Cluster(object):
             with And("I set all the necessary environment variables"):
                 self.environ["COMPOSE_HTTP_TIMEOUT"] = "300"
                 self.environ["CLICKHOUSE_TESTS_SERVER_BIN_PATH"] = self.clickhouse_binary_path
-                self.environ["CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH"] = os.path.join(
+                self.environ["CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH"] = self.clickhouse_odbc_bridge_binary_path or os.path.join(
                     os.path.dirname(self.clickhouse_binary_path), "clickhouse-odbc-bridge")
                 self.environ["CLICKHOUSE_TESTS_DIR"] = self.configs_dir
@@ -525,11 +821,14 @@ class Cluster(object):
             if cmd.exitcode != 0 or "is unhealthy" in cmd.output or "Exit" in ps_cmd.output:
                 fail("could not bring up docker-compose cluster")

-        with Then("wait all nodes report healhy"):
+        with Then("wait all nodes report healthy"):
             for name in self.nodes["clickhouse"]:
                 self.node(name).wait_healthy()
+                if name.startswith("clickhouse"):
+                    self.node(name).start_clickhouse()

-    def command(self, node, command, message=None, exitcode=None, steps=True, bash=None, *args, **kwargs):
+    def command(self, node, command, message=None, exitcode=None, steps=True,
+                bash=None, no_checks=False, use_error=True, *args, **kwargs):
         """Execute and check command.

         :param node: name of the service
         :param command: command
@@ -545,10 +844,16 @@ class Cluster(object):
             except ExpectTimeoutError:
                 self.close_bash(node)
                 raise

+        if no_checks:
+            return r
+
         if exitcode is not None:
             with Then(f"exitcode should be {exitcode}", format_name=False) if steps else NullStep():
                 assert r.exitcode == exitcode, error(r.output)

         if message is not None:
             with Then(f"output should contain message", description=message, format_description=False) if steps else NullStep():
                 assert message in r.output, error(r.output)

         return r
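The new `no_checks` flag lets callers probe a command and branch on the result instead of failing the step. Hypothetical usage (the network name is made up):

r = cluster.command(None, "docker network inspect test_network", no_checks=True)
if r.exitcode != 0:
    cluster.command(None, "docker network create test_network", exitcode=0)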
@@ -1,23 +1,76 @@
 import os
 import uuid
 import time
+import xml.etree.ElementTree as xmltree
+import packaging.version as pkg_version
+from collections import namedtuple

 import testflows.settings as settings
 from testflows.core import *
 from testflows.asserts import error
+from testflows.core.name import basename, parentname
+from testflows._core.testtype import TestSubType

+def check_clickhouse_version(version):
+    """Compare ClickHouse version.
+    """
+    def check(test):
+        if getattr(test.context, "clickhouse_version", None) is None:
+            return False
+
+        clickhouse_version = pkg_version.parse(str(test.context.clickhouse_version))
+
+        if version.startswith("=="):
+            return clickhouse_version == pkg_version.parse(str(version.split("==",1)[-1]))
+        elif version.startswith(">="):
+            return clickhouse_version >= pkg_version.parse(str(version.split(">=",1)[-1]))
+        elif version.startswith("<="):
+            return clickhouse_version <= pkg_version.parse(str(version.split("<=",1)[-1]))
+        elif version.startswith("="):
+            return clickhouse_version == pkg_version.parse(str(version.split("=",1)[-1]))
+        elif version.startswith(">"):
+            return clickhouse_version > pkg_version.parse(str(version.split(">",1)[-1]))
+        elif version.startswith("<"):
+            return clickhouse_version < pkg_version.parse(str(version.split("<",1)[-1]))
+        else:
+            return clickhouse_version == pkg_version.parse(str(version))
+
+    return check
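check_clickhouse_version() returns a closure so it can be evaluated lazily against a test's context. Hypothetical usage with the testflows `current()` helper:

from testflows.core import current, note

# assumes check_clickhouse_version() from above is in scope
if check_clickhouse_version(">=21.12")(current()):
    note("running against a 21.12+ server")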
+def getuid(with_test_name=False):
+    if not with_test_name:
+        return str(uuid.uuid1()).replace('-', '_')
+
+    if current().subtype == TestSubType.Example:
+        testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}"
+    else:
+        testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}"
+
+    return testname + "_" + str(uuid.uuid1()).replace('-', '_')


 @TestStep(Given)
 def instrument_clickhouse_server_log(self, node=None, test=None,
-        clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log"):
+        clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", always_dump=False):
     """Instrument clickhouse-server.log for the current test (default)
     by adding start and end messages that include test name to log
     of the specified node. If we are in the debug mode and the test
     fails then dump the messages from the log for this test.
+
+    :param always_dump: always dump clickhouse log after test, default: `False`
     """
     if test is None:
         test = current()

     if node is None:
         node = self.context.node

     with By("getting current log size"):
         cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
-        start_logsize = cmd.output.split(" ")[0].strip()
+        if cmd.output == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory":
+            start_logsize = 0
+        else:
+            start_logsize = cmd.output.split(" ")[0].strip()

     try:
         with And("adding test name start message to the clickhouse-server.log"):
@@ -29,14 +82,394 @@ def instrument_clickhouse_server_log(self, node=None, test=None,
         return

     with Finally("adding test name end message to the clickhouse-server.log", flags=TE):
         node.command(f"echo -e \"\\n-- end: {test.name} --\\n\" >> {clickhouse_server_log}")

     with And("getting current log size at the end of the test"):
         cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
         end_logsize = cmd.output.split(" ")[0].strip()

-    with And("checking if test has failing result"):
-        if settings.debug and not self.parent.result:
-            with Then("dumping clickhouse-server.log for this test"):
-                node.command(f"tail -c +{start_logsize} {clickhouse_server_log}"
-                             f" | head -c {int(end_logsize) - int(start_logsize)}")
+    dump_log = always_dump or (settings.debug and not self.parent.result)
+
+    if dump_log:
+        with Then("dumping clickhouse-server.log for this test"):
+            node.command(f"tail -c +{start_logsize} {clickhouse_server_log}"
+                         f" | head -c {int(end_logsize) - int(start_logsize)}")
+
+
+xml_with_utf8 = '<?xml version="1.0" encoding="utf-8"?>\n'
+
+
+def xml_indent(elem, level=0, by=" "):
+    i = "\n" + level * by
+    if len(elem):
+        if not elem.text or not elem.text.strip():
+            elem.text = i + by
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+        for elem in elem:
+            xml_indent(elem, level + 1)
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+    else:
+        if level and (not elem.tail or not elem.tail.strip()):
+            elem.tail = i
+
+
+def xml_append(root, tag, text):
+    element = xmltree.Element(tag)
+    element.text = text
+    root.append(element)
+    return element
+
+
+class Config:
+    def __init__(self, content, path, name, uid, preprocessed_name):
+        self.content = content
+        self.path = path
+        self.name = name
+        self.uid = uid
+        self.preprocessed_name = preprocessed_name
+
+
+class KeyWithAttributes:
+    def __init__(self, name, attributes):
+        """XML key with attributes.
+
+        :param name: key name
+        :param attributes: dictionary of attributes {name: value, ...}
+        """
+        self.name = name
+        self.attributes = dict(attributes)
+
+
+def create_xml_config_content(entries, config_file, config_d_dir="/etc/clickhouse-server/config.d"):
+    """Create XML configuration file from a dictionary.
+
+    :param entries: dictionary that defines xml
+    :param config_file: name of the config file
+    :param config_d_dir: config.d directory path, default: `/etc/clickhouse-server/config.d`
+    """
+    uid = getuid()
+    path = os.path.join(config_d_dir, config_file)
+    name = config_file
+    root = xmltree.Element("clickhouse")
+    root.append(xmltree.Comment(text=f"config uid: {uid}"))
+
+    def create_xml_tree(entries, root):
+        for k, v in entries.items():
+            if isinstance(k, KeyWithAttributes):
+                xml_element = xmltree.Element(k.name)
+                for attr_name, attr_value in k.attributes.items():
+                    xml_element.set(attr_name, attr_value)
+                if type(v) is dict:
+                    create_xml_tree(v, xml_element)
+                elif type(v) in (list, tuple):
+                    for e in v:
+                        create_xml_tree(e, xml_element)
+                else:
+                    xml_element.text = v
+                root.append(xml_element)
+            elif type(v) is dict:
+                xml_element = xmltree.Element(k)
+                create_xml_tree(v, xml_element)
+                root.append(xml_element)
+            elif type(v) in (list, tuple):
+                xml_element = xmltree.Element(k)
+                for e in v:
+                    create_xml_tree(e, xml_element)
+                root.append(xml_element)
+            else:
+                xml_append(root, k, v)
+
+    create_xml_tree(entries, root)
+    xml_indent(root)
+    content = xml_with_utf8 + str(
+        xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"),
+        "utf-8")
+
+    return Config(content, path, name, uid, "config.xml")
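create_xml_config_content() turns a plain dictionary into a `<clickhouse>` config.d file, tagging it with a uid comment so later steps can grep the preprocessed config for it. A hypothetical LDAP server definition:

# Hypothetical entries; key names mirror ClickHouse's <ldap_servers> layout.
entries = {
    "ldap_servers": {
        "openldap1": {
            "host": "openldap1",
            "port": "389",
            "enable_tls": "no",
        }
    }
}
config = create_xml_config_content(entries, config_file="ldap_servers.xml")
print(config.content)  # XML prefixed with a "config uid: ..." comment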
+def add_invalid_config(config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None):
+    """Check that ClickHouse errors when trying to load invalid configuration file.
+    """
+    cluster = current().context.cluster
+    node = current().context.node
+
+    try:
+        with Given("I prepare the error log by writing empty lines into it"):
+            node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail))
+
+        with When("I add the config", description=config.path):
+            command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
+            node.command(command, steps=False, exitcode=0)
+
+        with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
+            started = time.time()
+            command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"
+            while time.time() - started < timeout:
+                exitcode = node.command(command, steps=False).exitcode
+                if exitcode == 0:
+                    break
+                time.sleep(1)
+            assert exitcode == 0, error()
+
+        if restart:
+            with When("I restart ClickHouse to apply the config changes"):
+                node.restart_clickhouse(safe=False, wait_healthy=False, user=user)
+
+    finally:
+        if recover_config is None:
+            with Finally(f"I remove {config.name}"):
+                with By("removing invalid configuration file"):
+                    system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name,
+                        "config.d", config.path.split("config.d/")[-1])
+                    cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0)
+
+                if restart:
+                    with And("restarting ClickHouse"):
+                        node.restart_clickhouse(safe=False, user=user)
+        else:
+            with Finally(f"I change {config.name}"):
+                with By("changing invalid configuration file"):
+                    system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name,
+                        "config.d", config.path.split("config.d/")[-1])
+                    cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0)
+                    command = f"cat <<HEREDOC > {system_config_path}\n{recover_config.content}\nHEREDOC"
+                    cluster.command(None, command, timeout=timeout, exitcode=0)
+
+                if restart:
+                    with And("restarting ClickHouse"):
+                        node.restart_clickhouse(safe=False, user=user)
+
+    with Then("error log should contain the expected error message"):
+        started = time.time()
+        command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\""
+        while time.time() - started < timeout:
+            exitcode = node.command(command, steps=False).exitcode
+            if exitcode == 0:
+                break
+            time.sleep(1)
+        assert exitcode == 0, error()
+
+
+def add_config(config, timeout=300, restart=False, modify=False, node=None, user=None, wait_healthy=True,
+        check_preprocessed = True):
+    """Add dynamic configuration file to ClickHouse.
+
+    :param config: configuration file description
+    :param timeout: timeout, default: 300 sec
+    :param restart: restart server, default: False
+    :param modify: only modify configuration file, default: False
+    """
+    if node is None:
+        node = current().context.node
+    cluster = current().context.cluster
+
+    def check_preprocessed_config_is_updated(after_removal=False):
+        """Check that preprocessed config is updated.
+        """
+        started = time.time()
+        command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"
+
+        while time.time() - started < timeout:
+            exitcode = node.command(command, steps=False).exitcode
+            if after_removal:
+                if exitcode == 1:
+                    break
+            else:
+                if exitcode == 0:
+                    break
+            time.sleep(1)
+
+        if settings.debug:
+            node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}")
+
+        if after_removal:
+            assert exitcode == 1, error()
+        else:
+            assert exitcode == 0, error()
+
+    def wait_for_config_to_be_loaded(user=None):
+        """Wait for config to be loaded.
+        """
+        if restart:
+            with When("I close terminal to the node to be restarted"):
+                bash.close()
+
+            with And("I stop ClickHouse to apply the config changes"):
+                node.stop_clickhouse(safe=False)
+
+            with And("I get the current log size"):
+                cmd = node.cluster.command(None,
+                    f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log")
+                logsize = cmd.output.split(" ")[0].strip()
+
+            with And("I start ClickHouse back up"):
+                node.start_clickhouse(user=user, wait_healthy=wait_healthy)
+
+            with Then("I tail the log file from using previous log size as the offset"):
+                bash.prompt = bash.__class__.prompt
+                bash.open()
+                bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log")
+
+        with Then("I wait for config reload message in the log file"):
+            if restart:
+                bash.expect(
+                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
+                    timeout=timeout)
+            else:
+                bash.expect(
+                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration",
+                    timeout=timeout)
+
+    try:
+        with Given(f"{config.name}"):
+            if settings.debug:
+                with When("I output the content of the config"):
+                    debug(config.content)
+
+            with node.cluster.shell(node.name) as bash:
+                bash.expect(bash.prompt)
+                bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+                # make sure tail process is launched and started to follow the file
+                bash.expect("<==")
+                bash.expect("\n")
+
+                with When("I add the config", description=config.path):
+                    command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
+                    node.command(command, steps=False, exitcode=0)
+
+                if check_preprocessed:
+                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
+                        check_preprocessed_config_is_updated()
+
+                with And("I wait for config to be reloaded"):
+                    wait_for_config_to_be_loaded(user=user)
+        yield
+    finally:
+        if not modify:
+            with Finally(f"I remove {config.name} on {node.name}"):
+                with node.cluster.shell(node.name) as bash:
+                    bash.expect(bash.prompt)
+                    bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+                    # make sure tail process is launched and started to follow the file
+                    bash.expect("<==")
+                    bash.expect("\n")
+
+                    with By("removing the config file", description=config.path):
+                        node.command(f"rm -rf {config.path}", exitcode=0)
+
+                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
+                        check_preprocessed_config_is_updated(after_removal=True)
+
+                    with And("I wait for config to be reloaded"):
+                        wait_for_config_to_be_loaded()
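Because add_config() is a generator (it yields between setup and teardown), it slots naturally into a Given step that removes the config again on exit. A hypothetical wrapper:

from testflows.core import TestStep, Given

@TestStep(Given)
def ldap_servers(self, entries, config_file="ldap_servers.xml"):
    # assumes create_xml_config_content()/add_config() from above are in scope
    config = create_xml_config_content(entries, config_file=config_file)
    yield from add_config(config, restart=False)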
+@TestStep(When)
+def copy(self, dest_node, src_path, dest_path, bash=None, binary=False, eof="EOF", src_node=None):
+    """Copy file from source to destination node.
+    """
+    if binary:
+        raise NotImplementedError("not yet implemented; need to use base64 encoding")
+
+    bash = self.context.cluster.bash(node=src_node)
+
+    cmd = bash(f"cat {src_path}")
+
+    assert cmd.exitcode == 0, error()
+    contents = cmd.output
+
+    dest_node.command(f"cat << {eof} > {dest_path}\n{contents}\n{eof}")
+
+
+@TestStep(Given)
+def add_user_to_group_on_node(self, node=None, group="clickhouse", username="clickhouse"):
+    """Add user {username} into group {group}.
+    """
+    if node is None:
+        node = self.context.node
+
+    node.command(f"usermod -g {group} {username}", exitcode=0)
+
+
+@TestStep(Given)
+def change_user_on_node(self, node=None, username="clickhouse"):
+    """Change user on node.
+    """
+    if node is None:
+        node = self.context.node
+    try:
+        node.command(f"su {username}", exitcode=0)
+        yield
+    finally:
+        node.command("exit", exitcode=0)
+
+
+@TestStep(Given)
+def add_user_on_node(self, node=None, groupname=None, username="clickhouse"):
+    """Create user on node with group specifying.
+    """
+    if node is None:
+        node = self.context.node
+    try:
+        if groupname is None:
+            node.command(f"useradd -s /bin/bash {username}", exitcode=0)
+        else:
+            node.command(f"useradd -g {groupname} -s /bin/bash {username}", exitcode=0)
+        yield
+    finally:
+        node.command(f"deluser {username}", exitcode=0)
+
+
+@TestStep(Given)
+def add_group_on_node(self, node=None, groupname="clickhouse"):
+    """Create group on node
+    """
+    if node is None:
+        node = self.context.node
+    try:
+        node.command(f"groupadd {groupname}", exitcode=0)
+        yield
+    finally:
+        node.command(f"delgroup clickhouse")
+
+
+@TestStep(Given)
+def create_file_on_node(self, path, content, node=None):
+    """Create file on node.
+
+    :param path: file path
+    :param content: file content
+    """
+    if node is None:
+        node = self.context.node
+    try:
+        with By(f"creating file {path}"):
+            node.command(f"cat <<HEREDOC > {path}\n{content}\nHEREDOC", exitcode=0)
+        yield path
+    finally:
+        with Finally(f"I remove {path}"):
+            node.command(f"rm -rf {path}", exitcode=0)
+
+
+@TestStep(Given)
+def set_envs_on_node(self, envs, node=None):
+    """Set environment variables on node.
+
+    :param envs: dictionary of env variables key=value
+    """
+    if node is None:
+        node = self.context.node
+    try:
+        with By("setting envs"):
+            for key, value in envs.items():
+                node.command(f"export {key}={value}", exitcode=0)
+        yield
+    finally:
+        with Finally(f"I unset envs"):
+            for key in envs:
+                node.command(f"unset {key}", exitcode=0)
@@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -37,20 +37,28 @@ xfails = {
    RQ_SRS_007_LDAP_Authentication("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """ClickHouse integration with LDAP regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_authentication_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"))
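Each regression module touched by this commit repeats the same aarch64 environment selection seen above; a hypothetical shared helper (not part of this commit) shows the pattern in one place:

```python
import os
from platform import processor as current_cpu


def compose_env_dir(module_dir):
    """Pick the docker-compose project dir for the current CPU.

    Assumes `<suite>_env` and `<suite>_env_arm64` directories exist
    next to the regression module, as added by this commit.
    """
    folder_name = os.path.basename(module_dir)
    suffix = "_env_arm64" if current_cpu() == "aarch64" else "_env"
    return os.path.join(module_dir, folder_name + suffix)
```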
@@ -2,7 +2,6 @@
import random
import time

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error
from ldap.authentication.tests.common import *
@@ -14,33 +14,7 @@ import testflows.settings as settings
from testflows.core import *
from testflows.asserts import error

def getuid():
    return str(uuid.uuid1()).replace('-', '_')

xml_with_utf8 = '<?xml version="1.0" encoding="utf-8"?>\n'

def xml_indent(elem, level=0, by=" "):
    i = "\n" + level * by
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + by
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            xml_indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

def xml_append(root, tag, text):
    element = xmltree.Element(tag)
    element.text = text
    root.append(element)
    return element

Config = namedtuple("Config", "content path name uid preprocessed_name")
from helpers.common import xml_indent, xml_with_utf8, xml_append, add_config, getuid, Config

ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits
@@ -78,115 +52,6 @@ def restart(node=None, safe=False, timeout=300):
        f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
        timeout=timeout)

def add_config(config, timeout=300, restart=False, modify=False):
    """Add dynamic configuration file to ClickHouse.

    :param node: node
    :param config: configuration file description
    :param timeout: timeout, default: 20 sec
    """
    node = current().context.node
    cluster = current().context.cluster

    def check_preprocessed_config_is_updated(after_removal=False):
        """Check that preprocessed config is updated.
        """
        started = time.time()
        command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"

        while time.time() - started < timeout:
            exitcode = node.command(command, steps=False).exitcode
            if after_removal:
                if exitcode == 1:
                    break
            else:
                if exitcode == 0:
                    break
            time.sleep(1)

        if settings.debug:
            node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}")

        if after_removal:
            assert exitcode == 1, error()
        else:
            assert exitcode == 0, error()

    def wait_for_config_to_be_loaded():
        """Wait for config to be loaded.
        """
        if restart:
            with When("I close terminal to the node to be restarted"):
                bash.close()

            with And("I stop ClickHouse to apply the config changes"):
                node.stop(safe=False)

            with And("I get the current log size"):
                cmd = node.cluster.command(None,
                    f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log")
                logsize = cmd.output.split(" ")[0].strip()

            with And("I start ClickHouse back up"):
                node.start()

            with Then("I tail the log file from using previous log size as the offset"):
                bash.prompt = bash.__class__.prompt
                bash.open()
                bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log")

        with Then("I wait for config reload message in the log file"):
            if restart:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
                    timeout=timeout)
            else:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration",
                    timeout=timeout)

    try:
        with Given(f"{config.name}"):
            if settings.debug:
                with When("I output the content of the config"):
                    debug(config.content)

            with node.cluster.shell(node.name) as bash:
                bash.expect(bash.prompt)
                bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                # make sure tail process is launched and started to follow the file
                bash.expect("<==")
                bash.expect("\n")

                with When("I add the config", description=config.path):
                    command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
                    node.command(command, steps=False, exitcode=0)

                with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                    check_preprocessed_config_is_updated()

                with And("I wait for config to be reloaded"):
                    wait_for_config_to_be_loaded()
        yield
    finally:
        if not modify:
            with Finally(f"I remove {config.name}"):
                with node.cluster.shell(node.name) as bash:
                    bash.expect(bash.prompt)
                    bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                    # make sure tail process is launched and started to follow the file
                    bash.expect("<==")
                    bash.expect("\n")

                    with By("removing the config file", description=config.path):
                        node.command(f"rm -rf {config.path}", exitcode=0)

                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                        check_preprocessed_config_is_updated(after_removal=True)

                    with And("I wait for config to be reloaded"):
                        wait_for_config_to_be_loaded()

def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml"):
    """Create LDAP servers configuration content.
    """
@@ -210,19 +75,19 @@ def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-se
    return Config(content, path, name, uid, "config.xml")

@contextmanager
def modify_config(config, restart=False):
def modify_config(config, restart=False, node=None):
    """Apply updated configuration file.
    """
    return add_config(config, restart=restart, modify=True)
    return add_config(config, restart=restart, modify=True, node=node)

@contextmanager
def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml",
        timeout=300, restart=False, config=None):
        timeout=300, restart=False, config=None, node=None):
    """Add LDAP servers configuration.
    """
    if config is None:
        config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml"):
    """Create LDAP users configuration file content.
@@ -247,11 +112,12 @@ def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-serve

    return Config(content, path, name, uid, "users.xml")

def add_users_identified_with_ldap(*users):
def add_users_identified_with_ldap(*users, node=None):
    """Add one or more users that are identified via
    an ldap server using RBAC.
    """
    node = current().context.node
    if node is None:
        node = current().context.node
    try:
        with Given("I create users"):
            for user in users:
@@ -265,17 +131,20 @@ def add_users_identified_with_ldap(*users):

@contextmanager
def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users.d",
        config_file=None, timeout=300, restart=True, config=None, rbac=False):
        config_file=None, timeout=300, restart=True, config=None, rbac=False, node=None):
    """Add LDAP authenticated users.
    """
    if node is None:
        node = current().context.node

    if rbac:
        return add_users_identified_with_ldap(*users)
        return add_users_identified_with_ldap(*users, node=node)
    else:
        if config_file is None:
            config_file = f"ldap_users_{getuid()}.xml"
        if config is None:
            config = create_ldap_users_config_content(*users, config_d_dir=config_d_dir, config_file=config_file)
        return add_config(config, timeout=timeout, restart=restart)
        return add_config(config, timeout=timeout, restart=restart, node=node)

def invalid_server_config(servers, message=None, tail=30, timeout=300):
    """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file.
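A hypothetical call site for the updated helpers showing the new explicit `node` targeting; the server parameters are illustrative and mirror the suite's `openldap1` service:

```python
from ldap.authentication.tests.common import ldap_servers

servers = {
    "openldap1": {
        "host": "openldap1",
        "port": "389",
        "enable_tls": "no",
        "auth_dn_prefix": "cn=",
        "auth_dn_suffix": ",ou=users,dc=company,dc=com",
    }
}

# configure LDAP servers on a chosen node; when node is None the
# helpers fall back to current().context.node as before
with ldap_servers(servers, node=self.context.cluster.node("clickhouse2")):
    pass  # run checks against clickhouse2 here
```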
@@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@@ -8,6 +8,7 @@ append_path(sys.path, "..", "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.external_user_directory.requirements import *
from helpers.common import check_clickhouse_version

# Cross-outs of known fails
xfails = {
@@ -27,6 +28,11 @@ xfails = {
        [(Fail, "can't get it to work")]
}

ffails = {
    "user authentications/verification cooldown performance/:":
        (Skip, "causes timeout on 21.8", (lambda test: check_clickhouse_version(">=21.8")(test) and check_clickhouse_version("<21.9")(test)))
}

@TestFeature
@Name("external user directory")
@ArgumentParser(argparser)
@@ -37,20 +43,29 @@ xfails = {
    RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
@FFails(ffails)
def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """ClickHouse LDAP external user directory regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_external_user_directory_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"))
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import random

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error
@@ -27,8 +27,9 @@ def table(name, create_statement, on_cluster=False):
        node.query(f"DROP TABLE IF EXISTS {name}")

@contextmanager
def rbac_users(*users):
    node = current().context.node
def rbac_users(*users, node=None):
    if node is None:
        node = current().context.node
    try:
        with Given("I have local users"):
            for user in users:
@@ -42,8 +43,9 @@ def rbac_users(*users):
            node.query(f"DROP USER IF EXISTS {user['cn']}")

@contextmanager
def rbac_roles(*roles):
    node = current().context.node
def rbac_roles(*roles, node=None):
    if node is None:
        node = current().context.node
    try:
        with Given("I have roles"):
            for role in roles:
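A hypothetical call site for the updated `rbac_roles` signature; the role and node names are illustrative:

```python
# create the role on a specific node; omitting node keeps the old
# behavior of using current().context.node
with rbac_roles("ldap_admin", node=self.context.cluster.node("clickhouse1")):
    pass  # grant privileges to the role here
```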
@@ -1,6 +1,5 @@
import random

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error
@@ -4,16 +4,17 @@ from testflows.core import *

append_path(sys.path, "..")

from helpers.common import Pool, join
from helpers.argparser import argparser

@TestModule
@Name("ldap")
@ArgumentParser(argparser)
def regression(self, local, clickhouse_binary_path, parallel=None, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """ClickHouse LDAP integration regression module.
    """
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path}
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "clickhouse_version": clickhouse_version}

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
@@ -2,7 +2,8 @@
<openSSL>
    <server>
        <certificateFile>/etc/clickhouse-server/ssl/server.crt</certificateFile>
        <privateKeyFile>/etc/clickhouse-server/ssl/server.key</privateKeyFile>
        <privateKeyFile>/etc/clickhouse-server/ssl/server.key</privateKeyFile>
        <dhParamsFile>/etc/clickhouse-server/ssl/dhparam.pem</dhParamsFile>
        <verificationMode>none</verificationMode>
        <cacheSessions>true</cacheSessions>
    </server>
@@ -0,0 +1,4 @@
<clickhouse>
    <users>
    </users>
</clickhouse>
@@ -8,13 +8,22 @@ append_path(sys.path, "..", "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.role_mapping.requirements import *
from helpers.common import check_clickhouse_version

# Cross-outs of known fails
xfails = {
    "mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "user dn detection/mapping/roles removed and added in parallel":
        [(Fail, "known bug")]
    "mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "user dn detection/mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "cluster secret/external user directory/:/:/cluster with secret/ldap user/:mapped True/select using mapped role/with privilege on source and distributed":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/34130")]
}

# Force results without running the test
ffails = {
    "cluster secret":
        (Skip, "feature available on 20.10+", check_clickhouse_version("<20.10"))
}

@TestFeature
@@ -27,26 +36,36 @@ xfails = {
    RQ_SRS_014_LDAP_RoleMapping("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
@FFails(ffails)
def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """ClickHouse LDAP role mapping regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_role_mapping_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"), name="ldap sanity")
        Feature(run=load("ldap.role_mapping.tests.server_config", "feature"))
        Feature(run=load("ldap.role_mapping.tests.mapping", "feature"))
        #Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature"))
        Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature"))
        Feature(run=load("ldap.role_mapping.tests.cluster_secret", "feature"))

if main():
    regression()
@@ -76,6 +76,8 @@
* 4.8.8.3 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithutf8characters)
* 4.8.8.4 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialxmlcharacters)
* 4.8.8.5 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialregexcharacters)
* 4.9 [Cluster With And Without Secret](#cluster-with-and-without-secret)
* 4.9.8.1 [RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable](#rqsrs-014ldapclusterwithandwithoutsecretdistributedtable)
* 5 [References](#references)

## Revision History
@@ -548,6 +550,67 @@ version: 1.0
[ClickHouse] SHALL support regex special characters as the value of the `<prefix>` parameter in
the `<user directories><ldap><role_mapping>` section of the `config.xml`.

### Cluster With And Without Secret

##### RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable
version: 1.0

[ClickHouse] SHALL support propagating query user roles and their corresponding privileges
when using `Distributed` table to the remote servers for the users that are authenticated
using LDAP either via external user directory or defined in `users.xml` when
cluster is configured with and without `<secret>`.

For example,

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <secret>qwerty123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

or

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

## References

* **Access Control and Account Management**: https://clickhouse.com/docs/en/operations/access-rights/
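An editorial sketch (not part of the SRS) of how this requirement might be exercised from the test suite; the database, table, and user names are illustrative and assume an LDAP user `myuser` whose mapped role has `SELECT` on both tables:

```python
node = self.context.cluster.node("clickhouse1")

# source table on every shard plus a Distributed table over it,
# using the `cluster` definition from the examples above; assumes
# the dwh database already exists on all shards
node.query("CREATE TABLE dwh.source ON CLUSTER 'cluster' (id UInt64) "
    "ENGINE = MergeTree ORDER BY id")
node.query("CREATE TABLE dwh.dist ON CLUSTER 'cluster' AS dwh.source "
    "ENGINE = Distributed('cluster', dwh, source)")

# succeeds only if the user's roles propagate to the remote shards
node.query("SELECT count() FROM dwh.dist",
    settings=[("user", "myuser"), ("password", "myuser")])
```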
@@ -1,6 +1,6 @@
# These requirements were auto generated
# from software requirements specification (SRS)
# document by TestFlows v1.6.210505.1133630.
# document by TestFlows v1.7.220210.1155232.
# Do not edit by hand but re-generate instead
# using 'tfs requirements generate' command.
from testflows.core import Specification
@@ -913,6 +913,75 @@ RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithS
    level=4,
    num='4.8.8.5')

RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable = Requirement(
    name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support propagating query user roles and their corresponding privileges\n'
        'when using `Distributed` table to the remote servers for the users that are authenticated\n'
        'using LDAP either via external user directory or defined in `users.xml` when\n'
        'cluster is configured with and without `<secret>`.\n'
        '\n'
        'For example,\n'
        '\n'
        '```xml\n'
        '<clickhouse>\n'
        '    <remote_servers>\n'
        '        <cluster>\n'
        '            <secret>qwerty123</secret>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host1</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host2</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '        </cluster>\n'
        '    </remote_servers>\n'
        '</clickhouse>\n'
        '```\n'
        '\n'
        'or \n'
        '\n'
        '```xml\n'
        '<clickhouse>\n'
        '    <remote_servers>\n'
        '        <cluster>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host1</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host2</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '        </cluster>\n'
        '    </remote_servers>\n'
        '</clickhouse>\n'
        '```\n'
        '\n'
        ),
    link=None,
    level=4,
    num='4.9.8.1')

SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
    name='SRS-014 ClickHouse LDAP Role Mapping',
    description=None,
@@ -1003,6 +1072,8 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters', level=4, num='4.8.8.3'),
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters', level=4, num='4.8.8.4'),
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters', level=4, num='4.8.8.5'),
        Heading(name='Cluster With And Without Secret', level=2, num='4.9'),
        Heading(name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable', level=4, num='4.9.8.1'),
        Heading(name='References', level=1, num='5'),
        ),
    requirements=(
@@ -1056,6 +1127,7 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters,
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters,
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters,
        RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable,
        ),
    content='''
# SRS-014 ClickHouse LDAP Role Mapping
@@ -1136,6 +1208,8 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
* 4.8.8.3 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithutf8characters)
* 4.8.8.4 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialxmlcharacters)
* 4.8.8.5 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialregexcharacters)
* 4.9 [Cluster With And Without Secret](#cluster-with-and-without-secret)
* 4.9.8.1 [RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable](#rqsrs-014ldapclusterwithandwithoutsecretdistributedtable)
* 5 [References](#references)

## Revision History
@ -1608,6 +1682,67 @@ version: 1.0
|
||||
[ClickHouse] SHALL support regex special characters as the value of the `<prefix>` parameter in
|
||||
the `<user directories><ldap><role_mapping>` section of the `config.xml`.
|
||||
|
||||
### Cluster With And Without Secret
|
||||
|
||||
##### RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable
|
||||
version: 1.0
|
||||
|
||||
[ClickHouse] SHALL support propagating the query user's roles and their corresponding privileges
to the remote servers when using a `Distributed` table, for users that are authenticated
using LDAP either via an external user directory or defined in `users.xml`, when the
cluster is configured either with or without `<secret>`.

For example,

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <secret>qwerty123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

or

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

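For illustration only, a minimal SQL sketch of the scenario above, assuming either
`cluster` definition from the examples and a hypothetical mapped role `clickhouse_role`
held by an LDAP-authenticated user (database and table names are made up):

```sql
-- as an administrator: create the underlying and the distributed table
CREATE TABLE dwh.events ON CLUSTER cluster (d Date, x String)
    ENGINE = MergeTree ORDER BY d;
CREATE TABLE dwh.events_dist ON CLUSTER cluster AS dwh.events
    ENGINE = Distributed(cluster, dwh, events, rand());
GRANT ON CLUSTER cluster SELECT ON dwh.events TO clickhouse_role;
GRANT ON CLUSTER cluster SELECT ON dwh.events_dist TO clickhouse_role;

-- as the LDAP-authenticated user: the user's roles and privileges must be
-- propagated to host1 and host2 for this query to succeed
SELECT count() FROM dwh.events_dist;
```
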
## References

* **Access Control and Account Management**: https://clickhouse.com/docs/en/operations/access-rights/

@ -0,0 +1,37 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/common.xml:/etc/clickhouse-server/users.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/common.xml:/etc/clickhouse-server/config.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/logs.xml:/etc/clickhouse-server/config.d/logs.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ports.xml:/etc/clickhouse-server/config.d/ports.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/remote.xml:/etc/clickhouse-server/config.d/remote.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ssl.xml:/etc/clickhouse-server/config.d/ssl.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/storage.xml:/etc/clickhouse-server/config.d/storage.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/zookeeper.xml:/etc/clickhouse-server/config.d/zookeeper.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/dhparam.pem:/etc/clickhouse-server/ssl/dhparam.pem"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.crt:/etc/clickhouse-server/ssl/server.crt"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.key:/etc/clickhouse-server/ssl/server.key"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,159 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
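      # note: "$$" escapes "$" for docker-compose, so "${LDAP_PORT:-389}" is
      # resolved inside the container rather than at compose time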
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,37 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    init: true
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/common.xml:/etc/clickhouse-server/users.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/logs.xml:/etc/clickhouse-server/config.d/logs.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ports.xml:/etc/clickhouse-server/config.d/ports.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/remote.xml:/etc/clickhouse-server/config.d/remote.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ssl.xml:/etc/clickhouse-server/config.d/ssl.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/storage.xml:/etc/clickhouse-server/config.d/storage.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/zookeeper.xml:/etc/clickhouse-server/config.d/zookeeper.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/dhparam.pem:/etc/clickhouse-server/ssl/dhparam.pem"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.crt:/etc/clickhouse-server/ssl/server.crt"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.key:/etc/clickhouse-server/ssl/server.key"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,159 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
442
tests/testflows/ldap/role_mapping/tests/cluster_secret.py
Normal file
@ -0,0 +1,442 @@
from testflows.core import *
from testflows.asserts import error

from ldap.role_mapping.requirements import *
from ldap.role_mapping.tests.common import *


def cluster_node(name):
    """Get cluster node instance.
    """
    return current().context.cluster.node(name)


@TestStep(Given)
def add_sharded_cluster(self, node, name="sharded_cluster_with_secret", with_secret=True, restart=False):
"""Add configuration of sharded cluster that uses secret.
|
||||
"""
    entries = {
        "remote_servers": {
            name: []
        }
    }

    if with_secret:
        entries["remote_servers"][name].append(
            {
                "secret": "qwerty123"
            }
        )

    for node_name in self.context.cluster.nodes["clickhouse"]:
        entries["remote_servers"][name].append(
            {
                "shard": {
                    "replica": {
                        "host": node_name,
                        "port": "9000"
                    }
                },
            },
        )

    config = create_xml_config_content(entries=entries, config_file=f"{name}.xml")
    return add_config(config, node=node, restart=restart)
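
# Illustrative only: with the default arguments and nodes clickhouse1..clickhouse3,
# create_xml_config_content() renders the entries above roughly as
# (exact formatting may differ):
#
#   <remote_servers>
#       <sharded_cluster_with_secret>
#           <secret>qwerty123</secret>
#           <shard><replica><host>clickhouse1</host><port>9000</port></replica></shard>
#           <shard><replica><host>clickhouse2</host><port>9000</port></replica></shard>
#           <shard><replica><host>clickhouse3</host><port>9000</port></replica></shard>
#       </sharded_cluster_with_secret>
#   </remote_servers>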


@TestStep(Given)
def create_table(self, on_cluster, name=None, node=None):
    """Create table on cluster.
    """
    if node is None:
        node = self.context.node
    if name is None:
        name = getuid()

    try:
        node.query(
            f"CREATE TABLE {name} ON CLUSTER {on_cluster} (d Date, a String, b UInt8, x String, y Int8) "
            f"ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') "
            "PARTITION BY y ORDER BY (d, b)"
        )
        yield name
    finally:
        with Finally(f"I drop table {name} on cluster {on_cluster} on {node.name}"):
            node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster} SYNC")


@TestStep(Given)
def create_distributed_table(self, on_cluster, over, name=None, node=None):
    """Create distributed table on cluster over some underlying table.
    """
    if node is None:
        node = self.context.node
    if name is None:
        name = getuid()

    try:
        node.query(f"CREATE TABLE {name} ON CLUSTER {on_cluster} AS {over} "
            f"ENGINE = Distributed({on_cluster}, default, {over}, rand())")
        yield name
    finally:
        with Finally(f"I drop table {name} on cluster {on_cluster} on {node.name}"):
            node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster} SYNC")


@TestStep(Given)
def grant_select(self, cluster, privilege, role_or_user, node=None):
    """Grant select privilege on a table on a given cluster
    to a role or a user.
    """
    if node is None:
        node = self.context.node

    try:
        node.query(f"GRANT ON CLUSTER {cluster} {privilege} TO {role_or_user}")
        yield
    finally:
        with Finally(f"I remove privilege '{privilege}' on {cluster} from {role_or_user}"):
            node.query(f"REVOKE ON CLUSTER {cluster} {privilege} FROM {role_or_user}")


@TestScenario
def select_using_mapped_role(self, cluster, role_name, role_mapped, user):
"""Check accessing normal and distributed table using
|
||||
a user and the specified role that is either granted
|
||||
rights to access the tables or not and is or is not assigned to the user
|
||||
from all cluster nodes.
|
||||
"""
    # default cluster node
    node = cluster_node("clickhouse1")

    query_settings = [
        ("user", user["username"]), ("password", user["password"])
    ]

    with Given(f"I create base table on cluster {cluster}"):
        src_table = create_table(on_cluster=cluster, node=node)

with And(f"I create distristibuted table over base table on cluster {cluster}"):
        dist_table = create_distributed_table(on_cluster=cluster, over=src_table, node=node)

with And("I check that grants for the user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            for attempt in retries(timeout=10):
                with attempt:
                    with By(f"executing query on node {name}", flags=TE):
                        r = self.context.cluster.node(name).query(f"SHOW GRANTS", settings=query_settings)
                        if role_mapped:
                            with Then("check that role is mapped"):
                                assert role_name in r.output, error()

    with Example("no privilege on source table"):
        with When("user tries to read from the source table without privilege"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")
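                    # note (assumption): exit code 241 here appears to be server
                    # error 497 (ACCESS_DENIED) truncated to an 8-bit exit status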

    with Example("with privilege on source table"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with Then("user should be able to read from the source table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings,
                        exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:")

    with Example("with privilege only on distributed table"):
        with Given("I grant SELECT on distributed table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node)

        with Then("user should still not be able to read from distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")

    with Example("with privilege only on source but not on distributed table"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with Then("user should still not be able to read from distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")

    with Example("with privilege on source and distributed"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with And("I grant SELECT on distributed table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node)

        with Then("user should be able to read from the distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:")


@TestFeature
def execute_tests(self, role_name, role_mapped, ldap_user, local_user):
    """Execute all scenarios on cluster with or without secret
    for LDAP and local users, using a role that might be
    mapped or not.
    """
    for cluster_type in ["with secret", "without secret"]:
        with Feature("cluster " + cluster_type):
            for user in [ldap_user, local_user]:
                with Feature(user["type"]):
                    with Feature(f"role {role_name} mapped {role_mapped}"):
                        if role_mapped and user["type"] == "local user":
                            with Given(f"I grant role {role_name} to local RBAC user"):
                                for name in self.context.cluster.nodes["clickhouse"]:
                                    with By(f"on node {name}"):
                                        cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}")

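                        # cluster name resolves to "sharded_cluster_with_secret" or
                        # "sharded_cluster_without_secret", matching the
                        # configurations added in feature() below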
                        for scenario in ordered(loads(current_module(), Scenario)):
                            scenario(cluster="sharded_cluster_" + cluster_type.replace(" ", "_"),
                                role_name=role_name, role_mapped=role_mapped, user=user)


@TestOutline(Feature)
def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_roles_or_groups, rbac_roles, mapped_roles):
"""Check using simple and distributed table access when using
|
||||
LDAP external user directory or LDAP authenticated existing RBAC users
|
||||
with and without cluster secret.
|
||||
|
||||
Where mapping can be one of the following:
|
||||
'static' or 'dynamic' or 'dynamic and static'
|
||||
"""
|
||||
    ldap_user = {
        "type": "ldap user",
        "server": "openldap1",
        "username": "user1",
        "password": "user1",
        "dn": "cn=user1,ou=users,dc=company,dc=com",
    }

    local_user = {
        "type": "local user",
        "username": "local_user1",
        "password": "local_user1"
    }

    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": "clickhouse_"
        }
    ]

    if mapping in ["dynamic", "dynamic and static"]:
        with Given("I add LDAP groups"):
            for name in ldap_servers:
                for group_name in ldap_roles_or_groups:
                    with By(f"adding {group_name}"):
                        ldap_groups = add_ldap_groups(groups=({"cn": group_name},), node=cluster_node(name))

                    with And("I add LDAP user to the group"):
                        add_user_to_group_in_ldap(user=ldap_user, group=ldap_groups[0], node=cluster_node(name))

    with Given(f"I add LDAP external user directory configuration with {mapping} role mapping"):
        for name in self.context.cluster.nodes["clickhouse"]:
            if mapping == "dynamic":
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", role_mappings=role_mappings,
                    restart=True, node=cluster_node(name))
            elif mapping == "dynamic and static":
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", role_mappings=role_mappings,
                    roles=ldap_roles_or_groups,
                    restart=True, node=cluster_node(name))
            else:
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", roles=ldap_roles_or_groups,
                    restart=True, node=cluster_node(name))

    with And("I add local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_users(users=[local_user], node=cluster_node(name))

    with And("I add RBAC roles on cluster"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_roles(roles=rbac_roles, node=cluster_node(name))

    for role_name in rbac_roles:
        execute_tests(role_name=role_name, role_mapped=(role_name in mapped_roles), ldap_user=ldap_user, local_user=local_user)


@TestFeature
def using_authenticated_users(self, ldap_servers):
"""Check using simple and distributed table access when using
|
||||
LDAP authenticated existing users with and without cluster secret.
|
||||
"""
|
||||
    role_name = f"role_{getuid()}"

    ldap_user = {
        "type": "ldap authenticated user",
        "cn": "myuser",
        "username": "myuser",
        "userpassword": "myuser",
        "password": "myuser",
        "server": "openldap1"
    }

    local_user = {
        "type": "local user",
        "username": "local_user2",
        "password": "local_user2"
    }

    with Given("I add LDAP user"):
        add_user = {
            "cn": ldap_user["cn"],
            "userpassword": ldap_user["userpassword"],
        }
        for name in ldap_servers:
            add_ldap_users(users=[add_user], node=cluster_node(name))

    with And("I add LDAP authenticated users configuration"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"on node {name}", test=add_ldap_authenticated_users, parallel=True)(
                users=[ldap_user], rbac=True, node=cluster_node(name))

    with And("I add local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_users(users=[local_user], node=cluster_node(name))

    with And("I add RBAC role on cluster that user will use"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_roles(roles=(f"{role_name}",), node=cluster_node(name))

    with And("I grant role to LDAP authenticated user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                cluster_node(name).query(f"GRANT {role_name} TO {ldap_user['username']}")

    with And("I grant role to local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}")

    execute_tests(role_name=role_name, role_mapped=role_name, ldap_user=ldap_user, local_user=local_user)


@TestFeature
def using_external_user_directory(self, ldap_servers):
"""Check using LDAP external user directory with different
|
||||
role mapping mode and different cases of role existens.
|
||||
"""
|
||||
    uid = getuid()

    for mapping in ["dynamic", "static", "dynamic and static"]:
        with Example(f"{mapping}"):
            with Example("all mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}", f"role2_{uid}", f"role3_{uid}"]

                rbac_roles = [f"role0_{uid}", f"role1_{uid}"]
                mapped_roles = [f"role0_{uid}", f"role1_{uid}"]

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("some mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"]

                rbac_roles = [f"role0_{uid}", f"role_not_mapped_{uid}", f"role2_{uid}"]

                if mapping == "dynamic and static":
                    mapped_roles = [f"role0_{uid}", f"role2_{uid}"]
                else:
                    mapped_roles = [f"role0_{uid}"]

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("no mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"]

                rbac_roles = [f"role_not_mapped0_{uid}", f"role_not_mapped1_{uid}"]
                mapped_roles = []

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("empty roles"):
                ldap_roles_or_groups = []
                rbac_roles = [f"role0_{uid}", f"role1_{uid}"]
                mapped_roles = []

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)


@TestFeature
@Name("cluster secret")
@Requirements(
    RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable("1.0")
)
def feature(self):
"""Check using Distributed table when cluster is configured with and without secret
|
||||
using users authenticated via LDAP either through external user directory
|
||||
or defined using RBAC with LDAP server authentication.
|
||||
"""
|
||||
    ldap_servers = {
        "openldap1": {
            "host": "openldap1",
            "port": "389",
            "enable_tls": "no",
            "bind_dn": "cn={user_name},ou=users,dc=company,dc=com"
        },
    }

    with Given("I fix LDAP access permissions"):
        for name in ldap_servers:
            fix_ldap_permissions(node=cluster_node(name))

    with And("I add LDAP servers configuration on all nodes", description=f"{ldap_servers}"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"on node {name}", test=add_ldap_servers_configuration, parallel=True)(
                servers=ldap_servers, node=cluster_node(name))

    with And("I add sharded cluster that uses secrets on all the nodes"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)(
                node=cluster_node(name), name="sharded_cluster_with_secret", with_secret=True)

    with And("I add sharded cluster that does not use secrets on all the nodes"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)(
                node=cluster_node(name), name="sharded_cluster_without_secret", with_secret=False)

    Feature("external user directory", test=using_external_user_directory)(ldap_servers=ldap_servers)
    Feature("authenticated users", test=using_authenticated_users)(ldap_servers=ldap_servers)
@ -3,15 +3,16 @@ import os
from testflows.core import *
from testflows.asserts import error

from ldap.authentication.tests.common import getuid, create_ldap_servers_config_content, add_config, Config
from helpers.common import create_xml_config_content, add_config
from ldap.authentication.tests.common import getuid, create_ldap_servers_config_content, ldap_authenticated_users
from ldap.external_user_directory.tests.common import rbac_roles, rbac_users, ldap_users
from ldap.authentication.tests.common import xmltree, xml_indent, xml_append, xml_with_utf8

@TestStep(Given)
def create_table(self, name, create_statement, on_cluster=False):
def create_table(self, name, create_statement, on_cluster=False, node=None):
    """Create table.
    """
    node = current().context.node
    if node is None:
        node = current().context.node
    try:
        with Given(f"I have a {name} table"):
            node.query(create_statement.format(name=name))
@ -25,12 +26,12 @@ def create_table(self, name, create_statement, on_cluster=False):

@TestStep(Given)
def add_ldap_servers_configuration(self, servers, config=None, config_d_dir="/etc/clickhouse-server/config.d",
        config_file="ldap_servers.xml", timeout=60, restart=False):
        config_file="ldap_servers.xml", timeout=60, restart=False, node=None):
    """Add LDAP servers configuration to config.xml.
    """
    if config is None:
        config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

@TestStep(Given)
def add_ldap_groups(self, groups, node=None):
@ -50,7 +51,7 @@ def add_ldap_groups(self, groups, node=None):
@TestStep(Given)
def add_ldap_external_user_directory(self, server, roles=None, role_mappings=None,
        config_d_dir="/etc/clickhouse-server/config.d",
        config_file=None, timeout=60, restart=True, config=None):
        config_file=None, timeout=60, restart=True, config=None, node=None):
    """Add LDAP external user directory.
    """
    if config_file is None:
@ -60,21 +61,35 @@ def add_ldap_external_user_directory(self, server, roles=None, role_mappings=Non
        config = create_ldap_external_user_directory_config_content(server=server, roles=roles,
            role_mappings=role_mappings, config_d_dir=config_d_dir, config_file=config_file)

    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

@TestStep(Given)
def add_rbac_roles(self, roles):
def add_rbac_roles(self, roles, node=None):
    """Add RBAC roles.
    """
    with rbac_roles(*roles) as _roles:
    with rbac_roles(*roles, node=node) as _roles:
        yield _roles

@TestStep(Given)
def add_rbac_users(self, users):
def add_rbac_users(self, users, node=None):
    """Add RBAC users.
    """
    with rbac_users(*users) as _users:
        yield _users
    if node is None:
        node = self.context.node
    try:
        with Given(f"I create local users on {node}"):
            for user in users:
                username = user.get('username', None) or user['cn']
                password = user.get('password', None) or user['userpassword']
                with By(f"creating user {username}"):
                    node.query(f"CREATE USER OR REPLACE {username} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{password}'")
        yield users
    finally:
        with Finally(f"I drop local users on {node}"):
            for user in users:
                username = user.get('username', None) or user['cn']
                with By(f"dropping user {username}", flags=TE):
                    node.query(f"DROP USER IF EXISTS {username}")

@TestStep(Given)
def add_ldap_users(self, users, node=None):
@ -83,6 +98,16 @@ def add_ldap_users(self, users, node=None):
    with ldap_users(*users, node=node) as _users:
        yield _users

@TestStep(Given)
def add_ldap_authenticated_users(self, users, config_file=None, rbac=False, node=None, restart=True):
    """Add LDAP authenticated users.
    """
    if config_file is None:
        config_file = f"ldap_users_{getuid()}.xml"

    with ldap_authenticated_users(*users, config_file=config_file, restart=restart, rbac=rbac, node=node):
        yield users

def add_group_to_ldap(cn, gidnumber=None, node=None, _gidnumber=[600], exitcode=0):
    """Add new group entry to LDAP.
    """
@ -193,39 +218,11 @@ def delete_user_from_group_in_ldap(user, group, node=None, exitcode=0):
    if exitcode is not None:
        assert r.exitcode == exitcode, error()

def create_xml_config_content(entries, config_d_dir="/etc/clickhouse-server/config.d",
        config_file="ldap_external_user_directories.xml"):
    """Create XML configuration file from a dictionary.
    """
    uid = getuid()
    path = os.path.join(config_d_dir, config_file)
    name = config_file
    root = xmltree.Element("yandex")
    root.append(xmltree.Comment(text=f"config uid: {uid}"))

    def create_xml_tree(entries, root):
        for k,v in entries.items():
            if type(v) is dict:
                xml_element = xmltree.Element(k)
                create_xml_tree(v, xml_element)
                root.append(xml_element)
            elif type(v) in (list, tuple):
                xml_element = xmltree.Element(k)
                for e in v:
                    create_xml_tree(e, xml_element)
                root.append(xml_element)
            else:
                xml_append(root, k, v)

    create_xml_tree(entries, root)
    xml_indent(root)
    content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8")

    return Config(content, path, name, uid, "config.xml")

def create_ldap_external_user_directory_config_content(server=None, roles=None, role_mappings=None, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml")

    entries = {
        "user_directories": {
            "ldap": {
@ -250,4 +247,5 @@ def create_ldap_external_user_directory_config_content(server=None, roles=None,
def create_entries_ldap_external_user_directory_config_content(entries, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml")
    return create_xml_config_content(entries, **kwargs)

@ -2,8 +2,6 @@
from testflows.core import *
from testflows.asserts import error

from helpers.common import Pool

from ldap.role_mapping.requirements import *
from ldap.role_mapping.tests.common import *
from ldap.external_user_directory.tests.common import randomword

@ -3,6 +3,7 @@ version: '2.3'
services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
@ -15,9 +16,9 @@ services:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: clickhouse client --query='select 1'
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
29
tests/testflows/rbac/rbac_env_arm64/clickhouse-service.yml
Executable file
@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
60
tests/testflows/rbac/rbac_env_arm64/docker-compose.yml
Executable file
@ -0,0 +1,60 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
18
tests/testflows/rbac/rbac_env_arm64/zookeeper-service.yml
Executable file
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 3s
      timeout: 2s
      retries: 5
      start_period: 2s
    security_opt:
      - label:disable
@ -9,6 +9,7 @@ append_path(sys.path, "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from rbac.requirements import SRS_006_ClickHouse_Role_Based_Access_Control
from helpers.common import check_clickhouse_version

issue_14091 = "https://github.com/ClickHouse/ClickHouse/issues/14091"
issue_14149 = "https://github.com/ClickHouse/ClickHouse/issues/14149"
@ -159,7 +160,7 @@ xfails = {
    "views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted directly":
        [(Fail, issue_26746)],
    "views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted through a role":
        [(Fail, issue_26746)]
        [(Fail, issue_26746)],
}

xflags = {
@ -167,15 +168,23 @@ xflags = {
        (SKIP, 0)
}

ffails = {
    "/clickhouse/rbac/privileges/:/table_type='ReplicatedReplacingMergeTree-sharded_cluster":
        (Skip, "Causes clickhouse timeout on 21.10", (lambda test: check_clickhouse_version(">=21.10")(test) and check_clickhouse_version("<21.11")(test))),
    "/clickhouse/rbac/views":
        (Skip, "Does not work on clickhouse 21.09", (lambda test: check_clickhouse_version(">=21.9")(test) and check_clickhouse_version("<21.10")(test)))
}

@TestModule
@ArgumentParser(argparser)
@XFails(xfails)
@XFlags(xflags)
@FFails(ffails)
@Name("rbac")
@Specifications(
    SRS_006_ClickHouse_Role_Based_Access_Control
)
def regression(self, local, clickhouse_binary_path, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """RBAC regression.
    """
    nodes = {
@ -183,11 +192,21 @@ def regression(self, local, clickhouse_binary_path, stress=None):
            ("clickhouse1", "clickhouse2", "clickhouse3")
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "rbac_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Feature(run=load("rbac.tests.syntax.feature", "feature"))
@ -4,6 +4,7 @@ from testflows.core import *

import rbac.helper.errors as errors
from rbac.requirements import *
from helpers.common import check_clickhouse_version

@TestFeature
@Name("grant role")
@ -58,7 +59,7 @@ def feature(self, node="clickhouse1"):
            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
        with setup(0,0):
            with When("I grant nonexistent role to a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0")
                node.query("GRANT role0 TO user0", exitcode=exitcode, message=message)

    with Scenario("I grant a role to multiple users", requirements=[

@ -4,6 +4,7 @@ from testflows.core import *

import rbac.helper.errors as errors
from rbac.requirements import *
from helpers.common import check_clickhouse_version

@TestFeature
@Name("revoke role")
@ -70,7 +71,7 @@ def feature(self, node="clickhouse1"):
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(0,0):
            with When("I revoke nonexistent role from a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0")
                node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)

    with Scenario("I revoke a role from multiple users", requirements=[
