Add integration test test_kerberos_auth

Roman Vasin committed 2022-12-14 13:39:23 +00:00
parent 9c70b13702, commit e2ced517dd
9 changed files with 310 additions and 2 deletions

View File

@@ -0,0 +1,11 @@
version: '2.3'
services:
kerberoskdc:
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kerberoskdc
volumes:
- ${KERBEROS_AUTH_DIR}/secrets:/tmp/keytab
- ${KERBEROS_AUTH_DIR}/../../kerberos_image_config.sh:/config.sh
- /dev/urandom:/dev/random
ports: [88, 749]
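
The two variables in this service definition are supplied by the test harness: KERBEROS_AUTH_DIR points at the instance directory holding the shared secrets, and DOCKER_KERBEROS_KDC_TAG selects the KDC image tag. One way to preview the substitution outside the harness is to render the file with docker-compose config; a minimal sketch, assuming the file is saved locally as docker_compose_kerberos_kdc.yml and using placeholder values:

import os
import subprocess

# Placeholder values; the harness computes the real directory and tag.
env = dict(
    os.environ,
    KERBEROS_AUTH_DIR="/tmp/test_kerberos_auth",
    DOCKER_KERBEROS_KDC_TAG="latest",
)
# `docker-compose config` prints the compose file with variables resolved.
subprocess.run(
    ["docker-compose", "--file", "docker_compose_kerberos_kdc.yml", "config"],
    env=env,
    check=True,
)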

View File

@@ -388,6 +388,7 @@ class ClickHouseCluster:
self.with_postgres_cluster = False
self.with_kafka = False
self.with_kerberized_kafka = False
self.with_kerberos_kdc = False
self.with_rabbitmq = False
self.with_nats = False
self.with_odbc_drivers = False
@@ -455,6 +456,12 @@ class ClickHouseCluster:
self.kerberized_kafka_host
)
# available when with_kerberos_kdc == True
self.kerberos_kdc_host = "kerberoskdc"
self.kerberos_docker_id = self.get_instance_docker_id(
self.kerberos_kdc_host
)
# available when with_mongo == True
self.mongo_host = "mongo1"
self.mongo_port = get_free_port()
@@ -1059,6 +1066,31 @@ class ClickHouseCluster:
]
return self.base_kerberized_kafka_cmd
def setup_kerberos_cmd(
self, instance, env_variables, docker_compose_yml_dir
):
self.with_kerberos_kdc = True
env_variables["KERBEROS_AUTH_DIR"] = instance.path + "/"
env_variables["KERBEROS_KDC_HOST"] = self.kerberos_kdc_host
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberos_kdc.yml"),
]
)
self.base_kerberos_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberos_kdc.yml"),
]
return self.base_kerberos_cmd
def setup_redis_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_redis = True
env_variables["REDIS_HOST"] = self.redis_host
@@ -1329,6 +1361,7 @@ class ClickHouseCluster:
with_mysql_cluster=False,
with_kafka=False,
with_kerberized_kafka=False,
with_kerberos_kdc=False,
with_rabbitmq=False,
with_nats=False,
clickhouse_path_dir=None,
@@ -1420,6 +1453,7 @@ class ClickHouseCluster:
with_mysql_cluster=with_mysql_cluster,
with_kafka=with_kafka,
with_kerberized_kafka=with_kerberized_kafka,
with_kerberos_kdc=with_kerberos_kdc,
with_rabbitmq=with_rabbitmq,
with_nats=with_nats,
with_nginx=with_nginx,
@@ -1554,6 +1588,13 @@ class ClickHouseCluster:
)
)
if with_kerberos_kdc and not self.with_kerberos_kdc:
cmds.append(
self.setup_kerberos_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_rabbitmq and not self.with_rabbitmq:
cmds.append(
self.setup_rabbitmq_cmd(instance, env_variables, docker_compose_yml_dir)
@@ -2124,6 +2165,11 @@ class ClickHouseCluster:
logging.debug("Waiting for Kafka to start up")
time.sleep(1)
def wait_kerberos_kdc_is_available(self, kerberos_docker_id):
logging.debug("Waiting for Kerberos KDC to start up")
# temporary workaround until a real readiness check exists: wait a fixed 50 seconds
time.sleep(50)
def wait_hdfs_to_start(self, timeout=300, check_marker=False):
start = time.time()
while time.time() - start < timeout:
@@ -2473,6 +2519,16 @@ class ClickHouseCluster:
self.kerberized_kafka_docker_id, self.kerberized_kafka_port, 100
)
if self.with_kerberos_kdc and self.base_kerberos_cmd:
logging.debug("Setup Kerberos KDC")
run_and_check(
self.base_kerberos_cmd
+ common_opts
+ ["--renew-anon-volumes"]
)
self.up_called = True
self.wait_kerberos_kdc_is_available(self.kerberos_docker_id)
if self.with_rabbitmq and self.base_rabbitmq_cmd:
logging.debug("Setup RabbitMQ")
os.makedirs(self.rabbitmq_logs_dir)
@@ -2872,6 +2928,7 @@ class ClickHouseInstance:
with_mysql_cluster,
with_kafka,
with_kerberized_kafka,
with_kerberos_kdc,
with_rabbitmq,
with_nats,
with_nginx,
@@ -2955,6 +3012,7 @@ class ClickHouseInstance:
self.with_postgres_cluster = with_postgres_cluster
self.with_kafka = with_kafka
self.with_kerberized_kafka = with_kerberized_kafka
self.with_kerberos_kdc = with_kerberos_kdc
self.with_rabbitmq = with_rabbitmq
self.with_nats = with_nats
self.with_nginx = with_nginx
@@ -2988,7 +3046,7 @@ class ClickHouseInstance:
else:
self.odbc_ini_path = ""
if with_kerberized_kafka or with_kerberized_hdfs:
if with_kerberized_kafka or with_kerberized_hdfs or with_kerberos_kdc:
self.keytab_path = (
"- "
+ os.path.dirname(self.docker_compose_path)
@@ -3906,7 +3964,7 @@ class ClickHouseInstance:
if self.with_zookeeper:
shutil.copy(self.zookeeper_config_path, conf_d_dir)
if self.with_kerberized_kafka or self.with_kerberized_hdfs:
if self.with_kerberized_kafka or self.with_kerberized_hdfs or self.with_kerberos_kdc:
shutil.copytree(
self.kerberos_secrets_dir, p.abspath(p.join(self.path, "secrets"))
)
@@ -3978,6 +4036,9 @@ class ClickHouseInstance:
if self.with_kerberized_kafka:
depends_on.append("kerberized_kafka1")
if self.with_kerberos_kdc:
depends_on.append("kerberoskdc")
if self.with_kerberized_hdfs:
depends_on.append("kerberizedhdfs1")

View File

@@ -0,0 +1,6 @@
<clickhouse>
<kerberos>
<realm>TEST.CLICKHOUSE.TECH</realm>
<keytab>/tmp/keytab/clickhouse.keytab</keytab>
</kerberos>
</clickhouse>
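
This server-side kerberos.xml points ClickHouse at the service keytab for the HTTP/instance principal, which the KDC bootstrap script writes into the shared secrets directory mounted at /tmp/keytab. A possible sanity check for the test below, assuming the instance object and that the krb5 client tools (klist) are present in the instance image:

# klist -k lists the principals stored in a keytab; verify the service keytab
# is mounted and holds the expected principal.
out = instance.exec_in_container(
    ["bash", "-c", "klist -k /tmp/keytab/clickhouse.keytab"]
)
assert "HTTP/instance@TEST.CLICKHOUSE.TECH" in out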

View File

@@ -0,0 +1,19 @@
<clickhouse>
<profiles>
<default>
</default>
</profiles>
<users>
<kuser>
<kerberos>
<realm>TEST.CLICKHOUSE.TECH</realm>
</kerberos>
<access_management>1</access_management>
<networks replace="replace">
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
</kuser>
</users>
</clickhouse>
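
kuser has no password here; it is matched purely by its Kerberos principal in the configured realm, and access_management is switched on for it. Since SQL-driven access control is available, an equivalent user could also be created at runtime; a sketch of such a follow-up check (the kuser2 name is hypothetical, and the query is issued as the default user, which is assumed to have sufficient grants):

# Create an additional Kerberos-identified user via SQL instead of users.xml.
instance.query(
    "CREATE USER IF NOT EXISTS kuser2 IDENTIFIED WITH kerberos REALM 'TEST.CLICKHOUSE.TECH'"
)
assert "kerberos" in instance.query("SHOW CREATE USER kuser2")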

View File

@@ -0,0 +1,126 @@
#!/bin/bash
set -x # trace
: "${REALM:=TEST.CLICKHOUSE.TECH}"
: "${DOMAIN_REALM:=test.clickhouse.com}"
: "${KERB_MASTER_KEY:=masterkey}"
: "${KERB_ADMIN_USER:=admin}"
: "${KERB_ADMIN_PASS:=admin}"
create_config() {
: "${KDC_ADDRESS:=$(hostname -f)}"
cat>/etc/krb5.conf<<EOF
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = $REALM
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 15s
renew_lifetime = 15s
forwardable = true
# WARNING: We use weaker key types to simplify testing as stronger key types
# require the enhanced security JCE policy file to be installed. You should
# NOT run with this configuration in production or any real environment. You
# have been warned.
default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
[realms]
$REALM = {
kdc = $KDC_ADDRESS
admin_server = $KDC_ADDRESS
}
[domain_realm]
.$DOMAIN_REALM = $REALM
$DOMAIN_REALM = $REALM
EOF
cat>/var/kerberos/krb5kdc/kdc.conf<<EOF
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
$REALM = {
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
# WARNING: We use weaker key types to simplify testing as stronger key types
# require the enhanced security JCE policy file to be installed. You should
# NOT run with this configuration in production or any real environment. You
# have been warned.
master_key_type = des3-hmac-sha1
supported_enctypes = arcfour-hmac:normal des3-hmac-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3
default_principal_flags = +preauth
}
EOF
}
create_db() {
/usr/sbin/kdb5_util -P $KERB_MASTER_KEY -r $REALM create -s
}
start_kdc() {
mkdir -p /var/log/kerberos
/etc/rc.d/init.d/krb5kdc start
/etc/rc.d/init.d/kadmin start
chkconfig krb5kdc on
chkconfig kadmin on
}
restart_kdc() {
/etc/rc.d/init.d/krb5kdc restart
/etc/rc.d/init.d/kadmin restart
}
create_admin_user() {
kadmin.local -q "addprinc -pw $KERB_ADMIN_PASS $KERB_ADMIN_USER/admin"
echo "*/admin@$REALM *" > /var/kerberos/krb5kdc/kadm5.acl
}
create_keytabs() {
rm -f /tmp/keytab/*.keytab
kadmin.local -q "addprinc -randkey kuser@${REALM}"
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/kuser.keytab kuser@${REALM}"
kadmin.local -q "addprinc -randkey HTTP/instance@${REALM}"
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab HTTP/instance@${REALM}"
chmod g+r /tmp/keytab/kuser.keytab
chmod g+r /tmp/keytab/clickhouse.keytab
}
main() {
if [ ! -f /kerberos_initialized ]; then
create_config
create_db
create_admin_user
start_kdc
touch /kerberos_initialized
fi
if [ ! -f /var/kerberos/krb5kdc/principal ]; then
while true; do sleep 1000; done
else
start_kdc
create_keytabs
tail -F /var/log/kerberos/krb5kdc.log
fi
}
[[ "$0" == "${BASH_SOURCE[0]}" ]] && main "$@"

View File

@@ -0,0 +1,22 @@
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = TEST.CLICKHOUSE.TECH
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 15s
renew_lifetime = 15s
forwardable = true
[realms]
TEST.CLICKHOUSE.TECH = {
kdc = kerberoskdc
admin_server = kerberoskdc
}
[domain_realm]
.TEST.CLICKHOUSE.TECH = TEST.CLICKHOUSE.TECH
TEST.CLICKHOUSE.TECH = TEST.CLICKHOUSE.TECH

View File

@@ -0,0 +1,63 @@
import os.path as p
import random
import threading
import time
import pytest
import logging
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
import json
import subprocess
import socket
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
"instance",
main_configs=["configs/kerberos.xml"],
user_configs=["configs/users.xml"],
with_kerberos_kdc=True,
clickhouse_path_dir="clickhouse_path",
)
# Fixtures
@pytest.fixture(scope="module")
def kerberos_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kerberos_setup_teardown():
yield # run test
# Tests
def test_kerberos_auth_with_keytab(kerberos_cluster):
logging.debug("kerberos test")
instance.exec_in_container(
["bash", "-c", "kinit -V -k -t /tmp/keytab/kuser.keytab kuser"]
)
assert (
instance.exec_in_container(
["bash", "-c", "echo 'select currentUser()' | curl -vvv --negotiate -u : http://{}:8123/ --data-binary @-".format(instance.hostname)]
)
== "kuser\n"
)
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
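
A natural follow-up would be a negative test checking that the request no longer authenticates as kuser once the ticket cache is destroyed. A sketch under the same fixtures; the exact behaviour of curl --negotiate without credentials is not asserted, only that kuser is not returned:

def test_kerberos_auth_without_ticket(kerberos_cluster):
    # Drop any cached ticket, then repeat the request; it must not run as kuser.
    instance.exec_in_container(["bash", "-c", "kdestroy || true"])
    out = instance.exec_in_container(
        [
            "bash",
            "-c",
            "echo 'select currentUser()' | curl -s --negotiate -u : http://{}:8123/ --data-binary @- || true".format(
                instance.hostname
            ),
        ]
    )
    assert out.strip() != "kuser"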