Merge pull request #12412 from Jokser/s3-https

S3 HTTPS integration tests
alexey-milovidov 2020-07-12 03:36:37 +03:00 committed by GitHub
commit 26d32f2ce5
23 changed files with 406 additions and 60 deletions


@ -87,5 +87,13 @@
"docker/test/testflows/runner": {
"name": "yandex/clickhouse-testflows-runner",
"dependent": []
},
"docker/test/integration/s3_proxy": {
"name": "yandex/clickhouse-s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
"name": "yandex/clickhouse-python-bottle",
"dependent": []
}
}


@ -1,4 +1,5 @@
# docker build -t yandex/clickhouse-python-bottle .
# Helper docker container to run python bottle apps
FROM python:3
RUN python -m pip install bottle


@ -76,4 +76,3 @@ VOLUME /var/lib/docker
EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]


@ -5,50 +5,37 @@ services:
image: minio/minio
volumes:
- data1-1:/data1
- ${MINIO_CERTS_DIR:-}:/certs
ports:
- "9001:9001"
environment:
MINIO_ACCESS_KEY: minio
MINIO_SECRET_KEY: minio123
command: server --address :9001 /data1-1
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9001/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
command: server --address :9001 --certs-dir /certs /data1-1
depends_on:
- redirect
- resolver
- proxy1
- proxy2
# Redirects all requests to origin Minio.
redirect:
image: schmunk42/nginx-redirect
volumes:
- /nginx:/nginx
environment:
- SERVER_REDIRECT=minio1:9001
- SERVER_REDIRECT_CODE=307
- SERVER_ACCESS_LOG=/nginx/access.log
# HTTP proxies for Minio.
proxy1:
image: vimagick/tinyproxy
image: yandex/clickhouse-s3-proxy
ports:
- "4081:8888"
- "8080" # Redirect proxy port
- "80" # Reverse proxy port
- "443" # Reverse proxy port (secure)
proxy2:
image: vimagick/tinyproxy
image: yandex/clickhouse-s3-proxy
ports:
- "4082:8888"
- "8080"
- "80"
- "443"
# Empty container to run proxy resolver.
resolver:
build:
context: ../../../docker/test/integration/
dockerfile: resolver/Dockerfile
network: host
image: yandex/clickhouse-python-bottle
ports:
- "4083:8080"
- "8080"
tty: true
depends_on:
- proxy1


@ -0,0 +1,11 @@
# docker build -t yandex/clickhouse-s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh
COPY server.crt /etc/ssl/certs/server.crt
COPY server.key /etc/ssl/certs/server.key
COPY nginx.conf /etc/nginx/nginx.conf
RUN chmod +x /run.sh
CMD ["/run.sh"]


@ -0,0 +1,59 @@
events {
use epoll;
worker_connections 128;
}
http {
# Docker DNS resolver
resolver 127.0.0.11;
map $http_x_forwarded_proto $redirect_scheme {
default $scheme;
https https;
}
# Redirect proxy
server {
listen 8080;
server_name proxy1 proxy2;
# To allow special characters in headers
ignore_invalid_headers off;
return 307 $redirect_scheme://${S3_HOST}:${S3_PORT}$request_uri;
}
# Reverse proxy
server {
listen 80;
listen 443 ssl;
server_name proxy1 proxy2;
ssl_certificate /etc/ssl/certs/server.crt;
ssl_certificate_key /etc/ssl/certs/server.key;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass $scheme://${S3_HOST}:${S3_PORT};
proxy_ssl_verify off;
}
}
}
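
The config above gives the single nginx image two roles: port 8080 answers every request with a 307 back to the MinIO origin (exercising the S3 client's redirect handling), while ports 80/443 reverse-proxy transparently to the same origin. Below is a minimal sketch of the expected behaviour from inside the compose network; it is not part of the PR and assumes the run.sh defaults of S3_HOST=minio1 and S3_PORT=9001 and that the requests package is available.

import requests

# Redirect proxy: any request to port 8080 comes back as a 307 to the origin,
# so the S3 client retries directly against minio1.
r = requests.get("http://proxy1:8080/root/data/", allow_redirects=False)
assert r.status_code == 307
assert r.headers["Location"].startswith("http://minio1:9001/")

# Reverse proxy: ports 80/443 forward to the same origin; verify=False because
# the test certificate is self-signed for "minio1".
r = requests.get("https://proxy1/minio/health/live", verify=False)
print(r.status_code)  # 200 when MinIO is up in TLS mode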


@ -0,0 +1,15 @@
#!/usr/bin/env sh
if [ -z "$S3_HOST" ] ; then
S3_HOST='minio1'
fi
if [ -z "$S3_PORT" ] ; then
S3_PORT='9001'
fi
# Replace config placeholders with environment variables
sed -i "s|\${S3_HOST}|${S3_HOST}|" /etc/nginx/nginx.conf
sed -i "s|\${S3_PORT}|${S3_PORT}|" /etc/nginx/nginx.conf
exec nginx -g 'daemon off;'


@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDBTCCAe2gAwIBAgIRANb2pr4HgR8YFwKNJMUSWiIwDQYJKoZIhvcNAQELBQAw
EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0yMDA3MDkxODE1MDBaFw0yMTA3MDkxODE1
MDBaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQC9ORgaBCx42ejp9PSjc0uvwH/hTB6yZvZB4S+wxbzzfeKomX/JBcFH
mGCIJJVjVV0rafv3vw+9f9u4wrZpN4HZKnVyz3mBXEA1WDvLTLV8n8zVyso1qbnf
F9Fa8wnk89b0xGWyM7jie7/cTIGMrgm7hIPaM2zDzFwIfIAqZ1AexC4vADIffF9r
cFLLjNHuv1uAc32jdfQEPluvmBMzGkz254+MabxZWIZjkYn70kNSZDoyFmMGafBt
kRTUPNq2+fGv/eLJ9Lxm3153Ja0sCyzLlEo9+/z4ERqM5zwWre4vcwfO63c5pcSC
zGw84teTpmDwSyiSR70TYJdtBGQqZvLZAgMBAAGjVjBUMA4GA1UdDwEB/wQEAwIC
pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBwGA1UdEQQV
MBOCBm1pbmlvMYIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAKU2LhvFFz
RFfUibt/WTj3rtUfKEBrQuUOYt2A8MTbC8pyEu+UJASTzunluUFze5zchEm1s3pZ
YRLcNwbJqLE6CzUxQ9b2iUhaeWuKrx4ZoPkY0uGiaXM/iKfVKTuNmhF2Sf/P4xUE
Pt19yQjpIhcicWQc37BBQFvnvy+n5wgHa/pgl1+QUvAa/fwYhF9S28xRLESzZepm
NMYysopV+YMaxcFa9SH44toXtXnvRWwVdEorlq1W3/AiJg8hDPzSa9UXLMjA968J
ONtn3qvwac9Ot53+QsXJdsMmDZLWGCi6I1w0ZQetpr/0ubaA1F3GdK9eB/S0thqU
l2VUgn3c/kKS
-----END CERTIFICATE-----


@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC9ORgaBCx42ejp
9PSjc0uvwH/hTB6yZvZB4S+wxbzzfeKomX/JBcFHmGCIJJVjVV0rafv3vw+9f9u4
wrZpN4HZKnVyz3mBXEA1WDvLTLV8n8zVyso1qbnfF9Fa8wnk89b0xGWyM7jie7/c
TIGMrgm7hIPaM2zDzFwIfIAqZ1AexC4vADIffF9rcFLLjNHuv1uAc32jdfQEPluv
mBMzGkz254+MabxZWIZjkYn70kNSZDoyFmMGafBtkRTUPNq2+fGv/eLJ9Lxm3153
Ja0sCyzLlEo9+/z4ERqM5zwWre4vcwfO63c5pcSCzGw84teTpmDwSyiSR70TYJdt
BGQqZvLZAgMBAAECggEANe8oJ4I5CtlRwh3H/S7Hy/iaeqUvuroORwjghwpVqTGg
gV3/RlUVmkqceTG0QvP58n3rC9qxqdnfzvHw/FyN7lBj2a25fF3HD21u3aunrzX9
NJLwwAr4p9YqHjpX/6JhCrNQKVMEx8luDmTgKDETJRfIXVF7FvQQ53pVLcD03U+g
MgN61HBzfT5L0TLHoiKNQbVi+Wm1gw3zvb/a9Z1rULRZfIuKGM0bNNqRZt4rUUAV
QicklDR0Qv59jhr5Y/zjinKkqF8qudvUkaNT2JH1DLfXiAhuC0OQugMjYzNntQB4
hMhkqARnjuk/WPMvnXivnqx9o69BL5wyXIj3vD4fgQKBgQDVKaXAZJ5bo3VfcpLm
cyjtUuOzAxLU1bVGI0Hm1ARqeGVxSTypZLSX8xFi2n5Bvbgh/Y60aEac/1uKoXA9
gej1MT4hKpXyagrARx97E8zk5nf88kVxkiKUrifMjP2lDzHIYhdKk9R3SiV6gWvA
FoJtjBwFhJ6uWUPyry4nqFSENQKBgQDjP9k6CTZF0EnDqbADiQr7VKpebqhtLWRD
U0bQh/l57VrWqGksVOlivIJChP49q1H+hQ1YgfKIEDag8JJnf/inUSpVsw1ljAjv
knqNzn0Gdd9lTsiNGgqlCjhmWedkh4eO8uau479TwQc6gB4PQdLAFynQtt8Kk45P
GxdpRx4AlQKBgQCgxUGbYwhBC37aF1sObqrenBbajCXm2qxXEv6Ab0ZJWzb/g4I6
LJc8x3pEeZCiWsoG8Otxy/f+L2bGn049Rb8DNzmp4Cmp5SrorHvk4yE1P1IeOEgC
CXsFcnjYATrJBDXC8aCpgefMdOLhi71N6mxC3VrBGq5nxzHFVzTTelUMRQKBgQDa
yekhiCb5liy+tcuhy7qH+Z7BpjaATrh+XVoLgS5+5jeT/basmN/OUQH0e0iwJRaf
Poh30zynJT0DPDsobLwAkxN4SRg30Vf1GAjoKIqUwr2fMvfBafYfqbRdTmeKkTXB
OjlA3kKhp3GHMDxAojX+/Q4kRTx+WUwk+0dR88d99QKBgEiYrkSLjKXUFllDmVyp
HtlYKZiq5c33DA06SA2uVOprCdTbnbvP4WrgUsLGvqBcaPEd06fGGbvJWwUdnkXM
HNAkqSeUe5ueovidtoPdF+aPyxdGg3Z8551xOoHZFYrvgdZ4YMPcJrwQQsvWCcYP
GDnSoD8Xjd2LmekTpDBt5ZVz
-----END PRIVATE KEY-----


@ -116,6 +116,8 @@ void registerDiskS3(DiskFactory & factory)
if (uri.key.back() != '/')
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
cfg.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
cfg.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
cfg.endpointOverride = uri.endpoint;
auto proxy_config = getProxyConfiguration(config_prefix, config);


@ -75,7 +75,12 @@ void PocoHTTPClient::MakeRequestInternal(
auto request_configuration = per_request_configuration(request);
if (!request_configuration.proxyHost.empty())
session->setProxy(request_configuration.proxyHost, request_configuration.proxyPort);
session->setProxy(
request_configuration.proxyHost,
request_configuration.proxyPort,
Aws::Http::SchemeMapper::ToString(request_configuration.proxyScheme),
false /// Disable proxy tunneling by default
);
Poco::Net::HTTPRequest poco_request(Poco::Net::HTTPRequest::HTTP_1_1);


@ -137,12 +137,13 @@ class ClickHouseCluster:
self.with_cassandra = False
self.with_minio = False
self.minio_certs_dir = None
self.minio_host = "minio1"
self.minio_bucket = "root"
self.minio_port = 9001
self.minio_client = None # type: Minio
self.minio_redirect_host = "redirect"
self.minio_redirect_port = 80
self.minio_redirect_host = "proxy1"
self.minio_redirect_port = 8080
# available when with_kafka == True
self.schema_registry_client = None
@ -166,7 +167,7 @@ class ClickHouseCluster:
with_redis=False, with_minio=False, with_cassandra=False,
hostname=None, env_variables=None, image="yandex/clickhouse-integration-test",
stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None,
zookeeper_docker_compose_path=None, zookeeper_use_tmpfs=True):
zookeeper_docker_compose_path=None, zookeeper_use_tmpfs=True, minio_certs_dir=None):
"""Add an instance to the cluster.
name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
@ -285,6 +286,7 @@ class ClickHouseCluster:
if with_minio and not self.with_minio:
self.with_minio = True
self.minio_certs_dir = minio_certs_dir
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_minio.yml')])
self.base_minio_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
self.project_name, '--file', p.join(docker_compose_yml_dir, 'docker_compose_minio.yml')]
@ -442,11 +444,11 @@ class ClickHouseCluster:
print "Can't connect to Mongo " + str(ex)
time.sleep(1)
def wait_minio_to_start(self, timeout=30):
def wait_minio_to_start(self, timeout=30, secure=False):
minio_client = Minio('localhost:9001',
access_key='minio',
secret_key='minio123',
secure=False)
secure=secure)
start = time.time()
while time.time() - start < timeout:
try:
@ -568,11 +570,34 @@ class ClickHouseCluster:
time.sleep(10)
if self.with_minio and self.base_minio_cmd:
env = os.environ.copy()
prev_ca_certs = os.environ.get('SSL_CERT_FILE')
if self.minio_certs_dir:
minio_certs_dir = p.join(self.base_dir, self.minio_certs_dir)
env['MINIO_CERTS_DIR'] = minio_certs_dir
# Minio client (urllib3) uses SSL_CERT_FILE for certificate validation.
os.environ['SSL_CERT_FILE'] = p.join(minio_certs_dir, 'public.crt')
else:
# Attach empty certificates directory to ensure non-secure mode.
minio_certs_dir = p.join(self.instances_dir, 'empty_minio_certs_dir')
os.mkdir(minio_certs_dir)
env['MINIO_CERTS_DIR'] = minio_certs_dir
minio_start_cmd = self.base_minio_cmd + common_opts
logging.info("Trying to create Minio instance by command %s", ' '.join(map(str, minio_start_cmd)))
subprocess_check_call(minio_start_cmd)
logging.info("Trying to connect to Minio...")
self.wait_minio_to_start()
subprocess.check_call(minio_start_cmd, env=env)
try:
logging.info("Trying to connect to Minio...")
self.wait_minio_to_start(secure=self.minio_certs_dir is not None)
finally:
# Safely return previous value of SSL_CERT_FILE environment variable.
if self.minio_certs_dir:
if prev_ca_certs:
os.environ['SSL_CERT_FILE'] = prev_ca_certs
else:
os.environ.pop('SSL_CERT_FILE')
if self.with_cassandra and self.base_cassandra_cmd:
subprocess_check_call(self.base_cassandra_cmd + ['up', '-d', '--force-recreate'])
@ -1095,7 +1120,6 @@ class ClickHouseInstance:
if self.with_minio:
depends_on.append("minio1")
depends_on.append("redirect")
env_file = _create_env_file(os.path.dirname(self.docker_compose_path), self.env_variables)
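
The SSL_CERT_FILE handling above exists because the Minio Python client builds its own urllib3 pool and, as the added comment notes, urllib3 falls back to that environment variable for certificate validation when no CA bundle is passed explicitly. A minimal standalone sketch of the same idea, not part of the PR; the certificate path is hypothetical, the port and credentials follow the compose file:

import os
from minio import Minio

# Point urllib3 at the self-signed CA used by the MinIO container before the
# client is constructed; otherwise HTTPS validation fails in secure mode.
os.environ['SSL_CERT_FILE'] = '/path/to/minio_certs/public.crt'  # hypothetical path

client = Minio('localhost:9001',
               access_key='minio',
               secret_key='minio123',
               secure=True)
print(client.bucket_exists('root'))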


@ -0,0 +1,12 @@
<yandex>
<shutdown_wait_unfinished>3</shutdown_wait_unfinished>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
</yandex>


@ -0,0 +1,37 @@
<yandex>
<storage_configuration>
<disks>
<s3_secure>
<type>s3</type>
<endpoint>https://minio1:9001/root/data/</endpoint>
<access_key_id>minio</access_key_id>
<secret_access_key>minio123</secret_access_key>
</s3_secure>
<s3_secure_with_proxy>
<type>s3</type>
<endpoint>https://minio1:9001/root/data/</endpoint>
<access_key_id>minio</access_key_id>
<secret_access_key>minio123</secret_access_key>
<proxy>
<uri>https://proxy1</uri>
</proxy>
</s3_secure_with_proxy>
</disks>
<policies>
<s3_secure>
<volumes>
<main>
<disk>s3_secure</disk>
</main>
</volumes>
</s3_secure>
<s3_secure_with_proxy>
<volumes>
<main>
<disk>s3_secure_with_proxy</disk>
</main>
</volumes>
</s3_secure_with_proxy>
</policies>
</storage_configuration>
</yandex>


@ -0,0 +1,5 @@
<yandex>
<profiles>
<default/>
</profiles>
</yandex>


@ -0,0 +1,20 @@
<?xml version="1.0"?>
<yandex>
<tcp_port>9000</tcp_port>
<listen_host>127.0.0.1</listen_host>
<openSSL>
<client>
<cacheSessions>true</cacheSessions>
<verificationMode>none</verificationMode>
<invalidCertificateHandler>
<name>AcceptCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<max_concurrent_queries>500</max_concurrent_queries>
<mark_cache_size>5368709120</mark_cache_size>
<path>./clickhouse/</path>
<users_config>users.xml</users_config>
</yandex>


@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC+TCCAeGgAwIBAgIQfF4j70ZdR/W3XlFJq5iZgDANBgkqhkiG9w0BAQsFADAS
MRAwDgYDVQQKEwdBY21lIENvMB4XDTIwMDcwOTE1MTQ1M1oXDTIxMDcwOTE1MTQ1
M1owEjEQMA4GA1UEChMHQWNtZSBDbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBAM4i2tOlbbDxcvckVK/Zms95n2ipr7dZ0qToSf8qmF5d2EH6mqC0Vv2d
MJ+8JhQEKBh8AvUjmSqjd8tWmLJcqA84Gc8s8stB565wwkaMBvMExKlO+PQtynRd
xZjQVnj16hB0ZP4JHeVUOqMQa7uPQZQp6kqdkJ3u84EhRmU8fCCtUPOJIYHcfx7P
ScYfmJCpmqxrfWP18XcyYlhoCTm/nV+XT+XfUGwc6Sok5pCX5C70PiQ5MrEvYDIC
Q3iRNi2Lj4pTG8GUSwAcKLB08o7mxHvR1MGDGohtGnSAhdniK9aljNmBQfNIErFI
3529YDMW/qwRKSEkJpMy7r8RkfYamUsCAwEAAaNLMEkwDgYDVR0PAQH/BAQDAgKk
MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0RBAow
CIIGbWluaW8xMA0GCSqGSIb3DQEBCwUAA4IBAQDAlDKuJfQHzsBtFioNnf9VS+LA
m/aUG7ngl0Orynef45Kl21l1ToM0feKk1UnEN8crwE0wjZttby84sYaq0us7mnbl
CnFsvphNqk5jxMbSs/8Vx4TVEimyO7r5JeG4J9lEnAu2hKK5ZlwPzj7G8bL4fOvk
OGiiP5r0K3wTVU/Y96MmDUaJwBNiyp7WtsBRzkteSPQJDC98gUCYeYsIFokUs3gz
ILOAbGQBLKUn9kmYc+/LLNha0nsC0eQGmLaJgIYfele63c6KkklQ3ePjRZ71JfmN
TulovRrwUf0J4hYcIgC1URZbClsnQDOBFCY6Lm8eI+IGNWWU4I9WGoJ1Lkvk
-----END CERTIFICATE-----


@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC9ORgaBCx42ejp
9PSjc0uvwH/hTB6yZvZB4S+wxbzzfeKomX/JBcFHmGCIJJVjVV0rafv3vw+9f9u4
wrZpN4HZKnVyz3mBXEA1WDvLTLV8n8zVyso1qbnfF9Fa8wnk89b0xGWyM7jie7/c
TIGMrgm7hIPaM2zDzFwIfIAqZ1AexC4vADIffF9rcFLLjNHuv1uAc32jdfQEPluv
mBMzGkz254+MabxZWIZjkYn70kNSZDoyFmMGafBtkRTUPNq2+fGv/eLJ9Lxm3153
Ja0sCyzLlEo9+/z4ERqM5zwWre4vcwfO63c5pcSCzGw84teTpmDwSyiSR70TYJdt
BGQqZvLZAgMBAAECggEANe8oJ4I5CtlRwh3H/S7Hy/iaeqUvuroORwjghwpVqTGg
gV3/RlUVmkqceTG0QvP58n3rC9qxqdnfzvHw/FyN7lBj2a25fF3HD21u3aunrzX9
NJLwwAr4p9YqHjpX/6JhCrNQKVMEx8luDmTgKDETJRfIXVF7FvQQ53pVLcD03U+g
MgN61HBzfT5L0TLHoiKNQbVi+Wm1gw3zvb/a9Z1rULRZfIuKGM0bNNqRZt4rUUAV
QicklDR0Qv59jhr5Y/zjinKkqF8qudvUkaNT2JH1DLfXiAhuC0OQugMjYzNntQB4
hMhkqARnjuk/WPMvnXivnqx9o69BL5wyXIj3vD4fgQKBgQDVKaXAZJ5bo3VfcpLm
cyjtUuOzAxLU1bVGI0Hm1ARqeGVxSTypZLSX8xFi2n5Bvbgh/Y60aEac/1uKoXA9
gej1MT4hKpXyagrARx97E8zk5nf88kVxkiKUrifMjP2lDzHIYhdKk9R3SiV6gWvA
FoJtjBwFhJ6uWUPyry4nqFSENQKBgQDjP9k6CTZF0EnDqbADiQr7VKpebqhtLWRD
U0bQh/l57VrWqGksVOlivIJChP49q1H+hQ1YgfKIEDag8JJnf/inUSpVsw1ljAjv
knqNzn0Gdd9lTsiNGgqlCjhmWedkh4eO8uau479TwQc6gB4PQdLAFynQtt8Kk45P
GxdpRx4AlQKBgQCgxUGbYwhBC37aF1sObqrenBbajCXm2qxXEv6Ab0ZJWzb/g4I6
LJc8x3pEeZCiWsoG8Otxy/f+L2bGn049Rb8DNzmp4Cmp5SrorHvk4yE1P1IeOEgC
CXsFcnjYATrJBDXC8aCpgefMdOLhi71N6mxC3VrBGq5nxzHFVzTTelUMRQKBgQDa
yekhiCb5liy+tcuhy7qH+Z7BpjaATrh+XVoLgS5+5jeT/basmN/OUQH0e0iwJRaf
Poh30zynJT0DPDsobLwAkxN4SRg30Vf1GAjoKIqUwr2fMvfBafYfqbRdTmeKkTXB
OjlA3kKhp3GHMDxAojX+/Q4kRTx+WUwk+0dR88d99QKBgEiYrkSLjKXUFllDmVyp
HtlYKZiq5c33DA06SA2uVOprCdTbnbvP4WrgUsLGvqBcaPEd06fGGbvJWwUdnkXM
HNAkqSeUe5ueovidtoPdF+aPyxdGg3Z8551xOoHZFYrvgdZ4YMPcJrwQQsvWCcYP
GDnSoD8Xjd2LmekTpDBt5ZVz
-----END PRIVATE KEY-----


@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDBTCCAe2gAwIBAgIRANb2pr4HgR8YFwKNJMUSWiIwDQYJKoZIhvcNAQELBQAw
EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0yMDA3MDkxODE1MDBaFw0yMTA3MDkxODE1
MDBaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQC9ORgaBCx42ejp9PSjc0uvwH/hTB6yZvZB4S+wxbzzfeKomX/JBcFH
mGCIJJVjVV0rafv3vw+9f9u4wrZpN4HZKnVyz3mBXEA1WDvLTLV8n8zVyso1qbnf
F9Fa8wnk89b0xGWyM7jie7/cTIGMrgm7hIPaM2zDzFwIfIAqZ1AexC4vADIffF9r
cFLLjNHuv1uAc32jdfQEPluvmBMzGkz254+MabxZWIZjkYn70kNSZDoyFmMGafBt
kRTUPNq2+fGv/eLJ9Lxm3153Ja0sCyzLlEo9+/z4ERqM5zwWre4vcwfO63c5pcSC
zGw84teTpmDwSyiSR70TYJdtBGQqZvLZAgMBAAGjVjBUMA4GA1UdDwEB/wQEAwIC
pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBwGA1UdEQQV
MBOCBm1pbmlvMYIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAKU2LhvFFz
RFfUibt/WTj3rtUfKEBrQuUOYt2A8MTbC8pyEu+UJASTzunluUFze5zchEm1s3pZ
YRLcNwbJqLE6CzUxQ9b2iUhaeWuKrx4ZoPkY0uGiaXM/iKfVKTuNmhF2Sf/P4xUE
Pt19yQjpIhcicWQc37BBQFvnvy+n5wgHa/pgl1+QUvAa/fwYhF9S28xRLESzZepm
NMYysopV+YMaxcFa9SH44toXtXnvRWwVdEorlq1W3/AiJg8hDPzSa9UXLMjA968J
ONtn3qvwac9Ot53+QsXJdsMmDZLWGCi6I1w0ZQetpr/0ubaA1F3GdK9eB/S0thqU
l2VUgn3c/kKS
-----END CERTIFICATE-----


@ -0,0 +1,55 @@
import logging
import pytest
from helpers.cluster import ClickHouseCluster
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
def check_proxy_logs(cluster, proxy_instance):
logs = cluster.get_container_logs(proxy_instance)
# Check that all possible interactions with Minio are present
for http_method in ["PUT", "GET", "DELETE"]:
assert logs.find(http_method + " https://minio1") >= 0
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node", config_dir="configs", with_minio=True, minio_certs_dir='minio_certs')
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
@pytest.mark.parametrize(
"policy", ["s3_secure", "s3_secure_with_proxy"]
)
def test_s3_with_https(cluster, policy):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE s3_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
"""
.format(policy)
)
node.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')")
assert node.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')"
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
if policy.find("proxy") != -1:
check_proxy_logs(cluster, "proxy1")


@ -7,8 +7,8 @@
<access_key_id>minio</access_key_id>
<secret_access_key>minio123</secret_access_key>
<proxy>
<uri>http://proxy1:8888</uri>
<uri>http://proxy2:8888</uri>
<uri>http://proxy1</uri>
<uri>http://proxy2</uri>
</proxy>
</s3>
<s3_with_resolver>
@ -25,7 +25,7 @@
<resolver>
<endpoint>http://resolver:8080/hostname</endpoint>
<proxy_scheme>http</proxy_scheme>
<proxy_port>8888</proxy_port>
<proxy_port>80</proxy_port>
</resolver>
</proxy>
</s3_with_resolver>


@ -70,14 +70,6 @@ def get_s3_file_content(cluster, bucket, filename):
return data_str
# Returns nginx access log lines.
def get_nginx_access_logs():
handle = open("/nginx/access.log", "r")
data = handle.readlines()
handle.close()
return data
@pytest.fixture(scope="module")
def cluster():
try:
@ -250,10 +242,9 @@ def test_multipart_put(cluster, maybe_auth, positive):
else:
assert positive
# Use Nginx access logs to count number of parts uploaded to Minio.
nginx_logs = get_nginx_access_logs()
uploaded_parts = filter(lambda log_line: log_line.find(filename) >= 0 and log_line.find("PUT") >= 0, nginx_logs)
assert len(uploaded_parts) > 1
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(cluster, bucket, filename)
@ -321,8 +312,7 @@ def run_s3_mock(cluster):
logging.info("S3 mock started")
# Test get values in CSV format with default settings.
def test_get_csv_default(cluster):
def test_custom_auth_headers(cluster):
ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'), ["curl", "-s", "http://resolver:8080"])
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
@ -343,12 +333,16 @@ def test_infinite_redirect(cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket="redirected",
bucket=bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
result = run_query(instance, get_query)
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised