Add a custom Python stress-test script to the integration tests

This commit is contained in:
Arthur Passos 2022-10-14 12:39:08 -03:00
parent 91560dd607
commit 6d22bb78e0
8 changed files with 169 additions and 0 deletions

View File

@ -27,9 +27,14 @@ RUN apt-get update \
tar \
tzdata \
unixodbc \
python3-pip \
libcurl4-openssl-dev \
libssl-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
# pycurl is required by the integration-test stress script (scripts/stress_test.py)
RUN pip3 install pycurl
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH

View File

@ -0,0 +1,11 @@
<!-- Declares the `test_dns` user (empty password, default profile) whose
     access is restricted by hostname pattern: only clients whose resolved
     host name matches the regexp below may connect. -->
<yandex>
    <users>
        <test_dns>
            <password/>
            <networks>
                <!-- Client host name must end with "test1.example.com" -->
                <host_regexp>test1\.example\.com$</host_regexp>
            </networks>
            <profile>default</profile>
        </test_dns>
    </users>
</yandex>

View File

@ -0,0 +1,5 @@
<!-- Listen on all interfaces, both IPv6 ("::") and IPv4 ("0.0.0.0").
     listen_try=1 makes the server tolerate an address family that cannot
     be bound instead of failing startup. -->
<yandex>
    <listen_host>::</listen_host>
    <listen_host>0.0.0.0</listen_host>
    <listen_try>1</listen_try>
</yandex>

View File

@ -0,0 +1,8 @@
# CoreDNS config for the test: answer queries from a hosts file that the
# test rewrites at runtime (see setup_dns_server), forward the rest.
. {
    hosts /example.com {
        # Re-read the hosts file every 200ms so runtime rewrites take effect
        reload "200ms"
        # Names not present in the hosts file fall through to `forward`
        fallthrough
    }
    # 127.0.0.11 is Docker's embedded DNS resolver
    forward . 127.0.0.11
    log
}

View File

@ -0,0 +1 @@
Filled in at runtime, but needs to exist in order to be volume-mapped into Docker.

View File

@ -0,0 +1,56 @@
"""Stress script: hammer a ClickHouse server over HTTP as the restricted
`test_dns` user and verify every query succeeds.

Usage: python3 stress_test.py <server_ip>

Prints "1" and exits 0 when all requests returned the expected body,
otherwise prints "0" and exits 1.
"""
import sys
import threading
from io import BytesIO

import pycurl

server_ip = sys.argv[1]

mutex = threading.Lock()
success_counter = 0
number_of_threads = 1
number_of_iterations = 400


def perform_request():
    """Run one `select 1` HTTP query; count it as a success on exact match."""
    global success_counter
    buffer = BytesIO()
    crl = pycurl.Curl()
    # NOTE: a previous revision pinned pycurl.INTERFACE to a hard-coded local
    # IP (192.168.0.157) — a debugging leftover that breaks on any other
    # machine, so it is intentionally not set here.
    crl.setopt(crl.WRITEDATA, buffer)
    crl.setopt(crl.URL, f'http://{server_ip}:8123/?query=select+1&user=test_dns')
    try:
        crl.perform()
    finally:
        # Always end the curl session, even if the request raised.
        crl.close()

    str_response = buffer.getvalue().decode('iso-8859-1')
    expected_response = "1\n"
    # Context manager releases the lock even if the counter update raises.
    with mutex:
        if str_response == expected_response:
            success_counter += 1


def perform_multiple_requests(n):
    """Run `n` sequential requests (one worker's share of the load)."""
    for _ in range(n):
        perform_request()


threads = []
for _ in range(number_of_threads):
    thread = threading.Thread(
        target=perform_multiple_requests, args=(number_of_iterations,)
    )
    thread.start()
    threads.append(thread)

for thread in threads:
    thread.join()

# Report the verdict on stdout (the integration test asserts on "1\n") and
# use conventional exit codes: 0 = success. The previous exit(bool) form
# returned exit code 1 on success, which reads as failure to the caller.
all_succeeded = success_counter == number_of_threads * number_of_iterations
print("1" if all_succeeded else "0")
sys.exit(0 if all_succeeded else 1)

View File

@ -0,0 +1,83 @@
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
from time import sleep
import os
DOCKER_COMPOSE_PATH = get_docker_compose_path()
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

# One shared cluster for the whole module; both instances get fixed IPv6
# addresses so DNS entries for them can be set up deterministically.
cluster = ClickHouseCluster(__file__)
# Server under test: configs/host_regexp.xml restricts the `test_dns` user
# by hostname regexp; with_coredns=True also starts a CoreDNS container
# (reachable later as cluster.coredns_host).
ch_server = cluster.add_instance(
    "clickhouse-server",
    with_coredns=True,
    main_configs=["configs/listen_host.xml"],
    user_configs=["configs/host_regexp.xml"],
    ipv6_address="2001:3984:3989::1:1111",
)
# Plain container used only to run the stress script against the server.
client = cluster.add_instance(
    "clickhouse-client",
    ipv6_address="2001:3984:3989::1:1112",
)
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once, always shut it down.

    `cluster.start()` is inside the try so shutdown runs even when startup
    fails partway (containers may already be up).
    """
    global cluster
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def setup_dns_server(ip):
    """Map the three test domains to *ip* in the CoreDNS-served hosts file.

    The Corefile watches this file (`reload "200ms"`), so rewriting it is
    enough to change what the DNS server answers.
    """
    domains = "test3.example.com test2.example.com test1.example.com"
    config_dir = ch_server.env_variables["COREDNS_CONFIG_DIR"]
    hosts_file = f"{config_dir}/example.com"
    run_and_check(f"echo '{ip} {domains}' > {hosts_file}", shell=True)
def setup_ch_server(dns_server_ip):
    """Point the ClickHouse container's resolver at our CoreDNS instance.

    Rewrites /etc/resolv.conf inside the container, then drops ClickHouse's
    own DNS cache so the new resolver takes effect immediately.
    """
    resolv_commands = (
        f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf",
        "echo 'options ndots:0' >> /etc/resolv.conf",
    )
    for command in resolv_commands:
        ch_server.exec_in_container(["bash", "-c", command])
    ch_server.query("SYSTEM DROP DNS CACHE")
def build_endpoint_v4(ip):
    """Return a single-quoted ClickHouse HTTP URL querying SELECT 1 as test_dns."""
    url = "http://" + ip + ":8123/?query=SELECT+1&user=test_dns"
    return "'" + url + "'"
def build_endpoint_v6(ip):
    """Bracket *ip* (URL literal form for IPv6) and delegate to the v4 builder."""
    bracketed_ip = "[" + ip + "]"
    return build_endpoint_v4(bracketed_ip)
def test_host_regexp_multiple_ptr_v4(started_cluster):
    """Under concurrent load, host_regexp matching must keep admitting the
    `test_dns` user from a host whose name resolves to an allowed domain.

    The client's IP is mapped to the test domains via CoreDNS, the server is
    pointed at that DNS, and the stress script then issues many queries —
    it prints "1" only when every one succeeded.
    """
    server_ip = cluster.get_instance_ip("clickhouse-server")
    client_ip = cluster.get_instance_ip("clickhouse-client")
    dns_server_ip = cluster.get_instance_ip(cluster.coredns_host)

    setup_dns_server(client_ip)
    setup_ch_server(dns_server_ip)

    current_dir = os.path.dirname(__file__)
    client.copy_file_to_container(
        os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py"
    )

    # Removed: a block of commented-out alternatives (clickhouse-benchmark
    # invocation and an s3_endpoint resolver copy) left over from development.
    assert "1\n" == client.exec_in_container(["python3", "stress_test.py", server_ip])