CLICKHOUSE-3878: Add ODBC test

This commit is contained in:
alesapin 2018-08-22 18:42:27 +03:00
parent 4c0b30fb7d
commit e2f18da1a9
5 changed files with 128 additions and 17 deletions

View File

@ -3,6 +3,7 @@
#include <sstream>
#include <IO/ReadHelpers.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <Poco/File.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Path.h>
#include <Poco/Util/AbstractConfiguration.h>
@ -32,13 +33,13 @@ ODBCBridgeHelper::ODBCBridgeHelper(
// Spawns the ODBC bridge helper process that proxies ODBC calls over HTTP.
// NOTE(review): this span is a rendered commit diff — adjacent near-duplicate
// lines are before/after pairs from the commit; the function body is cut off
// at the hunk boundary below (no closing brace visible here).
void ODBCBridgeHelper::startODBCBridge() const
{
// Resolve the binary next to the running server ("application.dir").
Poco::Path path{config.getString("application.dir", "")};
path.setFileName("clickhouse-odbc-bridge"); // before: standalone bridge binary
path.setFileName("clickhouse"); // after: single multi-tool binary, run in "odbc-bridge" mode
if (!path.isFile()) // before: Poco::Path::isFile() — checks the path shape, not the filesystem
if (!Poco::File(path).exists()) // after: check the file actually exists on disk
throw Exception("clickhouse-odbc-bridge is not found", ErrorCodes::EXTERNAL_EXECUTABLE_NOT_FOUND);
// Build the command line for the bridge subprocess.
std::stringstream command;
command << path.toString() << ' ';
command << path.toString() << " odbc-bridge "; // after: pass the tool name to the combined binary
command << "--http-port " << config.getUInt("odbc_bridge.port", DEFAULT_PORT) << ' ';
command << "--listen-host " << config.getString("odbc_bridge.listen_host", DEFAULT_HOST) << ' ';
command << "--http-timeout " << http_timeout.totalMicroseconds() << ' ';

View File

# NOTE(review): rendered commit diff of the integration-test client helper —
# near-duplicate adjacent 'def'/call lines are before/after pairs and the
# '@ -a,b +c,d @' lines are hunk headers; indentation was stripped by the renderer.
@ -11,11 +11,11 @@ class Client:
self.command = [command, '--host', self.host, '--port', str(self.port), '--stacktrace']
# Run a query via clickhouse-client and block until the answer is read.
def query(self, sql, stdin=None, timeout=None, settings=None, user=None):
return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user).get_answer()
# after: new ignore_error flag is threaded down to CommandRequest so callers can
# tolerate an expected client failure (used by the ODBC segfault test).
def query(self, sql, stdin=None, timeout=None, settings=None, user=None, ignore_error=False):
return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, ignore_error=ignore_error).get_answer()
def get_query_request(self, sql, stdin=None, timeout=None, settings=None, user=None):
def get_query_request(self, sql, stdin=None, timeout=None, settings=None, user=None, ignore_error=False):
command = self.command[:]
if stdin is None:
@ -31,7 +31,7 @@ class Client:
if user is not None:
command += ['--user', user]
return CommandRequest(command, stdin, timeout)
return CommandRequest(command, stdin, timeout, ignore_error)
class QueryTimeoutExceedException(Exception):
@ -43,13 +43,14 @@ class QueryRuntimeException(Exception):
class CommandRequest:
def __init__(self, command, stdin=None, timeout=None):
# after: ignore_error=True suppresses both the timeout and the non-zero-exit
# checks in get_answer() below.
def __init__(self, command, stdin=None, timeout=None, ignore_error=False):
# Write data to tmp file to avoid PIPEs and execution blocking
stdin_file = tempfile.TemporaryFile()
stdin_file.write(stdin)
stdin_file.seek(0)
self.stdout_file = tempfile.TemporaryFile()
self.stderr_file = tempfile.TemporaryFile()
self.ignore_error = ignore_error
#print " ".join(command)
@ -75,10 +76,10 @@ class CommandRequest:
stdout = self.stdout_file.read()
stderr = self.stderr_file.read()
if self.timer is not None and not self.process_finished_before_timeout:
# after: a timeout is no longer fatal when the caller opted into ignore_error
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
raise QueryTimeoutExceedException('Client timed out!')
if self.process.returncode != 0 or stderr:
# after: likewise tolerate a crashed / erroring client when ignore_error is set
if (self.process.returncode != 0 or stderr) and not self.ignore_error:
raise QueryRuntimeException('Client failed! Return code: {}, stderr: {}'.format(self.process.returncode, stderr))
return stdout

View File

@ -9,6 +9,7 @@ import socket
import time
import errno
from dicttoxml import dicttoxml
import pymysql
import xml.dom.minidom
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
# NOTE(review): rendered commit diff of the ClickHouseCluster helper —
# near-duplicate adjacent lines are before/after pairs, '@ ...' lines are hunk
# headers, and indentation was stripped by the renderer. Module targets Python 2
# (print statements below).
@ -22,7 +23,6 @@ from .client import Client, CommandRequest
HELPERS_DIR = p.dirname(__file__)
DEFAULT_ENV_NAME = 'env_file'
# Serialize environment variables into an env-file consumed by docker-compose.
def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
full_path = os.path.join(path, fname)
with open(full_path, 'w') as f:
@ -63,12 +63,13 @@ class ClickHouseCluster:
self.with_zookeeper = False
self.with_mysql = False
self.with_kafka = False
self.with_odbc_drivers = False
self.docker_client = None
self.is_up = False
def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, hostname=None, env_variables={}):
# after: adds with_odbc_drivers (mounts odbc.ini, implies MySQL) and a custom
# docker image parameter. NOTE(review): mutable defaults ([]/{}) are shared
# across calls — safe only if add_instance never mutates them; verify.
def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, with_odbc_drivers=False, hostname=None, env_variables={}, image="ubuntu:14.04"):
"""Add an instance to the cluster.
name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
@ -86,7 +87,8 @@ class ClickHouseCluster:
instance = ClickHouseInstance(
self, self.base_dir, name, config_dir, main_configs, user_configs, macros, with_zookeeper,
self.zookeeper_config_path, with_mysql, with_kafka, self.base_configs_dir, self.server_bin_path, clickhouse_path_dir, hostname=hostname, env_variables=env_variables)
self.zookeeper_config_path, with_mysql, with_kafka, self.base_configs_dir, self.server_bin_path,
clickhouse_path_dir, with_odbc_drivers, hostname=hostname, env_variables=env_variables, image=image)
self.instances[name] = instance
self.base_cmd.extend(['--file', instance.docker_compose_path])
@ -102,6 +104,14 @@ class ClickHouseCluster:
self.base_mysql_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')]
# ODBC drivers imply a MySQL container (the MySQL ODBC driver is exercised
# against it), so the mysql compose file is pulled in when not already present.
if with_odbc_drivers and not self.with_odbc_drivers:
self.with_odbc_drivers = True
if not self.with_mysql:
self.with_mysql = True
self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')])
self.base_mysql_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')]
if with_kafka and not self.with_kafka:
self.with_kafka = True
self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_kafka.yml')])
@ -121,6 +131,19 @@ class ClickHouseCluster:
handle = self.docker_client.containers.get(docker_id)
return handle.attrs['NetworkSettings']['Networks'].values()[0]['IPAddress']
# Poll until the MySQL container accepts connections on 127.0.0.1:3308,
# retrying every 0.5s for up to `timeout` seconds; raises if it never comes up.
def wait_mysql_to_start(self, timeout=60):
start = time.time()
while time.time() - start < timeout:
try:
conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308)
conn.close()
print "Mysql Started"
return
except Exception:
# NOTE(review): broad catch is deliberate best-effort polling, but it
# also hides auth/config errors — pymysql.err.OperationalError would
# be narrower; confirm before tightening.
time.sleep(0.5)
raise Exception("Cannot wait MySQL container")
def start(self, destroy_dirs=True):
if self.is_up:
@ -149,6 +172,7 @@
if self.with_mysql and self.base_mysql_cmd:
subprocess.check_call(self.base_mysql_cmd + ['up', '-d', '--no-recreate'])
# after: block until MySQL is reachable before starting dependent instances
self.wait_mysql_to_start()
if self.with_kafka and self.base_kafka_cmd:
subprocess.check_call(self.base_kafka_cmd + ['up', '-d', '--no-recreate'])
@ -168,7 +192,6 @@
instance.client = Client(instance.ip_address, command=self.client_bin_path)
self.is_up = True
# NOTE(review): diff of the module-level DOCKER_COMPOSE_TEMPLATE string literal.
# The image is now parameterized ({image} replaces hard-coded ubuntu:14.04) and an
# {odbc_ini_path} volume-mount slot is added. No comments are inserted below this
# point because every following line lives inside the YAML string itself.
@ -212,7 +235,7 @@ DOCKER_COMPOSE_TEMPLATE = '''
version: '2'
services:
{name}:
image: ubuntu:14.04
image: {image}
hostname: {hostname}
user: '{uid}'
volumes:
@ -220,6 +243,7 @@ services:
- {configs_dir}:/etc/clickhouse-server/
- {db_dir}:/var/lib/clickhouse/
- {logs_dir}:/var/log/clickhouse-server/
{odbc_ini_path}
entrypoint:
- /usr/bin/clickhouse
- server
@ -233,9 +257,11 @@ services:
# NOTE(review): rendered commit diff of ClickHouseInstance — adjacent
# near-duplicate signature lines are before/after pairs; indentation stripped.
class ClickHouseInstance:
def __init__(
self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macros,
with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, base_configs_dir, server_bin_path, clickhouse_path_dir, hostname=None, env_variables={}):
with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, base_configs_dir, server_bin_path,
clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables={}, image="ubuntu:14.04"):
self.name = name
self.base_cmd = cluster.base_cmd[:]
@ -260,11 +286,17 @@ class ClickHouseInstance:
self.path = p.join(self.cluster.instances_dir, name)
self.docker_compose_path = p.join(self.path, 'docker_compose.yml')
self.env_variables = env_variables
# odbc_ini_path is a docker volume-mount spec "host_path:container_path";
# the host side is written by _create_odbc_config_file(). Enabling ODBC
# drivers forces MySQL on (the MySQL ODBC DSN points at the mysql container).
if with_odbc_drivers:
self.odbc_ini_path = os.path.dirname(self.docker_compose_path) + "/odbc.ini:/etc/odbc.ini"
self.with_mysql = True
else:
self.odbc_ini_path = ""
self.docker_client = None
self.ip_address = None
self.client = None
self.default_timeout = 20.0 # 20 sec
self.image = image
# Connects to the instance via clickhouse-client, sends a query (1st argument) and returns the answer
def query(self, *args, **kwargs):
@ -340,6 +372,40 @@ class ClickHouseInstance:
xml_str = dicttoxml(dictionary, custom_root="yandex", attr_type=False)
return xml.dom.minidom.parseString(xml_str).toprettyxml()
# Mapping of driver name -> odbc.ini section fields for the drivers baked into
# the test image. Empty when the instance was created without with_odbc_drivers.
@property
def odbc_drivers(self):
if self.odbc_ini_path:
return {
"SQLite3": {
"DSN": "sqlite3_odbc",
"Database" : "/tmp/sqliteodbc",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/libsqlite3odbc.so",
"Setup": "/usr/lib/x86_64-linux-gnu/odbc/libsqlite3odbc.so",
},
# Credentials match the mysql docker-compose container (root/clickhouse);
# "Server": "mysql1" is the container hostname on the compose network.
"MySQL": {
"DSN": "mysql_odbc",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so",
"Database": "clickhouse",
"Uid": "root",
"Pwd": "clickhouse",
"Server": "mysql1",
},
# NOTE(review): no Database/credentials here — presumably relies on driver
# defaults; confirm against the tests that use this DSN.
"PostgreSQL": {
"DSN": "postgresql_odbc",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/psqlodbca.so",
"Setup": "/usr/lib/x86_64-linux-gnu/odbc/libodbcpsqlS.so",
}
}
else:
return {}
# Write the odbc.ini file on the host side of the odbc_ini_path volume mount:
# one [DSN] section per driver, every remaining key emitted as key=value.
def _create_odbc_config_file(self):
with open(self.odbc_ini_path.split(':')[0], 'w') as f:
for driver_setup in self.odbc_drivers.values():
f.write("[{}]\n".format(driver_setup["DSN"]))
for key, value in driver_setup.items():
if key != "DSN":
f.write(key + "=" + value + "\n")
# NOTE(review): rendered commit diff of create_dir — adjacent near-duplicate
# lines are before/after pairs; '@ ...' lines are hunk headers.
def create_dir(self, destroy_dir=True):
"""Create the instance directory and all the needed files there."""
@ -409,8 +475,14 @@ class ClickHouseInstance:
env_file = _create_env_file(os.path.dirname(self.docker_compose_path), self.env_variables)
# When ODBC drivers are enabled, write odbc.ini on the host and emit the
# '- host:container' volume line into the compose template; otherwise the
# {odbc_ini_path} placeholder is filled with an empty string.
odbc_ini_path = ""
if self.odbc_ini_path:
self._create_odbc_config_file()
odbc_ini_path = '- ' + self.odbc_ini_path
with open(self.docker_compose_path, 'w') as docker_compose:
docker_compose.write(DOCKER_COMPOSE_TEMPLATE.format(
image=self.image,
name=self.name,
hostname=self.hostname,
uid=os.getuid(),
@ -420,7 +492,9 @@ class ClickHouseInstance:
db_dir=db_dir,
logs_dir=logs_dir,
depends_on=str(depends_on),
env_file=env_file))
env_file=env_file,
odbc_ini_path=odbc_ini_path,
))
def destroy_dir(self):

View File

@ -0,0 +1,35 @@
import time

import pytest

from helpers.cluster import ClickHouseCluster

# NOTE(review): the original pinned server_bin_path to a developer-local path
# ("/home/alesap/ClickHouse/dbms/programs/clickhouse"), which breaks the test
# for everyone else — rely on the cluster's default binary lookup instead.
cluster = ClickHouseCluster(__file__)
# NOTE(review): personal docker image — consider moving it to an official
# test-image namespace before merging.
node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True,
                             image='alesapin/ubuntu_with_odbc:14.04')


@pytest.fixture(scope="module")
def started_cluster():
    """Bring the cluster up once for this module and tear it down afterwards."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_segfault_doesnt_crash_server(started_cluster):
    mysql_setup = node1.odbc_drivers["MySQL"]
    # actually, I don't know what's wrong with that connection string, but
    # libmyodbc always falls into a segfault
    node1.query("select 1 from odbc('DSN={}', 'dual')".format(mysql_setup["DSN"]), ignore_error=True)
    # but after the segfault the server is still available
    assert node1.query("select 1") == "1\n"


def test_simple_select_works(started_cluster):
    # Create and populate a table through sqlite3 inside the container, then
    # read it back through the ODBC table function.
    sqlite_setup = node1.odbc_drivers["SQLite3"]
    sqlite_db = sqlite_setup["Database"]
    node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t1(x INTEGER PRIMARY KEY ASC, y, z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
    node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t1 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
    assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\t3\n"