import time
import psycopg2
import pymysql.cursors
import pytest
import logging
import os.path

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from multiprocessing.dummy import Pool

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    "node1",
    with_odbc_drivers=True,
    with_mysql=True,
    with_postgres=True,
    main_configs=["configs/openssl.xml", "configs/odbc_logging.xml"],
    dictionaries=[
        "configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml",
        "configs/dictionaries/sqlite3_odbc_cached_dictionary.xml",
        "configs/dictionaries/postgres_odbc_hashed_dictionary.xml",
    ],
    stay_alive=True,
)


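# MySQL DDL templates; "{}" is filled in with the target table name.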
drop_table_sql_template = """
    DROP TABLE IF EXISTS `clickhouse`.`{}`
    """

create_table_sql_template = """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int NOT NULL default 0,
    `money` int NOT NULL default 0,
    `column_x` int default NULL,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """


def skip_test_msan(instance):
    if instance.is_built_with_memory_sanitizer():
        pytest.skip("Memory Sanitizer cannot work with third-party shared libraries")


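# Connect to the MySQL container, retrying for up to ~15 seconds while the
# service starts; pings and reuses an existing connection if there is one.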
def get_mysql_conn():
    errors = []
    conn = None
    for _ in range(15):
        try:
            if conn is None:
                conn = pymysql.connect(
                    user="root",
                    password="clickhouse",
                    host=cluster.mysql_ip,
                    port=cluster.mysql_port,
                )
            else:
                conn.ping(reconnect=True)
            logging.debug(
                f"MySQL connection established: {cluster.mysql_ip}:{cluster.mysql_port}"
            )
            return conn
        except Exception as e:
            errors += [str(e)]
            time.sleep(1)

    raise Exception("Connection not established, {}".format(errors))


def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
        cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))


def create_mysql_table(conn, table_name):
    with conn.cursor() as cursor:
        cursor.execute(drop_table_sql_template.format(table_name))
        cursor.execute(create_table_sql_template.format(table_name))


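# Connect to the PostgreSQL container with autocommit enabled, retrying for
# up to ~15 seconds while the service starts.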
def get_postgres_conn(started_cluster):
    conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(
        started_cluster.postgres_ip, started_cluster.postgres_port
    )
    errors = []
    for _ in range(15):
        try:
            conn = psycopg2.connect(conn_string)
            logging.debug("Postgres connection established: {}".format(conn_string))
            conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
            conn.autocommit = True
            return conn
        except Exception as e:
            errors += [str(e)]
            time.sleep(1)

    raise Exception(
        "Postgres connection not established, DSN={}, {}".format(conn_string, errors)
    )


def create_postgres_db(conn, name):
    cursor = conn.cursor()
    cursor.execute("CREATE SCHEMA {}".format(name))


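# Module-scoped fixture: starts the cluster, creates the SQLite test tables,
# and sets up a "clickhouse" database in MySQL and a "clickhouse" schema in
# PostgreSQL for the tests below.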
@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]

        logging.debug(f"sqlite data received: {sqlite_db}")
        node1.exec_in_container(
            [
                "sqlite3",
                sqlite_db,
                "CREATE TABLE t1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);",
            ],
            privileged=True,
            user="root",
        )
        node1.exec_in_container(
            [
                "sqlite3",
                sqlite_db,
                "CREATE TABLE t2(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);",
            ],
            privileged=True,
            user="root",
        )
        node1.exec_in_container(
            [
                "sqlite3",
                sqlite_db,
                "CREATE TABLE t3(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);",
            ],
            privileged=True,
            user="root",
        )
        node1.exec_in_container(
            [
                "sqlite3",
                sqlite_db,
                "CREATE TABLE t4(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);",
            ],
            privileged=True,
            user="root",
        )
        node1.exec_in_container(
            [
                "sqlite3",
                sqlite_db,
                "CREATE TABLE tf1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);",
            ],
            privileged=True,
            user="root",
        )
        logging.debug("sqlite tables created")
        mysql_conn = get_mysql_conn()
        logging.debug("mysql connection received")
        # create mysql db and table
        create_mysql_db(mysql_conn, "clickhouse")
        logging.debug("mysql database created")

        postgres_conn = get_postgres_conn(cluster)
        logging.debug("postgres connection received")

        create_postgres_db(postgres_conn, "clickhouse")
        logging.debug("postgres db created")

        cursor = postgres_conn.cursor()
        cursor.execute(
            "create table if not exists clickhouse.test_table (id int primary key, column1 int not null, column2 varchar(40) not null)"
        )

        yield cluster
    except Exception as ex:
        logging.exception(ex)
        raise ex
    finally:
        cluster.shutdown()


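# Read a MySQL table through the odbc() table function and through the MySQL
# engine; checks NULL handling with the external_table_functions_use_nulls
# setting.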
def test_mysql_simple_select_works(started_cluster):
    skip_test_msan(node1)

    mysql_setup = node1.odbc_drivers["MySQL"]

    table_name = "test_insert_select"
    conn = get_mysql_conn()
    create_mysql_table(conn, table_name)

    # Check that NULL values are handled correctly by the ODBC bridge
    with conn.cursor() as cursor:
        cursor.execute(
            "INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format(
                table_name
            )
        )
        conn.commit()

    assert (
        node1.query(
            "SELECT column_x FROM odbc('DSN={}', '{}')".format(
                mysql_setup["DSN"], table_name
            ),
            settings={"external_table_functions_use_nulls": "1"},
        )
        == "\\N\n511\n"
    )
    assert (
        node1.query(
            "SELECT column_x FROM odbc('DSN={}', '{}')".format(
                mysql_setup["DSN"], table_name
            ),
            settings={"external_table_functions_use_nulls": "0"},
        )
        == "0\n511\n"
    )

    node1.query(
        """
        CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
        """.format(
            table_name, table_name
        )
    )

    node1.query(
        "INSERT INTO {} (id, name, money, column_x) select number, concat('name_', toString(number)), 3, NULL from numbers(49)".format(
            table_name
        )
    )
    node1.query(
        "INSERT INTO {} (id, name, money, column_x) select number, concat('name_', toString(number)), 3, 42 from numbers(51, 49)".format(
            table_name
        )
    )
    assert (
        node1.query(
            "SELECT COUNT () FROM {} WHERE column_x IS NOT NULL".format(table_name)
        )
        == "50\n"
    )
    assert (
        node1.query("SELECT COUNT () FROM {} WHERE column_x IS NULL".format(table_name))
        == "50\n"
    )
    assert (
        node1.query(
            "SELECT count(*) FROM odbc('DSN={}', '{}')".format(
                mysql_setup["DSN"], table_name
            )
        )
        == "100\n"
    )

    # previously this test failed with a segfault
    # just to be sure :)
    assert node1.query("select 1") == "1\n"

    conn.close()


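# INSERT through an ODBC storage backed by MySQL, and through insert-select
# into the odbc() table function, then read the rows back.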
def test_mysql_insert(started_cluster):
    skip_test_msan(node1)

    mysql_setup = node1.odbc_drivers["MySQL"]
    table_name = "test_insert"
    conn = get_mysql_conn()
    create_mysql_table(conn, table_name)
    odbc_args = "'DSN={}', '{}', '{}'".format(
        mysql_setup["DSN"], mysql_setup["Database"], table_name
    )

    node1.query(
        "create table mysql_insert (id Int64, name String, age UInt8, money Float, column_x Nullable(Int16)) Engine=ODBC({})".format(
            odbc_args
        )
    )
    node1.query(
        "insert into mysql_insert values (1, 'test', 11, 111, 1111), (2, 'odbc', 22, 222, NULL)"
    )
    assert (
        node1.query("select * from mysql_insert")
        == "1\ttest\t11\t111\t1111\n2\todbc\t22\t222\t\\N\n"
    )

    node1.query(
        "insert into table function odbc({}) values (3, 'insert', 33, 333, 3333)".format(
            odbc_args
        )
    )
    node1.query(
        "insert into table function odbc({}) (id, name, age, money) select id*4, upper(name), age*4, money*4 from odbc({}) where id=1".format(
            odbc_args, odbc_args
        )
    )
    assert (
        node1.query("select * from mysql_insert where id in (3, 4)")
        == "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n"
    )


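# Plain selects from a SQLite table through the odbc() table function.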
def test_sqlite_simple_select_function_works(started_cluster):
    skip_test_msan(node1)

    sqlite_setup = node1.odbc_drivers["SQLite3"]
    sqlite_db = sqlite_setup["Database"]

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO t1 values(1, 1, 2, 3);"],
        privileged=True,
        user="root",
    )
    assert (
        node1.query(
            "select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "1\t1\t2\t3\n"
    )
    assert (
        node1.query(
            "select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "2\n"
    )
    assert (
        node1.query(
            "select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "3\n"
    )
    assert (
        node1.query(
            "select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "1\n"
    )
    assert (
        node1.query(
            "select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "1\t2\n"
    )
    assert (
        node1.query(
            "select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1")
        )
        == "3\t1\t2\n"
    )
    assert (
        node1.query(
            "select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(
                sqlite_setup["DSN"], "t1"
            )
        )
        == "1\t1\n"
    )


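# CREATE TABLE ... AS odbc(...): the table structure is taken from the
# underlying ODBC source.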
def test_sqlite_table_function(started_cluster):
    skip_test_msan(node1)

    sqlite_setup = node1.odbc_drivers["SQLite3"]
    sqlite_db = sqlite_setup["Database"]

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO tf1 values(1, 1, 2, 3);"],
        privileged=True,
        user="root",
    )
    node1.query(
        "create table odbc_tf as odbc('DSN={}', '{}')".format(
            sqlite_setup["DSN"], "tf1"
        )
    )
    assert node1.query("select * from odbc_tf") == "1\t1\t2\t3\n"
    assert node1.query("select y from odbc_tf") == "2\n"
    assert node1.query("select z from odbc_tf") == "3\n"
    assert node1.query("select x from odbc_tf") == "1\n"
    assert node1.query("select x, y from odbc_tf") == "1\t2\n"
    assert node1.query("select z, x, y from odbc_tf") == "3\t1\t2\n"
    assert node1.query("select count(), sum(x) from odbc_tf group by x") == "1\t1\n"


def test_sqlite_simple_select_storage_works(started_cluster):
    skip_test_msan(node1)

    sqlite_setup = node1.odbc_drivers["SQLite3"]
    sqlite_db = sqlite_setup["Database"]

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO t4 values(1, 1, 2, 3);"],
        privileged=True,
        user="root",
    )
    node1.query(
        "create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format(
            sqlite_setup["DSN"]
        )
    )

    assert node1.query("select * from SqliteODBC") == "1\t2\t3\n"
    assert node1.query("select y from SqliteODBC") == "2\n"
    assert node1.query("select z from SqliteODBC") == "3\n"
    assert node1.query("select x from SqliteODBC") == "1\n"
    assert node1.query("select x, y from SqliteODBC") == "1\t2\n"
    assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n"
    assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n"


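# The hashed dictionary should reload new rows from SQLite, except when its
# invalidate query says the source is unchanged.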
def test_sqlite_odbc_hashed_dictionary(started_cluster):
    skip_test_msan(node1)

    sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO t2 values(1, 1, 2, 3);"],
        privileged=True,
        user="root",
    )

    node1.query("SYSTEM RELOAD DICTIONARY sqlite3_odbc_hashed")
    first_update_time = node1.query(
        "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'"
    )
    logging.debug(f"First update time {first_update_time}")

    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3"
    )
    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1"
    )  # default

    second_update_time = node1.query(
        "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'"
    )
    # Reloaded with new data
    logging.debug(f"Second update time {second_update_time}")
    while first_update_time == second_update_time:
        second_update_time = node1.query(
            "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'"
        )
        logging.debug("Waiting for the dictionary to update for the second time")
        time.sleep(0.1)

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO t2 values(200, 200, 2, 7);"],
        privileged=True,
        user="root",
    )

    # No reload because of the invalidate query
    third_update_time = node1.query(
        "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'"
    )
    logging.debug(f"Third update time {third_update_time}")
    counter = 0
    while third_update_time == second_update_time:
        third_update_time = node1.query(
            "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'"
        )
        time.sleep(0.1)
        if counter > 50:
            break
        counter += 1

    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3"
    )
    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1"
    )  # still default

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "REPLACE INTO t2 values(1, 1, 2, 5);"],
        privileged=True,
        user="root",
    )

    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "5"
    )
    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "7"
    )


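# The cached dictionary fetches missing keys on demand; updated values appear
# once the cached entry expires.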
def test_sqlite_odbc_cached_dictionary(started_cluster):
    skip_test_msan(node1)

    sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
    node1.exec_in_container(
        ["sqlite3", sqlite_db, "INSERT INTO t3 values(1, 1, 2, 3);"],
        privileged=True,
        user="root",
    )

    assert (
        node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))")
        == "3\n"
    )

    # Allow insert
    node1.exec_in_container(["chmod", "a+rw", "/tmp"], privileged=True, user="root")
    node1.exec_in_container(["chmod", "a+rw", sqlite_db], privileged=True, user="root")

    node1.query(
        "insert into table function odbc('DSN={};ReadOnly=0', '', 't3') values (200, 200, 2, 7)".format(
            node1.odbc_drivers["SQLite3"]["DSN"]
        )
    )

    assert (
        node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))")
        == "7\n"
    )  # new value

    node1.exec_in_container(
        ["sqlite3", sqlite_db, "REPLACE INTO t3 values(1, 1, 2, 12);"],
        privileged=True,
        user="root",
    )

    assert_eq_with_retry(
        node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12"
    )


def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute("truncate table clickhouse.test_table")
    cursor.execute(
        "insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')"
    )
    node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
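    # Forcibly drop the server's open connections to PostgreSQL; the reload
    # below should transparently reconnect.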
    node1.exec_in_container(
        ["ss", "-K", "dport", "postgresql"], privileged=True, user="root"
    )
    node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
    assert_eq_with_retry(
        node1,
        "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))",
        "hello",
    )
    assert_eq_with_retry(
        node1,
        "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))",
        "world",
    )


def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute("truncate table clickhouse.test_table")
    cursor.execute("insert into clickhouse.test_table values(3, 3, 'xxx')")
    for i in range(100):
        try:
            node1.query("system reload dictionary postgres_odbc_hashed", timeout=15)
        except Exception as ex:
            assert False, "Exception occurred -- odbc-bridge hangs: " + str(ex)

    assert_eq_with_retry(
        node1,
        "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))",
        "xxx",
    )


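# INSERT into PostgreSQL through the ODBC storage and the odbc() table
# function, then read everything back.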
def test_postgres_insert(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    conn.cursor().execute("truncate table clickhouse.test_table")

    # Also test with a Servername containing '.' and '-' symbols (defined in
    # the postgres .yml file). This is needed to check parsing, validation and
    # reconstruction of the connection string.

    node1.query(
        "create table pg_insert (id UInt64, column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')"
    )
    node1.query("insert into pg_insert values (1, 1, 'hello'), (2, 2, 'world')")
    assert node1.query("select * from pg_insert") == "1\t1\thello\n2\t2\tworld\n"
    node1.query(
        "insert into table function odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table') format CSV 3,3,test"
    )
    node1.query(
        "insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')"
        " select number, number, 's' || toString(number) from numbers (4, 7)"
    )
    assert (
        node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n"
    )
    assert (
        node1.query(
            "select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table')))"
        )
        == "55\t10\n"
    )


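# The odbc-bridge is spawned by clickhouse-server and must not outlive it:
# kill the server, then check that the bridge goes away too.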
def test_bridge_dies_with_parent(started_cluster):
    skip_test_msan(node1)

    if node1.is_built_with_address_sanitizer():
        # TODO: Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge in this test and
        # that's linked somehow with that we have replaced getauxval() in glibc-compatibility.
        # The leak sanitizer calls getauxval() for its own purposes, and our replaced version doesn't seem to be equivalent in that case.
        pytest.skip(
            "Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge"
        )

    node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))")

    clickhouse_pid = node1.get_process_pid("clickhouse server")
    bridge_pid = node1.get_process_pid("odbc-bridge")
    assert clickhouse_pid is not None
    assert bridge_pid is not None

    while clickhouse_pid is not None:
        try:
            node1.exec_in_container(
                ["kill", str(clickhouse_pid)], privileged=True, user="root"
            )
        except:
            pass
        clickhouse_pid = node1.get_process_pid("clickhouse server")
        time.sleep(1)

    for i in range(30):
        time.sleep(1)  # just to be sure that odbc-bridge caught the signal
        bridge_pid = node1.get_process_pid("odbc-bridge")
        if bridge_pid is None:
            break

    if bridge_pid:
        out = node1.exec_in_container(
            ["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"],
            privileged=True,
            user="root",
        )
        logging.debug(f"Bridge is running, gdb output:\n{out}")

    assert clickhouse_pid is None
    assert bridge_pid is None
    node1.start_clickhouse(20)


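# PostgreSQL date values should round-trip into ClickHouse's Date type.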
def test_odbc_postgres_date_data_type(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)"
    )

    cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, 1, '2020-12-01')")
    cursor.execute("INSERT INTO clickhouse.test_date VALUES (2, 2, '2020-12-02')")
    cursor.execute("INSERT INTO clickhouse.test_date VALUES (3, 3, '2020-12-03')")
    conn.commit()

    node1.query(
        """
        CREATE TABLE test_date (id UInt64, column1 UInt64, column2 Date)
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_date')"""
    )

    expected = "1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n"
    result = node1.query("SELECT * FROM test_date")
    assert result == expected
    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_date")
    node1.query("DROP TABLE IF EXISTS test_date")


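# Type conversions in both directions: integers, floats and timestamp, plus
# DateTime64/Decimal against PostgreSQL Timestamp/Numeric.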
def test_odbc_postgres_conversions(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute(
        """CREATE TABLE IF NOT EXISTS clickhouse.test_types (
        a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial,
        h timestamp)"""
    )

    node1.query(
        """
        INSERT INTO TABLE FUNCTION
        odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types')
        VALUES (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12')"""
    )

    result = node1.query(
        """
        SELECT a, b, c, d, e, f, g, h
        FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types')
        """
    )

    assert (
        result
        == "-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12\n"
    )
    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types")
    cursor.execute(
        """CREATE TABLE IF NOT EXISTS clickhouse.test_types (column1 Timestamp, column2 Numeric)"""
    )

    node1.query(
        """
        CREATE TABLE test_types (column1 DateTime64, column2 Decimal(5, 1))
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types')"""
    )

    node1.query(
        """INSERT INTO test_types
        SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)"""
    )

    expected = node1.query(
        "SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)"
    )
    result = node1.query("SELECT * FROM test_types")
    logging.debug(result)
    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types")
    assert result == expected


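# Cyrillic varchar data must survive the round trip through the bridge.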
def test_odbc_cyrillic_with_varchar(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_cyrillic")
    cursor.execute("CREATE TABLE clickhouse.test_cyrillic (name varchar(11))")

    node1.query(
        """
        CREATE TABLE test_cyrillic (name String)
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic')"""
    )

    cursor.execute("INSERT INTO clickhouse.test_cyrillic VALUES ('A-nice-word')")
    cursor.execute("INSERT INTO clickhouse.test_cyrillic VALUES ('Красивенько')")

    result = node1.query("""SELECT * FROM test_cyrillic ORDER BY name""")
    assert result == "A-nice-word\nКрасивенько\n"
    result = node1.query(
        """SELECT name FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic')"""
    )
    assert result == "A-nice-word\nКрасивенько\n"


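# Read the same ODBC table 25 times in one query (24 UNION ALLs plus the
# final subselect) to exercise many simultaneous bridge connections.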
def test_many_connections(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()

    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table")
    cursor.execute("CREATE TABLE clickhouse.test_pg_table (key integer, value integer)")

    node1.query(
        """
        DROP TABLE IF EXISTS test_pg_table;
        CREATE TABLE test_pg_table (key UInt32, value UInt32)
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')"""
    )

    node1.query("INSERT INTO test_pg_table SELECT number, number FROM numbers(10)")

    query = "SELECT count() FROM ("
    for i in range(24):
        query += "SELECT key FROM {t} UNION ALL "
    query += "SELECT key FROM {t})"

    assert node1.query(query.format(t="test_pg_table")) == "250\n"


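# Run inserts and selects concurrently from a small thread pool.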
def test_concurrent_queries(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()

    node1.query(
        """
        DROP TABLE IF EXISTS test_pg_table;
        CREATE TABLE test_pg_table (key UInt32, value UInt32)
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')"""
    )

    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table")
    cursor.execute("CREATE TABLE clickhouse.test_pg_table (key integer, value integer)")

    def node_insert(_):
        for i in range(5):
            node1.query(
                "INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)",
                user="default",
            )

    busy_pool = Pool(5)
    p = busy_pool.map_async(node_insert, range(5))
    p.wait()
    assert_eq_with_retry(
        node1, "SELECT count() FROM test_pg_table", str(5 * 5 * 1000), retry_count=100
    )

    def node_insert_select(_):
        for i in range(5):
            result = node1.query(
                "INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)",
                user="default",
            )
            result = node1.query(
                "SELECT * FROM test_pg_table LIMIT 100", user="default"
            )

    busy_pool = Pool(5)
    p = busy_pool.map_async(node_insert_select, range(5))
    p.wait()
    assert_eq_with_retry(
        node1,
        "SELECT count() FROM test_pg_table",
        str(5 * 5 * 1000 * 2),
        retry_count=100,
    )

    node1.query("DROP TABLE test_pg_table;")
    cursor.execute("DROP TABLE clickhouse.test_pg_table;")


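# A 1000-column table with long (~50-character) column names must survive
# the trip through the bridge.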
def test_odbc_long_column_names(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    column_name = "column" * 8
    create_table = "CREATE TABLE clickhouse.test_long_column_names ("
    for i in range(1000):
        if i != 0:
            create_table += ", "
        create_table += "{} integer".format(column_name + str(i))
    create_table += ")"
    cursor.execute(create_table)
    insert = (
        "INSERT INTO clickhouse.test_long_column_names SELECT i"
        + ", i" * 999
        + " FROM generate_series(0, 99) as t(i)"
    )
    cursor.execute(insert)
    conn.commit()
    create_table = "CREATE TABLE test_long_column_names ("
    for i in range(1000):
        if i != 0:
            create_table += ", "
        create_table += "{} UInt32".format(column_name + str(i))
    create_table += ") ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_column_names')"
    result = node1.query(create_table)

    result = node1.query("SELECT * FROM test_long_column_names")
    expected = node1.query("SELECT number" + ", number" * 999 + " FROM numbers(100)")
    assert result == expected

    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_long_column_names")
    node1.query("DROP TABLE IF EXISTS test_long_column_names")


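# Long text values, including a ~4 MB string, must pass through the bridge
# intact; based on the sample from issue 9363.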
def test_odbc_long_text(started_cluster):
    skip_test_msan(node1)

    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute("drop table if exists clickhouse.test_long_text")
    cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)")

    # sample test from issue 9363
    text_from_issue = """BEGIN These examples only show the order that data is arranged in. The values from different columns are stored separately, and data from the same column is stored together. Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+. Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. The higher the load on the system, the more important it is to customize the system set up to match the requirements of the usage scenario, and the more fine grained this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, under a high load, the system will handle all the scenarios equally poorly, or will work well for just one or few of possible scenarios. Key Properties of OLAP Scenario¶ The vast majority of requests are for read access. Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all. Data is added to the DB but is not modified. For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns. Tables are "wide," meaning they contain a large number of columns. Queries are relatively rare (usually hundreds of queries per server or less per second). For simple queries, latencies around 50 ms are allowed. Column values are fairly small: numbers and short strings (for example, 60 bytes per URL). Requires high throughput when processing a single query (up to billions of rows per second per server). Transactions are not necessary. Low requirements for data consistency. There is one large table per query. All tables are small, except for one. A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server"s RAM. It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn"t make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases. Why Column-Oriented Databases Work Better in the OLAP Scenario¶ Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster in processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually. END"""
    cursor.execute(
        """insert into clickhouse.test_long_text (flen, field1) values (3248, '{}')""".format(
            text_from_issue
        )
    )

    node1.query(
        """
        DROP TABLE IF EXISTS test_long_text;
        CREATE TABLE test_long_text (flen UInt32, field1 String)
        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_text')"""
    )
    result = node1.query("select field1 from test_long_text;")
    assert result.strip() == text_from_issue

    long_text = "text" * 1000000
    cursor.execute(
        """insert into clickhouse.test_long_text (flen, field1) values (400000, '{}')""".format(
            long_text
        )
    )
    result = node1.query("select field1 from test_long_text where flen=400000;")
    assert result.strip() == long_text