Try fix integration tests.

This commit is contained in:
Nikolai Kochetov 2021-08-17 16:33:30 +03:00
parent c7dc42e30b
commit c36569e17c
6 changed files with 44 additions and 44 deletions

View File

@@ -19,10 +19,10 @@
     <structure>
         <id>
             <name>id</name>
-            <type>UInt32</type>
+            <!--<type>UInt32</type>-->
         </id>
         <attribute>
-            <name>id</name>
+            <name>key</name>
             <type>UInt32</type>
             <null_value></null_value>
         </attribute>
@@ -65,10 +65,10 @@
     <structure>
         <id>
             <name>id</name>
-            <type>UInt32</type>
+            <!--<type>UInt32</type>-->
        </id>
         <attribute>
-            <name>id</name>
+            <name>key</name>
             <type>UInt32</type>
             <null_value></null_value>
         </attribute>
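
Both hunks above make the same fix: a simple `<id>` key in a ClickHouse dictionary definition is implicitly UInt64 (hence the `<type>` is commented out), and the attribute is renamed to `key` so it no longer duplicates the name of the key column. A minimal sketch of the lookup this enables, assuming a reachable ClickHouse server and the `clickhouse-driver` package; the dictionary name `dict0` is a placeholder, not part of this commit:

    # Sketch only: 'dict0' stands in for a dictionary defined as above.
    from clickhouse_driver import Client

    client = Client('localhost')  # assumes a local ClickHouse server
    # After the rename, lookups address the attribute 'key'; 'id' is now
    # only the key column and no longer a fetchable attribute.
    rows = client.execute("SELECT dictGetUInt32('dict0', 'key', toUInt64(0))")
    print(rows[0][0])  # expected 0 with the test data inserted below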

View File

@@ -13,11 +13,11 @@ node1 = cluster.add_instance('node1',
 postgres_dict_table_template = """
     CREATE TABLE IF NOT EXISTS {} (
-    id Integer NOT NULL, value Integer NOT NULL, PRIMARY KEY (id))
+    id Integer NOT NULL, key Integer NOT NULL, value Integer NOT NULL, PRIMARY KEY (id))
     """
 click_dict_table_template = """
     CREATE TABLE IF NOT EXISTS `test`.`dict_table_{}` (
-    `id` UInt64, `value` UInt32
+    `key` UInt32, `value` UInt32
     ) ENGINE = Dictionary({})
     """
@@ -43,7 +43,7 @@ def create_and_fill_postgres_table(cursor, table_name, port, host):
     create_postgres_table(cursor, table_name)
     # Fill postgres table using clickhouse postgres table function and check
     table_func = '''postgresql('{}:{}', 'clickhouse', '{}', 'postgres', 'mysecretpassword')'''.format(host, port, table_name)
-    node1.query('''INSERT INTO TABLE FUNCTION {} SELECT number, number from numbers(10000)
+    node1.query('''INSERT INTO TABLE FUNCTION {} SELECT number, number, number from numbers(10000)
         '''.format(table_func, table_name))
     result = node1.query("SELECT count() FROM {}".format(table_func))
     assert result.rstrip() == '10000'
@@ -82,7 +82,7 @@ def test_load_dictionaries(started_cluster):
     node1.query("SYSTEM RELOAD DICTIONARY {}".format(dict_name))
     assert node1.query("SELECT count() FROM `test`.`dict_table_{}`".format(table_name)).rstrip() == '10000'
-    assert node1.query("SELECT dictGetUInt32('{}', 'id', toUInt64(0))".format(dict_name)) == '0\n'
+    assert node1.query("SELECT dictGetUInt32('{}', 'key', toUInt64(0))".format(dict_name)) == '0\n'
     assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(9999))".format(dict_name)) == '9999\n'
     cursor.execute("DROP TABLE IF EXISTS {}".format(table_name))
@@ -252,11 +252,11 @@ def test_dictionary_with_replicas(started_cluster):
     create_postgres_table(cursor1, 'test1')
     create_postgres_table(cursor2, 'test1')
-    cursor1.execute('INSERT INTO test1 select i, i from generate_series(0, 99) as t(i);');
-    cursor2.execute('INSERT INTO test1 select i, i from generate_series(100, 199) as t(i);');
+    cursor1.execute('INSERT INTO test1 select i, i, i from generate_series(0, 99) as t(i);')
+    cursor2.execute('INSERT INTO test1 select i, i, i from generate_series(100, 199) as t(i);')
     create_dict('test1', 1)
-    result = node1.query("SELECT * FROM `test`.`dict_table_test1` ORDER BY id")
+    result = node1.query("SELECT * FROM `test`.`dict_table_test1` ORDER BY key")
     # priority 0 - non running port
     assert node1.contains_in_log('PostgreSQLConnectionPool: Connection error*')
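
For reference, the new three-column layout can be reproduced outside the test harness. A minimal sketch, assuming a local PostgreSQL reachable with the same credentials the fixtures use ('postgres' / 'mysecretpassword', database 'clickhouse') and the psycopg2 package; the table name test0 is a placeholder:

    import psycopg2

    # All connection details are assumptions mirroring the test fixtures.
    conn = psycopg2.connect(host='localhost', port=5432, dbname='clickhouse',
                            user='postgres', password='mysecretpassword')
    conn.autocommit = True
    cur = conn.cursor()
    # Same shape as postgres_dict_table_template after this commit: the
    # dictionary key column ('key') is separate from the primary key ('id').
    cur.execute("CREATE TABLE IF NOT EXISTS test0 ("
                "id integer NOT NULL, key integer NOT NULL, "
                "value integer NOT NULL, PRIMARY KEY (id))")
    cur.execute("INSERT INTO test0 SELECT i, i, i FROM generate_series(0, 99) AS t(i)")
    cur.execute("SELECT count(*) FROM test0")
    assert cur.fetchone()[0] == 100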

View File

@@ -18,7 +18,7 @@
     <structure>
         <id>
-            <name>column1</name>
+            <name>id</name>
         </id>
         <attribute>

View File

@@ -20,7 +20,7 @@
     <structure>
         <id>
-            <name>X</name>
+            <name>id</name>
         </id>
         <attribute>

View File

@@ -20,7 +20,7 @@
     <structure>
         <id>
-            <name>X</name>
+            <name>id</name>
         </id>
         <attribute>

View File

@@ -99,19 +99,19 @@ def started_cluster():
         logging.debug(f"sqlite data received: {sqlite_db}")
         node1.exec_in_container(
-            ["sqlite3", sqlite_db, "CREATE TABLE t1(x INTEGER PRIMARY KEY ASC, y, z);"],
+            ["sqlite3", sqlite_db, "CREATE TABLE t1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);"],
             privileged=True, user='root')
         node1.exec_in_container(
-            ["sqlite3", sqlite_db, "CREATE TABLE t2(X INTEGER PRIMARY KEY ASC, Y, Z);"],
+            ["sqlite3", sqlite_db, "CREATE TABLE t2(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"],
             privileged=True, user='root')
         node1.exec_in_container(
-            ["sqlite3", sqlite_db, "CREATE TABLE t3(X INTEGER PRIMARY KEY ASC, Y, Z);"],
+            ["sqlite3", sqlite_db, "CREATE TABLE t3(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"],
             privileged=True, user='root')
         node1.exec_in_container(
-            ["sqlite3", sqlite_db, "CREATE TABLE t4(X INTEGER PRIMARY KEY ASC, Y, Z);"],
+            ["sqlite3", sqlite_db, "CREATE TABLE t4(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"],
             privileged=True, user='root')
         node1.exec_in_container(
-            ["sqlite3", sqlite_db, "CREATE TABLE tf1(x INTEGER PRIMARY KEY ASC, y, z);"],
+            ["sqlite3", sqlite_db, "CREATE TABLE tf1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);"],
             privileged=True, user='root')
         logging.debug("sqlite tables created")
         mysql_conn = get_mysql_conn()
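
Each CREATE TABLE above gains a leading id INTEGER PRIMARY KEY ASC and makes the old first column an explicitly typed INTEGER, so every INSERT in the tests below now carries one extra value. A minimal sketch of the new layout, checkable with the Python standard library alone (an in-memory database, not the test's file):

    import sqlite3

    conn = sqlite3.connect(':memory:')  # throwaway database
    conn.execute("CREATE TABLE t1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);")
    conn.execute("INSERT INTO t1 VALUES (1, 1, 2, 3);")  # four values now
    assert conn.execute("SELECT id, x, y, z FROM t1").fetchone() == (1, 1, 2, 3)
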
@@ -128,7 +128,7 @@ def started_cluster():
         cursor = postgres_conn.cursor()
         cursor.execute(
-            "create table if not exists clickhouse.test_table (column1 int primary key, column2 varchar(40) not null)")
+            "create table if not exists clickhouse.test_table (id int primary key, column1 int not null, column2 varchar(40) not null)")
         yield cluster
@@ -210,9 +210,9 @@ def test_sqlite_simple_select_function_works(started_cluster):
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t1 values(1, 2, 3);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t1 values(1, 1, 2, 3);"],
                             privileged=True, user='root')
-    assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\t3\n"
+    assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t1\t2\t3\n"
     assert node1.query("select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "2\n"
     assert node1.query("select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\n"
@@ -228,10 +228,10 @@ def test_sqlite_table_function(started_cluster):
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO tf1 values(1, 2, 3);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO tf1 values(1, 1, 2, 3);"],
                             privileged=True, user='root')
     node1.query("create table odbc_tf as odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 'tf1'))
-    assert node1.query("select * from odbc_tf") == "1\t2\t3\n"
+    assert node1.query("select * from odbc_tf") == "1\t1\t2\t3\n"
     assert node1.query("select y from odbc_tf") == "2\n"
     assert node1.query("select z from odbc_tf") == "3\n"
@@ -246,7 +246,7 @@ def test_sqlite_simple_select_storage_works(started_cluster):
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t4 values(1, 2, 3);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t4 values(1, 1, 2, 3);"],
                             privileged=True, user='root')
     node1.query("create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format(
         sqlite_setup["DSN"]))
@@ -264,7 +264,7 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
     skip_test_msan(node1)
     sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(1, 2, 3);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(1, 1, 2, 3);"],
                             privileged=True, user='root')
     node1.query("SYSTEM RELOAD DICTIONARY sqlite3_odbc_hashed")
@@ -282,7 +282,7 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
         logging.debug("Waiting dictionary to update for the second time")
         time.sleep(0.1)
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(200, 2, 7);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(200, 200, 2, 7);"],
                             privileged=True, user='root')
     # No reload because of invalidate query
@@ -299,7 +299,7 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
     assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3")
     assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1") # still default
-    node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t2 values(1, 2, 5);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t2 values(1, 1, 2, 5);"],
                             privileged=True, user='root')
     assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "5")
@@ -310,7 +310,7 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
     skip_test_msan(node1)
     sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
-    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t3 values(1, 2, 3);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t3 values(1, 1, 2, 3);"],
                             privileged=True, user='root')
     assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "3\n"
@@ -319,12 +319,12 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
     node1.exec_in_container(["chmod", "a+rw", "/tmp"], privileged=True, user='root')
     node1.exec_in_container(["chmod", "a+rw", sqlite_db], privileged=True, user='root')
-    node1.query("insert into table function odbc('DSN={};ReadOnly=0', '', 't3') values (200, 2, 7)".format(
+    node1.query("insert into table function odbc('DSN={};ReadOnly=0', '', 't3') values (200, 200, 2, 7)".format(
         node1.odbc_drivers["SQLite3"]["DSN"]))
     assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") == "7\n" # new value
-    node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t3 values(1, 2, 12);"],
+    node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t3 values(1, 1, 2, 12);"],
                             privileged=True, user='root')
     assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12")
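
The REPLACE INTO statements above resolve their conflict on the new primary key, so the replacement row has to repeat the id. A minimal stdlib sketch of that behaviour:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE t3(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);")
    conn.execute("INSERT INTO t3 VALUES (1, 1, 2, 3);")
    # Same id -> the old row is replaced rather than a new one inserted.
    conn.execute("REPLACE INTO t3 VALUES (1, 1, 2, 12);")
    assert conn.execute("SELECT Z FROM t3 WHERE id = 1").fetchone()[0] == 12
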
@@ -336,7 +336,7 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute("truncate table clickhouse.test_table")
-    cursor.execute("insert into clickhouse.test_table values(1, 'hello'),(2, 'world')")
+    cursor.execute("insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')")
     node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
     assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))", "hello")
     assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", "world")
@@ -348,7 +348,7 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute("truncate table clickhouse.test_table")
-    cursor.execute("insert into clickhouse.test_table values(3, 'xxx')")
+    cursor.execute("insert into clickhouse.test_table values(3, 3, 'xxx')")
     for i in range(100):
         try:
             node1.query("system reload dictionary postgres_odbc_hashed", timeout=15)
@@ -369,13 +369,13 @@ def test_postgres_insert(started_cluster):
     # reconstruction of connection string.
     node1.query(
-        "create table pg_insert (column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')")
-    node1.query("insert into pg_insert values (1, 'hello'), (2, 'world')")
-    assert node1.query("select * from pg_insert") == '1\thello\n2\tworld\n'
-    node1.query("insert into table function odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table') format CSV 3,test")
+        "create table pg_insert (id UInt64, column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')")
+    node1.query("insert into pg_insert values (1, 1, 'hello'), (2, 2, 'world')")
+    assert node1.query("select * from pg_insert") == '1\t1\thello\n2\t2\tworld\n'
+    node1.query("insert into table function odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table') format CSV 3,3,test")
     node1.query(
         "insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')" \
-        " select number, 's' || toString(number) from numbers (4, 7)")
+        " select number, number, 's' || toString(number) from numbers (4, 7)")
     assert node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n"
     assert node1.query(
         "select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table')))") == "55\t10\n"
@@ -426,19 +426,19 @@ def test_odbc_postgres_date_data_type(started_cluster):
     conn = get_postgres_conn(started_cluster);
     cursor = conn.cursor()
-    cursor.execute("CREATE TABLE IF NOT EXISTS clickhouse.test_date (column1 integer, column2 date)")
-    cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, '2020-12-01')")
-    cursor.execute("INSERT INTO clickhouse.test_date VALUES (2, '2020-12-02')")
-    cursor.execute("INSERT INTO clickhouse.test_date VALUES (3, '2020-12-03')")
+    cursor.execute("CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)")
+    cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, 1, '2020-12-01')")
+    cursor.execute("INSERT INTO clickhouse.test_date VALUES (2, 2, '2020-12-02')")
+    cursor.execute("INSERT INTO clickhouse.test_date VALUES (3, 3, '2020-12-03')")
     conn.commit()
     node1.query(
         '''
-        CREATE TABLE test_date (column1 UInt64, column2 Date)
+        CREATE TABLE test_date (id UInt64, column1 UInt64, column2 Date)
         ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_date')''')
-    expected = '1\t2020-12-01\n2\t2020-12-02\n3\t2020-12-03\n'
+    expected = '1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n'
     result = node1.query('SELECT * FROM test_date');
     assert(result == expected)
     cursor.execute("DROP TABLE IF EXISTS clickhouse.test_date")