Make psql tests in test_odbc_interaction more robust to other test failures

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
This commit is contained in:
Azat Khuzhin 2023-07-18 11:34:28 +02:00
parent ee5e639ce9
commit 92ca2b0bdd

View File

@@ -582,6 +582,7 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
 def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
     skip_test_msan(node1)
+    try:
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute(
@@ -602,12 +603,14 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
     "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))",
     "world",
 )
+    finally:
     cursor.execute("truncate table clickhouse.test_table")
 def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
     skip_test_msan(node1)
+    try:
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute("insert into clickhouse.test_table values(3, 3, 'xxx')")
@@ -622,18 +625,22 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
     "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))",
     "xxx",
 )
+    finally:
     cursor.execute("truncate table clickhouse.test_table")
 def test_no_connection_pooling(started_cluster):
     skip_test_msan(node1)
+    try:
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute(
         "insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')"
     )
-    node1.exec_in_container(["ss", "-K", "dport", "5432"], privileged=True, user="root")
+        node1.exec_in_container(
+            ["ss", "-K", "dport", "5432"], privileged=True, user="root"
+        )
     node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_nopool")
     assert_eq_with_retry(
         node1,
@@ -650,6 +657,7 @@ def test_no_connection_pooling(started_cluster):
     assert "" == node1.exec_in_container(
         ["ss", "-H", "dport", "5432"], privileged=True, user="root"
     )
+    finally:
     cursor.execute("truncate table clickhouse.test_table")
@@ -662,6 +670,7 @@ def test_postgres_insert(started_cluster):
     # postgres .yml file). This is needed to check parsing, validation and
     # reconstruction of connection string.
+    try:
     node1.query(
         "create table pg_insert (id UInt64, column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')"
     )
@@ -675,7 +684,8 @@ def test_postgres_insert(started_cluster):
     " select number, number, 's' || toString(number) from numbers (4, 7)"
 )
 assert (
-    node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n"
+        node1.query("select sum(column1), count(column1) from pg_insert")
+        == "55\t10\n"
 )
 assert (
     node1.query(
@@ -683,13 +693,15 @@ def test_postgres_insert(started_cluster):
     )
     == "55\t10\n"
 )
-    node1.query("DROP TABLE pg_insert")
+    finally:
+        node1.query("DROP TABLE IF EXISTS pg_insert")
     conn.cursor().execute("truncate table clickhouse.test_table")
 def test_odbc_postgres_date_data_type(started_cluster):
     skip_test_msan(node1)
+    try:
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
     cursor.execute(
@@ -710,13 +722,15 @@ def test_odbc_postgres_date_data_type(started_cluster):
     expected = "1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n"
     result = node1.query("SELECT * FROM test_date")
     assert result == expected
+    finally:
     cursor.execute("DROP TABLE clickhouse.test_date")
-    node1.query("DROP TABLE test_date")
+        node1.query("DROP TABLE IF EXISTS test_date")
 def test_odbc_postgres_conversions(started_cluster):
     skip_test_msan(node1)
+    try:
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
@@ -765,9 +779,10 @@ def test_odbc_postgres_conversions(started_cluster):
     "SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)"
 )
 result = node1.query("SELECT * FROM test_types")
-    cursor.execute("DROP TABLE clickhouse.test_types")
-    node1.query("DROP TABLE test_types")
 assert result == expected
+    finally:
+        cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types")
+        node1.query("DROP TABLE IF EXISTS test_types")
 def test_odbc_cyrillic_with_varchar(started_cluster):
     skip_test_msan(node1)