From 67efca21dd89acacaec81ba965cb8bb2fcf2d11c Mon Sep 17 00:00:00 2001
From: vdimir
Date: Wed, 28 Apr 2021 11:39:55 +0300
Subject: [PATCH] Disable test_odbc_interaction for MSan build

---
 .../integration/test_odbc_interaction/test.py | 43 ++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py
index 47d01389530..25668737885 100644
--- a/tests/integration/test_odbc_interaction/test.py
+++ b/tests/integration/test_odbc_interaction/test.py
@@ -27,6 +27,11 @@ create_table_sql_template = """
     """
 
 
+def skip_test_msan(instance):
+    if instance.is_built_with_memory_sanitizer():
+        pytest.skip("Memory Sanitizer cannot work with third-party shared libraries")
+
+
 def get_mysql_conn():
     conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308)
     return conn
@@ -105,6 +110,8 @@ def started_cluster():
 
 
 def test_mysql_simple_select_works(started_cluster):
+    skip_test_msan(node1)
+
     mysql_setup = node1.odbc_drivers["MySQL"]
     table_name = 'test_insert_select'
 
@@ -145,6 +152,8 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nulla
 
 
 def test_mysql_insert(started_cluster):
+    skip_test_msan(node1)
+
     mysql_setup = node1.odbc_drivers["MySQL"]
     table_name = 'test_insert'
     conn = get_mysql_conn()
@@ -166,6 +175,8 @@ def test_mysql_insert(started_cluster):
 
 
 def test_sqlite_simple_select_function_works(started_cluster):
+    skip_test_msan(node1)
+
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
 
@@ -182,6 +193,8 @@ def test_sqlite_simple_select_function_works(started_cluster):
         "select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(sqlite_setup["DSN"], 't1')) == "1\t1\n"
 
 def test_sqlite_table_function(started_cluster):
+    skip_test_msan(node1)
+
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
 
@@ -198,6 +211,8 @@ def test_sqlite_table_function(started_cluster):
     assert node1.query("select count(), sum(x) from odbc_tf group by x") == "1\t1\n"
 
 def test_sqlite_simple_select_storage_works(started_cluster):
+    skip_test_msan(node1)
+
     sqlite_setup = node1.odbc_drivers["SQLite3"]
     sqlite_db = sqlite_setup["Database"]
 
@@ -216,6 +231,8 @@ def test_sqlite_simple_select_storage_works(started_cluster):
 
 
 def test_sqlite_odbc_hashed_dictionary(started_cluster):
+    skip_test_msan(node1)
+
     sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
     node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)],
                             privileged=True, user='root')
@@ -260,6 +277,8 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
 
 
 def test_sqlite_odbc_cached_dictionary(started_cluster):
+    skip_test_msan(node1)
+
     sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
     node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)],
                             privileged=True, user='root')
@@ -282,6 +301,8 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
 
 
 def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
     cursor.execute("truncate table clickhouse.test_table")
@@ -292,6 +313,8 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
 
 
 def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
     cursor.execute("truncate table clickhouse.test_table")
@@ -306,6 +329,8 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
 
 
 def test_postgres_insert(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     conn.cursor().execute("truncate table clickhouse.test_table")
 
@@ -326,11 +351,13 @@ def test_postgres_insert(started_cluster):
 
 
 def test_bridge_dies_with_parent(started_cluster):
+    skip_test_msan(node1)
+
     if node1.is_built_with_address_sanitizer():
         # TODO: Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge in this test and
         # that's linked somehow with that we have replaced getauxval() in glibc-compatibility.
         # The leak sanitizer calls getauxval() for its own purposes, and our replaced version doesn't seem to be equivalent in that case.
-        return
+        pytest.skip("Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge")
 
     node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))")
 
@@ -365,6 +392,8 @@ def test_bridge_dies_with_parent(started_cluster):
 
 
 def test_odbc_postgres_date_data_type(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn();
     cursor = conn.cursor()
     cursor.execute("CREATE TABLE IF NOT EXISTS clickhouse.test_date (column1 integer, column2 date)")
@@ -387,6 +416,8 @@ def test_odbc_postgres_date_data_type(started_cluster):
 
 
 def test_odbc_postgres_conversions(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
 
@@ -427,6 +458,8 @@ def test_odbc_postgres_conversions(started_cluster):
 
 
 def test_odbc_cyrillic_with_varchar(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
 
@@ -447,6 +480,8 @@ def test_odbc_cyrillic_with_varchar(started_cluster):
 
 
 def test_many_connections(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
 
@@ -469,6 +504,8 @@ def test_many_connections(started_cluster):
 
 
 def test_concurrent_queries(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
 
@@ -508,6 +545,8 @@ def test_concurrent_queries(started_cluster):
 
 
 def test_odbc_long_column_names(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn();
     cursor = conn.cursor()
 
@@ -540,6 +579,8 @@ def test_odbc_long_column_names(started_cluster):
 
 
 def test_odbc_long_text(started_cluster):
+    skip_test_msan(node1)
+
     conn = get_postgres_conn()
     cursor = conn.cursor()
     cursor.execute("drop table if exists clickhouse.test_long_text")