diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile
index 84c04dd03ec..ccfd63c8ed0 100644
--- a/docker/test/integration/runner/Dockerfile
+++ b/docker/test/integration/runner/Dockerfile
@@ -83,6 +83,7 @@ RUN python3 -m pip install \
     pytest \
     pytest-order==1.0.0 \
     pytest-timeout \
+    pytest-random \
     pytest-xdist \
     pytest-repeat \
     pytz \
diff --git a/tests/integration/runner b/tests/integration/runner
index f4f853e00ad..6a05d383089 100755
--- a/tests/integration/runner
+++ b/tests/integration/runner
@@ -242,6 +242,10 @@ if __name__ == "__main__":
         "-n", "--parallel", action="store", dest="parallel", help="Parallelism"
     )
 
+    parser.add_argument(
+        "--no-random", action="store_true", dest="no_random", help="Disable tests order randomization"
+    )
+
     parser.add_argument(
         "-t",
         "--tests_list",
@@ -294,6 +298,11 @@ if __name__ == "__main__":
         parallel_args += "--dist=loadfile"
         parallel_args += " -n {}".format(args.parallel)
 
+    rand_args = ""
+    if not args.no_random:
+        rand_args += f"--random-seed={os.getpid()}"
+
+
     net = ""
     if args.network:
         net = "--net={}".format(args.network)
@@ -383,7 +392,7 @@ if __name__ == "__main__":
         {dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \
         -e XTABLES_LOCKFILE=/run/host/xtables.lock \
         -e PYTHONUNBUFFERED=1 \
-        {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} -vvv' {img} {command}".format(
+        {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} {rand} -vvv' {img} {command}".format(
         net=net,
         tty=tty,
         bin=args.binary,
@@ -395,6 +404,7 @@ if __name__ == "__main__":
         env_tags=env_tags,
         env_cleanup=env_cleanup,
         parallel=parallel_args,
+        rand=rand_args,
         opts=" ".join(args.pytest_args).replace("'", "\\'"),
         tests_list=" ".join(args.tests_list),
         dockerd_internal_volume=dockerd_internal_volume,
diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py
index a3f2650eac7..f4884d309b1 100644
--- a/tests/integration/test_grpc_protocol/test.py
+++ b/tests/integration/test_grpc_protocol/test.py
@@ -37,7 +37,13 @@ import clickhouse_grpc_pb2_grpc
 
 config_dir = os.path.join(SCRIPT_DIR, "./configs")
 cluster = ClickHouseCluster(__file__)
-node = cluster.add_instance("node", main_configs=["configs/grpc_config.xml"])
+node = cluster.add_instance(
+    "node",
+    main_configs=["configs/grpc_config.xml"],
+    # Bug in TSAN reproduces in this test https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
+    # second_deadlock_stack -- just an ordinary option we use everywhere, don't want to overwrite it
+    env_variables={"TSAN_OPTIONS": "report_atomic_races=0 second_deadlock_stack=1"},
+)
 
 main_channel = None
 
diff --git a/tests/integration/test_grpc_protocol_ssl/test.py b/tests/integration/test_grpc_protocol_ssl/test.py
index 80599126dcf..3c28fb666c8 100644
--- a/tests/integration/test_grpc_protocol_ssl/test.py
+++ b/tests/integration/test_grpc_protocol_ssl/test.py
@@ -42,6 +42,9 @@ node = cluster.add_instance(
         "configs/server-cert.pem",
         "configs/ca-cert.pem",
     ],
+    # Bug in TSAN reproduces in this test https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
+    # second_deadlock_stack -- just an ordinary option we use everywhere, don't want to overwrite it
+    env_variables={"TSAN_OPTIONS": "report_atomic_races=0 second_deadlock_stack=1"},
 )
 
 
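The two gRPC test files above duplicate the same TSAN_OPTIONS string. If more tests ever need the grpc/grpc#29550 workaround, the value could live in one shared place; a minimal sketch, assuming a constant in the existing helpers module (the GRPC_TSAN_ENV name is hypothetical, not part of this patch):

# hypothetical constant, e.g. in tests/integration/helpers/cluster.py
# report_atomic_races=0 works around https://github.com/grpc/grpc/issues/29550;
# second_deadlock_stack=1 keeps the option already used everywhere else.
GRPC_TSAN_ENV = {"TSAN_OPTIONS": "report_atomic_races=0 second_deadlock_stack=1"}

node = cluster.add_instance(
    "node",
    main_configs=["configs/grpc_config.xml"],
    env_variables=GRPC_TSAN_ENV,
)
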
diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py
index 743e4ecd68a..faa48c00c44 100644
--- a/tests/integration/test_odbc_interaction/test.py
+++ b/tests/integration/test_odbc_interaction/test.py
@@ -4,7 +4,6 @@ import psycopg2
 import pymysql.cursors
 import pytest
 import logging
-import os.path
 
 from helpers.cluster import ClickHouseCluster
 from helpers.test_tools import assert_eq_with_retry
@@ -80,9 +79,11 @@ def create_mysql_db(conn, name):
 
 
 def create_mysql_table(conn, table_name):
     with conn.cursor() as cursor:
-        cursor.execute(drop_table_sql_template.format(table_name))
         cursor.execute(create_table_sql_template.format(table_name))
 
+def drop_mysql_table(conn, table_name):
+    with conn.cursor() as cursor:
+        cursor.execute(drop_table_sql_template.format(table_name))
 
 def get_postgres_conn(started_cluster):
@@ -267,7 +268,9 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nulla
 
     # just to be sure :)
     assert node1.query("select 1") == "1\n"
 
+    node1.query(f"DROP TABLE {table_name}")
+    drop_mysql_table(conn, table_name)
     conn.close()
 
 
@@ -309,6 +312,9 @@ def test_mysql_insert(started_cluster):
         == "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n"
     )
 
+    node1.query("DROP TABLE mysql_insert")
+    drop_mysql_table(conn, table_name)
+
 
 def test_sqlite_simple_select_function_works(started_cluster):
     skip_test_msan(node1)
@@ -367,6 +373,12 @@ def test_sqlite_simple_select_function_works(started_cluster):
         == "1\t1\n"
     )
 
+    node1.exec_in_container(
+        ["sqlite3", sqlite_db, "DELETE FROM t1;"],
+        privileged=True,
+        user="root",
+    )
+
 
 def test_sqlite_table_function(started_cluster):
     skip_test_msan(node1)
@@ -392,6 +404,12 @@ def test_sqlite_table_function(started_cluster):
     assert node1.query("select x, y from odbc_tf") == "1\t2\n"
     assert node1.query("select z, x, y from odbc_tf") == "3\t1\t2\n"
     assert node1.query("select count(), sum(x) from odbc_tf group by x") == "1\t1\n"
+    node1.query("DROP TABLE odbc_tf")
+    node1.exec_in_container(
+        ["sqlite3", sqlite_db, "DELETE FROM tf1;"],
+        privileged=True,
+        user="root",
+    )
 
 
 def test_sqlite_simple_select_storage_works(started_cluster):
@@ -418,6 +436,13 @@ def test_sqlite_simple_select_storage_works(started_cluster):
     assert node1.query("select x, y from SqliteODBC") == "1\t2\n"
     assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n"
     assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n"
+    node1.query("DROP TABLE SqliteODBC")
+
+    node1.exec_in_container(
+        ["sqlite3", sqlite_db, "DELETE FROM t4;"],
+        privileged=True,
+        user="root",
+    )
 
 
 def test_sqlite_odbc_hashed_dictionary(started_cluster):
@@ -496,6 +521,12 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
         node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "7"
     )
 
+    node1.exec_in_container(
+        ["sqlite3", sqlite_db, "DELETE FROM t2;"],
+        privileged=True,
+        user="root",
+    )
+
 
 def test_sqlite_odbc_cached_dictionary(started_cluster):
     skip_test_msan(node1)
@@ -537,13 +568,19 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
         node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12"
     )
 
+    node1.exec_in_container(
+        ["sqlite3", sqlite_db, "DELETE FROM t3;"],
+        privileged=True,
+        user="root",
+    )
+
+    node1.query("SYSTEM RELOAD DICTIONARIES")
 
 
 def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
     skip_test_msan(node1)
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
-    cursor.execute("truncate table clickhouse.test_table")
     cursor.execute(
         "insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')"
     )
2, 'world')" ) @@ -562,6 +599,7 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", "world", ) + cursor.execute("truncate table clickhouse.test_table") def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): @@ -569,7 +607,6 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() - cursor.execute("truncate table clickhouse.test_table") cursor.execute("insert into clickhouse.test_table values(3, 3, 'xxx')") for i in range(100): try: @@ -582,13 +619,13 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))", "xxx", ) + cursor.execute("truncate table clickhouse.test_table") def test_postgres_insert(started_cluster): skip_test_msan(node1) conn = get_postgres_conn(started_cluster) - conn.cursor().execute("truncate table clickhouse.test_table") # Also test with Servername containing '.' and '-' symbols (defined in # postgres .yml file). This is needed to check parsing, validation and @@ -615,6 +652,8 @@ def test_postgres_insert(started_cluster): ) == "55\t10\n" ) + node1.query("DROP TABLE pg_insert") + conn.cursor().execute("truncate table clickhouse.test_table") def test_bridge_dies_with_parent(started_cluster): @@ -675,7 +714,7 @@ def test_odbc_postgres_date_data_type(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() cursor.execute( - "CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)" + "CREATE TABLE clickhouse.test_date (id integer, column1 integer, column2 date)" ) cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, 1, '2020-12-01')") @@ -692,8 +731,8 @@ def test_odbc_postgres_date_data_type(started_cluster): expected = "1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n" result = node1.query("SELECT * FROM test_date") assert result == expected - cursor.execute("DROP TABLE IF EXISTS clickhouse.test_date") - node1.query("DROP TABLE IF EXISTS test_date") + cursor.execute("DROP TABLE clickhouse.test_date") + node1.query("DROP TABLE test_date") def test_odbc_postgres_conversions(started_cluster): @@ -703,7 +742,7 @@ def test_odbc_postgres_conversions(started_cluster): cursor = conn.cursor() cursor.execute( - """CREATE TABLE IF NOT EXISTS clickhouse.test_types ( + """CREATE TABLE clickhouse.test_types ( a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, h timestamp)""" ) @@ -729,7 +768,7 @@ def test_odbc_postgres_conversions(started_cluster): cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types") cursor.execute( - """CREATE TABLE IF NOT EXISTS clickhouse.test_types (column1 Timestamp, column2 Numeric)""" + """CREATE TABLE clickhouse.test_types (column1 Timestamp, column2 Numeric)""" ) node1.query( @@ -747,8 +786,8 @@ def test_odbc_postgres_conversions(started_cluster): "SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)" ) result = node1.query("SELECT * FROM test_types") - logging.debug(result) - cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types") + cursor.execute("DROP TABLE clickhouse.test_types") + node1.query("DROP TABLE test_types") assert result == expected @@ -776,6 +815,7 @@ def test_odbc_cyrillic_with_varchar(started_cluster): """ SELECT name FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 
         'test_cyrillic') """
     )
     assert result == "A-nice-word\nКрасивенько\n"
+    node1.query("DROP TABLE test_cyrillic")
 
 def test_many_connections(started_cluster):
@@ -784,7 +824,6 @@ def test_many_connections(started_cluster):
     skip_test_msan(node1)
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
-    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table")
     cursor.execute("CREATE TABLE clickhouse.test_pg_table (key integer, value integer)")
 
     node1.query(
@@ -802,6 +841,7 @@ def test_many_connections(started_cluster):
     query += "SELECT key FROM {t})"
 
     assert node1.query(query.format(t="test_pg_table")) == "250\n"
+    cursor.execute("DROP TABLE clickhouse.test_pg_table")
 
 
 def test_concurrent_queries(started_cluster):
@@ -901,7 +941,6 @@ def test_odbc_long_text(started_cluster):
 
     conn = get_postgres_conn(started_cluster)
     cursor = conn.cursor()
-    cursor.execute("drop table if exists clickhouse.test_long_text")
     cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)")
 
     # sample test from issue 9363
@@ -929,3 +968,5 @@ def test_odbc_long_text(started_cluster):
     )
     result = node1.query("select field1 from test_long_text where flen=400000;")
     assert result.strip() == long_text
+    node1.query("DROP TABLE test_long_text")
+    cursor.execute("drop table clickhouse.test_long_text")
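
The cleanup statements added throughout test_odbc_interaction run only after the final asserts succeed, so a failing test still leaves its tables behind for whichever test the randomized order schedules next. A fixture-based variant would guarantee teardown regardless of the outcome; a minimal sketch, assuming the module's existing get_postgres_conn helper (the pg_scratch_table fixture and its table name are hypothetical):

import pytest

@pytest.fixture
def pg_scratch_table(started_cluster):
    # Create a PostgreSQL-side table for a single test.
    conn = get_postgres_conn(started_cluster)
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE clickhouse.test_scratch (id integer, value text)")
    yield cursor
    # Code after yield is fixture teardown; pytest runs it even when the
    # test body fails its asserts, so the table never leaks into the next test.
    cursor.execute("DROP TABLE clickhouse.test_scratch")
    conn.close()

A test that takes pg_scratch_table as an argument then gets its table created and dropped automatically instead of relying on a trailing DROP.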