Merge pull request #44711 from qoega/green-ci-1

Improve some integration tests and add pytest-random
Ilya Yatsishin, 2023-01-03 22:39:54 +01:00, committed by GitHub
commit 49155d9614
5 changed files with 80 additions and 16 deletions

docker/test/integration/runner/Dockerfile

@@ -83,6 +83,7 @@ RUN python3 -m pip install \
pytest \
pytest-order==1.0.0 \
pytest-timeout \
pytest-random \
pytest-xdist \
pytest-repeat \
pytz \
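
pytest-random shuffles the order in which collected tests run, so hidden inter-test dependencies that a fixed order never exercises start failing loudly. A minimal illustration (hypothetical tests, not from this PR) of the kind of coupling a shuffled order catches:

state = {}

def test_writer():
    state["key"] = 1

def test_reader():
    # Passes only when test_writer happened to run first; a randomized
    # order makes the implicit dependency visible.
    assert state.get("key") == 1

This is also why the bulk of this PR adds explicit cleanup to the ODBC tests below: once order is random, every test has to leave the databases as it found them.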

tests/integration/runner

@@ -242,6 +242,10 @@ if __name__ == "__main__":
"-n", "--parallel", action="store", dest="parallel", help="Parallelism"
)
parser.add_argument(
"--no-random", action="store", dest="no_random", help="Disable tests order randomization"
)
parser.add_argument(
"-t",
"--tests_list",
@@ -294,6 +298,11 @@ if __name__ == "__main__":
parallel_args += "--dist=loadfile"
parallel_args += " -n {}".format(args.parallel)
rand_args = ""
if not args.no_random:
rand_args += f"--random-seed={os.getpid()}"
net = ""
if args.network:
net = "--net={}".format(args.network)
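
Two details worth noting: --no-random is declared with action="store", so it takes a value (e.g. --no-random 1) rather than acting as a bare boolean flag, and the seed defaults to the runner's PID, giving every invocation a fresh shuffle while still letting a failing order be replayed by passing the same --random-seed through the pytest options. A minimal sketch of how the flag ends up in PYTEST_OPTS (names mirror the diff; the parallel value is made up for illustration):

import os

no_random = False  # what --no-random toggles
rand_args = "" if no_random else f"--random-seed={os.getpid()}"
parallel_args = "--dist=loadfile -n 4"  # illustrative value
print(f"{parallel_args} {rand_args} -vvv")
# e.g. "--dist=loadfile -n 4 --random-seed=12345 -vvv"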
@@ -383,7 +392,7 @@ if __name__ == "__main__":
{dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \
-e XTABLES_LOCKFILE=/run/host/xtables.lock \
-e PYTHONUNBUFFERED=1 \
{env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} -vvv' {img} {command}".format(
{env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} {rand} -vvv' {img} {command}".format(
net=net,
tty=tty,
bin=args.binary,
@@ -395,6 +404,7 @@ if __name__ == "__main__":
env_tags=env_tags,
env_cleanup=env_cleanup,
parallel=parallel_args,
rand=rand_args,
opts=" ".join(args.pytest_args).replace("'", "\\'"),
tests_list=" ".join(args.tests_list),
dockerd_internal_volume=dockerd_internal_volume,

tests/integration/test_grpc_protocol/test.py

@@ -37,7 +37,13 @@ import clickhouse_grpc_pb2_grpc
config_dir = os.path.join(SCRIPT_DIR, "./configs")
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance("node", main_configs=["configs/grpc_config.xml"])
node = cluster.add_instance(
"node",
main_configs=["configs/grpc_config.xml"],
# A bug in TSAN reproduces in this test: https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
# second_deadlock_stack=1 is the ordinary option we use everywhere; it is restated here so this override does not drop it
env_variables={"TSAN_OPTIONS": "report_atomic_races=0 second_deadlock_stack=1"},
)
main_channel = None
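
ThreadSanitizer reads TSAN_OPTIONS once at process startup, so the whole variable has to be spelled out: report_atomic_races=0 suppresses the known gRPC false positive linked above, and second_deadlock_stack=1 is restated so the override does not lose it. A hedged sketch of composing the value instead of hard-coding it (hypothetical helper, not part of the PR):

import os

def tsan_options(extra: str) -> str:
    # Keep whatever default is already set (second_deadlock_stack=1 here)
    # and append the test-specific suppression.
    base = os.environ.get("TSAN_OPTIONS", "second_deadlock_stack=1")
    return f"{base} {extra}"

print(tsan_options("report_atomic_races=0"))
# -> "second_deadlock_stack=1 report_atomic_races=0"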

tests/integration/test_grpc_protocol_ssl/test.py

@@ -42,6 +42,9 @@ node = cluster.add_instance(
"configs/server-cert.pem",
"configs/ca-cert.pem",
],
# A bug in TSAN reproduces in this test: https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
# second_deadlock_stack=1 is the ordinary option we use everywhere; it is restated here so this override does not drop it
env_variables={"TSAN_OPTIONS": "report_atomic_races=0 second_deadlock_stack=1"},
)

tests/integration/test_odbc_interaction/test.py

@@ -4,7 +4,6 @@ import psycopg2
import pymysql.cursors
import pytest
import logging
import os.path
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
@@ -80,10 +79,14 @@ def create_mysql_db(conn, name):
def create_mysql_table(conn, table_name):
with conn.cursor() as cursor:
cursor.execute(drop_table_sql_template.format(table_name))
cursor.execute(create_table_sql_template.format(table_name))
def drop_mysql_table(conn, table_name):
with conn.cursor() as cursor:
cursor.execute(drop_table_sql_template.format(table_name))
def get_postgres_conn(started_cluster):
conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(
started_cluster.postgres_ip, started_cluster.postgres_port
@@ -267,6 +270,8 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nulla
# just to be sure :)
assert node1.query("select 1") == "1\n"
node1.query(f"DROP TABLE {table_name}")
drop_mysql_table(conn, table_name)
conn.close()
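
With randomized ordering, each test must drop whatever it creates, and the new drop_mysql_table helper makes that teardown explicit. The same guarantee could be phrased as a fixture so the DROP also runs when an assertion fails mid-test (a sketch; get_mysql_conn is assumed to exist alongside get_postgres_conn):

import pytest

@pytest.fixture
def mysql_table(started_cluster):
    conn = get_mysql_conn(started_cluster)  # assumed helper
    create_mysql_table(conn, "test_table")
    yield conn
    drop_mysql_table(conn, "test_table")  # runs even if the test body raises
    conn.close()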
@@ -309,6 +314,9 @@ def test_mysql_insert(started_cluster):
== "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n"
)
node1.query("DROP TABLE mysql_insert")
drop_mysql_table(conn, table_name)
def test_sqlite_simple_select_function_works(started_cluster):
skip_test_msan(node1)
@@ -367,6 +375,12 @@ def test_sqlite_simple_select_function_works(started_cluster):
== "1\t1\n"
)
node1.exec_in_container(
["sqlite3", sqlite_db, "DELETE FROM t1;"],
privileged=True,
user="root",
)
def test_sqlite_table_function(started_cluster):
skip_test_msan(node1)
@@ -392,6 +406,12 @@ def test_sqlite_table_function(started_cluster):
assert node1.query("select x, y from odbc_tf") == "1\t2\n"
assert node1.query("select z, x, y from odbc_tf") == "3\t1\t2\n"
assert node1.query("select count(), sum(x) from odbc_tf group by x") == "1\t1\n"
node1.query("DROP TABLE odbc_tf")
node1.exec_in_container(
["sqlite3", sqlite_db, "DELETE FROM tf1;"],
privileged=True,
user="root",
)
def test_sqlite_simple_select_storage_works(started_cluster):
@@ -418,6 +438,13 @@ def test_sqlite_simple_select_storage_works(started_cluster):
assert node1.query("select x, y from SqliteODBC") == "1\t2\n"
assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n"
assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n"
node1.query("DROP TABLE SqliteODBC")
node1.exec_in_container(
["sqlite3", sqlite_db, "DELETE FROM t4;"],
privileged=True,
user="root",
)
def test_sqlite_odbc_hashed_dictionary(started_cluster):
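
The sqlite tests get the same treatment: every table written through ODBC is emptied again with sqlite3 inside the container. Since the identical call is repeated across five tests (t1, tf1, t4, t2, t3), it could be folded into one helper (hypothetical consolidation, not in the PR):

def sqlite_cleanup(node, sqlite_db, *tables):
    # Mirrors the repeated exec_in_container calls in this file.
    for table in tables:
        node.exec_in_container(
            ["sqlite3", sqlite_db, f"DELETE FROM {table};"],
            privileged=True,
            user="root",
        )

# e.g. sqlite_cleanup(node1, sqlite_db, "t1") at the end of a test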
@@ -496,6 +523,12 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster):
node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "7"
)
node1.exec_in_container(
["sqlite3", sqlite_db, "DELETE FROM t2;"],
privileged=True,
user="root",
)
def test_sqlite_odbc_cached_dictionary(started_cluster):
skip_test_msan(node1)
@@ -537,13 +570,20 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12"
)
node1.exec_in_container(
["sqlite3", sqlite_db, "DELETE FROM t3;"],
privileged=True,
user="root",
)
node1.query("SYSTEM RELOAD DICTIONARIES")
def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
skip_test_msan(node1)
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute("truncate table clickhouse.test_table")
cursor.execute(
"insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')"
)
@@ -562,6 +602,7 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
"select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))",
"world",
)
cursor.execute("truncate table clickhouse.test_table")
def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
@@ -569,7 +610,6 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute("truncate table clickhouse.test_table")
cursor.execute("insert into clickhouse.test_table values(3, 3, 'xxx')")
for i in range(100):
try:
@@ -582,13 +622,13 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
"select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))",
"xxx",
)
cursor.execute("truncate table clickhouse.test_table")
def test_postgres_insert(started_cluster):
skip_test_msan(node1)
conn = get_postgres_conn(started_cluster)
conn.cursor().execute("truncate table clickhouse.test_table")
# Also test with Servername containing '.' and '-' symbols (defined in
# postgres .yml file). This is needed to check parsing, validation and
@@ -615,6 +655,8 @@ def test_postgres_insert(started_cluster):
)
== "55\t10\n"
)
node1.query("DROP TABLE pg_insert")
conn.cursor().execute("truncate table clickhouse.test_table")
def test_bridge_dies_with_parent(started_cluster):
@@ -675,7 +717,7 @@ def test_odbc_postgres_date_data_type(started_cluster):
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute(
"CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)"
"CREATE TABLE clickhouse.test_date (id integer, column1 integer, column2 date)"
)
cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, 1, '2020-12-01')")
@@ -692,8 +734,8 @@ def test_odbc_postgres_date_data_type(started_cluster):
expected = "1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n"
result = node1.query("SELECT * FROM test_date")
assert result == expected
cursor.execute("DROP TABLE IF EXISTS clickhouse.test_date")
node1.query("DROP TABLE IF EXISTS test_date")
cursor.execute("DROP TABLE clickhouse.test_date")
node1.query("DROP TABLE test_date")
def test_odbc_postgres_conversions(started_cluster):
@@ -703,7 +745,7 @@ def test_odbc_postgres_conversions(started_cluster):
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE IF NOT EXISTS clickhouse.test_types (
"""CREATE TABLE clickhouse.test_types (
a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial,
h timestamp)"""
)
@@ -729,7 +771,7 @@ def test_odbc_postgres_conversions(started_cluster):
cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types")
cursor.execute(
"""CREATE TABLE IF NOT EXISTS clickhouse.test_types (column1 Timestamp, column2 Numeric)"""
"""CREATE TABLE clickhouse.test_types (column1 Timestamp, column2 Numeric)"""
)
node1.query(
@@ -747,8 +789,8 @@ def test_odbc_postgres_conversions(started_cluster):
"SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)"
)
result = node1.query("SELECT * FROM test_types")
logging.debug(result)
cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types")
cursor.execute("DROP TABLE clickhouse.test_types")
node1.query("DROP TABLE test_types")
assert result == expected
@@ -776,6 +818,7 @@ def test_odbc_cyrillic_with_varchar(started_cluster):
""" SELECT name FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic') """
)
assert result == "A-nice-word\nКрасивенько\n"
node1.query("DROP TABLE test_cyrillic")
def test_many_connections(started_cluster):
@@ -784,7 +827,6 @@ def test_many_connections(started_cluster):
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table")
cursor.execute("CREATE TABLE clickhouse.test_pg_table (key integer, value integer)")
node1.query(
@@ -802,6 +844,7 @@ def test_many_connections(started_cluster):
query += "SELECT key FROM {t})"
assert node1.query(query.format(t="test_pg_table")) == "250\n"
cursor.execute("DROP TABLE clickhouse.test_pg_table")
def test_concurrent_queries(started_cluster):
@@ -901,7 +944,6 @@ def test_odbc_long_text(started_cluster):
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute("drop table if exists clickhouse.test_long_text")
cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)")
# sample test from issue 9363
@@ -929,3 +971,5 @@ def test_odbc_long_text(started_cluster):
)
result = node1.query("select field1 from test_long_text where flen=400000;")
assert result.strip() == long_text
node1.query("DROP TABLE test_long_text")
cursor.execute("drop table clickhouse.test_long_text")