Merge pull request #22170 from ClickHouse/fix_odbc_interaction

Fix some flaky, order-dependent integration tests.
alesapin 2021-03-27 10:17:37 +03:00 committed by GitHub
commit b8eab8cba9
6 changed files with 21 additions and 5 deletions
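
The failures all stem from tests assuming state left behind by earlier tests in the same module: row counts relative to previous inserts, dictionaries loaded by other tests, or a server that another test shut down. The recurring remedy below is to reset that shared state at the top of each test. As a minimal sketch of the same idea expressed once per module rather than per test — the autouse fixture and the `data` table here are hypothetical stand-ins for the harness objects, not code from this PR:

    import pytest

    @pytest.fixture(autouse=True)
    def reset_state():
        # Runs before every test in the module, so each test passes
        # regardless of which tests (if any) ran before it.
        # node1 is assumed to be the module-level ClickHouseInstance.
        node1.query("TRUNCATE TABLE IF EXISTS data")
        yield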


@@ -80,7 +80,7 @@ def test_load_dictionaries(started_cluster):
     create_dict(table_name)
     dict_name = 'dict0'
 
-    node1.query("SYSTEM RELOAD DICTIONARIES")
+    node1.query("SYSTEM RELOAD DICTIONARY {}".format(dict_name))
     assert node1.query("SELECT count() FROM `test`.`dict_table_{}`".format(table_name)).rstrip() == '10000'
     assert node1.query("SELECT dictGetUInt32('{}', 'id', toUInt64(0))".format(dict_name)) == '0\n'
     assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(9999))".format(dict_name)) == '9999\n'


@@ -141,7 +141,8 @@ def test_reload_after_loading(started_cluster):
     time.sleep(1)  # see the comment above
     replace_in_file_in_container('/etc/clickhouse-server/config.d/executable.xml', '81', '82')
     replace_in_file_in_container('/etc/clickhouse-server/config.d/file.txt', '101', '102')
-    query("SYSTEM RELOAD DICTIONARIES")
+    query("SYSTEM RELOAD DICTIONARY 'file'")
+    query("SYSTEM RELOAD DICTIONARY 'executable'")
     assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "82\n"
     assert query("SELECT dictGetInt32('file', 'a', toUInt64(9))") == "102\n"


@@ -97,12 +97,14 @@ def test_insecure():
     n1.query('SELECT * FROM dist_insecure')
 
 def test_insecure_insert_async():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_insecure SELECT * FROM numbers(2)')
     n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER insecure dist_insecure')
     assert int(n1.query('SELECT count() FROM dist_insecure')) == 2
     n1.query('TRUNCATE TABLE data ON CLUSTER insecure')
 
 def test_insecure_insert_sync():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_insecure SELECT * FROM numbers(2)', settings={'insert_distributed_sync': 1})
     assert int(n1.query('SELECT count() FROM dist_insecure')) == 2
     n1.query('TRUNCATE TABLE data ON CLUSTER secure')
@@ -111,12 +113,14 @@ def test_secure():
     n1.query('SELECT * FROM dist_secure')
 
 def test_secure_insert_async():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_secure SELECT * FROM numbers(2)')
     n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure')
     assert int(n1.query('SELECT count() FROM dist_secure')) == 2
     n1.query('TRUNCATE TABLE data ON CLUSTER secure')
 
 def test_secure_insert_sync():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_secure SELECT * FROM numbers(2)', settings={'insert_distributed_sync': 1})
     assert int(n1.query('SELECT count() FROM dist_secure')) == 2
     n1.query('TRUNCATE TABLE data ON CLUSTER secure')
@@ -126,6 +130,7 @@ def test_secure_insert_sync():
 # Buffer() flush happens with global context, that does not have user
 # And so Context::user/ClientInfo::current_user/ClientInfo::initial_user will be empty
 def test_secure_insert_buffer_async():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_secure_buffer SELECT * FROM numbers(2)')
     n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure')
     # no Buffer flush happened
@@ -141,6 +146,7 @@ def test_secure_disagree():
     n1.query('SELECT * FROM dist_secure_disagree')
 
 def test_secure_disagree_insert():
+    n1.query("TRUNCATE TABLE data")
     n1.query('INSERT INTO dist_secure_disagree SELECT * FROM numbers(2)')
     with pytest.raises(QueryRuntimeException, match='.*Hash mismatch.*'):
         n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure_disagree dist_secure_disagree')
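
Each of these tests now truncates the underlying data table before inserting, instead of trusting the trailing TRUNCATE of whichever test ran before it. The leading reset matters most for test_secure_disagree_insert, which deliberately raises inside pytest.raises and therefore never reaches any trailing cleanup of its own. The pattern, as a sketch over a hypothetical dist_example table:

    def test_example():
        n1.query("TRUNCATE TABLE data")  # defend against whatever ran (or crashed) earlier
        n1.query("INSERT INTO dist_example SELECT * FROM numbers(2)")
        assert int(n1.query("SELECT count() FROM dist_example")) == 2
        n1.query("TRUNCATE TABLE data")  # courtesy cleanup; skipped if the assert fails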


@@ -43,6 +43,8 @@ def start_small_cluster():
 
 def test_single_endpoint_connections_count(start_small_cluster):
+    node1.query("TRUNCATE TABLE test_table")
+    node2.query("SYSTEM SYNC REPLICA test_table")
 
     def task(count):
         print(("Inserting ten times from {}".format(count)))
         for i in range(count, count + 10):
@@ -58,9 +60,11 @@ def test_single_endpoint_connections_count(start_small_cluster):
 
 def test_keepalive_timeout(start_small_cluster):
-    current_count = int(node1.query("select count() from test_table").strip())
+    node1.query("TRUNCATE TABLE test_table")
+    node2.query("SYSTEM SYNC REPLICA test_table")
+
     node1.query("insert into test_table values ('2017-06-16', 777, 0)")
-    assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 1))
+    assert_eq_with_retry(node2, "select count() from test_table", str(1))
     # Server keepAliveTimeout is 3 seconds, default client session timeout is 8
     # lets sleep in that interval
     time.sleep(4)
@@ -69,7 +73,7 @@ def test_keepalive_timeout(start_small_cluster):
     time.sleep(3)
 
-    assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 2))
+    assert_eq_with_retry(node2, "select count() from test_table", str(2))
     assert not node2.contains_in_log("No message received"), "Found 'No message received' in clickhouse-server.log"
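
With the table truncated and the truncation synced to node2, the expected counts become the absolute values 1 and 2 rather than offsets from whatever current_count happened to be. The SYSTEM SYNC REPLICA call is what makes the reset safe on a replicated table: TRUNCATE travels through the replication log, so node2 must apply it before the test inserts and asserts. A sketch of the reset as a reusable helper (the helper name is hypothetical):

    def reset_replicated(table, writer, readers):
        # TRUNCATE is replicated asynchronously; block until every other
        # replica has applied it before the test proceeds.
        writer.query("TRUNCATE TABLE {}".format(table))
        for replica in readers:
            replica.query("SYSTEM SYNC REPLICA {}".format(table))

    reset_replicated("test_table", node1, [node2])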


@@ -360,6 +360,7 @@ def test_bridge_dies_with_parent(started_cluster):
     assert clickhouse_pid is None
     assert bridge_pid is None
+    node1.start_clickhouse(20)
 
 
 def test_odbc_postgres_date_data_type(started_cluster):
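
test_bridge_dies_with_parent kills clickhouse-server on purpose to verify that the odbc-bridge dies with it, which previously left every later test in the file running against a dead server. The added start_clickhouse(20) call (20 being the start timeout in seconds) restores that invariant. A slightly more defensive variant would restart even when the assertions fail; a sketch, not the PR's code:

    def test_bridge_dies_with_parent(started_cluster):
        try:
            ...  # kill clickhouse-server, assert both pids are gone
        finally:
            # Restart unconditionally so the tests after this one
            # never observe a dead node.
            node1.start_clickhouse(20)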


@@ -396,6 +396,10 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run):
     node_right.query("OPTIMIZE TABLE test_ttl_group_by FINAL")
     node_right.query("OPTIMIZE TABLE test_ttl_where FINAL")
 
+    node_left.query("SYSTEM SYNC REPLICA test_ttl_delete", timeout=20)
+    node_left.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20)
+    node_left.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20)
+
     assert node_left.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n"
     assert node_right.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n"