Mirror of https://github.com/ClickHouse/ClickHouse.git
Merge pull request #17486 from ClickHouse/fix_more_flaky_tests

Fix more flaky integration tests

Commit 1d179dac22
@@ -50,7 +50,7 @@ services:
       - label:disable
 
   kafka_kerberos:
-    image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG}
+    image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
    hostname: kafka_kerberos
    volumes:
      - ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
@@ -1,6 +1,6 @@
 version: '2.3'
 services:
   golang1:
-    image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG}
+    image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
     # to keep container running
     command: sleep infinity

@@ -1,6 +1,6 @@
 version: '2.3'
 services:
   java1:
-    image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG}
+    image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
     # to keep container running
     command: sleep infinity

@@ -1,6 +1,6 @@
 version: '2.3'
 services:
   mysqljs1:
-    image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG}
+    image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
     # to keep container running
     command: sleep infinity

@@ -1,6 +1,6 @@
 version: '2.3'
 services:
   php1:
-    image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG}
+    image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
     # to keep container running
     command: sleep infinity

@@ -1,6 +1,6 @@
 version: '2.2'
 services:
   java:
-    image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG}
+    image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
     # to keep container running
     command: sleep infinity
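Each of the compose hunks above makes the same one-line fix: a bare `${VAR}` expands to an empty string when the environment variable is unset, producing an invalid image reference such as `yandex/clickhouse-mysql-golang-client:`; the `:-latest` form is Compose's shell-style default and falls back to the `latest` tag instead. The equivalent lookup, sketched in Python (function and names are illustrative, not part of the diff):

import os

def image_ref(repo: str, tag_var: str) -> str:
    # Mirrors ${TAG_VAR:-latest}: fall back to "latest" when the
    # environment variable is unset or set to an empty string.
    tag = os.environ.get(tag_var) or "latest"
    return f"{repo}:{tag}"

# Without DOCKER_KERBEROS_KDC_TAG in the environment this prints
# "yandex/clickhouse-kerberos-kdc:latest" instead of a dangling ":".
print(image_ref("yandex/clickhouse-kerberos-kdc", "DOCKER_KERBEROS_KDC_TAG"))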
@@ -179,16 +179,19 @@ class _NetworkManager:
         except docker.errors.NotFound:
             pass
 
-        # for some reason docker api may hang if image doesn't exist, so we download it
-        # before running
-        for i in range(5):
-            try:
-                subprocess.check_call("docker pull yandex/clickhouse-integration-helper", shell=True)
-                break
-            except:
-                time.sleep(i)
-        else:
-            raise Exception("Cannot pull yandex/clickhouse-integration-helper image")
+        image = subprocess.check_output("docker images -q yandex/clickhouse-integration-helper 2>/dev/null", shell=True)
+        if not image.strip():
+            print("No network image helper, will try download")
+            # for some reason docker api may hang if image doesn't exist, so we download it
+            # before running
+            for i in range(5):
+                try:
+                    subprocess.check_call("docker pull yandex/clickhouse-integration-helper", shell=True)
+                    break
+                except:
+                    time.sleep(i)
+            else:
+                raise Exception("Cannot pull yandex/clickhouse-integration-helper image")
 
         self._container = self._docker_client.containers.run('yandex/clickhouse-integration-helper',
                                                              auto_remove=True,
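The new branch pulls only when `docker images -q` comes back empty, rather than unconditionally before every run. The retry loop it keeps relies on Python's `for ... else` idiom: the `else` arm runs only if the loop finishes without hitting `break`. A minimal standalone sketch of that pattern (function name and arguments are illustrative):

import subprocess
import time

def pull_with_retry(image: str, attempts: int = 5) -> None:
    for i in range(attempts):
        try:
            subprocess.check_call(["docker", "pull", image])
            break  # success: skips the else-branch below
        except subprocess.CalledProcessError:
            time.sleep(i)  # back off a little longer on each failure
    else:
        # Reached only if the loop never hit `break`, i.e. every pull failed.
        raise Exception("Cannot pull {} image".format(image))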
@@ -57,6 +57,7 @@ def test_reload_zookeeper(start_cluster):
 </yandex>
 """
     node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config)
     node.query("SYSTEM RELOAD CONFIG")
+    ## config reloads, but can still work
     assert_eq_with_retry(node, "SELECT COUNT() FROM test_table", '1000', retry_count=120, sleep_time=0.5)
 
@@ -86,6 +87,7 @@ def test_reload_zookeeper(start_cluster):
 </yandex>
 """
     node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config)
     node.query("SYSTEM RELOAD CONFIG")
+
     assert_eq_with_retry(node, "SELECT COUNT() FROM test_table", '1000', retry_count=120, sleep_time=0.5)
 
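Both hunks lean on `assert_eq_with_retry` from the integration-test helpers rather than a one-shot assert, so a slow config reload or ZooKeeper reconnect cannot fail the test. Roughly, the helper re-runs the query until the result matches or the retry budget is spent; a simplified sketch of that behaviour (the real helper in `helpers/test_tools.py` also normalizes output and reports the last mismatch, details assumed here):

import time

def assert_eq_with_retry(instance, query, expectation, retry_count=40, sleep_time=0.5):
    # Poll: re-run the query until it returns the expected value
    # or the retries run out.
    for _ in range(retry_count):
        if instance.query(query).strip() == expectation.strip():
            return
        time.sleep(sleep_time)
    # Final attempt outside the loop so a failure raises AssertionError
    # with the last actual value visible.
    assert instance.query(query).strip() == expectation.strip()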
@@ -17,19 +17,14 @@ distributed = cluster.add_instance('distributed', main_configs=["configs/config.
 def started_cluster():
     try:
         cluster.start()
 
         for node in [node1, node2]:
-            node.query("CREATE TABLE sometable (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;")
-            node.query("INSERT INTO sometable VALUES (toDate('2010-01-10'), 1, 1)")
             node.query("CREATE USER shard")
             node.query("GRANT ALL ON *.* TO shard")
 
-        distributed.query(
-            "CREATE TABLE proxy (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable, toUInt64(date));")
-        distributed.query(
-            "CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);")
-
         distributed.query("CREATE ROLE admin")
         distributed.query("GRANT ALL ON *.* TO admin")
+        distributed.query(
+            "CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);")
 
         yield cluster
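This fixture change is the core of the de-flaking: `sometable` and `proxy` were created once in the shared fixture and reused by every test, so one test's data could leak into another's assertions. The hunks below instead give each test privately named tables (`sometable_select`/`proxy_select`, `sometable_insert`/`proxy_insert`). A minimal sketch of that isolation pattern, with an illustrative helper and a stub `query` (neither is part of the diff):

def create_private_tables(query, suffix: str) -> None:
    # One set of tables per test: the unique suffix stops tests from
    # sharing state, so an earlier test's inserts cannot change a later
    # test's expected row counts.
    query(f"CREATE TABLE sometable_{suffix} (date Date, id UInt32, value Int32) "
          "ENGINE = MergeTree() ORDER BY id")
    query(f"INSERT INTO sometable_{suffix} VALUES (toDate('2010-01-10'), 1, 1)")

# Stand-in for node.query from the real tests: just echo the statements.
create_private_tables(print, "select")
create_private_tables(print, "insert")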
@@ -46,6 +41,14 @@ def restart_distributed():
 
 
 def test_select_clamps_settings():
+    for node in [node1, node2]:
+        node.query("CREATE TABLE sometable_select (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;")
+        node.query("INSERT INTO sometable_select VALUES (toDate('2010-01-10'), 1, 1)")
+
+    distributed.query(
+        "CREATE TABLE proxy_select (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_select, toUInt64(date));")
+
+
     distributed.query("CREATE USER normal DEFAULT ROLE admin SETTINGS max_memory_usage = 80000000")
     distributed.query("CREATE USER wasteful DEFAULT ROLE admin SETTINGS max_memory_usage = 2000000000")
     distributed.query("CREATE USER readonly DEFAULT ROLE admin SETTINGS readonly = 1")
@@ -53,7 +56,7 @@ def test_select_clamps_settings():
     node2.query("ALTER USER shard SETTINGS readonly = 1")
 
     # Check that shards doesn't throw exceptions on constraints violation
-    query = "SELECT COUNT() FROM proxy"
+    query = "SELECT COUNT() FROM proxy_select"
     assert distributed.query(query) == '2\n'
     assert distributed.query(query, user='normal') == '2\n'
     assert distributed.query(query, user='wasteful') == '2\n'
@@ -62,7 +65,7 @@ def test_select_clamps_settings():
     assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == '2\n'
     assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == '2\n'
 
-    query = "SELECT COUNT() FROM remote('node{1,2}', 'default', 'sometable')"
+    query = "SELECT COUNT() FROM remote('node{1,2}', 'default', 'sometable_select')"
     assert distributed.query(query) == '2\n'
     assert distributed.query(query, user='normal') == '2\n'
     assert distributed.query(query, user='wasteful') == '2\n'
@@ -103,10 +106,17 @@ def test_select_clamps_settings():
 
 
 def test_insert_clamps_settings():
+    for node in [node1, node2]:
+        node.query("CREATE TABLE sometable_insert (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;")
+        node.query("INSERT INTO sometable_insert VALUES (toDate('2010-01-10'), 1, 1)")
+
+    distributed.query(
+        "CREATE TABLE proxy_insert (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_insert, toUInt64(date));")
+
     node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
     node2.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
 
-    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-20'), 2, 2)")
-    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000})
-    distributed.query("SYSTEM FLUSH DISTRIBUTED proxy")
-    assert_eq_with_retry(distributed, "SELECT COUNT() FROM proxy", "4")
+    distributed.query("INSERT INTO proxy_insert VALUES (toDate('2020-02-20'), 2, 2)")
+    distributed.query("INSERT INTO proxy_insert VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000})
+    distributed.query("SYSTEM FLUSH DISTRIBUTED proxy_insert")
+    assert_eq_with_retry(distributed, "SELECT COUNT() FROM proxy_insert", "4")
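For context on what these tests assert: the shard user carries `max_memory_usage = 50000000 MIN 11111111 MAX 99999999`, and settings forwarded with a distributed query are clamped into those bounds on the shard rather than rejected, which is why the in-code comment talks about shards not throwing on constraint violations and why the INSERT sent with `max_memory_usage = 5000000` still succeeds. A toy model of that clamping (an assumption about the behaviour under test, not code from the repo):

def clamp_setting(value: int, minimum: int, maximum: int) -> int:
    # Bounded settings constraint: out-of-range values sent by the
    # initiator are pulled back into [minimum, maximum] on the shard.
    return max(minimum, min(value, maximum))

assert clamp_setting(5000000, 11111111, 99999999) == 11111111   # raised to MIN
assert clamp_setting(50000000, 11111111, 99999999) == 50000000  # in range, kept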