This commit is contained in:
Yatsishin Ilya 2021-04-12 10:03:12 +03:00
parent 2cc9d314be
commit b4dded3fc2
6 changed files with 75 additions and 72 deletions

View File

@ -5,10 +5,10 @@ services:
restart: always
environment:
MYSQL_ROOT_PASSWORD: clickhouse
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/
expose:
- ${MYSQL_PORT}
- ${MYSQL_CLUSTER_PORT}
command: --server_id=100
--log-bin='mysql-bin-2.log'
--default-time-zone='+3:00'
@ -19,40 +19,40 @@ services:
--general-log=ON
--general-log-file=/mysql/2_general.log
volumes:
- type: ${MYSQL_LOGS_FS:-tmpfs}
source: ${MYSQL_LOGS:-}
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
source: ${MYSQL_CLUSTER_LOGS:-}
target: /mysql/
mysql3:
image: mysql:5.7
restart: always
environment:
MYSQL_ROOT_PASSWORD: clickhouse
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
DATADIR: /mysql/
expose:
- ${MYSQL_PORT}
command: --server_id=100
--log-bin='mysql-bin-3.log'
--default-time-zone='+3:00'
--gtid-mode="ON"
--enforce-gtid-consistency
--log-error-verbosity=3
--log-error=/mysql/3_error.log
--general-log=ON
--general-log-file=/mysql/3_general.log
volumes:
- type: ${MYSQL_LOGS_FS:-tmpfs}
source: ${MYSQL_LOGS:-}
target: /mysql/
image: mysql:5.7
restart: always
environment:
MYSQL_ROOT_PASSWORD: clickhouse
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/
expose:
- ${MYSQL_CLUSTER_PORT}
command: --server_id=100
--log-bin='mysql-bin-3.log'
--default-time-zone='+3:00'
--gtid-mode="ON"
--enforce-gtid-consistency
--log-error-verbosity=3
--log-error=/mysql/3_error.log
--general-log=ON
--general-log-file=/mysql/3_general.log
volumes:
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
source: ${MYSQL_CLUSTER_LOGS:-}
target: /mysql/
mysql4:
image: mysql:5.7
restart: always
environment:
MYSQL_ROOT_PASSWORD: clickhouse
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
DATADIR: /mysql/
expose:
- ${MYSQL_PORT}
- ${MYSQL_CLUSTER_PORT}
command: --server_id=100
--log-bin='mysql-bin-4.log'
--default-time-zone='+3:00'
@ -63,6 +63,6 @@ services:
--general-log=ON
--general-log-file=/mysql/4_general.log
volumes:
- type: ${MYSQL_LOGS_FS:-tmpfs}
source: ${MYSQL_LOGS:-}
target: /mysql/
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
source: ${MYSQL_CLUSTER_LOGS:-}
target: /mysql/

View File

@ -181,7 +181,9 @@ test_storage_mysql/test.py::test_where
test_storage_mysql/test.py::test_table_function
test_storage_mysql/test.py::test_binary_type
test_storage_mysql/test.py::test_enum_type
test_storage_mysql/test.py::test_mysql_distributed
test_storage_postgresql/test.py::test_postgres_select_insert
test_storage_postgresql/test.py::test_postgres_conversions
test_storage_postgresql/test.py::test_non_default_scema
test_storage_postgresql/test.py::test_concurrent_queries
test_storage_postgresql/test.py::test_postgres_distributed

View File

@ -46,7 +46,7 @@ def mysql_server(mysql_client):
:type mysql_client: Container
:rtype: Container
"""
retries = 30
retries = 60
for i in range(retries):
info = mysql_client.client.api.inspect_container(mysql_client.name)
if info['State']['Health']['Status'] == 'healthy':

View File

@ -36,7 +36,7 @@ def started_cluster():
try:
cluster.start()
conn = get_mysql_conn(cluster, cluster.mysql57_ip)
conn = get_mysql_conn(cluster, cluster.mysql_ip)
create_mysql_db(conn, 'clickhouse')
## create mysql db and table
@ -50,7 +50,7 @@ def started_cluster():
def test_many_connections(started_cluster):
table_name = 'test_many_connections'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
@ -70,7 +70,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
def test_insert_select(started_cluster):
table_name = 'test_insert_select'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
@ -86,7 +86,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
def test_replace_select(started_cluster):
table_name = 'test_replace_select'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
@ -105,7 +105,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
def test_insert_on_duplicate_select(started_cluster):
table_name = 'test_insert_on_duplicate_select'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
@ -124,7 +124,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
def test_where(started_cluster):
table_name = 'test_where'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
@ -144,7 +144,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
def test_table_function(started_cluster):
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, 'table_function')
table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('table_function')
assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '0'
@ -166,7 +166,7 @@ def test_table_function(started_cluster):
def test_binary_type(started_cluster):
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
with conn.cursor() as cursor:
cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)")
table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type')
@ -176,7 +176,7 @@ def test_binary_type(started_cluster):
def test_enum_type(started_cluster):
table_name = 'test_enum_type'
conn = get_mysql_conn(started_cluster, cluster.mysql57_ip)
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1);
@ -202,7 +202,7 @@ def create_mysql_table(conn, tableName):
def test_mysql_distributed(started_cluster):
table_name = 'test_replicas'
conn1 = get_mysql_conn(started_cluster, started_cluster.mysql57_ip)
conn1 = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
conn2 = get_mysql_conn(started_cluster, started_cluster.mysql2_ip)
conn3 = get_mysql_conn(started_cluster, started_cluster.mysql3_ip)
conn4 = get_mysql_conn(started_cluster, started_cluster.mysql4_ip)
@ -210,6 +210,7 @@ def test_mysql_distributed(started_cluster):
create_mysql_db(conn1, 'clickhouse')
create_mysql_db(conn2, 'clickhouse')
create_mysql_db(conn3, 'clickhouse')
create_mysql_db(conn4, 'clickhouse')
create_mysql_table(conn1, table_name)
create_mysql_table(conn2, table_name)
@ -228,13 +229,13 @@ def test_mysql_distributed(started_cluster):
nodes[i-1].query('''
CREATE TABLE test_replica{}
(id UInt32, name String, age UInt32, money UInt32)
ENGINE = MySQL(`mysql{}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');'''.format(i, i))
ENGINE = MySQL(`mysql{}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');'''.format(i, 57 if i==1 else i))
nodes[i-1].query("INSERT INTO test_replica{} (id, name) SELECT number, 'host{}' from numbers(10) ".format(i, i))
# test multiple ports parsing
result = node2.query('''SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
result = node2.query('''SELECT DISTINCT(name) FROM mysql(`mysql{57|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n')
result = node2.query('''SELECT DISTINCT(name) FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
result = node2.query('''SELECT DISTINCT(name) FROM mysql(`mysql57:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n')
# check all replicas are traversed
@ -250,7 +251,7 @@ def test_mysql_distributed(started_cluster):
node2.query('''
CREATE TABLE test_shards
(id UInt32, name String, age UInt32, money UInt32)
ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
ENGINE = ExternalDistributed('MySQL', `mysql{57|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
# Check only one replica in each shard is used
result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name")
@ -264,10 +265,10 @@ def test_mysql_distributed(started_cluster):
result = node2.query(query)
assert(result == 'host1\nhost2\nhost3\nhost4\n')
# disconnect mysql1
started_cluster.pause_container('mysql1')
# disconnect mysql57
started_cluster.pause_container('mysql57')
result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name")
started_cluster.unpause_container('mysql1')
started_cluster.unpause_container('mysql57')
assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n')
if __name__ == '__main__':

View File

@ -243,10 +243,10 @@ def test_concurrent_queries(started_cluster):
def test_postgres_distributed(started_cluster):
conn0 = get_postgres_conn(started_cluster.postgres_ip, database=True)
conn1 = get_postgres_conn(started_cluster.postgres2_ip, database=True)
conn2 = get_postgres_conn(started_cluster.postgres3_ip, database=True)
conn3 = get_postgres_conn(started_cluster.postgres4_ip, database=True)
conn0 = get_postgres_conn(started_cluster, started_cluster.postgres_ip, database=True)
conn1 = get_postgres_conn(started_cluster, started_cluster.postgres2_ip, database=True)
conn2 = get_postgres_conn(started_cluster, started_cluster.postgres3_ip, database=True)
conn3 = get_postgres_conn(started_cluster, started_cluster.postgres4_ip, database=True)
cursor0 = conn0.cursor()
cursor1 = conn1.cursor()

View File

@ -114,14 +114,14 @@ def run_query(instance, query, stdin=None, settings=None):
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
("", True, 'auto'),
("'minio','minio123',", True, 'auto'),
("'wrongid','wrongkey',", False, 'auto'),
("'wrongid','wrongkey',", False, 'gzip'),
("'wrongid','wrongkey',", False, 'deflate'),
("'wrongid','wrongkey',", False, 'brotli'),
("'wrongid','wrongkey',", False, 'xz'),
("'wrongid','wrongkey',", False, 'zstd')
pytest.param("", True, 'auto', id="positive"),
pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
@ -147,7 +147,7 @@ def test_put(started_cluster, maybe_auth, positive, compression):
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
"'minio','minio123',"
pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster) -> None
@ -181,9 +181,9 @@ def test_empty_put(started_cluster, auth):
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
@ -285,9 +285,9 @@ def test_put_get_with_globs(started_cluster):
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
("'wrongid','wrongkey',", False),
])
def test_multipart_put(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
@ -344,8 +344,8 @@ def test_remote_host_filter(started_cluster):
@pytest.mark.parametrize("s3_storage_args", [
"''", # 1 arguments
"'','','','','',''" # 6 arguments
pytest.param("''", id="1_argument"),
pytest.param("'','','','','',''", id="6_arguments"),
])
def test_wrong_s3_syntax(started_cluster, s3_storage_args):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
@ -458,8 +458,8 @@ def test_custom_auth_headers_exclusion(started_cluster):
assert '403 Forbidden' in ei.value.stderr
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
@ -532,8 +532,8 @@ def test_storage_s3_put_uncompressed(started_cluster):
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket