Improve some tests

parent 05e8441b59
commit ada6c50aaa
@@ -5,7 +5,6 @@ cluster = ClickHouseCluster(__file__)

 node1 = cluster.add_instance('node1')

-
 @pytest.fixture(scope="module")
 def start_cluster():
     try:
@@ -42,3 +41,4 @@ def test_attach_without_checksums(start_cluster):

     assert node1.query("SELECT COUNT() FROM test WHERE key % 10 == 0") == "10\n"
     assert node1.query("SELECT COUNT() FROM test") == "100\n"
+    node1.query("DROP TABLE test")
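The hunk above has test_attach_without_checksums drop the table it created, so a rerun starts from a clean state. If the cleanup should also run when one of the asserts fails, the same DROP could live in a fixture finalizer instead; this is only a sketch, assuming the node1 instance and start_cluster fixture defined in this test file:

    import pytest

    @pytest.fixture
    def clean_test_table(start_cluster):
        # The test body runs at the yield point; the DROP below runs afterwards,
        # even if an assertion inside the test raises.
        yield
        node1.query("DROP TABLE IF EXISTS test")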
@@ -67,15 +67,15 @@ def started_cluster():
 ])
 def test_create_and_select_mysql(started_cluster, clickhouse, name, layout):
     mysql_conn = create_mysql_conn("root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port)
-    execute_mysql_query(mysql_conn, "DROP DATABASE IF EXISTS clickhouse")
-    execute_mysql_query(mysql_conn, "CREATE DATABASE clickhouse")
+    execute_mysql_query(mysql_conn, "DROP DATABASE IF EXISTS create_and_select")
+    execute_mysql_query(mysql_conn, "CREATE DATABASE create_and_select")
     execute_mysql_query(mysql_conn,
-                        "CREATE TABLE clickhouse.{} (key_field1 int, key_field2 bigint, value1 text, value2 float, PRIMARY KEY (key_field1, key_field2))".format(
+                        "CREATE TABLE create_and_select.{} (key_field1 int, key_field2 bigint, value1 text, value2 float, PRIMARY KEY (key_field1, key_field2))".format(
                             name))
     values = []
     for i in range(1000):
         values.append('(' + ','.join([str(i), str(i * i), str(i) * 5, str(i * 3.14)]) + ')')
-    execute_mysql_query(mysql_conn, "INSERT INTO clickhouse.{} VALUES ".format(name) + ','.join(values))
+    execute_mysql_query(mysql_conn, "INSERT INTO create_and_select.{} VALUES ".format(name) + ','.join(values))

     clickhouse.query("""
     CREATE DICTIONARY default.{} (
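The hunk above moves this test off the shared clickhouse MySQL database and onto a test-specific create_and_select database (later hunks do the same with dictionary_with_where). The create/drop pair could also be factored into a small context manager; a hypothetical helper, not part of this commit, built on the execute_mysql_query helper these tests already use:

    from contextlib import contextmanager

    @contextmanager
    def temporary_mysql_database(mysql_conn, db_name):
        # Give the test its own database and drop it again on exit, so
        # repeated or concurrent runs do not see each other's tables.
        execute_mysql_query(mysql_conn, "DROP DATABASE IF EXISTS {}".format(db_name))
        execute_mysql_query(mysql_conn, "CREATE DATABASE {}".format(db_name))
        try:
            yield db_name
        finally:
            execute_mysql_query(mysql_conn, "DROP DATABASE IF EXISTS {}".format(db_name))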
@@ -88,7 +88,7 @@ def test_create_and_select_mysql(started_cluster, clickhouse, name, layout):
     SOURCE(MYSQL(
         USER 'root'
         PASSWORD 'clickhouse'
-        DB 'clickhouse'
+        DB 'create_and_select'
         TABLE '{}'
         REPLICA(PRIORITY 1 HOST '127.0.0.1' PORT 3333)
         REPLICA(PRIORITY 2 HOST 'mysql57' PORT 3306)
@@ -110,7 +110,7 @@ def test_create_and_select_mysql(started_cluster, clickhouse, name, layout):

     for i in range(1000):
         values.append('(' + ','.join([str(i), str(i * i), str(i) * 3, str(i * 2.718)]) + ')')
-    execute_mysql_query(mysql_conn, "REPLACE INTO clickhouse.{} VALUES ".format(name) + ','.join(values))
+    execute_mysql_query(mysql_conn, "REPLACE INTO create_and_select.{} VALUES ".format(name) + ','.join(values))

     clickhouse.query("SYSTEM RELOAD DICTIONARY 'default.{}'".format(name))

@@ -127,6 +127,7 @@ def test_create_and_select_mysql(started_cluster, clickhouse, name, layout):

     clickhouse.query("select dictGetUInt8('xml_dictionary', 'SomeValue1', toUInt64(17))") == "17\n"
     clickhouse.query("select dictGetString('xml_dictionary', 'SomeValue2', toUInt64(977))") == str(hex(977))[2:] + '\n'
+    clickhouse.query(f"drop dictionary default.{name}")


 def test_restricted_database(started_cluster):
@@ -188,6 +189,9 @@ def test_restricted_database(started_cluster):
         SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_in_restricted_db' DB 'restricted_db'))
         LIFETIME(MIN 1 MAX 10)
         """)
+    for node in [node1, node2]:
+        node.query("DROP TABLE restricted_db.table_in_restricted_db", user="admin")
+        node.query("DROP DATABASE restricted_db", user="admin")


 def test_conflicting_name(started_cluster):
@@ -225,6 +229,7 @@ def test_http_dictionary_restrictions(started_cluster):
         node3.query("SELECT dictGetString('test.restricted_http_dictionary', 'value', toUInt64(1))")
     except QueryRuntimeException as ex:
         assert 'is not allowed in config.xml' in str(ex)
+    node3.query("DROP DICTIONARY test.restricted_http_dictionary")


 def test_file_dictionary_restrictions(started_cluster):
@@ -242,14 +247,15 @@ def test_file_dictionary_restrictions(started_cluster):
         node3.query("SELECT dictGetString('test.restricted_file_dictionary', 'value', toUInt64(1))")
     except QueryRuntimeException as ex:
         assert 'is not inside' in str(ex)
+    node3.query("DROP DICTIONARY test.restricted_file_dictionary")


 def test_dictionary_with_where(started_cluster):
     mysql_conn = create_mysql_conn("root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port)
-    execute_mysql_query(mysql_conn, "CREATE DATABASE IF NOT EXISTS clickhouse")
+    execute_mysql_query(mysql_conn, "CREATE DATABASE IF NOT EXISTS dictionary_with_where")
     execute_mysql_query(mysql_conn,
-                        "CREATE TABLE clickhouse.special_table (key_field1 int, value1 text, PRIMARY KEY (key_field1))")
-    execute_mysql_query(mysql_conn, "INSERT INTO clickhouse.special_table VALUES (1, 'abcabc'), (2, 'qweqwe')")
+                        "CREATE TABLE dictionary_with_where.special_table (key_field1 int, value1 text, PRIMARY KEY (key_field1))")
+    execute_mysql_query(mysql_conn, "INSERT INTO dictionary_with_where.special_table VALUES (1, 'abcabc'), (2, 'qweqwe')")

     node1.query("""
     CREATE DICTIONARY default.special_dict (
@@ -260,7 +266,7 @@ def test_dictionary_with_where(started_cluster):
     SOURCE(MYSQL(
         USER 'root'
         PASSWORD 'clickhouse'
-        DB 'clickhouse'
+        DB 'dictionary_with_where'
         TABLE 'special_table'
         REPLICA(PRIORITY 1 HOST 'mysql57' PORT 3306)
         WHERE 'value1 = \\'qweqwe\\' OR value1 = \\'\\\\u3232\\''
@@ -272,6 +278,9 @@ def test_dictionary_with_where(started_cluster):
     node1.query("SYSTEM RELOAD DICTIONARY default.special_dict")

     assert node1.query("SELECT dictGetString('default.special_dict', 'value1', toUInt64(2))") == 'qweqwe\n'
+    node1.query("DROP DICTIONARY default.special_dict")
+    execute_mysql_query(mysql_conn, "DROP TABLE dictionary_with_where.special_table")
+    execute_mysql_query(mysql_conn, "DROP DATABASE dictionary_with_where")


 def test_clickhouse_remote(started_cluster):
@@ -81,7 +81,7 @@ def test_url_with_redirect_allowed(started_cluster):
     node1.query(
         "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')")
     assert node1.query("SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect") == "1\tMark\t72.53\n"
+    node1.query("drop table WebHDFSStorageWithRedirect")


 def test_predefined_connection_configuration(started_cluster):
     hdfs_api = started_cluster.hdfs_api
@@ -89,9 +89,9 @@ def test_predefined_connection_configuration(started_cluster):
     hdfs_api.write_data("/simple_storage", "1\tMark\t72.53\n")
     assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"

+    node1.query("drop table if exists WebHDFSStorageWithRedirect")
     node1.query(
         "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV')")
     assert node1.query("SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect") == "1\tMark\t72.53\n"
     result = node1.query("SET max_http_get_redirects=1; select * from url(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV', structure='id UInt32, name String, weight Float64')")
     assert(result == "1\tMark\t72.53\n")
+    node1.query("drop table WebHDFSStorageWithRedirect")
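The last hunk both guards the CREATE with drop table if exists and drops WebHDFSStorageWithRedirect after the asserts, so the test stays rerunnable even if an earlier run stopped halfway. A minimal sketch of that create-use-drop pattern, assuming a node object with the same query method as node1 above and a hypothetical run_checks callable:

    def with_fresh_table(node, table_name, create_ddl, run_checks):
        # Drop any leftover copy of the table, create it, run the checks,
        # then drop it again regardless of the outcome.
        node.query("DROP TABLE IF EXISTS {}".format(table_name))
        node.query(create_ddl)
        try:
            return run_checks()
        finally:
            node.query("DROP TABLE IF EXISTS {}".format(table_name))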