Enable Pytest framework for stateless tests (#17902)

Ivan 2020-12-18 16:26:11 +03:00 committed by GitHub
parent 26637bd7ee
commit 57763e6867
39 changed files with 461 additions and 316 deletions

View File

@ -45,16 +45,6 @@
"name": "yandex/clickhouse-pvs-test",
"dependent": []
},
"docker/test/stateful": {
"name": "yandex/clickhouse-stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/stateful_with_coverage": {
"name": "yandex/clickhouse-stateful-test-with-coverage",
"dependent": []
},
"docker/test/stateless": {
"name": "yandex/clickhouse-stateless-test",
"dependent": [
@ -71,6 +61,16 @@
"docker/test/stateful_with_coverage"
]
},
"docker/test/stateful": {
"name": "yandex/clickhouse-stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/stateful_with_coverage": {
"name": "yandex/clickhouse-stateful-test-with-coverage",
"dependent": []
},
"docker/test/unit": {
"name": "yandex/clickhouse-unit-test",
"dependent": []

View File

@ -4,11 +4,16 @@ FROM yandex/clickhouse-test-base
RUN apt-get update -y && \
apt-get install -y --no-install-recommends \
python3-pip \
python3-setuptools
python3-setuptools \
python3-wheel
RUN python3 -m pip install \
wheel \
pytest \
pytest-html \
pytest-json \
pytest-randomly \
pytest-rerunfailures \
pytest-timeout \
pytest-xdist
@ -17,4 +22,4 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
dpkg -i package_folder/clickhouse-server_*.deb; \
dpkg -i package_folder/clickhouse-client_*.deb; \
dpkg -i package_folder/clickhouse-test_*.deb; \
python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --html=test_output/report.html --self-contained-html
python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --reruns=1 --timeout=600 --json=test_output/report.json --html=test_output/report.html --self-contained-html
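The command above is what the stateless-test image now runs in CI. For local reproduction, a minimal sketch (assuming the clickhouse-* debs are installed, a server config is available, and the cases live under /usr/share/clickhouse-test/queries as installed by the clickhouse-test package):

#!/usr/bin/env bash
# Install the same pytest plugins the image uses.
python3 -m pip install pytest pytest-html pytest-json pytest-randomly \
    pytest-rerunfailures pytest-timeout pytest-xdist

mkdir -p test_output

# -n: pytest-xdist workers, one per CPU; --reruns=1: retry a failed test once;
# --timeout=600: abort a test stuck for 10 minutes; --json/--html: CI reports.
python3 -m pytest /usr/share/clickhouse-test/queries \
    -n "$(nproc)" --reruns=1 --timeout=600 \
    --json=test_output/report.json \
    --html=test_output/report.html --self-contained-html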

View File

@ -321,6 +321,18 @@
"with_coverage": false
}
},
"Functional stateless tests (pytest)": {
"required_build_properties": {
"compiler": "clang-11",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang-tidy": "disable",
"with_coverage": false
}
},
"Functional stateless tests (unbundled)": {
"required_build_properties": {
"compiler": "gcc-9",

View File

@ -1,9 +1,9 @@
*** Create and kill a single invalid mutation ***
happened during execution of mutation '0000000000'
1
waiting test kill_mutation_r1 0000000000 DELETE WHERE toUInt32(s) = 1
waiting default kill_mutation_r1 0000000000 DELETE WHERE toUInt32(s) = 1
0
*** Create and kill invalid mutation that blocks another mutation ***
1
waiting test kill_mutation_r1 0000000001 DELETE WHERE toUInt32(s) = 1
waiting default kill_mutation_r1 0000000001 DELETE WHERE toUInt32(s) = 1
2001-01-01 2 b

View File

@ -5,42 +5,42 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/mergetree_mutations.lib
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '1') ORDER BY x PARTITION BY d"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '2') ORDER BY x PARTITION BY d"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '1') ORDER BY x PARTITION BY d"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '2') ORDER BY x PARTITION BY d"
${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation_r1 VALUES ('2000-01-01', 1, 'a')"
${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation_r1 VALUES ('2001-01-01', 2, 'b')"
${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2000-01-01', 1, 'a')"
${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2001-01-01', 2, 'b')"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutation ***'"
# wrong mutation
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync=2" 2>&1 | grep -o "happened during execution of mutation '0000000000'" | head -n 1
${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync=2" 2>&1 | grep -o "happened during execution of mutation '0000000000'" | head -n 1
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND is_done = 0"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1' AND is_done = 0"
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1'"
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1'"
# No active mutations exist
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1'"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that blocks another mutation ***'"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA test.kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA test.kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA kill_mutation_r2"
# Should be empty, but in case of problems we will see some diagnostics
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.replication_queue WHERE table like 'kill_mutation_r%'"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
# good mutation, but blocked with wrong mutation
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE x = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE x = 1"
check_query1="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND is_done = 0"
check_query1="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1' AND is_done = 0"
query_result=$($CLICKHOUSE_CLIENT --query="$check_query1" 2>&1)
@ -50,17 +50,17 @@ do
sleep 0.5
done
$CLICKHOUSE_CLIENT --query="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001' AND is_done = 0"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001' AND is_done = 0"
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA test.kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA test.kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM kill_mutation_r2"
# must be empty
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.mutations WHERE table = 'kill_mutation' AND database = 'test' AND is_done = 0"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.mutations WHERE table = 'kill_mutation' AND database = '$CLICKHOUSE_DATABASE' AND is_done = 0"
${CLICKHOUSE_CLIENT} --query="DROP TABLE test.kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE test.kill_mutation_r2"
${CLICKHOUSE_CLIENT} --query="DROP TABLE kill_mutation_r1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE kill_mutation_r2"

View File

@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} -n -q "
INSERT INTO table_for_dict VALUES (3, 3003),(4,4004);
CREATE DICTIONARY dict1( y UInt64 DEFAULT 0, y_new UInt32 DEFAULT 0 ) PRIMARY KEY y
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB '${CLICKHOUSE_DATABASE}'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB '${CLICKHOUSE_DATABASE}'))
LIFETIME(MIN 1 MAX 10)
LAYOUT(FLAT());
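The PORT 9000 to PORT tcpPort() change seen here repeats across the dictionary tests below: under pytest the server under test is not guaranteed to listen on 9000, and tcpPort() resolves to whatever TCP port the executing server actually uses. A sketch of the resulting pattern with illustrative object names:

${CLICKHOUSE_CLIENT} -n -q "
    CREATE DICTIONARY example_dict (key UInt64, value String)
    PRIMARY KEY key
    SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'example_tbl' DB '${CLICKHOUSE_DATABASE}'))
    LIFETIME(MIN 1 MAX 10)
    LAYOUT(FLAT());
"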

View File

@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT -q "
)
PRIMARY KEY key1, key2
LAYOUT(HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" 2>&1 | grep -c 'Primary key for simple dictionary must contain exactly one element'
@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -q "
)
PRIMARY KEY non_existing_column
LAYOUT(HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" 2>&1 | grep -c "Unknown key attribute 'non_existing_column'"
@ -45,7 +45,7 @@ $CLICKHOUSE_CLIENT -q "
)
PRIMARY KEY non_existing_column, key1
LAYOUT(COMPLEX_KEY_HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" 2>&1 | grep -c "Unknown key attribute 'non_existing_column'"
@ -58,7 +58,7 @@ $CLICKHOUSE_CLIENT -q "
value String
)
PRIMARY KEY key2, key1
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" 2>&1 | grep -c "Cannot create dictionary with empty layout"
@ -71,7 +71,7 @@ $CLICKHOUSE_CLIENT -q "
value String
)
LAYOUT(COMPLEX_KEY_HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" 2>&1 | grep -c "Cannot create dictionary without primary key"
@ -85,7 +85,7 @@ $CLICKHOUSE_CLIENT -q "
)
PRIMARY KEY key2, key1
LAYOUT(COMPLEX_KEY_HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
" 2>&1 | grep -c "Cannot create dictionary with empty lifetime"
# No source
@ -112,7 +112,7 @@ $CLICKHOUSE_CLIENT -q "
)
PRIMARY KEY key1
LAYOUT(COMPLEX_KEY_HASHED())
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' DB '$CLICKHOUSE_DATABASE'))
LIFETIME(MIN 1 MAX 10)
" || exit 1

View File

@ -18,9 +18,9 @@ $CLICKHOUSE_CLIENT -n -q "
CREATE DATABASE database_for_dict;
CREATE DICTIONARY database_for_dict.dict1 (key_column UInt64, value_column String) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict1' PASSWORD '' DB '$CLICKHOUSE_DATABASE')) LIFETIME(MIN 1 MAX 5) LAYOUT(FLAT());
CREATE DICTIONARY database_for_dict.dict1 (key_column UInt64, value_column String) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' PASSWORD '' DB '$CLICKHOUSE_DATABASE')) LIFETIME(MIN 1 MAX 5) LAYOUT(FLAT());
CREATE DICTIONARY database_for_dict.dict2 (key_column UInt64, value_column String) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict2' PASSWORD '' DB '$CLICKHOUSE_DATABASE')) LIFETIME(MIN 1 MAX 5) LAYOUT(CACHE(SIZE_IN_CELLS 150));
CREATE DICTIONARY database_for_dict.dict2 (key_column UInt64, value_column String) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict2' PASSWORD '' DB '$CLICKHOUSE_DATABASE')) LIFETIME(MIN 1 MAX 5) LAYOUT(CACHE(SIZE_IN_CELLS 150));
"

View File

@ -50,7 +50,7 @@ do
value UInt64 DEFAULT 101
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
LIFETIME(0)
LAYOUT($type());

View File

@ -50,7 +50,7 @@ do
value UInt64 DEFAULT 101
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
LIFETIME(0)
LAYOUT($type());

View File

@ -64,7 +64,7 @@ do
value UInt64 DEFAULT 101
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
LIFETIME(0)
LAYOUT($type());
@ -77,7 +77,7 @@ do
value UInt64 DEFAULT 101
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037'))
LIFETIME(0)
LAYOUT($type());

View File

@ -26,7 +26,7 @@ CREATE DICTIONARY db_01038.dict_with_zero_min_lifetime
value Float64 DEFAULT 77.77
)
PRIMARY KEY key_column
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'db_01038'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB 'db_01038'))
LIFETIME(1)
LAYOUT(FLAT())"

View File

@ -28,7 +28,7 @@ CREATE DICTIONARY IF NOT EXISTS dictdb_01076.dict_datarace
value Float64 DEFAULT 77.77
)
PRIMARY KEY key_column
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_datarace' DB 'dictdb_01076'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_datarace' DB 'dictdb_01076'))
LIFETIME(1)
LAYOUT(CACHE(SIZE_IN_CELLS 10));
"

View File

@ -1,34 +1,38 @@
-- TODO: can't just default prefix, it breaks the test!
-- TODO: can't just remove default prefix, it breaks the test!
drop table if exists default.test_table_01080;
CREATE TABLE default.test_table_01080 (dim_key Int64, dim_id String) ENGINE = MergeTree Order by (dim_key);
insert into default.test_table_01080 values(1,'test1');
drop database if exists db_01080;
create database db_01080;
drop DICTIONARY if exists default.test_dict_01080;
drop table if exists db_01080.test_table_01080;
CREATE TABLE db_01080.test_table_01080 (dim_key Int64, dim_id String) ENGINE = MergeTree Order by (dim_key);
insert into db_01080.test_table_01080 values(1,'test1');
CREATE DICTIONARY default.test_dict_01080 ( dim_key Int64, dim_id String )
drop DICTIONARY if exists db_01080.test_dict_01080;
CREATE DICTIONARY db_01080.test_dict_01080 ( dim_key Int64, dim_id String )
PRIMARY KEY dim_key
source(clickhouse(host 'localhost' port '9000' user 'default' password '' db 'default' table 'test_table_01080'))
source(clickhouse(host 'localhost' port tcpPort() user 'default' password '' db 'db_01080' table 'test_table_01080'))
LIFETIME(MIN 0 MAX 0) LAYOUT(complex_key_hashed());
SELECT dictGetString('default.test_dict_01080', 'dim_id', tuple(toInt64(1)));
SELECT dictGetString('db_01080.test_dict_01080', 'dim_id', tuple(toInt64(1)));
SELECT dictGetString('default.test_dict_01080', 'dim_id', tuple(toInt64(0)));
SELECT dictGetString('db_01080.test_dict_01080', 'dim_id', tuple(toInt64(0)));
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(0)) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(0)) as x);
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(1)) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(1)) as x);
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(number)) as x from numbers(5));
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(number)) as x from numbers(5));
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(rand64()*0)) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(rand64()*0)) as x);
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(blockSize()=0)) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(blockSize()=0)) as x);
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(0))) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(0))) as x);
select dictGetString('default.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(1))) as x);
select dictGetString('db_01080.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(1))) as x);
drop DICTIONARY default.test_dict_01080;
drop table default.test_table_01080;
drop DICTIONARY db_01080.test_dict_01080;
drop table db_01080.test_table_01080;
drop database db_01080;

View File

@ -11,15 +11,10 @@ done
${CLICKHOUSE_CLIENT} --query "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables'))"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_01086"
${CLICKHOUSE_CLIENT} --query "CREATE DATABASE test_01086"
${CLICKHOUSE_CLIENT} --query "CREATE TABLE t (x UInt8, y Float32, z String) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query "INSERT INTO t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d')"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (ANSI)}','$CLICKHOUSE_DATABASE','t') ORDER BY x"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (Unicode)}','$CLICKHOUSE_DATABASE','t') ORDER BY x"
${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_01086.t (x UInt8, y Float32, z String) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query "INSERT INTO test_01086.t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d')"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (ANSI)}','test_01086','t') ORDER BY x"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (Unicode)}','test_01086','t') ORDER BY x"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE test_01086;"
${CLICKHOUSE_CLIENT} --query "DROP TABLE t"

View File

@ -0,0 +1,88 @@
#!/usr/bin/expect -f
log_user 0
set timeout 60
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
match_max 100000
expect ":) "
send -- "SELECT 1\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof
set timeout 60
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
match_max 100000
expect ":) "
send -- "SELECT 1;\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1; \r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G \r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ";\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- "\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ", 2;\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof

View File

@ -1,88 +1,6 @@
#!/usr/bin/expect -f
#!/usr/bin/env bash
log_user 0
set timeout 60
spawn clickhouse-client
match_max 100000
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
expect ":) "
send -- "SELECT 1\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof
set timeout 60
spawn clickhouse-client --multiline
match_max 100000
expect ":) "
send -- "SELECT 1;\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1; \r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G \r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ";\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- "\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ", 2;\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof
${CURDIR}/01293_client_interactive_vertical_multiline.expect
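The interactive logic moved into a standalone .expect file (shown above) that no longer hard-codes spawn clickhouse-client, while the .sh that the runners execute becomes a thin wrapper, a pattern repeated for every expect test in this commit:

#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh   # exports CLICKHOUSE_CLIENT_BINARY and CLICKHOUSE_CLIENT_OPT
${CURDIR}/01293_client_interactive_vertical_multiline.expect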

View File

@ -0,0 +1,34 @@
#!/usr/bin/expect -f
log_user 0
set timeout 60
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
match_max 100000
expect ":) "
send -- "SELECT 1\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof

View File

@ -1,34 +1,6 @@
#!/usr/bin/expect -f
#!/usr/bin/env bash
log_user 0
set timeout 60
spawn clickhouse-client
match_max 100000
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
expect ":) "
send -- "SELECT 1\r"
expect "│ 1 │"
expect ":) "
send -- "SELECT 1\\G\r"
expect "Row 1:"
expect "1: 1"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\r"
expect "│ 1 │ 2 │"
expect ":) "
send -- "SELECT 1\\\r"
expect ":-] "
send -- ", 2\\G\r"
expect "Row 1:"
expect "1: 1"
expect "2: 2"
expect ":) "
send -- ""
expect eof
${CURDIR}/01293_client_interactive_vertical_singleline.expect

View File

@ -0,0 +1,35 @@
#!/usr/bin/expect -f
log_user 0
set timeout 60
match_max 100000
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
# Make a query
send -- "SELECT 'for the history'\r"
expect "for the history"
expect ":) "
# Kill the client to check if the history was saved
exec kill -9 [exp_pid]
close
# Run client one more time and press "up" to see the last recorded query
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
send -- "\[A"
expect "SELECT 'for the history'"
# Will check that Ctrl+C clears current line.
send -- "\3"
expect ":)"
# Will check that second Ctrl+C invocation does not exit from client.
send -- "\3"
expect ":)"
# But Ctrl+D does.
send -- "\4"
expect eof

View File

@ -1,35 +1,6 @@
#!/usr/bin/expect -f
#!/usr/bin/env bash
log_user 0
set timeout 60
match_max 100000
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
spawn clickhouse-client
expect ":) "
# Make a query
send -- "SELECT 'for the history'\r"
expect "for the history"
expect ":) "
# Kill the client to check if the history was saved
exec kill -9 [exp_pid]
close
# Run client one more time and press "up" to see the last recorded query
spawn clickhouse-client
expect ":) "
send -- "\[A"
expect "SELECT 'for the history'"
# Will check that Ctrl+C clears current line.
send -- "\3"
expect ":)"
# Will check that second Ctrl+C invocation does not exit from client.
send -- "\3"
expect ":)"
# But Ctrl+D does.
send -- "\4"
expect eof
${CURDIR}/01300_client_save_history_when_terminated.expect

View File

@ -0,0 +1,29 @@
#!/usr/bin/expect -f
log_user 0
set timeout 1
match_max 100000
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
# Make a query
send -- "SET max_distributed"
expect "SET max_distributed"
# Wait for suggestions to load, they are loaded in background
set is_done 0
while {$is_done == 0} {
send -- "\t"
expect {
"_connections" {
set is_done 1
}
default {
sleep 1
}
}
}
send -- "\3\4"
expect eof

View File

@ -1,29 +1,6 @@
#!/usr/bin/expect -f
#!/usr/bin/env bash
log_user 0
set timeout 1
match_max 100000
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
spawn clickhouse-client
expect ":) "
# Make a query
send -- "SET max_distributed"
expect "SET max_distributed"
# Wait for suggestions to load, they are loaded in background
set is_done 0
while {$is_done == 0} {
send -- "\t"
expect {
"_connections" {
set is_done 1
}
default {
sleep 1
}
}
}
send -- "\3\4"
expect eof
${CURDIR}/01370_client_autocomplete_word_break_characters.expect

View File

@ -17,7 +17,7 @@ CREATE DICTIONARY ordinary_db.dict1
third_column String DEFAULT 'qqq'
)
PRIMARY KEY key_column
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'view_for_dict' PASSWORD '' DB 'ordinary_db'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'view_for_dict' PASSWORD '' DB 'ordinary_db'))
LIFETIME(MIN 1 MAX 3)
LAYOUT(CACHE(SIZE_IN_CELLS 3));
"

View File

@ -4,3 +4,5 @@ create table named_tuples engine File(JSONEachRow)
from numbers(3);
select * from named_tuples format JSONEachRow settings output_format_json_named_tuples_as_objects = 1;
drop table named_tuples

View File

@ -48,8 +48,8 @@ CREATE DICTIONARY IF NOT EXISTS db_01501.cache_dict (
Decimal64_ Decimal64(15) DEFAULT 444.11,
Decimal128_ Decimal128(35) DEFAULT 555.11,
ParentKeyField UInt64 DEFAULT 444)
PRIMARY KEY KeyField
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_cache_dict' DB 'db_01501'))
PRIMARY KEY KeyField
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_cache_dict' DB 'db_01501'))
LIFETIME(5) LAYOUT(CACHE(SIZE_IN_CELLS 20));
@ -61,31 +61,31 @@ INSERT INTO db_01501.table_cache_dict VALUES (5, 22222, 33333, 44444, 55555, -11
SELECT arrayDistinct(groupArray(dictGetUInt8('db_01501.cache_dict', 'UInt8_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetUInt16('db_01501.cache_dict', 'UInt16_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetUInt32('db_01501.cache_dict', 'UInt32_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetUInt64('db_01501.cache_dict', 'UInt64_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetInt8('db_01501.cache_dict', 'Int8_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetInt16('db_01501.cache_dict', 'Int16_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetInt32('db_01501.cache_dict', 'Int32_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetInt64('db_01501.cache_dict', 'Int64_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetFloat32('db_01501.cache_dict', 'Float32_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetFloat64('db_01501.cache_dict', 'Float64_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal32_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal64_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal128_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT arrayDistinct(groupArray(dictGetString('db_01501.cache_dict', 'String_', toUInt64(number)))) from numbers(10);
@ -106,7 +106,7 @@ SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal128_', to
SELECT arrayDistinct(groupArray(dictGetString('db_01501.cache_dict', 'String_', toUInt64(number)))) from numbers(10);
system reload dictionaries;
system reload dictionaries;
SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10);
@ -117,4 +117,4 @@ SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers
drop table if exists table_cache_dict;
drop dictionary if exists cache_dict;
drop database if exists db_01501;
drop database if exists db_01501;

View File

@ -0,0 +1,20 @@
#!/usr/bin/expect -f
log_user 0
set timeout 5
match_max 100000
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
# Make a query
send -- "SELECT 'print query id'\r"
expect {
"Query id: *" { }
timeout { exit 1 }
}
expect "print query id"
expect ":) "
send -- "\4"
expect eof

View File

@ -1,22 +1,6 @@
#!/usr/bin/expect -f
#!/usr/bin/env bash
log_user 0
set timeout 5
match_max 100000
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000}
spawn clickhouse-client --port "$env(CLICKHOUSE_PORT_TCP)"
expect ":) "
# Make a query
send -- "SELECT 'print query id'\r"
expect {
"Query id: *" { }
timeout { exit 1 }
}
expect "print query id"
expect ":) "
send -- "\4"
expect eof
${CURDIR}/01520_client_print_query_id.expect

View File

@ -4,7 +4,7 @@ log_user 1
set timeout 5
match_max 100000
spawn bash -c "$env(CLICKHOUSE_CLIENT) --port $env(CLICKHOUSE_PORT_TCP) && echo $?"
spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
send -- "\4"
expect eof

View File

@ -24,3 +24,9 @@ system reload dictionary db_01527_ranges.dict;
select _shard_num from dist_01527 where key=0;
select _shard_num from dist_01527 where key=1;
drop table data_01527;
drop table dist_01527;
drop table db_01527_ranges.data;
drop dictionary db_01527_ranges.dict;
drop database db_01527_ranges;

View File

@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO database_for_dict.table_for_dict VALUES
$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db"
$CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db"
$CLICKHOUSE_CLIENT --query "CREATE DICTIONARY ordinary_db.dict1 ( key_column UInt64 DEFAULT 0, second_column UInt64 DEFAULT 1, third_column String DEFAULT 'qqq' ) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT()) SETTINGS(max_result_bytes=1)"
$CLICKHOUSE_CLIENT --query "CREATE DICTIONARY ordinary_db.dict1 ( key_column UInt64 DEFAULT 0, second_column UInt64 DEFAULT 1, third_column String DEFAULT 'qqq' ) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT()) SETTINGS(max_result_bytes=1)"
function dict_get_thread()
{

View File

@ -17,3 +17,5 @@ SELECT 'TSV';
SELECT * FROM map_formats ORDER BY m['k1'] FORMAT TSV;
SELECT 'TSKV';
SELECT * FROM map_formats ORDER BY m['k1'] FORMAT TSKV;
DROP TABLE map_formats;

View File

@ -19,3 +19,5 @@ $CLICKHOUSE_CLIENT -q "SELECT * FROM map_formats_input"
$CLICKHOUSE_CLIENT -q 'SELECT * FROM map_formats_input FORMAT Native' | $CLICKHOUSE_CLIENT -q "INSERT INTO map_formats_input FORMAT Native"
$CLICKHOUSE_CLIENT -q "SELECT * FROM map_formats_input"
$CLICKHOUSE_CLIENT -q "DROP TABLE map_formats_input"

View File

@ -7,7 +7,7 @@ CREATE TABLE database_for_dict.table_for_dict (k UInt64, v UInt8) ENGINE = Merge
DROP DICTIONARY IF EXISTS database_for_dict.dict1;
CREATE DICTIONARY database_for_dict.dict1 (k UInt64 DEFAULT 0, v UInt8 DEFAULT 1) PRIMARY KEY k
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict'))
LIFETIME(MIN 1 MAX 10)
LAYOUT(FLAT());

View File

@ -6,3 +6,5 @@ insert into data_01593 select * from numbers_mt(10);
insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; -- { serverError 252 }
-- settings for INSERT are preferred
insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1 settings max_partitions_per_insert_block=100;
drop table data_01593;

View File

@ -6,10 +6,10 @@ OPTIMIZE TABLE test FINAL;
-- Only set limit
SET limit = 5;
SELECT * FROM test; -- 5 rows
SELECT * FROM test OFFSET 20; -- 5 rows
SELECT * FROM (SELECT i FROM test LIMIT 10 OFFSET 50) TMP; -- 5 rows
SELECT * FROM test LIMIT 4 OFFSET 192; -- 4 rows
SELECT * FROM test; -- 5 rows
SELECT * FROM test OFFSET 20; -- 5 rows
SELECT * FROM (SELECT i FROM test LIMIT 10 OFFSET 50) TMP; -- 5 rows
SELECT * FROM test LIMIT 4 OFFSET 192; -- 4 rows
SELECT * FROM test LIMIT 10 OFFSET 195; -- 5 rows
-- Only set offset
@ -24,7 +24,9 @@ SELECT * FROM test LIMIT 11 OFFSET 100; -- 1 rows
-- offset and limit together
SET limit = 10;
SELECT * FROM test LIMIT 50 OFFSET 50; -- 10 rows
SELECT * FROM test LIMIT 50 OFFSET 50; -- 10 rows
SELECT * FROM test LIMIT 50 OFFSET 190; -- 0 rows
SELECT * FROM test LIMIT 50 OFFSET 185; -- 5 rows
SELECT * FROM test LIMIT 18 OFFSET 5; -- 8 rows
SELECT * FROM test LIMIT 50 OFFSET 185; -- 5 rows
SELECT * FROM test LIMIT 18 OFFSET 5; -- 8 rows
DROP TABLE test;

View File

@ -1,11 +1,11 @@
CREATE DATABASE IF NOT EXISTS db_dict;
DROP DICTIONARY IF EXISTS db_dict.cache_hits;
CREATE DICTIONARY db_dict.cache_hits
(WatchID UInt64, UserID UInt64, SearchPhrase String)
PRIMARY KEY WatchID
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'hits' PASSWORD '' DB 'test'))
LIFETIME(MIN 300 MAX 600)
CREATE DICTIONARY db_dict.cache_hits
(WatchID UInt64, UserID UInt64, SearchPhrase String)
PRIMARY KEY WatchID
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PASSWORD '' DB 'test'))
LIFETIME(MIN 300 MAX 600)
LAYOUT(CACHE(SIZE_IN_CELLS 100000 QUERY_WAIT_TIMEOUT_MILLISECONDS 600000));
SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 1400 == 0);

View File

@ -8,6 +8,80 @@ import subprocess
import sys
SKIP_LIST = [
# these two tests hang everything
"00600_replace_running_query",
"00987_distributed_stack_overflow",
# just fail
"00302_http_compression",
"00463_long_sessions_in_http_interface",
"00505_secure",
"00505_shard_secure",
"00506_union_distributed", # flaky
"00646_url_engine",
"00834_cancel_http_readonly_queries_on_client_close",
"00933_test_fix_extra_seek_on_compressed_cache",
"00965_logs_level_bugfix",
"00965_send_logs_level_concurrent_queries",
"00990_hasToken",
"00990_metric_log_table_not_empty",
"01014_lazy_database_concurrent_recreate_reattach_and_show_tables",
"01018_Distributed__shard_num",
"01018_ip_dictionary",
"01023_materialized_view_query_context", # flaky
"01035_lc_empty_part_bug", # flaky
"01046_materialized_view_with_join_over_distributed", # flaky
"01050_clickhouse_dict_source_with_subquery",
"01053_ssd_dictionary",
"01054_cache_dictionary_overflow_cell",
"01057_http_compression_prefer_brotli",
"01080_check_for_error_incorrect_size_of_nested_column",
"01083_expressions_in_engine_arguments",
"01086_odbc_roundtrip",
"01088_benchmark_query_id",
"01098_temporary_and_external_tables",
"01099_parallel_distributed_insert_select", # flaky
"01103_check_cpu_instructions_at_startup",
"01114_database_atomic",
"01148_zookeeper_path_macros_unfolding",
"01280_ssd_complex_key_dictionary",
"01293_client_interactive_vertical_multiline", # expect-test
"01293_client_interactive_vertical_singleline", # expect-test
"01293_show_clusters",
"01294_lazy_database_concurrent_recreate_reattach_and_show_tables",
"01294_system_distributed_on_cluster",
"01300_client_save_history_when_terminated", # expect-test
"01304_direct_io",
"01306_benchmark_json",
"01320_create_sync_race_condition_zookeeper",
"01355_CSV_input_format_allow_errors",
"01370_client_autocomplete_word_break_characters", # expect-test
"01376_GROUP_BY_injective_elimination_dictGet",
"01393_benchmark_secure_port",
"01418_custom_settings",
"01451_wrong_error_long_query",
"01455_opentelemetry_distributed",
"01473_event_time_microseconds",
"01474_executable_dictionary",
"01507_clickhouse_server_start_with_embedded_config",
"01514_distributed_cancel_query_on_error",
"01520_client_print_query_id", # expect-test
"01527_dist_sharding_key_dictGet_reload",
"01545_url_file_format_settings",
"01553_datetime64_comparison",
"01555_system_distribution_queue_mask",
"01558_ttest_scipy",
"01561_mann_whitney_scipy",
"01582_distinct_optimization",
"01586_storage_join_low_cardinality_key",
"01599_multiline_input_and_singleline_comments",
"01600_benchmark_query",
"01601_custom_tld",
"01601_proxy_protocol",
]
def check_result(result, error, return_code, reference, replace_map):
for old, new in replace_map.items():
result = result.replace(old.encode('utf-8'), new.encode('utf-8'))
@ -29,7 +103,7 @@ def check_result(result, error, return_code, reference, replace_map):
def run_client(bin_prefix, port, query, reference, replace_map={}):
# We can't use `text=True` since some tests may return binary data
client = subprocess.Popen([bin_prefix + '-client', '--port', str(port), '-m', '-n', '--testmode', '--use_antlr_parser=1'],
client = subprocess.Popen([bin_prefix + '-client', '--port', str(port), '-m', '-n', '--testmode'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, error = client.communicate(query.encode('utf-8'))
assert client.returncode is not None, "Client should exit after processing all queries"
@ -61,6 +135,11 @@ def random_str(length=10):
def test_sql_query(bin_prefix, sql_query, standalone_server):
for test in SKIP_LIST:
if test in sql_query:
pytest.skip("Test matches skip-list: " + test)
return
tcp_port = standalone_server.tcp_port
query_path = sql_query + ".sql"
@ -85,10 +164,15 @@ def test_sql_query(bin_prefix, sql_query, standalone_server):
run_client(bin_prefix, tcp_port, query, b'')
query = "SELECT 'SHOW ORPHANED DATABASES'; SHOW DATABASES;"
run_client(bin_prefix, tcp_port, query, b'SHOW ORPHANED DATABASES\n_temporary_and_external_tables\ndefault\nsystem\n')
run_client(bin_prefix, tcp_port, query, b'SHOW ORPHANED DATABASES\ndefault\nsystem\n')
def test_shell_query(bin_prefix, shell_query, standalone_server):
for test in SKIP_LIST:
if test in shell_query:
pytest.skip("Test matches skip-list: " + test)
return
tcp_port = standalone_server.tcp_port
shell_path = shell_query + ".sh"
@ -113,4 +197,4 @@ def test_shell_query(bin_prefix, shell_query, standalone_server):
run_client(bin_prefix, tcp_port, query, b'')
query = "SELECT 'SHOW ORPHANED DATABASES'; SHOW DATABASES;"
run_client(bin_prefix, tcp_port, query, b'SHOW ORPHANED DATABASES\n_temporary_and_external_tables\ndefault\nsystem\n')
run_client(bin_prefix, tcp_port, query, b'SHOW ORPHANED DATABASES\ndefault\nsystem\n')
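With the skip-list and fixtures above in place, a single stateless case can be debugged locally by pointing pytest at the queries directory of a source checkout and selecting the case with -k; a rough sketch (the conftest options for locating locally built binaries are not shown in this diff and are omitted here):

# Run one case through the pytest runner; -n 1 disables xdist parallelism for readable output.
python3 -m pytest tests/queries -k 00001_select_1 -n 1 --timeout=600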

View File

@ -16,7 +16,8 @@ export CLICKHOUSE_BINARY=${CLICKHOUSE_BINARY:="clickhouse"}
[ -x "$CLICKHOUSE_BINARY-client" ] && CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY-client}
[ -x "$CLICKHOUSE_BINARY" ] && CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY client}
export CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY-client}
export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="$CLICKHOUSE_CLIENT_BINARY ${CLICKHOUSE_CLIENT_OPT0:-} ${CLICKHOUSE_CLIENT_OPT:-}"}
export CLICKHOUSE_CLIENT_OPT="${CLICKHOUSE_CLIENT_OPT0:-} ${CLICKHOUSE_CLIENT_OPT:-}"
export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="$CLICKHOUSE_CLIENT_BINARY ${CLICKHOUSE_CLIENT_OPT:-}"}
[ -x "${CLICKHOUSE_BINARY}-local" ] && CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"}
[ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY} local"}
export CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"}
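This shell_config.sh change is the counterpart of the rewritten expect tests: they spawn the client as $env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT), so all client options now have to live in a single exported variable instead of being split between CLICKHOUSE_CLIENT_OPT0 and CLICKHOUSE_CLIENT_OPT. A minimal sketch of the resulting contract (values are placeholders, not from the commit):

# shell_config.sh side: one exported binary path plus one exported option string.
export CLICKHOUSE_CLIENT_BINARY="clickhouse-client"   # placeholder
export CLICKHOUSE_CLIENT_OPT="--port 9000"            # placeholder
export CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT_BINARY $CLICKHOUSE_CLIENT_OPT"

# expect side (Tcl), reading the same variables from the environment:
#   spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"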