#!/usr/bin/env bash
# Tags: no-parallel, no-fasttest

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

set -e

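# Two source tables and two dictionaries over them: dict1 uses a FLAT layout,
# dict2 a CACHE layout; the short lifetime (1-5 s) lets reloads happen while
# the workers below are running.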
$CLICKHOUSE_CLIENT -mn -q "
    DROP DATABASE IF EXISTS database_for_dict;
    DROP TABLE IF EXISTS table_for_dict1;
    DROP TABLE IF EXISTS table_for_dict2;

    CREATE TABLE table_for_dict1 (key_column UInt64, value_column String) ENGINE = MergeTree ORDER BY key_column;
    CREATE TABLE table_for_dict2 (key_column UInt64, value_column String) ENGINE = MergeTree ORDER BY key_column;

    INSERT INTO table_for_dict1 SELECT number, toString(number) from numbers(1000);
    INSERT INTO table_for_dict2 SELECT number, toString(number) from numbers(1000, 1000);

    CREATE DATABASE database_for_dict;

    CREATE DICTIONARY database_for_dict.dict1 (key_column UInt64, value_column String)
    PRIMARY KEY key_column
    SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' PASSWORD '' DB '$CLICKHOUSE_DATABASE'))
    LIFETIME(MIN 1 MAX 5)
    LAYOUT(FLAT());

    CREATE DICTIONARY database_for_dict.dict2 (key_column UInt64, value_column String)
    PRIMARY KEY key_column
    SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict2' PASSWORD '' DB '$CLICKHOUSE_DATABASE'))
    LIFETIME(MIN 1 MAX 5)
    LAYOUT(CACHE(SIZE_IN_CELLS 150));
"

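# Worker functions; each performs one iteration and is driven in a loop by
# clickhouse_client_loop_timeout below. Workers whose queries race against the
# concurrent ATTACH/DETACH ignore per-query errors with '||:'.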
function thread1()
{
    $CLICKHOUSE_CLIENT --query "SELECT * FROM system.dictionaries FORMAT Null"
}

function thread2()
{
    $CLICKHOUSE_CLIENT --query "ATTACH DICTIONARY database_for_dict.dict1" ||:
}

function thread3()
{
    $CLICKHOUSE_CLIENT --query "ATTACH DICTIONARY database_for_dict.dict2" ||:
}

function thread4()
{
    $CLICKHOUSE_CLIENT -n -q "
        SELECT * FROM database_for_dict.dict1 FORMAT Null;
        SELECT * FROM database_for_dict.dict2 FORMAT Null;
    " ||:
}

function thread5()
{
    $CLICKHOUSE_CLIENT -n -q "
        SELECT dictGetString('database_for_dict.dict1', 'value_column', toUInt64(number)) FROM numbers(1000) FORMAT Null;
        SELECT dictGetString('database_for_dict.dict2', 'value_column', toUInt64(number)) FROM numbers(1000) FORMAT Null;
    " ||:
}

function thread6()
{
    $CLICKHOUSE_CLIENT -q "DETACH DICTIONARY database_for_dict.dict1"
}

function thread7()
{
    $CLICKHOUSE_CLIENT -q "DETACH DICTIONARY database_for_dict.dict2"
}
export -f thread1
export -f thread2
export -f thread3
export -f thread4
export -f thread5
export -f thread6
export -f thread7

TIMEOUT=10

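# Launch four concurrent copies of every worker in the background; each copy
# keeps re-running its worker for TIMEOUT seconds. stderr is discarded so that
# expected errors from the racing DDL do not pollute the reference output.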
clickhouse_client_loop_timeout $TIMEOUT thread1 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread2 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread3 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread4 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread5 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread6 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread7 2> /dev/null &

clickhouse_client_loop_timeout $TIMEOUT thread1 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread2 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread3 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread4 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread5 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread6 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread7 2> /dev/null &

clickhouse_client_loop_timeout $TIMEOUT thread1 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread2 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread3 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread4 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread5 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread6 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread7 2> /dev/null &

clickhouse_client_loop_timeout $TIMEOUT thread1 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread2 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread3 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread4 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread5 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread6 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT thread7 2> /dev/null &

wait
$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"

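# The DETACH workers may leave the dictionaries detached; re-attach them before
# the final cleanup (IF NOT EXISTS makes this a no-op when they are attached).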
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1"
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2"

$CLICKHOUSE_CLIENT -n -q "
    DROP DATABASE database_for_dict;
    DROP TABLE table_for_dict1;
    DROP TABLE table_for_dict2;
"