Merge pull request #61961 from nickitat/rm_01193_metadata_loading
Remove flaky test `01193_metadata_loading`
commit bce6817fcc

tests/queries/0_stateless/01193_metadata_loading.reference
@@ -1,5 +0,0 @@
1000 0 2020-06-25 hello [1,2] [3,4]
1000 1 2020-06-26 word [10,20] [30,40]
ok
8000 0 2020-06-25 hello [1,2] [3,4]
8000 1 2020-06-26 word [10,20] [30,40]

tests/queries/0_stateless/01193_metadata_loading.sh
@@ -1,53 +0,0 @@
#!/usr/bin/env bash
# Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-parallel, no-fasttest, no-s3-storage, no-sanitize-coverage

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

# Check that attaching a database with a large number of tables is not too slow.
# It is the worst way of making a performance test; nevertheless, it can detect a significant slowdown
# and other issues that are usually only found by stress tests.

db="test_01193_$RANDOM_$RANDOM_$RANDOM_$RANDOM"
|
|
||||||
tables=1000
|
|
||||||
threads=10
|
|
||||||
count_multiplier=1
|
|
||||||
max_time_ms=1500
|
|
||||||
|
|
||||||
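# create_tables generates $tables pairs of CREATE TABLE + INSERT statements for tables named
# table_$1_<N>, cycling through the five engines, and pipes the whole batch into clickhouse-client.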
create_tables() {
|
|
||||||
$CLICKHOUSE_CLIENT -q "WITH
|
|
||||||
'CREATE TABLE $db.table_$1_' AS create1,
|
|
||||||
' (i UInt64, d Date, s String, n Nested(i UInt8, f Float32)) ENGINE=' AS create2,
|
|
||||||
['Memory', 'File(CSV)', 'Log', 'StripeLog', 'MergeTree ORDER BY i'] AS engines,
|
|
||||||
'INSERT INTO $db.table_$1_' AS insert1,
|
|
||||||
' VALUES (0, ''2020-06-25'', ''hello'', [1, 2], [3, 4]), (1, ''2020-06-26'', ''word'', [10, 20], [30, 40])' AS insert2
|
|
||||||
SELECT arrayStringConcat(
|
|
||||||
groupArray(
|
|
||||||
create1 || toString(number) || create2 || engines[1 + number % length(engines)] || ';\n' ||
|
|
||||||
insert1 || toString(number) || insert2
|
|
||||||
), ';\n') FROM numbers($tables) SETTINGS max_bytes_before_external_group_by = 0 FORMAT TSVRaw;" | $CLICKHOUSE_CLIENT -nm
|
|
||||||
}
|
|
||||||
|
|
||||||
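# Create the database, then populate it from $threads parallel clients.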
$CLICKHOUSE_CLIENT -q "CREATE DATABASE $db"
|
|
||||||
|
|
||||||
for i in $(seq 1 $threads); do
|
|
||||||
create_tables "$i" &
|
|
||||||
done
|
|
||||||
wait
|
|
||||||
|
|
||||||
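# Create a Merge table over all generated tables and sanity-check a subset of the data
# via the merge() table function before any timing is done.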
$CLICKHOUSE_CLIENT -q "CREATE TABLE $db.table_merge (i UInt64, d Date, s String, n Nested(i UInt8, f Float32)) ENGINE=Merge('$db', '^table_')"
|
|
||||||
$CLICKHOUSE_CLIENT -q "SELECT count() * $count_multiplier, i, d, s, n.i, n.f FROM merge('$db', '^table_9') GROUP BY i, d, s, n.i, n.f ORDER BY i"
|
|
||||||
|
|
||||||
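# Detach and re-attach the database 50 times; every ATTACH has to reload the metadata of all tables
# and is logged under a predictable query_id so its duration can be read back from system.query_log.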
for i in {1..50}; do
    $CLICKHOUSE_CLIENT -q "DETACH DATABASE $db"
    $CLICKHOUSE_CLIENT --query_profiler_real_time_period_ns=100000000 --query_profiler_cpu_time_period_ns=100000000 -q "ATTACH DATABASE $db" --query_id="$db-$i"
done

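# Pull the ATTACH durations out of system.query_log and require the median to stay below $max_time_ms;
# on failure the raw duration list is printed instead of 'ok'.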
$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
|
|
||||||
durations=$($CLICKHOUSE_CLIENT -q "SELECT groupArray(query_duration_ms) FROM system.query_log WHERE current_database = currentDatabase() AND query_id LIKE '$db-%' AND type=2")
|
|
||||||
$CLICKHOUSE_CLIENT -q "SELECT 'durations', '$db', $durations FORMAT Null"
|
|
||||||
$CLICKHOUSE_CLIENT -q "SELECT if(quantile(0.5)(arrayJoin($durations)) < $max_time_ms, 'ok', toString($durations))"
|
|
||||||
|
|
||||||
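# Verify that all data survived the detach/attach cycles, then clean up.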
$CLICKHOUSE_CLIENT -q "SELECT count() * $count_multiplier, i, d, s, n.i, n.f FROM $db.table_merge GROUP BY i, d, s, n.i, n.f ORDER BY i"
|
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS $db"