Merge pull request #50934 from azat/tests/fix-query_log

Fix tests sanity checks and avoid dropping system.query_log table
commit f1f8b302bf, authored by Kseniia Sumarokova on 2023-06-15 12:44:23 +02:00, committed by GitHub
19 changed files with 34 additions and 27 deletions
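
The change set has two threads: tests that inspect system.query_log used to run under no-parallel and wipe the shared table with DROP TABLE system.query_log SYNC, and now scope their lookups to the test's own database instead; and the check-style sanity checks that enforce this convention were no-ops (EXCLUDE_DIRS filtered out everything under tests/) and are fixed to actually scan the test tree. A minimal sketch of the resulting lookup pattern, with an illustrative query id:

    # each stateless test runs in its own database, visible to shell tests as $CLICKHOUSE_DATABASE
    QUERY_ID="${CLICKHOUSE_DATABASE}_example_q1"
    ${CLICKHOUSE_CLIENT} --query_id="$QUERY_ID" --query "SELECT 1 FORMAT Null"
    ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
    # filter instead of dropping the shared system.query_log table:
    ${CLICKHOUSE_CLIENT} --query "SELECT type FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$QUERY_ID'"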


@@ -6,8 +6,10 @@ rules:
     level: warning
     indent-sequences: consistent
   line-length:
-    # there are some bash -c "", so this is OK
-    max: 300
+    # there are:
+    # - bash -c "", so this is OK
+    # - yaml in tests
+    max: 1000
     level: warning
   comments:
     min-spaces-from-content: 1
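
For context, yamllint picks up the .yamllint file at the repository root automatically, and level: warning means an over-long line is reported without failing the check; raising max to 1000 accommodates the YAML embedded in tests. A hypothetical invocation (the file name is illustrative):

    # long lines in test YAML now warn rather than exceed the limit
    yamllint tests/queries/0_stateless/some_test.yml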


@@ -25,7 +25,7 @@ QUERY_ID="${CLICKHOUSE_DATABASE}_$(date +%s)_02883_q1"
 ${CLICKHOUSE_CLIENT} -m --query "$EXCEPTION_BEFORE_START_QUERY" --query_id="$QUERY_ID" >/dev/null 2>&1
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-${CLICKHOUSE_CLIENT} --query "SELECT type == 'ExceptionBeforeStart' as expected_type, query_duration_ms <= 1000 as elapsed_more_than_one_second FROM system.query_log WHERE query_id='$QUERY_ID'"
+${CLICKHOUSE_CLIENT} --query "SELECT type == 'ExceptionBeforeStart' as expected_type, query_duration_ms <= 1000 as elapsed_more_than_one_second FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id='$QUERY_ID'"

 # Now we test with a query that will take 1+ seconds. The CLI should show that as part of the output format
 OK_QUERY_JSON="
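
For context, 'ExceptionBeforeStart' rows are written when a query fails before execution begins, which is why the test asserts both the row type and a sub-second recorded duration. A hedged sketch of provoking such a row (the failing query is illustrative):

    QUERY_ID="${CLICKHOUSE_DATABASE}_manual_ebs"
    # unknown identifier: fails during analysis, before execution starts
    ${CLICKHOUSE_CLIENT} --query_id="$QUERY_ID" --query "SELECT no_such_column" >/dev/null 2>&1
    ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
    ${CLICKHOUSE_CLIENT} --query "SELECT type FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$QUERY_ID'"
    # expected output: ExceptionBeforeStart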


@@ -1,9 +1,8 @@
 -- Tags: no-parallel
 -- Tag no-parallel: Messes with internal cache

--- Start with empty query cache (QC) and query log
+-- Start with empty query cache (QC)
 SYSTEM DROP QUERY CACHE;
-DROP TABLE system.query_log SYNC;

 -- Insert an entry into the query cache.
 SELECT 1 SETTINGS use_query_cache = true;
@@ -22,6 +21,7 @@ SYSTEM FLUSH LOGS;
 SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
 FROM system.query_log
 WHERE type = 'QueryFinish'
+  AND current_database = currentDatabase()
   AND query = 'select 1 SETTINGS use_query_cache = true;';

 SYSTEM DROP QUERY CACHE;


@@ -3,4 +3,5 @@
 0 1
 ---
 1
+0 1
 1 0
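
For context, each reference row pairs ProfileEvents['QueryCacheHits'] with ProfileEvents['QueryCacheMisses'] for one logged execution; with the log no longer dropped mid-test, both executions now show up in event-time order:

    # 0 1  -> first run: cache miss, result written to the query cache
    # 1 0  -> second run: served from the query cache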


@@ -1,9 +1,8 @@
 -- Tags: no-parallel
 -- Tag no-parallel: Messes with internal cache

--- Start with empty query cache QC and query log
+-- Start with empty query cache QC
 SYSTEM DROP QUERY CACHE;
-DROP TABLE system.query_log SYNC;

 -- Run a query with QC on. The first execution is a QC miss.
 SELECT '---';
@@ -13,6 +12,7 @@ SYSTEM FLUSH LOGS;
 SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
 FROM system.query_log
 WHERE type = 'QueryFinish'
+  AND current_database = currentDatabase()
   AND query = 'SELECT 1 SETTINGS use_query_cache = true;';
@@ -20,11 +20,12 @@ WHERE type = 'QueryFinish'
 SELECT '---';
 SELECT 1 SETTINGS use_query_cache = true;

-DROP TABLE system.query_log SYNC;
 SYSTEM FLUSH LOGS;
 SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
 FROM system.query_log
 WHERE type = 'QueryFinish'
-  AND query = 'SELECT 1 SETTINGS use_query_cache = true;';
+  AND current_database = currentDatabase()
+  AND query = 'SELECT 1 SETTINGS use_query_cache = true;'
+ORDER BY event_time_microseconds;

 SYSTEM DROP QUERY CACHE;


@@ -1,9 +1,8 @@
 -- Tags: no-parallel
 -- Tag no-parallel: Messes with internal cache

--- Start with empty query cache (QC) and query log.
+-- Start with empty query cache (QC)
 SYSTEM DROP QUERY CACHE;
-DROP TABLE system.query_log SYNC;

 -- Run query whose result gets cached in the query cache.
 -- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care
@@ -24,6 +23,7 @@ SYSTEM FLUSH LOGS;
 SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
 FROM system.query_log
 WHERE type = 'QueryFinish'
+  AND current_database = currentDatabase()
   AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;';

 SYSTEM DROP QUERY CACHE;


@@ -9,4 +9,5 @@
 -----
 1
 1
+0 1
 1 0


@@ -22,10 +22,7 @@ SELECT COUNT(*) FROM system.query_cache;
 SELECT '-----';

--- Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit.
--- Get rid of log of previous SELECT
-DROP TABLE system.query_log SYNC;
-
+/* Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit. */
 SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
 SELECT COUNT(*) FROM system.query_cache;
@@ -34,6 +31,9 @@ SYSTEM FLUSH LOGS;
 SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
 FROM system.query_log
 WHERE type = 'QueryFinish'
-  AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;';
+  AND current_database = currentDatabase()
+  /* NOTE: the client incorrectly joins comments from the previous line into the query, hence LIKE */
+  AND query LIKE '%\nSELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;'
+ORDER BY event_time_microseconds;

 SYSTEM DROP QUERY CACHE;
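
For context on the switch to LIKE: when the client sends this script, the /* */ comment on the preceding line ends up recorded as part of the query text in system.query_log, so an exact equality match no longer fires. Roughly what the query column holds (illustrative):

    # query column as recorded, comment and newline included:
    #   /* Run same query with passive mode again. ... */\nSELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
    # hence matching on the trailing '%\nSELECT 1 ...' instead of strict equality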


@@ -18,5 +18,5 @@ query_id="${CLICKHOUSE_DATABASE}_02499_$RANDOM$RANDOM"
 $CLICKHOUSE_CLIENT --query_id="$query_id" -q "select ts from t order by toUnixTimestamp64Nano(ts) limit 10 format Null settings max_block_size = $max_block_size, optimize_read_in_order = 1;"

 $CLICKHOUSE_CLIENT -q "system flush logs;"
-$CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select read_rows <= $max_block_size from system.query_log where event_date >= yesterday() and query_id = {query_id:String} and type = 'QueryFinish';"
+$CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select read_rows <= $max_block_size from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} and type = 'QueryFinish';"


@@ -16,7 +16,7 @@ do
     query_id="${CLICKHOUSE_DATABASE}_02521_${i}_$RANDOM$RANDOM"
     $CLICKHOUSE_CLIENT --query_id="$query_id" -q "INSERT INTO test_02521_insert_delay SELECT number, toString(number) FROM numbers(${i}, 1)"
     $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
-    $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select ProfileEvents['DelayedInsertsMilliseconds'] as delay from system.query_log where event_date >= yesterday() and query_id = {query_id:String} order by delay desc limit 1"
+    $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select ProfileEvents['DelayedInsertsMilliseconds'] as delay from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} order by delay desc limit 1"
 done

 $CLICKHOUSE_CLIENT -q "INSERT INTO test_02521_insert_delay VALUES(0, 'This query throws error')" 2>&1 | grep -o 'TOO_MANY_PARTS' | head -n 1


@@ -24,6 +24,7 @@ SET param_new_tbl_name = 02661_t1;
 CREATE TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} (a UInt64) ENGINE = MergeTree ORDER BY tuple();
 RENAME TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} TO {new_db_name:Identifier}.{new_tbl_name:Identifier};
+-- NOTE: no 'database = currentDatabase()' on purpose
 SELECT name FROM system.tables WHERE name = {new_tbl_name:String};

 -- Case 3: RENAME DICTIONARY
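
The added NOTE is deliberate: the test creates and renames the table inside {new_db_name:Identifier}, not the test's own database, so a currentDatabase() filter on system.tables would hide exactly the row being checked. In sketch form:

    # the table lives in {new_db_name:Identifier}, not currentDatabase(), so the lookup
    #   SELECT name FROM system.tables WHERE name = {new_tbl_name:String};
    # is intentionally unfiltered; the per-test parameterised name keeps it unambiguous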


@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: long

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
@@ -14,7 +15,7 @@ $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
 while true
 do
-    res=$($CLICKHOUSE_CLIENT -q "select query, event_time from system.query_log where query_id = '$QUERY_ID' and query like 'select%' limit 1")
+    res=$($CLICKHOUSE_CLIENT -q "select query, event_time from system.query_log where query_id = '$QUERY_ID' and current_database = '$CLICKHOUSE_DATABASE' and query like 'select%' limit 1")
     if [ -n "$res" ]; then
         break
     fi


@@ -21,7 +21,7 @@ detach table 02681_undrop_detach;
 undrop table 02681_undrop_detach; -- { serverError 57 }
 attach table 02681_undrop_detach;
 alter table 02681_undrop_detach update num = 2 where id = 1;
-select command from system.mutations where table='02681_undrop_detach' limit 1;
+select command from system.mutations where table='02681_undrop_detach' and database=currentDatabase() limit 1;
 drop table 02681_undrop_detach sync;

 select 'test MergeTree with cluster';


@@ -21,4 +21,6 @@ $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
 # - replace() is required to avoid non-deterministic behaviour of
 #   normalizeQuery() that replaces the identifier with "?" only if it has more
 #   than two numbers.
+#
+# NOTE: no current_database = '$CLICKHOUSE_DATABASE' filter on purpose (since ON CLUSTER queries do not have current_database passed)
 $CLICKHOUSE_CLIENT -q "SELECT normalizeQuery(replace(query, currentDatabase(), 'default')) FROM system.query_log WHERE initial_query_id = '$query_id' AND type != 'QueryStart' ORDER BY event_time_microseconds"
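
For context, the stabilisation happens in two steps: replace() first rewrites the randomised test database name to the literal 'default', and only then does normalizeQuery() run, since (per the comment above) it folds an identifier into "?" only when the identifier carries more than two digits, which a generated database name may or may not do. A hedged sketch with made-up values:

    # illustrative; the real query text comes from system.query_log
    $CLICKHOUSE_CLIENT -q "SELECT normalizeQuery(replace('OPTIMIZE TABLE test_1234567.data FINAL', 'test_1234567', 'default'))"
    # -> OPTIMIZE TABLE default.data FINAL, stable however many digits the db name has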


@@ -12,7 +12,7 @@ function has_used_parallel_replicas () {
         sumIf(read_rows, is_initial_query) as read_rows,
         sumIf(read_bytes, is_initial_query) as read_bytes
     FROM system.query_log
-    WHERE event_date >= yesterday() and initial_query_id LIKE '$1%'
+    WHERE event_date >= yesterday() and initial_query_id LIKE '$1%' AND current_database = '$CLICKHOUSE_DATABASE'
     GROUP BY initial_query_id
     ORDER BY min(event_time_microseconds) ASC
     FORMAT TSV"
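
The helper sums rows for every query sharing the given initial_query_id prefix, so the added current_database filter keeps concurrently running tests (which write to the same shared log) out of the aggregation. Presumably invoked along these lines (the prefix is illustrative):

    # query ids issued earlier in the test start with this prefix
    has_used_parallel_replicas "${CLICKHOUSE_DATABASE}_parallel_q1"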


@@ -12,6 +12,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 check_replicas_read_in_order() {
     # to check this we actually look for at least one log message from MergeTreeInOrderSelectProcessor.
     # hopefully loggers' names are a bit more stable than the log messages themselves
+    #
+    # NOTE: lack of "current_database = '$CLICKHOUSE_DATABASE'" filter is made on purpose
     $CLICKHOUSE_CLIENT -nq "
         SYSTEM FLUSH LOGS;
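
Here the missing filter is deliberate: the check inspects server-side log messages rather than per-query records, and those are not attributed to a test database. A hedged sketch of what such a logger-name lookup could look like (the actual query is cut off by this hunk):

    # assumption: the check greps system.text_log for the processor's logger name
    $CLICKHOUSE_CLIENT -nq "
        SYSTEM FLUSH LOGS;
        SELECT count() > 0 FROM system.text_log WHERE logger_name LIKE '%MergeTreeInOrderSelectProcessor%';"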


@@ -13,7 +13,7 @@
 # and then to run formatter only for the specified files.

 ROOT_PATH=$(git rev-parse --show-toplevel)
-EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/|utils/keeper-bench/example.yaml'
+EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml'

 # From [1]:
 # But since array_to_string_internal() in array.c still loops over array
@@ -163,14 +163,12 @@ find $ROOT_PATH -not -path $ROOT_PATH'/contrib*' \( -name '*.yaml' -or -name '*.
 # Tests should not be named with "fail" in their names. It makes looking at the results less convenient.
 find $ROOT_PATH/tests/queries -iname '*fail*' |
-    grep -vP $EXCLUDE_DIRS |
     grep . && echo 'Tests should not be named with "fail" in their names. It makes looking at the results less convenient when you search for "fail" substring in browser.'

 # Queries to system.query_log/system.query_thread_log should have current_database = currentDatabase() condition
 # NOTE: it is not that accurate, but at least something.
 tests_with_query_log=( $(
     find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
-        grep -vP $EXCLUDE_DIRS |
         xargs grep --with-filename -e system.query_log -e system.query_thread_log | cut -d: -f1 | sort -u
 ) )
 for test_case in "${tests_with_query_log[@]}"; do
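
The loop body is elided by this hunk; presumably it greps each collected test for either spelling of the condition and reports offenders, roughly:

    # sketch of the assumed per-test check (body not shown in the diff):
    #   grep -q -e "currentDatabase()" -e "CLICKHOUSE_DATABASE" "$test_case" ||
    #       echo "queries to system.query_log should filter by current_database ($test_case)"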
@@ -205,7 +203,6 @@ tables_with_database_column=(
 # NOTE: it is not that accurate, but at least something.
 tests_with_database_column=( $(
     find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
-        grep -vP $EXCLUDE_DIRS |
         xargs grep --with-filename $(printf -- "-e %s " "${tables_with_database_column[@]}") |
         grep -v -e ':--' -e ':#' |
         cut -d: -f1 | sort -u
@@ -225,7 +222,6 @@ done
 # NOTE: it is not that accurate, but at least something.
 tests_with_replicated_merge_tree=( $(
     find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
-        grep -vP $EXCLUDE_DIRS |
         xargs grep --with-filename -e "Replicated.*MergeTree[ ]*(.*" | cut -d: -f1 | sort -u
 ) )
 for test_case in "${tests_with_replicated_merge_tree[@]}"; do
@@ -233,7 +229,7 @@ for test_case in "${tests_with_replicated_merge_tree[@]}"; do
         *.gen.*)
             ;;
         *.sh)
-            test_case_zk_prefix="\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX"
+            test_case_zk_prefix="\(\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX\|{database}\)"
             grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)"
             ;;
         *.sql|*.sql.j2)
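
For reference, the convention this last check enforces: replicated tables created by tests must embed a per-run component in their ZooKeeper paths so parallel runs do not collide, and .sh tests may now use either macro. Illustrative conforming definitions:

    #   ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t', 'r1')
    #   ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t', 'r1')   # now also accepted in .sh tests
    # the *.sql branch (truncated above) presumably applies the analogous {database} requirement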