diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index 63750b90b5a..a07afe0c99e 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -92,6 +92,8 @@ function run_tests()
 
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
+        # Cannot be used with replicated database, due to distributed_ddl_output_mode=none
+        ADDITIONAL_OPTIONS+=('--no-left-queries-check')
         ADDITIONAL_OPTIONS+=('--jobs')
         ADDITIONAL_OPTIONS+=('2')
     else
diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 13d45e7f8cd..a1f1f467e59 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -356,6 +356,7 @@ class FailureReason(enum.Enum):
     RESULT_DIFF = "result differs with reference: "
     TOO_LONG = "Test runs too long (> 60s). Make it faster."
     INTERNAL_QUERY_FAIL = "Internal query (CREATE/DROP DATABASE) failed:"
+    LEFT_QUERIES = "Queries left in background after the test finished:"
 
     # SKIPPED reasons
     DISABLED = "disabled"
@@ -671,6 +672,15 @@ class TestCase:
             # We're in Flaky Check mode, check the run time as well while we're at it.
             return TestResult(self.name, TestStatus.FAIL, FailureReason.TOO_LONG, total_time, description)
 
+        left_queries_check = args.no_left_queries_check is False
+        if self.tags and 'no-left-queries-check' in self.tags:
+            left_queries_check = False
+        if left_queries_check:
+            processlist = get_processlist_after_test(self.testcase_args)
+            if processlist:
+                description += "\n{}\n".format(json.dumps(processlist, indent=4))
+                return TestResult(self.name, TestStatus.FAIL, FailureReason.LEFT_QUERIES, total_time, description)
+
         if os.path.exists(self.stdout_file):
             os.remove(self.stdout_file)
         if os.path.exists(self.stderr_file):
@@ -747,16 +757,6 @@
             proc.stdout is None or 'Exception' not in proc.stdout)
         need_drop_database = not maybe_passed
 
-        left_queries_check = args.no_left_queries_check is False
-        if self.tags and 'no-left-queries-check' in self.tags:
-            left_queries_check = False
-        if left_queries_check:
-            processlist = get_processlist_after_test(args)
-            if processlist:
-                print(colored(f"\nFound queries left in processlist after running {args.testcase_basename} (database={database}):", args, "red", attrs=["bold"]))
-                print(json.dumps(processlist, indent=4))
-                exit_code.value = 1
-
         if need_drop_database:
             seconds_left = max(args.timeout - (datetime.now() - start_time).total_seconds(), 20)
             try:
diff --git a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
index 0e40918257d..6e840f44930 100755
--- a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
+++ b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
@@ -15,3 +15,5 @@ while [[ $i -lt $retries ]]; do
     timeout 1.8s ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_distributed_connections=2&max_threads=1" -d "$query" && break
     ((++i))
 done
+
+clickhouse_test_wait_queries 60
diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh
index b659d550fa4..142b83f1e0b 100755
--- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh
+++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh
@@ -10,46 +10,40 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
"$CURDIR"/../shell_config.sh -function thread_create { - while true; do - $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" - sleep 0.0$RANDOM - done +function thread_create() +{ + $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" + sleep 0.0$RANDOM } -function thread_drop { - while true; do - $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" - sleep 0.0$RANDOM - done +function thread_drop() +{ + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" + sleep 0.0$RANDOM } -function thread_rename { - while true; do - $CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)' - sleep 0.0$RANDOM - done +function thread_rename() +{ + $CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)' + sleep 0.0$RANDOM } -function thread_select { - while true; do - $CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done +function thread_select() +{ + $CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM } -function thread_insert { - while true; do - $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: '| grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done +function thread_insert() +{ + $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: '| grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM } -function thread_insert_select { - while true; do - $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done +function thread_insert_select() +{ + $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM } export -f thread_create @@ -65,18 +59,18 @@ export -f thread_insert_select function test_with_engine { echo "Testing $1" - timeout 10 bash -c "thread_create t1 $1" & - timeout 10 bash -c "thread_create t2 $1" & - timeout 10 bash -c 'thread_drop t1' & - timeout 10 bash -c 'thread_drop t2' & - timeout 10 bash -c 'thread_rename t1 t2' & - timeout 10 bash -c 'thread_rename t2 t1' & - timeout 10 bash -c 'thread_select t1' & - timeout 10 bash -c 'thread_select t2' & - timeout 10 bash -c 'thread_insert t1 5' & - timeout 10 bash -c 'thread_insert t2 10' & - timeout 10 bash -c 'thread_insert_select t1 t2' & - timeout 10 bash -c 'thread_insert_select t2 t1' & + clickhouse_client_loop_timeout 10 thread_create t1 $1 & + clickhouse_client_loop_timeout 10 thread_create t2 $1 & + clickhouse_client_loop_timeout 10 thread_drop t1 & + clickhouse_client_loop_timeout 10 thread_drop t2 & + clickhouse_client_loop_timeout 10 thread_rename t1 t2 & + clickhouse_client_loop_timeout 10 thread_rename t2 t1 & + clickhouse_client_loop_timeout 10 thread_select t1 & + clickhouse_client_loop_timeout 10 thread_select t2 & + clickhouse_client_loop_timeout 10 
+    clickhouse_client_loop_timeout 10 thread_insert t1 5 &
+    clickhouse_client_loop_timeout 10 thread_insert t2 10 &
+    clickhouse_client_loop_timeout 10 thread_insert_select t1 t2 &
+    clickhouse_client_loop_timeout 10 thread_insert_select t2 t1 &
     wait
 
     echo "Done $1"
diff --git a/tests/queries/0_stateless/01675_data_type_coroutine.sh b/tests/queries/0_stateless/01675_data_type_coroutine.sh
index 4106d0d7f73..687ff6ac473 100755
--- a/tests/queries/0_stateless/01675_data_type_coroutine.sh
+++ b/tests/queries/0_stateless/01675_data_type_coroutine.sh
@@ -17,3 +17,9 @@ while [[ $counter -lt $retries ]]; do
 done
 
 echo 'Ok'
+
+# Wait for the queries to finish, since there is a 'Maximum parse depth' error on the client,
+# and in this case it simply resets the connection and does not read everything
+# from the server, so there is no guarantee that the query is stopped when the
+# client returns.
+clickhouse_test_wait_queries 60
diff --git a/tests/queries/0_stateless/01675_data_type_coroutine_2.sh b/tests/queries/0_stateless/01675_data_type_coroutine_2.sh
index 501b9d4ab12..9c4ed81e345 100755
--- a/tests/queries/0_stateless/01675_data_type_coroutine_2.sh
+++ b/tests/queries/0_stateless/01675_data_type_coroutine_2.sh
@@ -17,3 +17,9 @@ done
 
 #echo "I = ${I}"
 echo 'Ok'
+
+# Wait for the queries to finish, since there is a 'Maximum parse depth' error on the client,
+# and in this case it simply resets the connection and does not read everything
+# from the server, so there is no guarantee that the query is stopped when the
+# client returns.
+clickhouse_test_wait_queries 60
diff --git a/tests/queries/0_stateless/01731_async_task_queue_wait.sh b/tests/queries/0_stateless/01731_async_task_queue_wait.sh
index e0babf3c6ff..6fdd676f336 100755
--- a/tests/queries/0_stateless/01731_async_task_queue_wait.sh
+++ b/tests/queries/0_stateless/01731_async_task_queue_wait.sh
@@ -8,3 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # cancellation with async_socket_for_remote=1 (that ignores
 # max_distributed_connections)
 timeout --signal=SIGINT 1 ${CLICKHOUSE_CLIENT} --max_distributed_connections=1 --max_block_size=2 --interactive_delay=900000 -q "select number + sleep(0.3) as x from remote('127.{2,3}', system.numbers) settings max_block_size = 2" 2>&1 | grep "Empty task was returned from async task queue" || true
+
+clickhouse_test_wait_queries 60
diff --git a/tests/queries/0_stateless/02104_overcommit_memory.sh b/tests/queries/0_stateless/02104_overcommit_memory.sh
index 140557304c6..7fdf74a30bf 100755
--- a/tests/queries/0_stateless/02104_overcommit_memory.sh
+++ b/tests/queries/0_stateless/02104_overcommit_memory.sh
@@ -5,21 +5,19 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q 'CREATE USER IF NOT EXISTS u1 IDENTIFIED WITH no_password' -$CLICKHOUSE_CLIENT -q 'GRANT ALL ON *.* TO u1' +$CLICKHOUSE_CLIENT -q 'DROP USER IF EXISTS u02104' +$CLICKHOUSE_CLIENT -q 'CREATE USER IF NOT EXISTS u02104 IDENTIFIED WITH no_password' +$CLICKHOUSE_CLIENT -q 'GRANT ALL ON *.* TO u02104' function overcommited() { - while true; do - $CLICKHOUSE_CLIENT -u u1 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_guaranteed_memory_usage=1,memory_usage_overcommit_max_wait_microseconds=500' 2>&1 | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo "OVERCOMMITED WITH USER LIMIT IS KILLED" - done + $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_guaranteed_memory_usage=1,memory_usage_overcommit_max_wait_microseconds=500' 2>&1 \ + | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo "OVERCOMMITED WITH USER LIMIT IS KILLED" } function expect_execution() { - while true; do - $CLICKHOUSE_CLIENT -u u1 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_memory_usage_for_user=5000000,max_guaranteed_memory_usage=2,memory_usage_overcommit_max_wait_microseconds=500' >/dev/null 2>/dev/null - done + $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_memory_usage_for_user=5000000,max_guaranteed_memory_usage=2,memory_usage_overcommit_max_wait_microseconds=500' >/dev/null 2>/dev/null } export -f overcommited @@ -29,9 +27,9 @@ function user_test() { for _ in {1..10}; do - timeout 10 bash -c overcommited & - timeout 10 bash -c expect_execution & - done; + clickhouse_client_loop_timeout 10 overcommited & + clickhouse_client_loop_timeout 10 expect_execution & + done wait } @@ -45,4 +43,4 @@ else echo "OVERCOMMITED WITH USER LIMIT WAS KILLED" fi -$CLICKHOUSE_CLIENT -q 'DROP USER IF EXISTS u1' +$CLICKHOUSE_CLIENT -q 'DROP USER IF EXISTS u02104' diff --git a/tests/queries/0_stateless/02151_http_s_structure_set_eof.sh b/tests/queries/0_stateless/02151_http_s_structure_set_eof.sh index 448fa9bfede..c3dfc4d03a8 100755 --- a/tests/queries/0_stateless/02151_http_s_structure_set_eof.sh +++ b/tests/queries/0_stateless/02151_http_s_structure_set_eof.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: long CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -25,3 +26,9 @@ timeout 0.15s ${CLICKHOUSE_CURL} -sS -F "s=@$tmp_file;" "${CLICKHOUSE_URL}&s_str echo $? timeout 0.15s ${CLICKHOUSE_CURL} -sS -F "s=@$tmp_file;" "${CLICKHOUSE_URL}&s_structure=key+Int&query=SELECT+dummy+IN+s&input_format_parallel_parsing=false" -o /dev/null echo $? + +# wait until the query above will start, +# so that clickhouse_test_wait_queries will see them. +sleep 5 + +clickhouse_test_wait_queries 60