diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 810bae86cb0..b404889a925 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -293,6 +293,7 @@ def clickhouse_execute_http(
         "http_receive_timeout": timeout,
         "http_send_timeout": timeout,
         "output_format_parallel_formatting": 0,
+        "max_rows_to_read": 0,  # Some queries read from system.text_log which might get too big
     }
     if settings is not None:
         params.update(settings)
diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql
index 0f13217c236..917789aec10 100644
--- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql
+++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql
@@ -9,7 +9,7 @@ system flush logs;
 drop table if exists logs;
 create view logs as select * from system.text_log where now() - toIntervalMinute(120) < event_time;
 
-SET max_rows_to_read = 0;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 
 -- Check that we don't have too many messages formatted with fmt::runtime or strings concatenation.
 -- 0.001 threshold should be always enough, the value was about 0.00025
diff --git a/tests/queries/0_stateless/00956_sensitive_data_masking.sh b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
index bd65b937648..55f24b7e888 100755
--- a/tests/queries/0_stateless/00956_sensitive_data_masking.sh
+++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
@@ -144,8 +144,7 @@ echo 9
 $CLICKHOUSE_CLIENT \
   --server_logs_file=/dev/null \
   --query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE event_date >= yesterday() and message like '%find_me%';
-    select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error
-
+    select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%' SETTINGS max_rows_to_read = 0" --ignore-error
 echo 'finish'
 rm -f "$tmp_file" >/dev/null 2>&1
 rm -f "$tmp_file2" >/dev/null 2>&1
diff --git a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh
index ab1b32ad90e..7fdf144a068 100755
--- a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh
+++ b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh
@@ -6,12 +6,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 ${CLICKHOUSE_CLIENT} --query="SELECT 6103"
 
-for (( i=1; i <= 50; i++ )) 
+for (( i=1; i <= 50; i++ ))
 do
     ${CLICKHOUSE_CLIENT} --query="SYSTEM FLUSH LOGS"
     sleep 0.1;
-    if [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0 AND event_date >= yesterday()") == 1 ]]; then echo 1; exit; fi;
+    if [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0 AND event_date >= yesterday() SETTINGS max_rows_to_read = 0") == 1 ]]; then echo 1; exit; fi;
 done;
diff --git a/tests/queries/0_stateless/01165_lost_part_empty_partition.sql b/tests/queries/0_stateless/01165_lost_part_empty_partition.sql
index b8998adbc52..787d4567218 100644
--- a/tests/queries/0_stateless/01165_lost_part_empty_partition.sql
+++ b/tests/queries/0_stateless/01165_lost_part_empty_partition.sql
@@ -1,5 +1,7 @@
 -- Tags: zookeeper
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
+
 create table rmt1 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '1') order by n partition by toYYYYMMDD(d);
 create table rmt2 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '2') order by n partition by toYYYYMMDD(d);
 
diff --git a/tests/queries/0_stateless/01319_query_formatting_in_server_log.sql b/tests/queries/0_stateless/01319_query_formatting_in_server_log.sql
index dc88d3d48f7..4428d4fbf9c 100644
--- a/tests/queries/0_stateless/01319_query_formatting_in_server_log.sql
+++ b/tests/queries/0_stateless/01319_query_formatting_in_server_log.sql
@@ -2,5 +2,6 @@ SeLeCt 'ab
 cd' /* hello */ -- world
 , 1;
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SYSTEM FLUSH LOGS;
 SELECT extract(message, 'SeL.+?;') FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%SeLeCt \'ab\n%' ORDER BY event_time DESC LIMIT 1 FORMAT TSVRaw;
diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
index eca2db359bb..66cebf16662 100755
--- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
+++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
@@ -30,7 +30,7 @@ $CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 --max_block_siz
 
 # Now wait for cleanup thread
 for _ in {1..60}; do
     $CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS"
-    [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 10)) ]] && break;
+    [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%' SETTINGS max_rows_to_read = 0") -gt $((SCALE - 10)) ]] && break;
     sleep 1
 done
diff --git a/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh b/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh
index ec318db98bf..b38d59cf6a6 100755
--- a/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh
+++ b/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh
@@ -72,6 +72,6 @@ ${CLICKHOUSE_CLIENT} --query_id "$query_id" --query "select i from simple where
 # We have to search the server's error log because the following warning message
 # is generated during pipeline destruction and thus is not sent to the client.
 ${CLICKHOUSE_CLIENT} --query "system flush logs"
-if [[ $(${CLICKHOUSE_CLIENT} --query "select count() > 0 from system.text_log where query_id = '$query_id' and level = 'Warning' and message like '%We have query_id removed but it\'s not recorded. This is a bug%' format TSVRaw") == 1 ]]; then echo "We have query_id removed but it's not recorded. This is a bug." >&2; exit 1; fi
+if [[ $(${CLICKHOUSE_CLIENT} --query "select count() > 0 from system.text_log where query_id = '$query_id' and level = 'Warning' and message like '%We have query_id removed but it\'s not recorded. This is a bug%' format TSVRaw SETTINGS max_rows_to_read = 0") == 1 ]]; then echo "We have query_id removed but it's not recorded. This is a bug." >&2; exit 1; fi
 
 ${CLICKHOUSE_CLIENT} --query "drop table simple"
diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh
index c620d3b6d9c..30730c2a074 100755
--- a/tests/queries/0_stateless/02262_column_ttl.sh
+++ b/tests/queries/0_stateless/02262_column_ttl.sh
@@ -35,7 +35,7 @@ $CLICKHOUSE_CLIENT -m -q "
     -- OPTIMIZE TABLE x FINAL will be done in background
     -- attach to it's log, via table UUID in query_id (see merger/mutator code).
     create materialized view this_text_log engine=Memory() as
-        select * from system.text_log where query_id like '%${ttl_02262_uuid}%';
+        select * from system.text_log where query_id like '%${ttl_02262_uuid}%' SETTINGS max_rows_to_read = 0;
 
     optimize table ttl_02262 final;
     system flush logs;
diff --git a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql
index 948ec9e9e8a..0e7a14ddf99 100644
--- a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql
+++ b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql
@@ -64,5 +64,6 @@ drop table rmt;
 drop table rmt2;
 
 system flush logs;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 select count() > 0 from system.text_log where yesterday() <= event_date and logger_name like '%' || currentDatabase() || '%' and message like '%Removing % parts from filesystem (concurrently): Parts:%';
 select count() > 1, countDistinct(thread_id) > 1 from system.text_log where yesterday() <= event_date and logger_name like '%' || currentDatabase() || '%' and message like '%Removing % parts in blocks range%';
diff --git a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh
index 1548bef857f..f3f8886f691 100755
--- a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh
+++ b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh
@@ -99,4 +99,4 @@ $CLICKHOUSE_CLIENT -q 'system flush logs'
 $CLICKHOUSE_CLIENT -q "select count() > 0 from system.text_log where event_date >= yesterday() and query_id like '$TEST_MARK%' and (
     message_format_string in ('Unexpected end of file while reading chunk header of HTTP chunked data', 'Unexpected EOF, got {} of {} bytes',
     'Query was cancelled or a client has unexpectedly dropped the connection') or
-    message like '%Connection reset by peer%' or message like '%Broken pipe, while writing to socket%')"
+    message like '%Connection reset by peer%' or message like '%Broken pipe, while writing to socket%') SETTINGS max_rows_to_read = 0"
diff --git a/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql b/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql
index 0597ac10cd7..c89f92ac081 100644
--- a/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql
+++ b/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql
@@ -15,6 +15,7 @@ alter table t materialize projection p_norm settings mutations_sync = 1;
 
 SYSTEM FLUSH LOGS;
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT * FROM system.text_log WHERE event_time >= now() - 30 and level == 'Error' and message like '%BAD_DATA_PART_NAME%'and message like '%p_norm%';
 
 DROP TABLE IF EXISTS t;
diff --git a/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh b/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh
index d93fe59134f..e1bd64e19ae 100755
--- a/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh
+++ b/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh
@@ -12,5 +12,5 @@
 do
     query_id=$(echo "select queryID() from (select sum(s), k from remote('127.0.0.{1,2}', view(select sum(number) s, bitAnd(number, 3) k from numbers_mt(1000000) group by k)) group by k) limit 1 settings group_by_two_level_threshold=1, max_threads=3, prefer_localhost_replica=1" | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- 2>&1)
     ${CLICKHOUSE_CLIENT} --query="system flush logs"
-    ${CLICKHOUSE_CLIENT} --query="select count() from system.text_log where event_date >= yesterday() and query_id = '${query_id}' and message like '%Converting aggregation data to two-level%'" | grep -P '^6$' && break;
+    ${CLICKHOUSE_CLIENT} --query="select count() from system.text_log where event_date >= yesterday() and query_id = '${query_id}' and message like '%Converting aggregation data to two-level%' SETTINGS max_rows_to_read = 0" | grep -P '^6$' && break;
 done
diff --git a/tests/queries/0_stateless/02570_fallback_from_async_insert.sh b/tests/queries/0_stateless/02570_fallback_from_async_insert.sh
index d7c8944b89d..23417e5366b 100755
--- a/tests/queries/0_stateless/02570_fallback_from_async_insert.sh
+++ b/tests/queries/0_stateless/02570_fallback_from_async_insert.sh
@@ -48,6 +48,7 @@ $CLICKHOUSE_CLIENT --query "
     SELECT 'id_' || splitByChar('_', query_id)[1] AS id FROM system.text_log
     WHERE query_id LIKE '%$query_id_suffix' AND message LIKE '%$message%'
     ORDER BY id
+    SETTINGS max_rows_to_read = 0
 "
 
 $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t_async_insert_fallback"
diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql
index 6b0677a80ae..8b6574562bf 100644
--- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql
+++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql
@@ -30,6 +30,7 @@ SELECT count() FROM 02581_trips SETTINGS select_sequential_consistency = 1;
 DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(10000000)) SETTINGS lightweight_deletes_sync = 2;
 SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part SETTINGS select_sequential_consistency=1;
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SYSTEM FLUSH LOGS;
 -- Check that in every mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' )
 -- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.' )
diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql
index 091a9c8171d..b1facadc790 100644
--- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql
+++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql
@@ -58,6 +58,7 @@ WHERE
 SETTINGS mutations_sync=2;
 
 SELECT count() from 02581_trips WHERE description = '';
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SYSTEM FLUSH LOGS;
 -- Check that in every mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' )
 -- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.' )
diff --git a/tests/queries/0_stateless/02675_sparse_columns_clear_column.sql b/tests/queries/0_stateless/02675_sparse_columns_clear_column.sql
index 781030ef7b4..1dac8eb23d0 100644
--- a/tests/queries/0_stateless/02675_sparse_columns_clear_column.sql
+++ b/tests/queries/0_stateless/02675_sparse_columns_clear_column.sql
@@ -30,5 +30,5 @@ ORDER BY column;
 DROP TABLE t_sparse_columns_clear SYNC;
 
 SYSTEM FLUSH LOGS;
-
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count(), groupArray(message) FROM system.text_log WHERE logger_name LIKE '%' || currentDatabase() || '.t_sparse_columns_clear' || '%' AND level = 'Error';
diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql
index 52d55bdbe11..dccac8fceb4 100644
--- a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql
+++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql
@@ -1,5 +1,6 @@
 -- Tags: no-parallel
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT 'Hello', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
 SYSTEM FLUSH LOGS;
 
diff --git a/tests/queries/0_stateless/02813_starting_in_text_log.sql b/tests/queries/0_stateless/02813_starting_in_text_log.sql
index e007f58189e..a320ab7eead 100644
--- a/tests/queries/0_stateless/02813_starting_in_text_log.sql
+++ b/tests/queries/0_stateless/02813_starting_in_text_log.sql
@@ -1,2 +1,3 @@
 SYSTEM FLUSH LOGS;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count() > 0 FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%Starting ClickHouse%';
diff --git a/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql b/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql
index f59d38ceb04..3c82958aead 100644
--- a/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql
+++ b/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql
@@ -8,6 +8,7 @@ SELECT count() FROM clusterAllReplicas('test_cluster_two_shard_three_replicas_lo
 
 SYSTEM FLUSH LOGS;
 SET allow_experimental_parallel_reading_from_replicas=0;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count() > 0 FROM system.text_log
 WHERE query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment = '02875_190aed82-2423-413b-ad4c-24dcca50f65b')
     AND message LIKE '%Parallel reading from replicas is disabled for cluster%';
diff --git a/tests/queries/0_stateless/02875_parallel_replicas_remote.sql b/tests/queries/0_stateless/02875_parallel_replicas_remote.sql
index 5fbaf34b621..e8e6f884f2a 100644
--- a/tests/queries/0_stateless/02875_parallel_replicas_remote.sql
+++ b/tests/queries/0_stateless/02875_parallel_replicas_remote.sql
@@ -8,6 +8,7 @@ SELECT count() FROM remote('127.0.0.{1..6}', currentDatabase(), tt) settings log
 
 SYSTEM FLUSH LOGS;
 SET allow_experimental_parallel_reading_from_replicas=0;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count() > 0 FROM system.text_log
 WHERE query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment = '02875_89f3c39b-1919-48cb-b66e-ef9904e73146')
    AND message LIKE '%Parallel reading from replicas is disabled for cluster%';
diff --git a/tests/queries/0_stateless/02882_replicated_fetch_checksums_doesnt_match.sql b/tests/queries/0_stateless/02882_replicated_fetch_checksums_doesnt_match.sql
index dc500aaff3b..a745625f17a 100644
--- a/tests/queries/0_stateless/02882_replicated_fetch_checksums_doesnt_match.sql
+++ b/tests/queries/0_stateless/02882_replicated_fetch_checksums_doesnt_match.sql
@@ -34,6 +34,7 @@ SELECT count() FROM checksums_r3;
 
 SYSTEM FLUSH LOGS;
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT * FROM system.text_log WHERE event_time >= now() - INTERVAL 120 SECOND and level == 'Error' and message like '%CHECKSUM_DOESNT_MATCH%' and logger_name like ('%' || currentDatabase() || '%checksums_r%');
 
 DROP TABLE IF EXISTS checksums_r3;
diff --git a/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql b/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql
index 9348ea1dc32..975803fb4b9 100644
--- a/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql
+++ b/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql
@@ -21,6 +21,7 @@ SELECT count(), min(k), max(k), avg(k) FROM t1 SETTINGS log_comment='02898_defau
 
 -- check logs
 SYSTEM FLUSH LOGS;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count() > 0 FROM system.text_log
 WHERE query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment='02898_default_190aed82-2423-413b-ad4c-24dcca50f65b')
     AND message LIKE '%Total rows to read: 3000%' SETTINGS allow_experimental_parallel_reading_from_replicas=0;
diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
index b77e5b0b402..aaecc7301e4 100755
--- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
+++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
@@ -56,7 +56,8 @@ for _ in {0..50}; do
             (
                 (logger_name = 'MergeTreeBackgroundExecutor' and message like '%{$table_uuid::$part_name}%No active replica has part $part_name or covering part%') or
                 (logger_name like '$table_uuid::$part_name (MergeFromLogEntryTask)' and message like '%No active replica has part $part_name or covering part%')
-            );
+            )
+            SETTINGS max_rows_to_read = 0;
     ")
     if [[ $no_active_repilica_messages -gt 0 ]]; then
         break
@@ -78,5 +79,6 @@ $CLICKHOUSE_CLIENT -m -q "
         (logger_name = 'MergeTreeBackgroundExecutor' and message like '%{$table_uuid::$part_name}%No active replica has part $part_name or covering part%') or
         (logger_name like '$table_uuid::$part_name (MergeFromLogEntryTask)' and message like '%No active replica has part $part_name or covering part%')
     )
-    group by level;
+    group by level
+    SETTINGS max_rows_to_read = 0;
 "
diff --git a/tests/queries/0_stateless/02935_parallel_replicas_settings.sql b/tests/queries/0_stateless/02935_parallel_replicas_settings.sql
index be6f1c2958c..0e7478443a5 100644
--- a/tests/queries/0_stateless/02935_parallel_replicas_settings.sql
+++ b/tests/queries/0_stateless/02935_parallel_replicas_settings.sql
@@ -11,7 +11,7 @@ SET cluster_for_parallel_replicas='parallel_replicas';
 SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f';
 
 SYSTEM FLUSH LOGS;
-
+SET max_rows_to_read = 0; -- system.text_log can be really big
 SELECT count() > 0 FROM system.text_log
 WHERE yesterday() <= event_date
     AND query_id in (select query_id from system.query_log where current_database=currentDatabase() AND log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f')
diff --git a/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql b/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql
index bb036a6c133..a08f35cfc1d 100644
--- a/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql
+++ b/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql
@@ -6,6 +6,7 @@ select conut(); -- { serverError UNKNOWN_FUNCTION }
 
 system flush logs;
 
+SET max_rows_to_read = 0; -- system.text_log can be really big
 select count() > 0 from system.text_log where message_format_string = 'Peak memory usage{}: {}.' and value1 is not null and value2 like '% MiB';
 
 select count() > 0 from system.text_log where level = 'Error' and message_format_string = 'Unknown {}{} identifier {} in scope {}{}' and value1 = 'expression' and value3 = '`count`' and value4 = 'SELECT count';
diff --git a/tests/queries/0_stateless/03141_fetches_errors_stress.sql b/tests/queries/0_stateless/03141_fetches_errors_stress.sql
index 2f6b0b08574..69829a44b58 100644
--- a/tests/queries/0_stateless/03141_fetches_errors_stress.sql
+++ b/tests/queries/0_stateless/03141_fetches_errors_stress.sql
@@ -11,6 +11,7 @@ system disable failpoint replicated_sends_failpoint;
 system sync replica data_r2;
 
 system flush logs;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 select event_time_microseconds, logger_name, message from system.text_log where level = 'Error' and message like '%Malformed chunked encoding%' order by 1 format LineAsString;
 
 -- { echoOn }
diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql
index 96221f27e73..968cc4a40a7 100644
--- a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql
+++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql
@@ -24,7 +24,7 @@ SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_ind
 EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100;
 
 SYSTEM FLUSH LOGS;
-
+SET max_rows_to_read = 0; -- system.text_log can be really big
 WITH
     (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_1') AS uuid,
     extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups
diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql
index b749e0c84b0..bf145d875fd 100644
--- a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql
+++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql
@@ -26,6 +26,7 @@ INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand()
 OPTIMIZE TABLE t_ind_merge_2 FINAL;
 
 SYSTEM FLUSH LOGS;
+SET max_rows_to_read = 0; -- system.text_log can be really big
 
 --- merged: a, c, d; gathered: b, e, f
 WITH
diff --git a/tests/queries/1_stateful/00177_memory_bound_merging.sh b/tests/queries/1_stateful/00177_memory_bound_merging.sh
index 3744d89f354..6d1e3871714 100755
--- a/tests/queries/1_stateful/00177_memory_bound_merging.sh
+++ b/tests/queries/1_stateful/00177_memory_bound_merging.sh
@@ -15,7 +15,8 @@ check_replicas_read_in_order() {
         SELECT COUNT() > 0
         FROM system.text_log
         WHERE query_id IN (SELECT query_id FROM system.query_log WHERE query_id != '$1' AND initial_query_id = '$1' AND event_date >= yesterday())
-            AND event_date >= yesterday() AND message ILIKE '%Reading%ranges in order%'"
+            AND event_date >= yesterday() AND message ILIKE '%Reading%ranges in order%'
+            SETTINGS max_rows_to_read=0"
 }
 
 # replicas should use reading in order following initiator's decision to execute aggregation in order.