Disable query_metric_log collection setting the interval to 0

This commit is contained in:
Pablo Marcos 2024-08-29 15:43:04 +00:00
parent d8959c2daa
commit e39cec986b
7 changed files with 30 additions and 11 deletions

View File

@ -1834,9 +1834,12 @@ Default value: 0 (no restriction).
## query_metric_log_interval {#query_metric_log_interval}
The interval in milliseconds at which the [query_metric_log](../../operations/system-tables/query_metric_log.md) for individual queries is collected.
If set to 0, it will take the `collect_interval_milliseconds` from the [query_metric_log setting](../../operations/server-configuration-parameters/settings.md#query_metric_log).
Default value: 0 If set to any negative value, it will take the value `collect_interval_milliseconds` from the [query_metric_log setting](../../operations/server-configuration-parameters/settings.md#query_metric_log) or default to 1000 if not present.
To disable the collection of a single query, set `query_metric_log_interval` to 0.
Default value: -1
## insert_quorum {#insert_quorum}

View File

@ -6,7 +6,7 @@ slug: /en/operations/system-tables/query_metric_log
Contains history of memory and metric values from table `system.events` for individual queries, periodically flushed to disk.
Once a query starts, data is collected at periodic intervals of `query_metric_log_interval` milliseconds (which is set to 1000 Once a query starts, data is collected at periodic intervals of `query_metric_log_interval` milliseconds (which is set to 1000
by default) and when the query finishes. by default).
Columns:
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.

View File

@ -514,7 +514,7 @@ class IColumn;
M(Bool, log_query_threads, false, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \ M(Bool, log_query_threads, false, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(Bool, log_query_views, true, "Log query dependent views into system.query_views_log table. This setting have effect only when 'log_queries' is true.", 0) \ M(Bool, log_query_views, true, "Log query dependent views into system.query_views_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \ M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \
M(UInt64, query_metric_log_interval, 0, "Periodic interval in milliseconds to collect query metric logs.", 0) \ M(Int64, query_metric_log_interval, -1, "Periodic interval in milliseconds to collect query metric logs.", 0) \
M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \ M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \
M(String, send_logs_source_regexp, "", "Send server text logs with specified regexp to match log source name. Empty means all sources.", 0) \ M(String, send_logs_source_regexp, "", "Send server text logs with specified regexp to match log source name. Empty means all sources.", 0) \
M(Bool, enable_optimize_predicate_expression, true, "If it is set to true, optimize predicates to subqueries.", 0) \ M(Bool, enable_optimize_predicate_expression, true, "If it is set to true, optimize predicates to subqueries.", 0) \

View File

@ -75,6 +75,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."}, {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
{"create_if_not_exists", false, false, "New setting."}, {"create_if_not_exists", false, false, "New setting."},
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"}, {"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
{"query_metric_log_interval", 0, -1, "New setting."},
} }
}, },
{"24.8", {"24.8",
@ -84,7 +85,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"restore_replace_external_engines_to_null", false, false, "New setting."}, {"restore_replace_external_engines_to_null", false, false, "New setting."},
{"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."}, {"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."},
{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"},
{"query_metric_log_interval", 0, 0, "New setting."},
{"use_hive_partitioning", false, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines."}, {"use_hive_partitioning", false, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines."},
{"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"}, {"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"},
{"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."}, {"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},

View File

@ -302,6 +302,14 @@ addStatusInfoToQueryLogElement(QueryLogElement & element, const QueryStatusInfo
addPrivilegesInfoToQueryLogElement(element, context_ptr); addPrivilegesInfoToQueryLogElement(element, context_ptr);
} }
/// Resolves the effective query_metric_log collection interval in milliseconds.
/// A negative setting value means "not set explicitly by the query": fall back to the
/// server-level config `query_metric_log.collect_interval_milliseconds`, defaulting
/// to 1000 ms when the config key is absent.
/// A returned value of 0 disables collection (callers guard with `interval > 0`
/// before calling startQuery).
static Int64 getQueryMetricLogInterval(ContextPtr context)
{
auto interval_milliseconds = context->getSettingsRef().query_metric_log_interval;
if (interval_milliseconds < 0)
interval_milliseconds = context->getConfigRef().getUInt64("query_metric_log.collect_interval_milliseconds", 1000);
return interval_milliseconds;
}
QueryLogElement logQueryStart( QueryLogElement logQueryStart(
const std::chrono::time_point<std::chrono::system_clock> & query_start_time, const std::chrono::time_point<std::chrono::system_clock> & query_start_time,
@ -376,9 +384,8 @@ QueryLogElement logQueryStart(
if (auto query_metric_log = context->getQueryMetricLog(); query_metric_log && !internal) if (auto query_metric_log = context->getQueryMetricLog(); query_metric_log && !internal)
{ {
auto interval_milliseconds = context->getSettingsRef().query_metric_log_interval; auto interval_milliseconds = getQueryMetricLogInterval(context);
if (interval_milliseconds == 0) if (interval_milliseconds > 0)
interval_milliseconds = context->getConfigRef().getUInt64("query_metric_log.collect_interval_milliseconds", 1000);
query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds); query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds);
} }
@ -515,8 +522,12 @@ void logQueryFinish(
} }
if (auto query_metric_log = context->getQueryMetricLog(); query_metric_log && !internal) if (auto query_metric_log = context->getQueryMetricLog(); query_metric_log && !internal)
{
auto interval_milliseconds = getQueryMetricLogInterval(context);
if (interval_milliseconds > 0)
query_metric_log->finishQuery(elem.client_info.current_query_id); query_metric_log->finishQuery(elem.client_info.current_query_id);
} }
}
void logQueryException( void logQueryException(
QueryLogElement & elem, QueryLogElement & elem,

View File

@ -1,3 +1,4 @@
1 1 1 1 1 1
1 1 1 1 1 1
1 1 1 1 1 1
0

View File

@ -10,6 +10,7 @@ readonly query_prefix=$CLICKHOUSE_DATABASE
$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(3) + sleep(2) FORMAT Null" & $CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(3) + sleep(2) FORMAT Null" &
$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1234" -q "SELECT sleep(3) + sleep(2) SETTINGS query_metric_log_interval=1234 FORMAT Null" & $CLICKHOUSE_CLIENT --query-id="${query_prefix}_1234" -q "SELECT sleep(3) + sleep(2) SETTINGS query_metric_log_interval=1234 FORMAT Null" &
$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(3) + sleep(2) SETTINGS query_metric_log_interval=123 FORMAT Null" & $CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(3) + sleep(2) SETTINGS query_metric_log_interval=123 FORMAT Null" &
$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(3) + sleep(2) SETTINGS query_metric_log_interval=0 FORMAT Null" &
wait wait
@ -29,10 +30,13 @@ function check_log()
ORDER BY event_time_microseconds ORDER BY event_time_microseconds
OFFSET 1 OFFSET 1
) )
SELECT count() BETWEEN least(5000 / $interval - 2, 5000 / $interval * 0.9) AND (5000 / $interval - 1) * 1.1, avg(diff) BETWEEN $interval * 0.9 AND $interval * 1.1, stddevPopStable(diff) BETWEEN 0 AND $interval * 0.5 FROM diff SELECT count() BETWEEN least(5000 / $interval - 2, 5000 / $interval * 0.9) AND (5000 / $interval - 1) * 1.1, avg(diff) BETWEEN $interval * 0.9 AND $interval * 1.1, stddevPopStable(diff) BETWEEN 0 AND $interval * 0.2 FROM diff
""" """
} }
check_log 1000 check_log 1000
check_log 1234 check_log 1234
check_log 123 check_log 123
# query_metric_log_interval=0 disables the collection altogether
$CLICKHOUSE_CLIENT -m -q """SELECT count() FROM system.query_metric_log WHERE query_id = '${query_prefix}_0'"""