Mirror of https://github.com/ClickHouse/ClickHouse.git

Merge pull request #55296 from rschu1ze/qc-better-logging

Improve logging in query cache

Commit 59a3b3356f
@@ -34,10 +34,10 @@ Returns an array of selected substrings. Empty substrings may be selected when:
 Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
 
 :::note
-The behavior of parameter `max_substrings` changed starting with ClickHouse v22.11. In versions older than that, `max_substrings` > 0 meant that `max_substring`-many splits were performed and that the remainder of the string was returned as the final element of the list.
+The behavior of parameter `max_substrings` changed starting with ClickHouse v22.11. In versions older than that, `max_substrings > 0` meant that `max_substrings`-many splits were performed and that the remainder of the string was returned as the final element of the list.
 For example,
-- in v22.10: `SELECT splitByChar('=', 'a=b=c=d', 2); -- ['a','b','c=d']`
-- in v22.11: `SELECT splitByChar('=', 'a=b=c=d', 2); -- ['a','b']`
+- in v22.10: `SELECT splitByChar('=', 'a=b=c=d', 2);` returned `['a','b','c=d']`
+- in v22.11: `SELECT splitByChar('=', 'a=b=c=d', 2);` returned `['a','b']`
 
 A behavior similar to ClickHouse pre-v22.11 can be achieved by setting
 [splitby_max_substrings_includes_remaining_string](../../operations/settings/settings.md#splitby_max_substrings_includes_remaining_string)
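For comparison, the remaining-string behavior can be tried directly. This is a sketch, not authoritative documentation: it assumes the setting linked above is available (it was added around the same release) and that the expected outputs match the setting's description.

```sql
-- New default behavior (v22.11+): max_substrings truncates the result.
SELECT splitByChar('=', 'a=b=c=d', 2); -- ['a','b']

-- With the setting enabled, the remainder is kept in the last element,
-- which is similar (not identical) to the pre-v22.11 result ['a','b','c=d'].
SELECT splitByChar('=', 'a=b=c=d', 2)
SETTINGS splitby_max_substrings_includes_remaining_string = 1; -- ['a','b=c=d'] (assumed)
```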
@@ -10,6 +10,8 @@
 #include <Common/ProfileEvents.h>
 #include <Common/SipHash.h>
 #include <Common/TTLCachePolicy.h>
+#include <Common/formatReadable.h>
+#include <Common/quoteString.h>
 #include <Core/Settings.h>
 #include <base/defines.h> /// chassert
@@ -190,7 +192,7 @@ QueryCache::Writer::Writer(
     if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key))
     {
         skip_insert = true; /// Key already contained in cache and did not expire yet --> don't replace it
-        LOG_TRACE(logger, "Skipped insert (non-stale entry found), query: {}", key.query_string);
+        LOG_TRACE(logger, "Skipped insert because the cache contains a non-stale query result for query {}", doubleQuoteString(key.query_string));
     }
 }
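To observe this code path from the outside, one can run the same query twice with the cache enabled: the second run is answered from the cache, and any further write attempt is skipped with the message above. A minimal sketch, assuming text_log is enabled in the server configuration; the logger name "QueryCache" matches the Poco logger used in this file.

```sql
-- Run twice: the first run stores the result, the second run reads it.
SELECT sum(number) FROM numbers(1000000) SETTINGS use_query_cache = 1;
SELECT sum(number) FROM numbers(1000000) SETTINGS use_query_cache = 1;

-- Inspect the trace messages introduced by this commit (assumes text_log).
SELECT event_time, message
FROM system.text_log
WHERE logger_name = 'QueryCache'
ORDER BY event_time DESC
LIMIT 10;
```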
@@ -260,16 +262,17 @@ void QueryCache::Writer::finalizeWrite()
 
     /// Check some reasons why the entry must not be cached:
 
-    if (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - query_start_time) < min_query_runtime)
+    if (auto query_runtime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - query_start_time); query_runtime < min_query_runtime)
     {
-        LOG_TRACE(logger, "Skipped insert (query not expensive enough), query: {}", key.query_string);
+        LOG_TRACE(logger, "Skipped insert because the query is not expensive enough, query runtime: {} msec (minimum query runtime: {} msec), query: {}",
+            query_runtime.count(), min_query_runtime.count(), doubleQuoteString(key.query_string));
         return;
     }
 
     if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key))
     {
         /// Same check as in ctor because a parallel Writer could have inserted the current key in the meantime
-        LOG_TRACE(logger, "Skipped insert (non-stale entry found), query: {}", key.query_string);
+        LOG_TRACE(logger, "Skipped insert because the cache contains a non-stale query result for query {}", doubleQuoteString(key.query_string));
         return;
     }
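The runtime guard corresponds to the user-level setting query_cache_min_query_duration (in milliseconds). A hedged sketch of triggering the new, more informative skip message:

```sql
-- A trivial query finishes well below 5000 ms, so the result is not cached;
-- the trace log now reports both the measured and the minimum runtime.
SELECT 1
SETTINGS use_query_cache = 1, query_cache_min_query_duration = 5000;
```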
@@ -352,13 +355,14 @@ void QueryCache::Writer::finalizeWrite()
 
     if ((new_entry_size_in_bytes > max_entry_size_in_bytes) || (new_entry_size_in_rows > max_entry_size_in_rows))
     {
-        LOG_TRACE(logger, "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, key.query_string);
+        LOG_TRACE(logger, "Skipped insert because the query result is too big, query result size: {} (maximum size: {}), query result size in rows: {} (maximum size: {}), query: {}",
+            formatReadableSizeWithBinarySuffix(new_entry_size_in_bytes, 0), formatReadableSizeWithBinarySuffix(max_entry_size_in_bytes, 0), new_entry_size_in_rows, max_entry_size_in_rows, doubleQuoteString(key.query_string));
         return;
     }
 
     cache.set(key, query_result);
 
-    LOG_TRACE(logger, "Stored query result, query: {}", key.query_string);
+    LOG_TRACE(logger, "Stored query result of query {}", doubleQuoteString(key.query_string));
 
     was_finalized = true;
 }
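Once finalizeWrite() succeeds, the stored entry is observable from SQL. A sketch for verification; the system.query_cache table and its `query` and `result_size` columns are assumptions based on recent ClickHouse releases:

```sql
SELECT 1 SETTINGS use_query_cache = 1;
SELECT query, result_size FROM system.query_cache; -- column set assumed
SYSTEM DROP QUERY CACHE; -- clean up afterwards
```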
@@ -389,7 +393,7 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar
 
     if (!entry.has_value())
     {
-        LOG_TRACE(logger, "No entry found for query {}", key.query_string);
+        LOG_TRACE(logger, "No query result found for query {}", doubleQuoteString(key.query_string));
         return;
     }
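The "no query result found" branch is easy to provoke by allowing reads while disabling writes. A sketch, assuming the existing user-level settings use_query_cache and enable_writes_to_query_cache:

```sql
SYSTEM DROP QUERY CACHE;
-- Reads allowed, writes disabled: the Reader finds nothing and logs the
-- new "No query result found for query ..." message.
SELECT 1 SETTINGS use_query_cache = 1, enable_writes_to_query_cache = 0;
```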
@@ -398,13 +402,13 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar
 
     if (!entry_key.is_shared && entry_key.user_name != key.user_name)
     {
-        LOG_TRACE(logger, "Inaccessible entry found for query {}", key.query_string);
+        LOG_TRACE(logger, "Inaccessible query result found for query {}", doubleQuoteString(key.query_string));
         return;
     }
 
     if (IsStale()(entry_key))
     {
-        LOG_TRACE(logger, "Stale entry found for query {}", key.query_string);
+        LOG_TRACE(logger, "Stale query result found for query {}", doubleQuoteString(key.query_string));
         return;
     }
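Both guards map to user-visible settings: staleness is driven by query_cache_ttl (seconds), and the accessibility check by query_cache_share_between_users. A hedged sketch:

```sql
-- Store an entry with a 1-second TTL; after waiting, a re-read logs
-- "Stale query result found for query ..." and recomputes the result.
SELECT 1 SETTINGS use_query_cache = 1, query_cache_ttl = 1;
SELECT sleep(2);
SELECT 1 SETTINGS use_query_cache = 1, query_cache_ttl = 1;

-- Entries are per-user by default; other users hit the "Inaccessible query
-- result" branch unless the entry was stored as shared:
SELECT 1 SETTINGS use_query_cache = 1, query_cache_share_between_users = 1;
```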
@@ -442,7 +446,7 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar
         buildSourceFromChunks(entry_key.header, std::move(decompressed_chunks), entry_mapped->totals, entry_mapped->extremes);
     }
 
-    LOG_TRACE(logger, "Entry found for query {}", key.query_string);
+    LOG_TRACE(logger, "Query result found for query {}", doubleQuoteString(key.query_string));
 }
 
 bool QueryCache::Reader::hasCacheEntryForKey() const
@@ -1098,7 +1098,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         {
             if (astContainsNonDeterministicFunctions(ast, context) && !settings.query_cache_store_results_of_queries_with_nondeterministic_functions)
                 throw Exception(ErrorCodes::CANNOT_USE_QUERY_CACHE_WITH_NONDETERMINISTIC_FUNCTIONS,
-                    "Unable to cache the query result because the query contains a non-deterministic function. Use setting query_cache_store_results_of_queries_with_nondeterministic_functions = 1 to store the query result regardless.");
+                    "Unable to cache the query result because the query contains a non-deterministic function. Use setting `query_cache_store_results_of_queries_with_nondeterministic_functions = 1` to cache the query result regardless");
 
             QueryCache::Key key(
                 ast, res.pipeline.getHeader(),
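The exception is reachable with any non-deterministic function; the setting named in the message is the escape hatch. Sketch:

```sql
-- Throws CANNOT_USE_QUERY_CACHE_WITH_NONDETERMINISTIC_FUNCTIONS:
SELECT now() SETTINGS use_query_cache = 1;

-- Caches the (non-deterministic) result regardless:
SELECT now()
SETTINGS use_query_cache = 1,
         query_cache_store_results_of_queries_with_nondeterministic_functions = 1;
```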
@@ -1107,7 +1107,11 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
                 settings.query_cache_compress_entries);
 
             const size_t num_query_runs = query_cache->recordQueryRun(key);
-            if (num_query_runs > settings.query_cache_min_query_runs)
+            if (num_query_runs <= settings.query_cache_min_query_runs)
+            {
+                LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert because the query ran {} times but the minimum required number of query runs to cache the query result is {}", num_query_runs, settings.query_cache_min_query_runs);
+            }
+            else
             {
                 auto query_cache_writer = std::make_shared<QueryCache::Writer>(query_cache->createWriter(
                     key,
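The run counter maps to the setting query_cache_min_query_runs; the new branch makes skipped runs visible in the trace log instead of silent. A sketch: with a minimum of 2 runs, the first two executions log the skip message, and only the third (whose run count exceeds the minimum) stores the result.

```sql
-- Execute three times: runs 1 and 2 log "Skipped insert because the query
-- ran {} times ...", run 3 caches the result.
SELECT count() FROM system.one
SETTINGS use_query_cache = 1, query_cache_min_query_runs = 2;
```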