Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-16 03:12:43 +00:00)
Simplify the code a bit

commit 814c6a899d, parent 4cb7ac57e4
@@ -180,8 +180,8 @@ try
         return res;
     };
 
-    auto result = std::make_shared<Chunk>(to_single_chunk(partial_results));
-    new_entry_size_in_bytes = result->allocatedBytes(); // updated because compression potentially affects the size of the single chunk vs the aggregate size of individual chunks
+    auto result = to_single_chunk(partial_results);
+    new_entry_size_in_bytes = result.allocatedBytes(); // updated because compression potentially affects the size of the single chunk vs the aggregate size of individual chunks
 
     std::lock_guard lock(mutex);
 
@@ -200,7 +200,7 @@ try
     for (auto it = cache.begin(); it != cache.end();)
         if (is_stale(it->first))
         {
-            cache_size_in_bytes -= it->second->allocatedBytes();
+            cache_size_in_bytes -= it->second.allocatedBytes();
             it = cache.erase(it);
             ++removed_items;
         }
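The hunk above erases stale entries while iterating, using the standard `it = cache.erase(it)` idiom that keeps the iterator valid. A self-contained sketch of the pattern with plain standard-library types (the `is_stale` predicate here is a hypothetical stand-in, not the ClickHouse one):

#include <cstddef>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, int> cache = {{"a", 1}, {"b", 2}, {"c", 3}};

    // Hypothetical staleness predicate, standing in for is_stale(it->first).
    auto is_stale = [](const std::string & key) { return key == "b"; };

    size_t removed_items = 0;
    for (auto it = cache.begin(); it != cache.end();)
    {
        if (is_stale(it->first))
        {
            it = cache.erase(it);   // erase() returns the iterator following the removed element
            ++removed_items;
        }
        else
        {
            ++it;                   // only advance manually when nothing was erased
        }
    }

    return (cache.size() == 2 && removed_items == 1) ? 0 : 1;
}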
@@ -212,13 +212,13 @@ try
     /// Insert or replace if enough space
     if (sufficient_space_in_cache())
     {
-        cache_size_in_bytes += result->allocatedBytes();
+        cache_size_in_bytes += result.allocatedBytes();
         if (auto it = cache.find(key); it != cache.end())
-            cache_size_in_bytes -= it->second->allocatedBytes(); /// key replacement
+            cache_size_in_bytes -= it->second.allocatedBytes(); /// key replacement
 
         /// cache[key] = result; /// does no replacement for unclear reasons
         cache.erase(key);
-        cache[key] = result;
+        cache[key] = std::move(result);
 
         LOG_DEBUG(&Poco::Logger::get("QueryResultCache"), "Stored result of query {}", key.queryStringFromAst());
     }
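The erase-then-assign pair above (and the commented-out `cache[key] = result;`) touches a subtle property of std::unordered_map: assignment through operator[] replaces only the mapped value, never the stored key object. One plausible explanation for the "does no replacement" observation, not asserted by the commit itself: if the key type carries fields that equality and the hasher ignore (for example an expiry timestamp), those fields stay stale unless the old entry is erased first. A minimal sketch of that behaviour with a hypothetical Key/KeyHasher, unrelated to the ClickHouse ones:

#include <cassert>
#include <chrono>
#include <string>
#include <unordered_map>

// Hypothetical key type: the expiry timestamp is deliberately ignored
// by both equality and the hasher.
struct Key
{
    std::string query;
    std::chrono::system_clock::time_point expires_at;

    bool operator==(const Key & other) const { return query == other.query; }
};

struct KeyHasher
{
    size_t operator()(const Key & key) const { return std::hash<std::string>()(key.query); }
};

int main()
{
    std::unordered_map<Key, int, KeyHasher> cache;

    const auto now = std::chrono::system_clock::now();
    Key old_key{"SELECT 1", now};
    cache[old_key] = 1;

    // Same query, later expiry: operator[] finds the equivalent key and
    // assigns the mapped value, but the stored key keeps the old expires_at.
    Key new_key{"SELECT 1", now + std::chrono::hours(1)};
    cache[new_key] = 2;
    assert(cache.begin()->first.expires_at == now);

    // Erasing first and re-inserting replaces the key object as well.
    cache.erase(new_key);
    cache[new_key] = 2;
    assert(cache.begin()->first.expires_at == now + std::chrono::hours(1));

    return 0;
}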
@@ -260,14 +260,14 @@ QueryResultCache::Reader::Reader(const Cache & cache_, const Key & key, size_t &
     if (it->first.expires_at < std::chrono::system_clock::now())
     {
         Cache & cache_rw = const_cast<Cache &>(cache_);
-        cache_size_in_bytes_ -= it->second->allocatedBytes();
+        cache_size_in_bytes_ -= it->second.allocatedBytes();
         cache_rw.erase(it);
         LOG_DEBUG(&Poco::Logger::get("QueryResultCache"), "Stale entry found and removed for query {}", key.queryStringFromAst());
         return;
     }
 
     LOG_DEBUG(&Poco::Logger::get("QueryResultCache"), "Entry found for query {}", key.queryStringFromAst());
-    pipe = Pipe(std::make_shared<SourceFromSingleChunk>(key.header, it->second->clone()));
+    pipe = Pipe(std::make_shared<SourceFromSingleChunk>(key.header, it->second.clone()));
 }
 
 bool QueryResultCache::Reader::hasCacheEntryForKey() const
@@ -66,10 +66,8 @@ private:
         size_t operator()(const Key & key) const;
     };
 
-    using Entry = std::shared_ptr<Chunk>;
-
     /// query --> query result
-    using Cache = std::unordered_map<Key, Entry, KeyHasher>;
+    using Cache = std::unordered_map<Key, Chunk, KeyHasher>;
 
     /// query --> query execution count
     using TimesExecutedMap = std::unordered_map<Key, size_t, KeyHasher>;
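This header change is the heart of the commit: cache entries are now stored as Chunk values inside the map instead of behind a shared_ptr, which is what turns the `->` accesses into `.` accesses in the .cpp hunks above and makes the std::move on insertion worthwhile. A rough standalone sketch of the same shift, using a stand-in Chunk type rather than the real DB::Chunk:

#include <cstddef>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Stand-in for the real Chunk: owns a buffer and can report its footprint.
struct Chunk
{
    std::vector<char> data;
    size_t allocatedBytes() const { return data.capacity(); }
};

int main()
{
    // Before: std::unordered_map<std::string, std::shared_ptr<Chunk>> cache;
    //         with an extra heap allocation and pointer indirection per entry.
    // After:  the payload lives directly in the map node.
    std::unordered_map<std::string, Chunk> cache;

    Chunk result{std::vector<char>(1024)};
    const size_t size = result.allocatedBytes();   // '.' instead of '->'
    cache["SELECT 1"] = std::move(result);         // moves the buffer instead of copying it

    return cache.at("SELECT 1").allocatedBytes() == size ? 0 : 1;
}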
@@ -38,7 +38,7 @@ void StorageSystemQueryResultCache::fillData(MutableColumns & res_columns, Conte
 
     std::lock_guard lock(query_result_cache->mutex);
 
-    for (const auto & [key, entry] : query_result_cache->cache)
+    for (const auto & [key, result] : query_result_cache->cache)
     {
         /// Showing other user's queries is considered a security risk
         if (key.username.has_value() && key.username != username)
@@ -50,7 +50,7 @@ void StorageSystemQueryResultCache::fillData(MutableColumns & res_columns, Conte
         res_columns[3]->insert(key.expires_at < std::chrono::system_clock::now());
         res_columns[4]->insert(!key.username.has_value());
         res_columns[5]->insert(key.partition_key);
-        res_columns[6]->insert(entry->allocatedBytes());
+        res_columns[6]->insert(result.allocatedBytes());
     }
 }
 