Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 17:44:23 +00:00
Caches for multiple mutations
commit a7b05589b6
parent 9e93ddc689
src/Storages/StorageMergeTree.cpp

@@ -2184,16 +2184,29 @@ std::unique_ptr<MergeTreeSettings> StorageMergeTree::getDefaultSettings() const
 PreparedSetsCachePtr StorageMergeTree::getPreparedSetsCache(Int64 mutation_id)
 {
     auto l = std::lock_guard(mutation_prepared_sets_cache_mutex);
-    if (mutation_id_of_prepared_sets_cache == mutation_id)
+
+    /// Cleanup stale entries where the shared_ptr is expired.
+    while (!mutation_prepared_sets_cache.empty())
     {
-        auto existing_set_cache = mutation_prepared_sets_cache.lock();
+        auto it = mutation_prepared_sets_cache.begin();
+        if (it->second.lock())
+            break;
+        mutation_prepared_sets_cache.erase(it);
+    }
+
+    /// Look up an existing entry.
+    auto it = mutation_prepared_sets_cache.find(mutation_id);
+    if (it != mutation_prepared_sets_cache.end())
+    {
+        /// If the entry is still alive, return it.
+        auto existing_set_cache = it->second.lock();
         if (existing_set_cache)
             return existing_set_cache;
     }
 
+    /// Create new entry.
     auto cache = std::make_shared<PreparedSetsCache>();
-    mutation_prepared_sets_cache = cache;
-    mutation_id_of_prepared_sets_cache = mutation_id;
+    mutation_prepared_sets_cache[mutation_id] = cache;
     return cache;
 }
 
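The new lookup does three things under one lock: prune expired entries from the front of the map, reuse a still-live cache for this mutation id, and otherwise create a fresh cache that is stored only as a weak reference. Below is a standalone sketch of that pattern, not ClickHouse code: Cache stands in for PreparedSetsCache, and PerMutationCaches/getOrCreate are illustrative names for the role StorageMergeTree::getPreparedSetsCache plays here.

    // Standalone sketch (not ClickHouse code) of the lazy-cleanup weak_ptr map
    // pattern used by the new getPreparedSetsCache.
    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    struct Cache { /* prepared sets would live here */ };
    using CachePtr = std::shared_ptr<Cache>;

    class PerMutationCaches
    {
    public:
        CachePtr getOrCreate(int64_t mutation_id)
        {
            std::lock_guard lock(mutex);

            // Drop entries whose owners (mutation tasks) have all finished.
            // Stopping at the first live entry keeps the scan cheap; stale
            // entries behind it are removed by later calls.
            while (!caches.empty())
            {
                auto it = caches.begin();
                if (it->second.lock())
                    break;
                caches.erase(it);
            }

            // Reuse a still-alive cache for this mutation, if any.
            if (auto it = caches.find(mutation_id); it != caches.end())
                if (auto existing = it->second.lock())
                    return existing;

            // Otherwise create a fresh cache; the map keeps only a weak
            // reference, so the cache dies with the last task that holds it.
            auto cache = std::make_shared<Cache>();
            caches[mutation_id] = cache;
            return cache;
        }

    private:
        std::mutex mutex;
        std::map<int64_t, std::weak_ptr<Cache>> caches;
    };

    int main()
    {
        PerMutationCaches storage;

        auto a1 = storage.getOrCreate(1);
        auto a2 = storage.getOrCreate(1);
        assert(a1 == a2);                // concurrent tasks of mutation 1 share one cache

        auto b = storage.getOrCreate(2); // a different mutation gets its own cache
        assert(b != a1);

        a1.reset();
        a2.reset();                      // all tasks of mutation 1 finished -> cache freed
        auto a3 = storage.getOrCreate(1);
        assert(a3 && a3 != b);           // the same mutation id now gets a rebuilt cache
        return 0;
    }

Since mutation ids are assigned in increasing order, the oldest entries tend to sit at the front of the ordered map, so the prune loop usually removes exactly the mutations that have already finished.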
src/Storages/StorageMergeTree.h

@@ -156,8 +156,7 @@ private:
     /// The goal is to avoiding consuming a lot of memory when the same big sets are used by multiple tasks at the same time.
     /// If the tasks are executed without time overlap, we will destroy the cache to free memory, and the next task might rebuild the same sets.
     std::mutex mutation_prepared_sets_cache_mutex;
-    PreparedSetsCachePtr::weak_type mutation_prepared_sets_cache;
-    Int64 mutation_id_of_prepared_sets_cache = 0;
+    std::map<Int64, PreparedSetsCachePtr::weak_type> mutation_prepared_sets_cache;
 
     void loadMutations();
 
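In the header, the single weak slot plus mutation_id_of_prepared_sets_cache is replaced by a map keyed by mutation id, so caches for several concurrently running mutations no longer displace one another. PreparedSetsCachePtr is a std::shared_ptr alias, so PreparedSetsCachePtr::weak_type is the matching std::weak_ptr (a C++17 member alias); the storage keeps only these weak references, and only running mutation tasks keep a cache alive. A minimal sketch of that lifecycle, again with Cache as a stand-in for PreparedSetsCache:

    // Standalone sketch of the ownership model behind the header change:
    // the storage holds weak references, mutation tasks hold the shared_ptr,
    // so a cache is destroyed as soon as its last task finishes.
    #include <cassert>
    #include <map>
    #include <memory>

    struct Cache {};
    using CachePtr = std::shared_ptr<Cache>;

    int main()
    {
        // What StorageMergeTree now stores per mutation id (weak, non-owning).
        std::map<long long, CachePtr::weak_type> caches;

        {
            // Two overlapping tasks of mutation 42 share one cache.
            CachePtr task1 = std::make_shared<Cache>();
            caches[42] = task1;
            CachePtr task2 = caches[42].lock();  // the second task reuses it
            assert(task1 == task2);
            assert(!caches[42].expired());
        }   // both tasks finish here; the last shared_ptr is released

        // With no task running, the entry has expired and the memory for the
        // big prepared sets is already freed. A later task for the same
        // mutation has to rebuild them, which is the trade-off described in
        // the comment above the members.
        assert(caches[42].expired());
        return 0;
    }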