Review fixes

commit 3939498ef7
parent cbf0b98102
src/Interpreters/Cache/FileCache.cpp

@@ -476,7 +476,7 @@ KeyMetadata::iterator FileCache::addFileSegment(
         auto & stash_records = stash->records;
 
         stash_records.emplace(
-            stash_key, stash->queue->add(key, offset, 0, locked_key.getKeyMetadata(), *lock));
+            stash_key, stash->queue->add(locked_key.getKeyMetadata(), offset, 0, *lock));
 
         if (stash->queue->getElementsCount(*lock) > stash->queue->getElementsLimit())
             stash->queue->pop(*lock);
@@ -498,7 +498,7 @@ KeyMetadata::iterator FileCache::addFileSegment(
     PriorityIterator cache_it;
     if (state == FileSegment::State::DOWNLOADED)
     {
-        cache_it = main_priority->add(key, offset, size, locked_key.getKeyMetadata(), *lock);
+        cache_it = main_priority->add(locked_key.getKeyMetadata(), offset, size, *lock);
     }
 
     try
@@ -559,6 +559,30 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
         queue_size += 1;
 
     size_t removed_size = 0;
 
+    class EvictionCandidates final : public std::vector<FileSegmentMetadataPtr>
+    {
+    public:
+        explicit EvictionCandidates(KeyMetadataPtr key_metadata_) : key_metadata(key_metadata_) {}
+
+        KeyMetadata & getMetadata() { return *key_metadata; }
+
+        void add(FileSegmentMetadataPtr candidate)
+        {
+            candidate->removal_candidate = true;
+            push_back(candidate);
+        }
+
+        ~EvictionCandidates()
+        {
+            for (const auto & candidate : *this)
+                candidate->removal_candidate = false;
+        }
+
+    private:
+        KeyMetadataPtr key_metadata;
+    };
+
     std::unordered_map<Key, EvictionCandidates> to_delete;
 
     auto iterate_func = [&](LockedKey & locked_key, FileSegmentMetadataPtr segment_metadata)
@@ -654,7 +678,7 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
         /// Space reservation is incremental, so file_segment_metadata is created first (with state empty),
         /// and getQueueIterator() is assigned on first space reservation attempt.
         file_segment.setQueueIterator(main_priority->add(
-            file_segment.key(), file_segment.offset(), size, file_segment.getKeyMetadata(), cache_lock));
+            file_segment.getKeyMetadata(), file_segment.offset(), size, cache_lock));
     }
 
     if (query_context)
src/Interpreters/Cache/FileCache.h

@@ -208,30 +208,6 @@ private:
         const CacheGuard::Lock *);
 
     void cleanupThreadFunc();
 
-    class EvictionCandidates : public std::vector<FileSegmentMetadataPtr>
-    {
-    public:
-        explicit EvictionCandidates(KeyMetadataPtr key_metadata_) : key_metadata(key_metadata_) {}
-
-        KeyMetadata & getMetadata() { return *key_metadata; }
-
-        void add(FileSegmentMetadataPtr candidate)
-        {
-            candidate->removal_candidate = true;
-            push_back(candidate);
-        }
-
-        ~EvictionCandidates()
-        {
-            for (const auto & candidate : *this)
-                candidate->removal_candidate = false;
-        }
-
-    private:
-        KeyMetadataPtr key_metadata;
-    };
-
 };
 
 }
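
The EvictionCandidates helper is taken out of the header's private section and becomes a function-local class inside FileCache::tryReserve(). The part worth noting is the destructor: however the reservation attempt ends, the removal_candidate flags set by add() are cleared when the container goes out of scope. Below is a minimal, self-contained sketch of that RAII flag-reset idiom; the Segment type and field names are illustrative stand-ins, not the actual ClickHouse types.

    // RAII flag reset: add() marks a segment as a removal candidate, the
    // destructor unconditionally unmarks everything that was collected.
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Segment
    {
        bool removal_candidate = false;
    };
    using SegmentPtr = std::shared_ptr<Segment>;

    class Candidates : public std::vector<SegmentPtr>
    {
    public:
        void add(SegmentPtr s)
        {
            s->removal_candidate = true;
            push_back(s);
        }

        ~Candidates()
        {
            for (const auto & s : *this)
                s->removal_candidate = false;
        }
    };

    int main()
    {
        auto segment = std::make_shared<Segment>();
        {
            Candidates candidates;
            candidates.add(segment);
            assert(segment->removal_candidate);
        }
        /// Cleared by ~Candidates(), even if the eviction path had bailed out early.
        assert(!segment->removal_candidate);
        return 0;
    }

Keeping the class local to tryReserve() also makes it obvious that nothing else uses it, which is presumably why it no longer needs to live in the header.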
src/Interpreters/Cache/FileSegment.cpp

@@ -728,7 +728,7 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) const
         if (!it)
             return;
 
-        const auto entry = it->getEntry();
+        const auto & entry = it->getEntry();
         UNUSED(entry);
         chassert(entry.size == reserved_size);
         chassert(entry.key == key());
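
A small aside on the const auto to const auto & change above: const auto deduces a value and copies the returned Entry, while const auto & binds to the existing object. A short sketch of the difference, with an illustrative Entry type rather than the real one:

    #include <cassert>

    struct Entry { int size = 0; };

    Entry global_entry;
    const Entry & getEntry() { return global_entry; }

    int main()
    {
        const auto copy = getEntry();   /// deduces Entry, copies the object
        const auto & ref = getEntry();  /// deduces const Entry &, no copy

        global_entry.size = 7;
        assert(copy.size == 0);   /// the copy is frozen at the point it was made
        assert(ref.size == 7);    /// the reference observes the live object
        return 0;
    }

Avoiding the copy also matters once Entry::size becomes std::atomic in the next hunk: a copy would have to go through the new copy constructor and would only capture a snapshot of the value.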
src/Interpreters/Cache/IFileCachePriority.h

@@ -23,9 +23,12 @@ public:
         Entry(const Key & key_, size_t offset_, size_t size_, KeyMetadataPtr key_metadata_)
             : key(key_), offset(offset_), size(size_), key_metadata(key_metadata_) {}
 
+        Entry(const Entry & other)
+            : key(other.key), offset(other.offset), size(other.size.load()), hits(other.hits), key_metadata(other.key_metadata) {}
+
         const Key key;
         const size_t offset;
-        size_t size;
+        std::atomic<size_t> size;
         size_t hits = 0;
         const KeyMetadataPtr key_metadata;
     };
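
Entry::size becomes std::atomic<size_t>, and since std::atomic is neither copyable nor movable, the implicitly generated copy constructor disappears with it; that is why an explicit copy constructor reading the value through load() is added in the same hunk. A reduced sketch of the same situation (illustrative, not the actual class):

    #include <atomic>
    #include <cstddef>
    #include <iostream>

    struct Entry
    {
        explicit Entry(size_t size_) : size(size_) {}

        /// Without this, `Entry copy = entry;` would not compile:
        /// the std::atomic member deletes the implicit copy constructor.
        Entry(const Entry & other) : size(other.size.load()) {}

        std::atomic<size_t> size;
    };

    int main()
    {
        Entry entry{42};
        Entry copy = entry;                 /// snapshot taken via load()
        std::cout << copy.size << '\n';     /// prints 42
        return 0;
    }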
@@ -76,8 +79,7 @@ public:
     virtual size_t getElementsCount(const CacheGuard::Lock &) const = 0;
 
     virtual Iterator add(
-        const Key & key, size_t offset, size_t size,
-        KeyMetadataPtr key_metadata, const CacheGuard::Lock &) = 0;
+        KeyMetadataPtr key_metadata, size_t offset, size_t size, const CacheGuard::Lock &) = 0;
 
     virtual void pop(const CacheGuard::Lock &) = 0;
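
The add() signature drops the separate Key parameter and takes the KeyMetadataPtr first; the key is reachable through the metadata, so passing both duplicated information the two arguments could in principle disagree on. A rough sketch of the new call shape, with stand-in types rather than the ClickHouse declarations:

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <string>

    struct KeyMetadata
    {
        std::string key;
    };
    using KeyMetadataPtr = std::shared_ptr<KeyMetadata>;

    /// Old shape (sketch): add(key, offset, size, key_metadata, lock)
    /// New shape (sketch): add(key_metadata, offset, size, lock)
    void add(KeyMetadataPtr key_metadata, size_t offset, size_t size)
    {
        /// The key is derived from the metadata, the single source of truth.
        const auto & key = key_metadata->key;
        std::cout << "add " << key << " offset=" << offset << " size=" << size << '\n';
    }

    int main()
    {
        auto metadata = std::make_shared<KeyMetadata>();
        metadata->key = "deadbeef";
        add(metadata, 0, 1024);
        return 0;
    }

The call sites updated elsewhere in this commit (FileCache::addFileSegment, FileCache::tryReserve, FileCacheQueryLimit::QueryContext::add) all follow this reordering.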
src/Interpreters/Cache/LRUFileCachePriority.cpp

@@ -19,12 +19,12 @@ namespace ErrorCodes
 }
 
 IFileCachePriority::Iterator LRUFileCachePriority::add(
-    const Key & key,
+    KeyMetadataPtr key_metadata,
     size_t offset,
     size_t size,
-    KeyMetadataPtr key_metadata,
     const CacheGuard::Lock &)
 {
+    const auto & key = key_metadata->key;
 #ifndef NDEBUG
     for (const auto & entry : queue)
     {
src/Interpreters/Cache/LRUFileCachePriority.h

@@ -24,7 +24,7 @@ public:
 
     size_t getElementsCount(const CacheGuard::Lock &) const override { return queue.size(); }
 
-    Iterator add(const Key & key, size_t offset, size_t size, KeyMetadataPtr key_metadata, const CacheGuard::Lock &) override;
+    Iterator add(KeyMetadataPtr key_metadata, size_t offset, size_t size, const CacheGuard::Lock &) override;
 
     void pop(const CacheGuard::Lock &) override;
src/Interpreters/Cache/Metadata.cpp

@@ -91,7 +91,7 @@ std::string KeyMetadata::getFileSegmentPath(const FileSegment & file_segment)
 }
 
 
-struct CleanupQueue
+class CleanupQueue
 {
     friend struct CacheMetadata;
 public:
src/Interpreters/Cache/Metadata.h

@@ -8,7 +8,7 @@
 
 namespace DB
 {
-struct CleanupQueue;
+class CleanupQueue;
 using CleanupQueuePtr = std::shared_ptr<CleanupQueue>;
 
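
CleanupQueue is now defined with the class key, so the forward declaration in the header is changed to match. Mixing struct and class for the same type is legal C++, but compilers commonly warn about it (Clang/GCC -Wmismatched-tags, MSVC C4099), and keeping the tags consistent avoids the noise. A tiny sketch of the consistent declaration/definition pair, with an illustrative member:

    #include <memory>

    class CleanupQueue;    /// forward declaration: same class-key as the definition
    using CleanupQueuePtr = std::shared_ptr<CleanupQueue>;

    class CleanupQueue
    {
    public:
        void push() {}
    };

    int main()
    {
        CleanupQueuePtr queue = std::make_shared<CleanupQueue>();
        queue->push();
        return 0;
    }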
src/Interpreters/Cache/QueryLimit.cpp

@@ -72,7 +72,7 @@ void FileCacheQueryLimit::QueryContext::add(
     const auto offset = file_segment.offset();
 
     auto it = getPriority().add(
-        key, offset, file_segment.range().size(), file_segment.getKeyMetadata(), lock);
+        file_segment.getKeyMetadata(), offset, file_segment.range().size(), lock);
 
     auto [_, inserted] = records.emplace(FileCacheKeyAndOffset{key, offset}, it);
     if (!inserted)