Move assertions to a better place

kssenii 2023-03-29 15:27:00 +02:00
parent 4cf2862a0e
commit ba582dd74c
7 changed files with 38 additions and 52 deletions

View File

@@ -1131,48 +1131,12 @@ size_t FileCache::getFileSegmentsNum() const
 void FileCache::assertCacheCorrectness()
 {
+    auto lock = metadata.lock();
+    iterateCacheMetadata(lock, [&](KeyMetadata & key_metadata)
+    {
-    {
-        auto lock = metadata.lock();
-        iterateCacheMetadata(lock, [&](KeyMetadata & key_metadata)
-        {
-            for (auto & [offset, file_segment_metadata] : key_metadata)
-            {
-                file_segment_metadata.file_segment->assertCorrectness();
-                if (file_segment_metadata.size())
-                    chassert(file_segment_metadata.queue_iterator);
-            }
-        });
-    }
-    {
-        auto lock = cache_guard.lock();
-        LockedCachePriority queue(lock, *main_priority);
-        [[maybe_unused]] size_t total_size = 0;
-        using QueueEntry = IFileCachePriority::Entry;
-        using IterationResult = IFileCachePriority::IterationResult;
-        queue.iterate([&](const QueueEntry & entry) -> IterationResult
-        {
-            auto locked_key = lockKeyMetadata(entry.key, entry.getKeyMetadata());
-            auto * file_segment_metadata = locked_key->getKeyMetadata()->getByOffset(entry.offset);
-            if (file_segment_metadata->size() != entry.size)
-            {
-                throw Exception(
-                    ErrorCodes::LOGICAL_ERROR, "Expected {} == {} size ({})",
-                    file_segment_metadata->size(), entry.size, file_segment_metadata->file_segment->getInfoForLog());
-            }
-            total_size += entry.size;
-            return IterationResult::CONTINUE;
-        });
-        assert(queue.getSize() == total_size);
-        assert(queue.getSize() <= queue.getSizeLimit());
-        assert(queue.getElementsCount() <= queue.getElementsLimit());
-    }
+        for (auto & [offset, file_segment_metadata] : key_metadata)
+            file_segment_metadata.file_segment->assertCorrectness();
+    });
 }
 
 FileCache::QueryContextHolder::QueryContextHolder(
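
Read together, the added lines of this hunk collapse assertCacheCorrectness() to a single pass over the metadata; the queue-side size accounting is dropped here, and the per-segment checks move into FileSegment::assertCorrectness() (next file). A sketch of the resulting function, inferred from the added lines rather than copied from the repository:

void FileCache::assertCacheCorrectness()
{
    auto lock = metadata.lock();
    iterateCacheMetadata(lock, [&](KeyMetadata & key_metadata)
    {
        /// Each segment now validates itself, including its queue entry (see FileSegment.cpp below).
        for (auto & [offset, file_segment_metadata] : key_metadata)
            file_segment_metadata.file_segment->assertCorrectness();
    });
}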

View File

@@ -466,6 +466,7 @@ bool FileSegment::reserve(size_t size_to_reserve)
             /// It is made atomic because of getInfoForLog.
             reserved_size += size_to_reserve;
         }
+        chassert(assertCorrectness());
     }
 
     return reserved;
@@ -689,18 +690,36 @@ String FileSegment::stateToString(FileSegment::State state)
     UNREACHABLE();
 }
 
-void FileSegment::assertCorrectness() const
+bool FileSegment::assertCorrectness() const
 {
     auto lock = segment_guard.lock();
     assertCorrectnessUnlocked(lock);
+    return true;
 }
 
-void FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock & lock) const
+bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock & lock) const
 {
     auto current_downloader = getDownloaderUnlocked(lock);
     chassert(current_downloader.empty() == (download_state != FileSegment::State::DOWNLOADING));
     chassert(!current_downloader.empty() == (download_state == FileSegment::State::DOWNLOADING));
     chassert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(file_path) > 0);
+
+    auto locked_key = lockKeyMetadata(true);
+    const auto & file_segment_metadata = locked_key->getKeyMetadata()->tryGetByOffset(offset());
+
+    chassert(reserved_size == 0 || file_segment_metadata->queue_iterator);
+
+    if (file_segment_metadata->queue_iterator)
+    {
+        const auto & entry = *file_segment_metadata->queue_iterator;
+        if (isCompleted(false))
+            chassert(reserved_size == entry.getEntry().size);
+        else
+            /// We cannot check == here because reserved_size is not
+            /// guarded by any mutex, it is just an atomic.
+            chassert(reserved_size <= entry.getEntry().size);
+    }
+
+    return true;
 }
 
 void FileSegment::throwIfDetachedUnlocked(const FileSegmentGuard::Lock & lock) const
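
The switch from void to bool is what lets the reserve() hunk above write chassert(assertCorrectness()): the helper runs its own chasserts and returns true, so the call site sits inside an assertion macro and disappears entirely when assertions are compiled out. A minimal sketch of the idiom; chassert below is a stand-in defined via <cassert>, not ClickHouse's actual macro, and Segment is a toy type:

#include <cassert>
#include <cstddef>

/// Stand-in for chassert: forwards to assert(), so the argument expression
/// is not evaluated at all when NDEBUG is defined.
#define chassert(x) assert(x)

struct Segment
{
    std::size_t reserved_size = 0;
    std::size_t downloaded_size = 0;

    /// Returning bool (always true) lets callers write chassert(assertCorrectness()),
    /// so the whole consistency check compiles away together with the assertion.
    bool assertCorrectness() const
    {
        chassert(downloaded_size <= reserved_size);
        return true;
    }
};

int main()
{
    Segment s;
    s.reserved_size = 8;
    s.downloaded_size = 4;
    chassert(s.assertCorrectness());   /// not even invoked in a release (NDEBUG) build
}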

View File

@@ -222,7 +222,7 @@ public:
     /// Completed states: DOWNALODED, DETACHED.
     bool isCompleted(bool sync = false) const;
 
-    void assertCorrectness() const;
+    bool assertCorrectness() const;
 
     /**
      * ========== Methods for _only_ file segment's `downloader` ==================
@@ -282,7 +282,7 @@ private:
     void assertNotDetached() const;
     void assertNotDetachedUnlocked(const FileSegmentGuard::Lock &) const;
     void assertIsDownloaderUnlocked(const std::string & operation, const FileSegmentGuard::Lock &) const;
-    void assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) const;
+    bool assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) const;
 
     LockedKeyMetadataPtr lockKeyMetadata(bool assert_exists = true) const;
     KeyMetadataPtr getKeyMetadata() const;

View File

@@ -61,9 +61,10 @@ public:
     public:
         virtual ~IIterator() = default;
 
+        virtual const Entry & getEntry() const = 0;
+
+    protected:
-        virtual Entry & operator *() = 0;
-        virtual const Entry & operator *() const = 0;
+        virtual Entry & getEntry() = 0;
 
         virtual size_t use() = 0;

View File

@@ -48,8 +48,10 @@ public:
         LRUFileCachePriority * cache_priority_,
         LRUFileCachePriority::LRUQueueIterator queue_iter_);
 
-    Entry & operator *() override { return *queue_iter; }
-    const Entry & operator *() const override { return *queue_iter; }
+    const Entry & getEntry() const override { return *queue_iter; }
+
+protected:
+    Entry & getEntry() override { return *queue_iter; }
 
     size_t use() override;

View File

@@ -41,8 +41,8 @@ public:
     LockedCachePriorityIterator(const CacheGuard::Lock & lock_, IFileCachePriority::Iterator & iterator_)
         : lock(lock_), iterator(iterator_) {}
 
-    IFileCachePriority::Entry & operator *() { return **iterator; }
-    const IFileCachePriority::Entry & operator *() const { return **iterator; }
+    IFileCachePriority::Entry & getEntry() { return iterator->getEntry(); }
+    const IFileCachePriority::Entry & getEntry() const { return iterator->getEntry(); }
 
     size_t use() { return iterator->use(); }
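
The three header hunks above replace operator*() with named getEntry() accessors and push the mutable overload behind protected, so any code can read a queue entry but only the locked wrapper types can modify one. A compilable sketch of that access split, using toy names (Entry, IIterator, LRUIterator, LockedIterator stand in for the real cache priority types, and the friend declaration is an assumption about how the wrapper reaches the protected overload):

#include <cstddef>

struct Entry { std::size_t size = 0; };

class IIterator
{
public:
    virtual ~IIterator() = default;

    /// Read-only access stays public.
    virtual const Entry & getEntry() const = 0;

protected:
    /// Mutable access is protected; only trusted wrappers reach it.
    virtual Entry & getEntry() = 0;

    friend class LockedIterator;   /// assumed mechanism, analogous to LockedCachePriorityIterator
};

/// Toy concrete iterator holding its entry inline (the real LRU iterator
/// dereferences a queue iterator instead).
class LRUIterator : public IIterator
{
public:
    const Entry & getEntry() const override { return entry; }

protected:
    Entry & getEntry() override { return entry; }

private:
    Entry entry;
};

/// Stand-in for LockedCachePriorityIterator: constructed by code that holds the
/// cache lock, it forwards to the protected mutable accessor.
class LockedIterator
{
public:
    explicit LockedIterator(IIterator & iterator_) : iterator(iterator_) {}

    Entry & getEntry() { return iterator.getEntry(); }

private:
    IIterator & iterator;
};

int main()
{
    LRUIterator it;
    LockedIterator(it).getEntry().size = 42;            /// mutation goes through the wrapper
    const IIterator & read_only = it;
    return read_only.getEntry().size == 42 ? 0 : 1;     /// plain readers use the const overload
}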

View File

@@ -247,7 +247,7 @@ void LockedKeyMetadata::shrinkFileSegmentToDownloadedSize(
             file_segment->getInfoForLogUnlocked(segment_lock));
     }
 
-    auto & entry = *LockedCachePriorityIterator(cache_lock, file_segment_metadata->queue_iterator);
+    auto & entry = LockedCachePriorityIterator(cache_lock, file_segment_metadata->queue_iterator).getEntry();
     assert(file_segment->downloaded_size <= file_segment->reserved_size);
     assert(entry.size == file_segment->reserved_size);
     assert(entry.size >= file_segment->downloaded_size);