Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit: 9b38c9f7eb
Parent: 37d11cfb1c
Message: Fix
@@ -653,9 +653,7 @@ void FileSegment::assertNotDetached() const
 
 void FileSegment::assertDetachedStatus() const
 {
-    assert(
-        (download_state == State::EMPTY) || (download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
-        || (download_state == State::SKIP_CACHE));
+    assert(download_state == State::EMPTY || hasFinalizedState());
 }
 
 FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard<std::mutex> & /* cache_lock */)
@@ -675,18 +673,21 @@ FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std
     return snapshot;
 }
 
+bool FileSegment::hasFinalizedState() const
+{
+    return download_state == State::DOWNLOADED
+        || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION
+        || download_state == State::SKIP_CACHE;
+}
+
 void FileSegment::detach(std::lock_guard<std::mutex> & cache_lock, std::lock_guard<std::mutex> & segment_lock)
 {
     if (detached)
         return;
 
-    bool has_finalized_state = download_state == State::DOWNLOADED
-        || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION
-        || download_state == State::SKIP_CACHE;
-
     detached = true;
 
-    if (!has_finalized_state)
+    if (!hasFinalizedState())
     {
         completeUnlocked(cache_lock, segment_lock);
     }
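The two FileSegment hunks above fold the repeated three-way state comparison into a single helper, hasFinalizedState(): assertDetachedStatus() now asserts EMPTY || hasFinalizedState(), and detach() calls the helper instead of recomputing a local has_finalized_state flag. Below is a minimal, self-contained sketch of the same pattern; the State values mirror the ones visible in the diff, but the surrounding class and its completion logic are simplified, hypothetical stand-ins, not ClickHouse's actual FileSegment.

#include <cassert>
#include <iostream>

// Simplified stand-in for the states that appear in the diff.
enum class State
{
    EMPTY,
    DOWNLOADING,
    DOWNLOADED,
    PARTIALLY_DOWNLOADED_NO_CONTINUATION,
    SKIP_CACHE,
};

class FileSegmentSketch
{
public:
    explicit FileSegmentSketch(State state) : download_state(state) {}

    // The extracted helper: a segment is "finalized" when no further
    // download work can happen on it.
    bool hasFinalizedState() const
    {
        return download_state == State::DOWNLOADED
            || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION
            || download_state == State::SKIP_CACHE;
    }

    // Mirrors the simplified assertDetachedStatus(): a detached segment
    // must be either untouched (EMPTY) or already finalized.
    void assertDetachedStatus() const
    {
        assert(download_state == State::EMPTY || hasFinalizedState());
    }

    // Mirrors the shape of detach(): non-finalized segments need extra
    // completion work before they are detached.
    void detach()
    {
        if (detached)
            return;

        detached = true;

        if (!hasFinalizedState())
            std::cout << "would run completion logic here\n";
    }

private:
    State download_state;
    bool detached = false;
};

int main()
{
    FileSegmentSketch finished(State::DOWNLOADED);
    finished.detach();
    finished.assertDetachedStatus();   // passes: DOWNLOADED is a finalized state

    FileSegmentSketch in_flight(State::DOWNLOADING);
    in_flight.detach();                // takes the completion branch
    return 0;
}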
@@ -154,7 +154,8 @@ private:
     void assertCorrectnessImpl(std::lock_guard<std::mutex> & segment_lock) const;
     void assertNotDetached() const;
     void assertDetachedStatus() const;
+    bool hasFinalizedState() const;
 
     void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
     void setDownloadFailed(std::lock_guard<std::mutex> & segment_lock);
@@ -92,19 +92,10 @@ void WriteBufferFromS3::nextImpl()
         ? CurrentThread::get().getThreadGroup()
         : MainThreadStatus::getInstance().getThreadGroup();
 
-    ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());
-
-    last_part_size += offset();
-
-    /// Data size exceeds singlepart upload threshold, need to use multipart upload.
-    if (multipart_upload_id.empty() && last_part_size > s3_settings.max_single_part_upload_size)
-        createMultipartUpload();
-    if (!multipart_upload_id.empty() && last_part_size > upload_part_size)
-    {
     if (cacheEnabled())
     {
         auto cache_key = cache->hash(key);
 
         file_segments_holder.emplace(cache->setDownloading(cache_key, current_download_offset, size));
         current_download_offset += size;
 
@@ -130,6 +121,16 @@ void WriteBufferFromS3::nextImpl()
         }
     }
 
+    ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());
+
+    last_part_size += offset();
+
+    /// Data size exceeds singlepart upload threshold, need to use multipart upload.
+    if (multipart_upload_id.empty() && last_part_size > s3_settings.max_single_part_upload_size)
+        createMultipartUpload();
+
+    if (!multipart_upload_id.empty() && last_part_size > upload_part_size)
+    {
         writePart();
 
         allocateBuffer();
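The WriteBufferFromS3::nextImpl() hunks move the ProfileEvents/last_part_size accounting and the multipart-upload decision from before the cacheEnabled() branch to after it, so a chunk is handed to the file cache before the upload thresholds are re-checked and writePart() runs. The sketch below isolates that order of operations; the cache, buffer, and upload calls are hypothetical stubs rather than the real ClickHouse or AWS S3 APIs, and only the control flow follows the diff.

#include <cstddef>
#include <iostream>
#include <string>

// Hypothetical stand-in for the cache layer; not a real ClickHouse type.
struct FakeCache
{
    void registerBytes(std::size_t offset, std::size_t size)
    {
        std::cout << "cache: " << size << " bytes at offset " << offset << '\n';
    }
};

class WriteBufferSketch
{
public:
    WriteBufferSketch(std::size_t single_part_threshold, std::size_t part_size)
        : max_single_part_upload_size(single_part_threshold), upload_part_size(part_size) {}

    // Order of operations after the fix: cache first, then accounting,
    // then the multipart-upload decision.
    void next(std::size_t bytes)
    {
        if (cache_enabled)
        {
            cache.registerBytes(current_download_offset, bytes);
            current_download_offset += bytes;
        }

        last_part_size += bytes;  // accounting happens only after the cache write

        // Data size exceeds the single-part threshold: switch to multipart upload.
        if (multipart_upload_id.empty() && last_part_size > max_single_part_upload_size)
            multipart_upload_id = "fake-upload-id";  // stands in for createMultipartUpload()

        if (!multipart_upload_id.empty() && last_part_size > upload_part_size)
        {
            std::cout << "would call writePart() and allocate a fresh buffer\n";
            last_part_size = 0;
        }
    }

private:
    FakeCache cache;
    bool cache_enabled = true;
    std::size_t current_download_offset = 0;
    std::size_t last_part_size = 0;
    std::string multipart_upload_id;
    std::size_t max_single_part_upload_size;
    std::size_t upload_part_size;
};

int main()
{
    WriteBufferSketch buffer(/*single_part_threshold=*/64, /*part_size=*/32);
    for (int i = 0; i < 4; ++i)
        buffer.next(40);  // each call caches the data, then re-checks the upload thresholds
    return 0;
}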