Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected (#51636)

Co-authored-by: Nikita Mikhaylov <mikhaylovnikitka@gmail.com>
This commit is contained in:
Author: Michael Kolupaev — 2023-06-30 02:52:03 -07:00, committed by GitHub
parent 15fb4880b3
commit 2b55734ccf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -135,6 +135,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl()
if (!index_granularity_info.mark_type.adaptive)
{
/// Read directly to marks.
chassert(expected_uncompressed_size == plain_marks.size() * sizeof(MarkInCompressedFile));
reader->readStrict(reinterpret_cast<char *>(plain_marks.data()), expected_uncompressed_size);
if (!reader->eof())
@ -148,23 +149,25 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl()
}
else
{
size_t i = 0;
size_t granularity;
while (!reader->eof())
{
reader->readStrict(
reinterpret_cast<char *>(plain_marks.data() + i * columns_in_mark), columns_in_mark * sizeof(MarkInCompressedFile));
readIntBinary(granularity, *reader);
++i;
}
if (i * mark_size != expected_uncompressed_size)
for (size_t i = 0; i < marks_count; ++i)
{
if (reader->eof())
throw Exception(
ErrorCodes::CANNOT_READ_ALL_DATA,
"Cannot read all marks from file {}, marks expected {} (bytes size {}), marks read {} (bytes size {})",
mrk_path, marks_count, expected_uncompressed_size, i, reader->count());
size_t granularity;
reader->readStrict(
reinterpret_cast<char *>(plain_marks.data() + i * columns_in_mark), columns_in_mark * sizeof(MarkInCompressedFile));
readIntBinary(granularity, *reader);
}
if (!reader->eof())
throw Exception(
ErrorCodes::CANNOT_READ_ALL_DATA,
"Too many marks in file {}, marks expected {} (bytes size {})",
mrk_path, marks_count, expected_uncompressed_size);
}
auto res = std::make_shared<MarksInCompressedFile>(plain_marks);