ClickHouse/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp

#include <cerrno>
#include <ctime>
#include <optional>

#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
#include <Common/Exception.h>
#include <Common/CurrentMetrics.h>
#include <Common/Throttler.h>
#include <Common/filesystemHelpers.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>

#include <IO/AsynchronousReadBufferFromFileDescriptor.h>
#include <IO/WriteHelpers.h>


namespace ProfileEvents
{
    extern const Event AsynchronousReadWaitMicroseconds;
    extern const Event SynchronousReadWaitMicroseconds;
    extern const Event LocalReadThrottlerBytes;
    extern const Event LocalReadThrottlerSleepMicroseconds;
}

namespace CurrentMetrics
{
    extern const Metric AsynchronousReadWait;
}
namespace DB
{

namespace ErrorCodes
{
    extern const int ARGUMENT_OUT_OF_BOUND;
    extern const int LOGICAL_ERROR;
}


std::string AsynchronousReadBufferFromFileDescriptor::getFileName() const
{
    return "(fd = " + toString(fd) + ")";
}
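
/// Build a read request for the region starting at file_offset_of_buffer_end and submit it to the
/// asynchronous reader. The request priority is the buffer's base priority plus the caller's priority;
/// bytes_to_ignore (set by seek() below) is forwarded with the request and then reset.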
std::future<IAsynchronousReader::Result> AsynchronousReadBufferFromFileDescriptor::asyncReadInto(char * data, size_t size, Priority priority)
{
    IAsynchronousReader::Request request;
    request.descriptor = std::make_shared<IAsynchronousReader::LocalFileDescriptor>(fd);
    request.buf = data;
    request.size = size;
    request.offset = file_offset_of_buffer_end;
    request.priority = Priority{base_priority.value + priority.value};
    request.ignore = bytes_to_ignore;
    bytes_to_ignore = 0;

    /// This is a workaround for a read-past-EOF bug in the Linux kernel with pread():
    /// with direct I/O and an unaligned offset past EOF, pread() may return EINVAL instead of 0
    /// (fixed in Linux 5.10), so such reads are not submitted at all.
    if (file_size.has_value() && file_offset_of_buffer_end >= *file_size)
    {
        return std::async(std::launch::deferred, [] { return IAsynchronousReader::Result{.size = 0, .offset = 0}; });
    }

    return reader.submit(request);
}
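
/// Start an asynchronous read into the prefetch buffer, unless a prefetch is already in flight.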
void AsynchronousReadBufferFromFileDescriptor::prefetch(Priority priority)
{
    if (prefetch_future.valid())
        return;

    /// Will request the same amount of data that is read in nextImpl.
    prefetch_buffer.resize(internal_buffer.size());
    prefetch_future = asyncReadInto(prefetch_buffer.data(), prefetch_buffer.size(), priority);
}
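
/// Obtain the next block of data: take the result of an in-flight prefetch if there is one,
/// otherwise fall back to a synchronous read through the same asynchronous reader.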
bool AsynchronousReadBufferFromFileDescriptor::nextImpl()
{
    /// If internal_buffer is empty, then read() cannot be distinguished from EOF.
    assert(!internal_buffer.empty());

    IAsynchronousReader::Result result;
    if (prefetch_future.valid())
    {
        /// Read request already in flight. Wait for its completion.
        CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};
        ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::AsynchronousReadWaitMicroseconds);

        result = prefetch_future.get();
        prefetch_future = {};

        if (result.size - result.offset > 0)
            prefetch_buffer.swap(memory);
    }
    else
    {
        /// No pending request. Do synchronous read.
        ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::SynchronousReadWaitMicroseconds);
        result = asyncReadInto(memory.data(), memory.size(), DEFAULT_PREFETCH_PRIORITY).get();
    }

    chassert(result.size >= result.offset);
    size_t bytes_read = result.size - result.offset;
    file_offset_of_buffer_end += result.size;

    if (throttler)
        throttler->add(result.size, ProfileEvents::LocalReadThrottlerBytes, ProfileEvents::LocalReadThrottlerSleepMicroseconds);

    if (bytes_read)
    {
        /// Adjust the working buffer so that it ignores `offset` bytes.
        internal_buffer = Buffer(memory.data(), memory.data() + memory.size());
        working_buffer = Buffer(memory.data() + result.offset, memory.data() + result.size);
        pos = working_buffer.begin();
    }

    return bytes_read;
}
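
/// Wait for a prefetch that is still in flight and discard its result. Called from the destructor.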
void AsynchronousReadBufferFromFileDescriptor::finalize()
{
    if (prefetch_future.valid())
    {
        prefetch_future.wait();
        prefetch_future = {};
    }
}
AsynchronousReadBufferFromFileDescriptor::AsynchronousReadBufferFromFileDescriptor(
    IAsynchronousReader & reader_,
    Priority priority_,
    int fd_,
    size_t buf_size,
    char * existing_memory,
    size_t alignment,
    std::optional<size_t> file_size_,
    ThrottlerPtr throttler_)
    : ReadBufferFromFileBase(buf_size, existing_memory, alignment, file_size_)
    , reader(reader_)
    , base_priority(priority_)
    , required_alignment(alignment)
    , fd(fd_)
    , throttler(throttler_)
{
    if (required_alignment > buf_size)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Too large alignment. Cannot have required_alignment greater than buf_size: {} > {}. It is a bug",
            required_alignment,
            buf_size);

    prefetch_buffer.alignment = alignment;
}

AsynchronousReadBufferFromFileDescriptor::~AsynchronousReadBufferFromFileDescriptor()
{
    finalize();
}
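
/// When the target position is outside the buffer, the file offset is moved to the position
/// rounded down to required_alignment, and the remainder is recorded in bytes_to_ignore so that
/// the next read skips it (see the `ignore` field of the request in asyncReadInto).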
/// If 'offset' is small enough to stay in buffer after seek, then true seek in file does not happen.
off_t AsynchronousReadBufferFromFileDescriptor::seek(off_t offset, int whence)
{
    size_t new_pos;
    if (whence == SEEK_SET)
    {
        assert(offset >= 0);
        new_pos = offset;
    }
    else if (whence == SEEK_CUR)
    {
        new_pos = file_offset_of_buffer_end - (working_buffer.end() - pos) + offset;
    }
    else
    {
        throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "ReadBufferFromFileDescriptor::seek expects SEEK_SET or SEEK_CUR as whence");
    }

    /// Position is unchanged.
    if (new_pos + (working_buffer.end() - pos) == file_offset_of_buffer_end)
        return new_pos;

    while (true)
    {
        if (file_offset_of_buffer_end - working_buffer.size() <= new_pos && new_pos <= file_offset_of_buffer_end)
        {
            /// Position is still inside the buffer.
            /// Probably it is at the end of the buffer - then we will load data on the following 'next' call.

            pos = working_buffer.end() - file_offset_of_buffer_end + new_pos;
            assert(pos >= working_buffer.begin());
            assert(pos <= working_buffer.end());

            return new_pos;
        }
        else if (prefetch_future.valid())
        {
            /// Read from prefetch buffer and recheck if the new position is valid inside.
            if (nextImpl())
                continue;
        }

        break;
    }

    assert(!prefetch_future.valid());

    /// Position is out of the buffer, we need to do a real seek.
    off_t seek_pos = required_alignment > 1
        ? new_pos / required_alignment * required_alignment
        : new_pos;

    /// First reset the buffer so the next read will fetch new data to the buffer.
    resetWorkingBuffer();

    /// Just update the info about the next position in file.
    file_offset_of_buffer_end = seek_pos;
    bytes_to_ignore = new_pos - seek_pos;

    if (bytes_to_ignore >= internal_buffer.size())
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Logical error in AsynchronousReadBufferFromFileDescriptor, bytes_to_ignore ({}"
            ") >= internal_buffer.size() ({})", bytes_to_ignore, internal_buffer.size());

    return seek_pos;
}
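
/// Discard buffered data and any in-flight prefetch and move the position back to the beginning of the file.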
void AsynchronousReadBufferFromFileDescriptor::rewind()
{
    if (prefetch_future.valid())
    {
        prefetch_future.wait();
        prefetch_future = {};
    }

    /// Clearing the buffer with existing data. New data will be read on subsequent call to 'next'.
    working_buffer.resize(0);
    pos = working_buffer.begin();
    file_offset_of_buffer_end = 0;
}

size_t AsynchronousReadBufferFromFileDescriptor::getFileSize()
{
    return getSizeFromFileDescriptor(fd, getFileName());
}

}