ClickHouse/dbms/Processors/Sources/SourceWithProgress.cpp

#include <Processors/Sources/SourceWithProgress.h>
#include <Interpreters/ProcessList.h>
#include <Access/EnabledQuota.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int TOO_MANY_ROWS;
    extern const int TOO_MANY_BYTES;
}

void SourceWithProgress::work()
{
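    /// Stop producing data once the query has exceeded its time limit
    /// (checkTimeLimit() may also throw instead, depending on timeout_overflow_mode).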
    if (!limits.speed_limits.checkTimeLimit(total_stopwatch.elapsed(), limits.timeout_overflow_mode))
        cancel();
    else
    {
        was_progress_called = false;

        ISourceWithProgress::work();
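
        /// If the source did not report progress itself during work(), account for the produced
        /// chunk here so that row/byte counters, limits and quotas still see the data.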
        if (!was_progress_called && has_input)
            progress({ current_chunk.chunk.getNumRows(), current_chunk.chunk.bytes() });
    }
}

/// Aggregated copy-paste from IBlockInputStream::progressImpl.
/// Most of this must be done in PipelineExecutor outside. Now it's done for compatibility with IBlockInputStream.
void SourceWithProgress::progress(const Progress & value)
{
    was_progress_called = true;
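
    /// The approximate total number of rows is reported only once and then reset,
    /// so it is added to the query's total_rows_to_read a single time per source.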
    if (total_rows_approx != 0)
    {
        Progress total_rows_progress = {0, 0, total_rows_approx};

        if (progress_callback)
            progress_callback(total_rows_progress);

        if (process_list_elem)
            process_list_elem->updateProgressIn(total_rows_progress);

        total_rows_approx = 0;
    }

    if (progress_callback)
        progress_callback(value);

    if (process_list_elem)
    {
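        /// updateProgressIn() returns false if the query has been cancelled (killed),
        /// so stop reading from this source in that case.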
        if (!process_list_elem->updateProgressIn(value))
            cancel();

        /// The total amount of data processed or intended for processing in all sources, possibly on remote servers.
        ProgressValues progress = process_list_elem->getProgressIn();
        size_t total_rows_estimate = std::max(progress.read_rows, progress.total_rows_to_read);
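
        /// Note: total_rows_to_read is only an estimate and may not be set at all, so the value
        /// used for the check below is never less than the number of rows already read.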

        /// Check the restrictions on the
        ///  * amount of data to read
        ///  * speed of the query
        ///  * quota on the amount of data to read
        /// NOTE: Maybe it makes sense to have them checked directly in ProcessList?
        if (limits.mode == LimitsMode::LIMITS_TOTAL)
        {
            if (!limits.size_limits.check(total_rows_estimate, progress.read_bytes, "rows to read",
                                          ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES))
                cancel();
        }

        size_t total_rows = progress.total_rows_to_read;

        constexpr UInt64 profile_events_update_period_microseconds = 10 * 1000; // 10 milliseconds
        UInt64 total_elapsed_microseconds = total_stopwatch.elapsedMicroseconds();

        if (last_profile_events_update_time + profile_events_update_period_microseconds < total_elapsed_microseconds)
        {
            /// Should be done in PipelineExecutor.
            /// It is here for compatibility with IBlockInputStream.
            CurrentThread::updatePerformanceCounters();
            last_profile_events_update_time = total_elapsed_microseconds;
        }

        /// Should be done in PipelineExecutor.
        /// It is here for compatibility with IBlockInputStream.
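        /// throttle() checks the minimum and maximum read speed allowed for the query
        /// and either sleeps (if reading is too fast) or throws (if it is too slow).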
        limits.speed_limits.throttle(progress.read_rows, progress.read_bytes, total_rows, total_elapsed_microseconds);

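        /// Account the rows and bytes read by this progress increment against the user's quota.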
        if (quota && limits.mode == LimitsMode::LIMITS_TOTAL)
            quota->used({Quota::READ_ROWS, value.read_rows}, {Quota::READ_BYTES, value.read_bytes});
    }
}

}