/// ClickHouse/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h
#pragma once

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <mutex>
#include <set>

#include <boost/circular_buffer.hpp>

#include <common/logger_useful.h>
#include <common/shared_ptr_helper.h>

#include <Common/Stopwatch.h>
#include <Common/ThreadPool.h>
#include <Storages/MergeTree/IExecutableTask.h>
namespace DB
{
2021-09-03 22:15:20 +00:00
/**
* Executor for a background MergeTree related operations such as merges, mutations, fetches an so on.
* It can execute only successors of ExecutableTask interface.
2021-09-08 12:42:03 +00:00
* Which is a self-written coroutine. It suspends, when returns true from executeStep() method.
2021-09-03 22:15:20 +00:00
*
* There are two queues of a tasks: pending (main queue for all the tasks) and active (currently executing).
2021-09-08 00:21:21 +00:00
* Pending queue is needed since the number of tasks will be more than thread to execute.
* Pending tasks are tasks that successfully scheduled to an executor or tasks that have some extra steps to execute.
2021-09-03 22:15:20 +00:00
* There is an invariant, that task may occur only in one of these queue. It can occur in both queues only in critical sections.
*
2021-09-08 00:21:21 +00:00
* Pending: Active:
*
* |s| |s| |s| |s| |s| |s| |s| |s| |s| |s| |s|
* |s| |s| |s| |s| |s| |s| |s| |s| |s| |s|
* |s| |s| |s| |s| |s| |s| |s|
* |s| |s| |s| |s|
* |s| |s|
* |s|
*
* Each task is simply a sequence of steps. Heavier tasks have longer sequences.
* When a step of a task is executed, we move tasks to pending queue. And take another from the queue's head.
* With these architecture all small merges / mutations will be executed faster, than bigger ones.
*
* We use boost::circular_buffer as a container for queues not to do any allocations.
2021-09-03 22:15:20 +00:00
*
2021-09-06 12:01:16 +00:00
* Another nuisance that we faces with is than background operations always interact with an associated Storage.
2021-09-03 22:15:20 +00:00
* So, when a Storage want to shutdown, it must wait until all its background operaions are finished.
*/
2021-08-30 19:37:03 +00:00
class MergeTreeBackgroundExecutor : public shared_ptr_helper<MergeTreeBackgroundExecutor>
{
public:
    /// The kind of background operations a particular executor instance is dedicated to.
    enum class Type
    {
        MERGE_MUTATE,
        FETCH,
        MOVE
    };

    MergeTreeBackgroundExecutor(
        Type type_,
        size_t threads_count_,
        size_t max_tasks_count_,
        CurrentMetrics::Metric metric_)
        : type(type_)
        , threads_count(threads_count_)
        , max_tasks_count(max_tasks_count_)
        , metric(metric_)
    {
        name = toString(type);

        /// Both buffers get the full capacity: a task lives in exactly one of them,
        /// so together they can never hold more than max_tasks_count tasks.
        pending.set_capacity(max_tasks_count);
        active.set_capacity(max_tasks_count);

        /// std::max<size_t> instead of std::max(1UL, ...): the 1UL literal is not size_t
        /// on LLP64 platforms, which makes template argument deduction for std::max fail there.
        pool.setMaxThreads(std::max<size_t>(1, threads_count));
        pool.setMaxFreeThreads(std::max<size_t>(1, threads_count));
        pool.setQueueSize(std::max<size_t>(1, threads_count));

        /// NOTE(review): if scheduleOrThrowOnError throws part-way, already started worker
        /// threads reference a partially constructed object and the destructor (and wait())
        /// will not run — confirm this cannot happen with the queue size chosen above.
        for (size_t number = 0; number < threads_count; ++number)
            pool.scheduleOrThrowOnError([this] { threadFunction(); });
    }

    ~MergeTreeBackgroundExecutor()
    {
        wait();
    }

    /// Tries to put the task into the pending queue. Presumably returns false when the
    /// executor is full or shut down — see the definition in the .cpp for the exact contract.
    bool trySchedule(ExecutableTaskPtr task);

    /// Removes (and waits for) all tasks that belong to the storage with the given id.
    void removeTasksCorrespondingToStorage(StorageID id);

    /// Initiates shutdown and waits until all worker threads are finished.
    void wait();

    /// Number of tasks currently being executed.
    size_t activeCount()
    {
        std::lock_guard lock(mutex);
        return active.size();
    }

    /// Number of tasks waiting for execution (or having extra steps to execute).
    size_t pendingCount()
    {
        std::lock_guard lock(mutex);
        return pending.size();
    }

private:
    static String toString(Type type);

    Type type;
    String name;
    size_t threads_count{0};
    size_t max_tasks_count{0};
    CurrentMetrics::Metric metric;

    /**
     * Has RAII class to determine how many tasks are waiting for the execution and executing at the moment.
     * Also has some flags and primitives to wait for the current task to be executed.
     */
    struct TaskRuntimeData
    {
        TaskRuntimeData(ExecutableTaskPtr && task_, CurrentMetrics::Metric metric_)
            : task(std::move(task_))
            , increment(std::move(metric_))
        {}

        ExecutableTaskPtr task;
        CurrentMetrics::Increment increment;
        bool is_currently_deleting{false};
        /// Actually autoreset=false is needed only for a unit test
        /// where multiple threads could remove tasks corresponding to the same storage.
        /// This scenario is not possible in reality.
        Poco::Event is_done{/*autoreset=*/false};
    };

    using TaskRuntimeDataPtr = std::shared_ptr<TaskRuntimeData>;

    /// Executes one step of the item's task; defined in the .cpp.
    void routine(TaskRuntimeDataPtr item);

    /// Worker thread loop; defined in the .cpp.
    void threadFunction();

    /// Initially both buffers are empty; the real capacity is set in the constructor.
    boost::circular_buffer<TaskRuntimeDataPtr> pending{0};
    boost::circular_buffer<TaskRuntimeDataPtr> active{0};

    std::mutex mutex;
    std::condition_variable has_tasks;
    std::atomic_bool shutdown{false};
    ThreadPool pool;
};
}