/// ClickHouse/src/Databases/DatabaseReplicatedWorker.cpp

#include <Databases/DatabaseReplicatedWorker.h>
#include <Databases/DatabaseReplicated.h>
#include <Interpreters/DDLTask.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int DATABASE_REPLICATION_FAILED;
    extern const int NOT_A_LEADER;
    extern const int UNFINISHED;
}

DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db, const Context & context_)
    : DDLWorker(/* pool_size */ 1, db->zookeeper_path + "/log", context_, nullptr, {}, fmt::format("DDLWorker({})", db->getDatabaseName()))
    , database(db)
{
    /// Pool size must be 1 to avoid reordering of log entries.
    /// TODO Make a dependency graph of DDL queries. It would allow executing independent entries in parallel.
    /// We also need a similar graph to load tables in topological order on server startup.
}
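
/// Note: the queue path passed to DDLWorker above (and stored as queue_dir) is the database's own
/// replicated log, <zookeeper_path>/log, shared by all replicas of this database. This differs from the
/// server-wide ON CLUSTER DDL queue (typically /clickhouse/task_queue/ddl) that the generic DDLWorker uses.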

void DatabaseReplicatedDDLWorker::initializeMainThread()
{
    while (!stop_flag)
    {
        try
        {
            auto zookeeper = getAndSetZooKeeper();
            initializeReplication();
            initialized = true;
            return;
        }
        catch (...)
        {
            tryLogCurrentException(log, fmt::format("Error on initialization of {}", database->getDatabaseName()));
            sleepForSeconds(5);
        }
    }
}

void DatabaseReplicatedDDLWorker::shutdown()
{
    DDLWorker::shutdown();
    wait_current_task_change.notify_all();
}
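
/// The notify_all() above wakes any initiator blocked in tryEnqueueAndExecuteEntry(), so its wait
/// predicate (which checks stop_flag) can fail fast instead of waiting out the full timeout.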

void DatabaseReplicatedDDLWorker::initializeReplication()
{
    /// Check if we need to recover the replica.
    /// Invariant: the replica is lost if its log_ptr value is less than max_log_ptr - logs_to_keep.

    String log_ptr_str = current_zookeeper->get(database->replica_path + "/log_ptr");
    UInt32 our_log_ptr = parse<UInt32>(log_ptr_str);
    UInt32 max_log_ptr = parse<UInt32>(current_zookeeper->get(database->zookeeper_path + "/max_log_ptr"));
    logs_to_keep = parse<UInt32>(current_zookeeper->get(database->zookeeper_path + "/logs_to_keep"));

    if (our_log_ptr == 0 || our_log_ptr + logs_to_keep < max_log_ptr)
        database->recoverLostReplica(current_zookeeper, our_log_ptr, max_log_ptr);
    else
        last_skipped_entry_name.emplace(log_ptr_str);
}
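
/// A worked example of the invariant above (numbers are illustrative): with logs_to_keep = 1000 and
/// max_log_ptr = 1500, a replica whose log_ptr is 400 satisfies 400 + 1000 < 1500, so some entries it is
/// missing may already be purged from the log; it cannot catch up entry by entry and is recovered
/// wholesale via recoverLostReplica(...) instead.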

String DatabaseReplicatedDDLWorker::enqueueQuery(DDLLogEntry & entry)
{
    auto zookeeper = getAndSetZooKeeper();
    const String query_path_prefix = queue_dir + "/query-";

    /// We cannot create a sequential node and its ephemeral child in a single transaction,
    /// so we allocate the sequential number in another way.
    String counter_prefix = database->zookeeper_path + "/counter/cnt-";
    String counter_path = zookeeper->create(counter_prefix, "", zkutil::CreateMode::EphemeralSequential);
    String node_path = query_path_prefix + counter_path.substr(counter_prefix.size());

    Coordination::Requests ops;
    /// The query is not committed yet, but we have to write it into the log to avoid reordering.
    ops.emplace_back(zkutil::makeCreateRequest(node_path, entry.toString(), zkutil::CreateMode::Persistent));
    /// '/try' will be replaced with '/committed' or removed due to an expired session or other error.
    ops.emplace_back(zkutil::makeCreateRequest(node_path + "/try", database->getFullReplicaName(), zkutil::CreateMode::Ephemeral));
    /// We don't need the counter node anymore.
    ops.emplace_back(zkutil::makeRemoveRequest(counter_path, -1));
    /// Create status dirs.
    ops.emplace_back(zkutil::makeCreateRequest(node_path + "/active", "", zkutil::CreateMode::Persistent));
    ops.emplace_back(zkutil::makeCreateRequest(node_path + "/finished", "", zkutil::CreateMode::Persistent));
    zookeeper->multi(ops);

    return node_path;
}
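
/// For reference, the znode layout produced by enqueueQuery() for one entry (sequence number is illustrative):
///     <zookeeper_path>/counter/cnt-0000000042          <- ephemeral sequential node; only donates its
///                                                         sequence number and is removed in the same multi-op
///     <zookeeper_path>/log/query-0000000042            <- the log entry itself (persistent)
///     <zookeeper_path>/log/query-0000000042/try        <- ephemeral, holds the initiator's replica name
///     <zookeeper_path>/log/query-0000000042/active     <- parent for hosts currently executing the entry
///     <zookeeper_path>/log/query-0000000042/finished   <- parent for per-host execution results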

String DatabaseReplicatedDDLWorker::tryEnqueueAndExecuteEntry(DDLLogEntry & entry, const Context & /*query_context*/)
{
    /// NOTE Possibly it would be better to execute the initial query on the most up-to-date node,
    /// but it requires more complex logic around the /try node.

    auto zookeeper = getAndSetZooKeeper();
    UInt32 our_log_ptr = parse<UInt32>(zookeeper->get(database->replica_path + "/log_ptr"));
    UInt32 max_log_ptr = parse<UInt32>(zookeeper->get(database->zookeeper_path + "/max_log_ptr"));
    assert(our_log_ptr <= max_log_ptr);

    constexpr UInt32 max_replication_lag = 16;
    if (max_replication_lag < max_log_ptr - our_log_ptr)
        throw Exception(ErrorCodes::NOT_A_LEADER, "Cannot enqueue query on this replica, "
                        "because it has replication lag of {} queries. Try another replica.", max_log_ptr - our_log_ptr);

    String entry_path = enqueueQuery(entry);
    auto try_node = zkutil::EphemeralNodeHolder::existing(entry_path + "/try", *zookeeper);
    String entry_name = entry_path.substr(entry_path.rfind('/') + 1);
    auto task = std::make_unique<DatabaseReplicatedTask>(entry_name, entry_path, database);
    task->entry = entry;
    task->parseQueryFromEntry(context);
    assert(!task->entry.query.empty());
    assert(!zookeeper->exists(task->getFinishedNodePath()));
    task->is_initial_query = true;

    LOG_DEBUG(log, "Waiting for worker thread to process all entries before {}", entry_name);
    UInt64 timeout = 600;
    {
        std::unique_lock lock{mutex};
        bool processed = wait_current_task_change.wait_for(lock, std::chrono::seconds(timeout), [&]()
        {
            assert(zookeeper->expired() || current_task <= entry_name);
            return zookeeper->expired() || current_task == entry_name || stop_flag;
        });

        if (!processed)
            throw Exception(ErrorCodes::UNFINISHED, "Timeout: Cannot enqueue query on this replica, "
                            "most likely because the replica is busy with previous queue entries");
    }

    if (zookeeper->expired() || stop_flag)
        throw Exception(ErrorCodes::DATABASE_REPLICATION_FAILED, "ZooKeeper session expired or replication stopped, try again");

    processTask(*task);

    if (!task->was_executed)
    {
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Entry {} was executed, but was not committed: code {}: {}",
                        entry_name, task->execution_status.code, task->execution_status.message);
    }

    try_node->reset();

    return entry_path;
}
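
/// The handshake between the initiator and the worker thread: the worker advances current_task and signals
/// wait_current_task_change from initAndCheckTask() below; the initiator waits until current_task reaches
/// its own entry_name, and only then executes the entry itself via processTask(). This keeps the single
/// worker thread as the only consumer of all preceding entries, so log order is preserved.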

DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper)
{
    {
        std::lock_guard lock{mutex};
        if (current_task < entry_name)
        {
            current_task = entry_name;
            wait_current_task_change.notify_all();
        }
    }

    UInt32 our_log_ptr = parse<UInt32>(current_zookeeper->get(database->replica_path + "/log_ptr"));
    UInt32 entry_num = DatabaseReplicatedTask::getLogEntryNumber(entry_name);

    if (entry_num <= our_log_ptr)
    {
        out_reason = fmt::format("Task {} already executed according to log pointer {}", entry_name, our_log_ptr);
        return {};
    }

    String entry_path = queue_dir + "/" + entry_name;
    auto task = std::make_unique<DatabaseReplicatedTask>(entry_name, entry_path, database);

    String initiator_name;
    zkutil::EventPtr wait_committed_or_failed = std::make_shared<Poco::Event>();

    String try_node_path = entry_path + "/try";
    if (zookeeper->tryGet(try_node_path, initiator_name, nullptr, wait_committed_or_failed))
    {
        task->is_initial_query = initiator_name == task->host_id_str;

        /// The query is not committed yet. We cannot just skip it and execute the next one, because reordering may break replication.
        LOG_TRACE(log, "Waiting for initiator {} to commit or rollback entry {}", initiator_name, entry_path);
        constexpr size_t wait_time_ms = 1000;
        constexpr size_t max_iterations = 3600;
        size_t iteration = 0;

        while (!wait_committed_or_failed->tryWait(wait_time_ms))
        {
            if (stop_flag)
            {
                /// We cannot return the task for processing, and we cannot return nullptr either,
                /// because nullptr means "the task should not be executed".
                /// We can only exit by exception.
                throw Exception(ErrorCodes::UNFINISHED, "Replication was stopped");
            }

            if (max_iterations <= ++iteration)
            {
                /// What can we do if the initiator hangs for some reason? It seems we can remove the /try node.
                /// The initiator will then fail to commit the entry to ZK (including ops for replicated tables), because /try does not exist.
                /// But it's questionable.

                /// We use tryRemove(...) because multiple hosts (including the initiator) may try to do it concurrently.
                auto code = zookeeper->tryRemove(try_node_path);
                if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
                    throw Coordination::Exception(code, try_node_path);

                if (!zookeeper->exists(entry_path + "/committed"))
                {
                    out_reason = fmt::format("Entry {} was forcefully cancelled due to timeout", entry_name);
                    return {};
                }
            }
        }
    }

    if (!zookeeper->exists(entry_path + "/committed"))
    {
        out_reason = fmt::format("Entry {} hasn't been committed", entry_name);
        return {};
    }

    if (task->is_initial_query)
    {
        assert(!zookeeper->exists(entry_path + "/try"));
        assert(zookeeper->exists(entry_path + "/committed") == (zookeeper->get(task->getFinishedNodePath()) == ExecutionStatus(0).serializeText()));
        out_reason = fmt::format("Entry {} has been executed as initial query", entry_name);
        return {};
    }

    String node_data;
    if (!zookeeper->tryGet(entry_path, node_data))
    {
        LOG_ERROR(log, "Cannot get log entry {}", entry_path);
        throw Exception(ErrorCodes::LOGICAL_ERROR, "should be unreachable");
    }

    task->entry.parse(node_data);

    if (task->entry.query.empty())
    {
        out_reason = fmt::format("Entry {} is a dummy task", entry_name);
        return {};
    }

    task->parseQueryFromEntry(context);

    if (zookeeper->exists(task->getFinishedNodePath()))
    {
        out_reason = fmt::format("Task {} has already been processed", entry_name);
        return {};
    }

    return task;
}
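
/// To summarize, initAndCheckTask() returns nullptr (with out_reason set) for entries that must be skipped:
/// already applied according to log_ptr, never committed, executed by this replica as the initiator, dummy
/// entries with an empty query, or entries that already have our finished node. The forced removal of /try
/// kicks in after max_iterations * wait_time_ms = 3600 * 1000 ms, i.e. after about an hour of waiting.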

bool DatabaseReplicatedDDLWorker::canRemoveQueueEntry(const String & entry_name, const Coordination::Stat &)
{
    UInt32 entry_number = DDLTaskBase::getLogEntryNumber(entry_name);
    UInt32 max_log_ptr = parse<UInt32>(getAndSetZooKeeper()->get(database->zookeeper_path + "/max_log_ptr"));
    return entry_number + logs_to_keep < max_log_ptr;
}
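
/// Illustrative arithmetic for the retention check above: with logs_to_keep = 1000 and max_log_ptr = 1500,
/// canRemoveQueueEntry() allows removal of entries numbered below 500 (entry_number + 1000 < 1500).
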
}