ClickHouse/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp


#include <Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Poco/Timestamp.h>
#include <random>
#include <unordered_set>

namespace DB
{
namespace ErrorCodes
{
extern const int NOT_FOUND_NODE;
extern const int ALL_REPLICAS_LOST;
extern const int REPLICA_STATUS_CHANGED;
}

ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_)
: storage(storage_)
, log_name(storage.database_name + "." + storage.table_name + " (ReplicatedMergeTreeCleanupThread)")
, log(&Logger::get(log_name))
{
task = storage.global_context.getSchedulePool().createTask(log_name, [this]{ run(); });
}
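
/// Performs one cleanup iteration and reschedules itself. The delay includes a random
/// component (cleanup_delay_period_random_add) so cleanup runs on different replicas are spread apart.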
void ReplicatedMergeTreeCleanupThread::run()
{
const auto CLEANUP_SLEEP_MS = storage.settings.cleanup_delay_period * 1000
+ std::uniform_int_distribution<UInt64>(0, storage.settings.cleanup_delay_period_random_add * 1000)(rng);
try
{
iterate();
}
catch (const Coordination::Exception & e)
{
tryLogCurrentException(log, __PRETTY_FUNCTION__);
if (e.code == Coordination::ZSESSIONEXPIRED)
return;
}
catch (...)
{
tryLogCurrentException(log, __PRETTY_FUNCTION__);
}
task->scheduleAfter(CLEANUP_SLEEP_MS);
}
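
/// One cleanup pass: remove old parts and temporary directories, and, if this replica
/// is the leader, old log entries, block hashes and mutations.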
void ReplicatedMergeTreeCleanupThread::iterate()
{
storage.clearOldPartsAndRemoveFromZK();
{
/// TODO: Implement tryLockStructureForShare.
auto lock = storage.lockStructureForShare(false, "");
storage.clearOldTemporaryDirectories();
}

/// This is a loose condition: it is not a problem if we have actually lost leadership
/// at this moment and two replicas try to do cleanup simultaneously.
if (storage.is_leader)
{
clearOldLogs();
clearOldBlocks();
clearOldMutations();
}
}
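
/// Removes old records from the replication log (the /log node) that all replicas have
/// already processed, marking hopelessly stale replicas as lost along the way.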
void ReplicatedMergeTreeCleanupThread::clearOldLogs()
{
auto zookeeper = storage.getZooKeeper();
Coordination::Stat stat;
if (!zookeeper->exists(storage.zookeeper_path + "/log", &stat))
throw Exception(storage.zookeeper_path + "/log doesn't exist", ErrorCodes::NOT_FOUND_NODE);
int children_count = stat.numChildren;
/// We will wait until 1.1 times the minimum number of records has accumulated.
if (static_cast<double>(children_count) < storage.settings.min_replicated_logs_to_keep * 1.1)
return;
Strings replicas = zookeeper->getChildren(storage.zookeeper_path + "/replicas", &stat);
/// We will keep logs after and including this threshold.
UInt64 min_saved_log_pointer = std::numeric_limits<UInt64>::max();
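
/// The minimum log pointer among replicas that are candidates to be marked as lost.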
UInt64 min_log_pointer_lost_candidate = std::numeric_limits<UInt64>::max();
Strings entries = zookeeper->getChildren(storage.zookeeper_path + "/log");
if (entries.empty())
return;
std::sort(entries.begin(), entries.end());
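
/// The earliest log entry that survives the `max_replicated_logs_to_keep` cap.
/// Replicas whose log_pointer lags behind it become candidates for being marked as lost.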
String min_saved_record_log_str = entries[
entries.size() > storage.settings.max_replicated_logs_to_keep
? entries.size() - storage.settings.max_replicated_logs_to_keep
: 0];

/// Replicas that are marked is_lost but are active (i.e. recovering).
std::unordered_set<String> recovering_replicas;

/// Lost replica -> version of its 'host' node.
std::unordered_map<String, UInt32> host_versions_lost_replicas;

/// Candidate lost replica -> log pointer.
std::unordered_map<String, String> log_pointers_candidate_lost_replicas;

size_t num_replicas_were_marked_is_lost = 0;
for (const String & replica : replicas)
{
Coordination::Stat host_stat;
zookeeper->get(storage.zookeeper_path + "/replicas/" + replica + "/host", &host_stat);
String pointer = zookeeper->get(storage.zookeeper_path + "/replicas/" + replica + "/log_pointer");

UInt64 log_pointer = 0;
if (!pointer.empty())
log_pointer = parse<UInt64>(pointer);

/// Check the status of the replica (active or not).
/// If the replica is not active, check where its log_pointer points.
/// There are three possibilities for the "is_lost" node:
/// - it does not exist (the replica was created by an old version of ClickHouse);
/// - it exists with value "0";
/// - it exists with value "1".
String is_lost_str;
bool has_is_lost_node = zookeeper->tryGet(storage.zookeeper_path + "/replicas/" + replica + "/is_lost", is_lost_str);

if (zookeeper->exists(storage.zookeeper_path + "/replicas/" + replica + "/is_active"))
{
if (has_is_lost_node && is_lost_str == "1")
{
/// Lost and active: recovering.
recovering_replicas.insert(replica);
++num_replicas_were_marked_is_lost;
}
else
{
/// Not lost and active: usual case.
min_saved_log_pointer = std::min(min_saved_log_pointer, log_pointer);
}
}
else
{
if (!has_is_lost_node)
{
/// Only to support old versions of ClickHouse.
/// If the replica does not have the "/is_lost" node, we must preserve its log_pointer,
/// because old versions cannot recover from being marked as lost.
min_saved_log_pointer = std::min(min_saved_log_pointer, log_pointer);
}
else
{
if (is_lost_str == "0")
{
/// Not active and not lost: a candidate to be marked as lost.
String log_pointer_str = "log-" + padIndex(log_pointer);
if (log_pointer_str >= min_saved_record_log_str)
{
/// Its log pointer is fresh enough.
min_saved_log_pointer = std::min(min_saved_log_pointer, log_pointer);
}
else
{
/// Its log pointer is stale: will mark the replica as lost.
host_versions_lost_replicas[replica] = host_stat.version;
log_pointers_candidate_lost_replicas[replica] = log_pointer_str;
min_log_pointer_lost_candidate = std::min(min_log_pointer_lost_candidate, log_pointer);
}
}
else
{
++num_replicas_were_marked_is_lost;
host_versions_lost_replicas[replica] = host_stat.version;
}
}
}
}

/// We must check the log_pointer of recovering replicas at the end,
/// because their log pointers can move backwards.
for (const String & replica : recovering_replicas)
{
String pointer = zookeeper->get(storage.zookeeper_path + "/replicas/" + replica + "/log_pointer");
UInt64 log_pointer = 0;
if (!pointer.empty())
log_pointer = parse<UInt64>(pointer);
min_saved_log_pointer = std::min(min_saved_log_pointer, log_pointer);
}

if (!recovering_replicas.empty())
min_saved_log_pointer = std::min(min_saved_log_pointer, min_log_pointer_lost_candidate);
/// We will not touch the last `min_replicated_logs_to_keep` records.
entries.erase(entries.end() - std::min<UInt64>(entries.size(), storage.settings.min_replicated_logs_to_keep), entries.end());

/// We will not touch records that are no less than `min_saved_log_pointer`.
entries.erase(std::lower_bound(entries.begin(), entries.end(), "log-" + padIndex(min_saved_log_pointer)), entries.end());
if (entries.empty())
return;

markLostReplicas(host_versions_lost_replicas, log_pointers_candidate_lost_replicas, replicas.size() - num_replicas_were_marked_is_lost, zookeeper);

Coordination::Requests ops;
for (size_t i = 0; i < entries.size(); ++i)
{
ops.emplace_back(zkutil::makeRemoveRequest(storage.zookeeper_path + "/log/" + entries[i], -1));
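/// Flush the accumulated removals in batches so a single multi-request does not grow too large.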
if (ops.size() > 4 * zkutil::MULTI_BATCH_SIZE || i + 1 == entries.size())
{
/// We check the version of the /host node to ensure that a replica restored from one of the
/// marked replicas does not copy an invalid log_pointer.
for (const auto & host_version : host_versions_lost_replicas)
ops.emplace_back(zkutil::makeCheckRequest(storage.zookeeper_path + "/replicas/" + host_version.first + "/host", host_version.second));

/// Simultaneously with clearing the log, check whether a replica was added since we fetched the list of replicas.
ops.emplace_back(zkutil::makeCheckRequest(storage.zookeeper_path + "/replicas", stat.version));
zookeeper->multi(ops);
ops.clear();
}
}
LOG_DEBUG(log, "Removed " << entries.size() << " old log entries: " << entries.front() << " - " << entries.back());
}
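
/// Marks the given candidate replicas as lost by setting their "is_lost" node to "1".
/// The version of each replica's "host" node is checked so that a replica that became
/// active in the meantime is not marked.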
void ReplicatedMergeTreeCleanupThread::markLostReplicas(const std::unordered_map<String, UInt32> & host_versions_lost_replicas,
const std::unordered_map<String, String> & log_pointers_candidate_lost_replicas,
size_t replicas_count, const zkutil::ZooKeeperPtr & zookeeper)
{
Strings candidate_lost_replicas;
std::vector<Coordination::Requests> requests;

for (const auto & pair : log_pointers_candidate_lost_replicas)
{
String replica = pair.first;
Coordination::Requests ops;

/// If the version of the "host" node has changed, we cannot mark the replica as lost: it has become active again.
ops.emplace_back(zkutil::makeCheckRequest(storage.zookeeper_path + "/replicas/" + replica + "/host", host_versions_lost_replicas.at(replica)));
ops.emplace_back(zkutil::makeSetRequest(storage.zookeeper_path + "/replicas/" + replica + "/is_lost", "1", -1));
candidate_lost_replicas.push_back(replica);
requests.push_back(ops);
}

if (candidate_lost_replicas.size() == replicas_count)
throw Exception("All replicas are stale: we won't mark any replica as lost", ErrorCodes::ALL_REPLICAS_LOST);

std::vector<zkutil::ZooKeeper::FutureMulti> futures;
for (size_t i = 0; i < candidate_lost_replicas.size(); ++i)
futures.emplace_back(zookeeper->tryAsyncMulti(requests[i]));

for (size_t i = 0; i < candidate_lost_replicas.size(); ++i)
{
auto multi_responses = futures[i].get();
if (multi_responses.responses[0]->error == Coordination::Error::ZBADVERSION)
throw Exception(candidate_lost_replicas[i] + " became active while we were marking lost replicas.", DB::ErrorCodes::REPLICA_STATUS_CHANGED);
zkutil::KeeperMultiException::check(multi_responses.error, requests[i], multi_responses.responses);
}
}
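
/// A block node name together with its creation time (ctime) in ZooKeeper.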
struct ReplicatedMergeTreeCleanupThread::NodeWithStat
{
String node;
Int64 ctime = 0;
NodeWithStat(String node_, Int64 ctime_) : node(std::move(node_)), ctime(ctime_) {}
static bool greaterByTime(const NodeWithStat & lhs, const NodeWithStat & rhs)
{
return std::forward_as_tuple(lhs.ctime, lhs.node) > std::forward_as_tuple(rhs.ctime, rhs.node);
}
};
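
/// Removes old block hash nodes (used for deduplication of inserts) from ZooKeeper,
/// keeping at most `replicated_deduplication_window` of the most recent ones, and none
/// older than `replicated_deduplication_window_seconds`.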
void ReplicatedMergeTreeCleanupThread::clearOldBlocks()
{
auto zookeeper = storage.getZooKeeper();
std::vector<NodeWithStat> timed_blocks;
getBlocksSortedByTime(*zookeeper, timed_blocks);
if (timed_blocks.empty())
return;
/// Use the timestamp of the newest node (the first in the sorted list) as the "current" time.
Int64 current_time = timed_blocks.front().ctime;
Int64 time_threshold = std::max(static_cast<Int64>(0), current_time - static_cast<Int64>(1000 * storage.settings.replicated_deduplication_window_seconds));

/// A virtual node; every node that sorts after it (i.e. is older than the time threshold) will be deleted.
NodeWithStat block_threshold{{}, time_threshold};

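/// A block is removed if it falls outside the deduplication window by count or by age,
/// whichever threshold removes more.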
size_t current_deduplication_window = std::min<size_t>(timed_blocks.size(), storage.settings.replicated_deduplication_window);
auto first_outdated_block_fixed_threshold = timed_blocks.begin() + current_deduplication_window;
auto first_outdated_block_time_threshold = std::upper_bound(timed_blocks.begin(), timed_blocks.end(), block_threshold, NodeWithStat::greaterByTime);
auto first_outdated_block = std::min(first_outdated_block_fixed_threshold, first_outdated_block_time_threshold);
zkutil::AsyncResponses<Coordination::RemoveResponse> try_remove_futures;
for (auto it = first_outdated_block; it != timed_blocks.end(); ++it)
{
String path = storage.zookeeper_path + "/blocks/" + it->node;
try_remove_futures.emplace_back(path, zookeeper->asyncTryRemove(path));
}
for (auto & pair : try_remove_futures)
{
const String & path = pair.first;
int32_t rc = pair.second.get().error;
if (rc == Coordination::ZNOTEMPTY)
{
/// Can happen if there are leftover block nodes with children created by previous server versions.
zookeeper->removeRecursive(path);
cached_block_stats.erase(first_outdated_block->node);
}
else if (rc)
LOG_WARNING(log,
"Error while deleting ZooKeeper path `" << path << "`: " << zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
else
{
/// Successfully removed blocks have to be removed from cache
cached_block_stats.erase(first_outdated_block->node);
}
first_outdated_block++;
}
auto num_nodes_to_delete = timed_blocks.end() - first_outdated_block;
if (num_nodes_to_delete)
LOG_TRACE(log, "Cleared " << num_nodes_to_delete << " old blocks from ZooKeeper");
}
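
/// Fills `timed_blocks` with all block nodes and their creation times, sorted newest first.
/// Stats for previously seen blocks are served from `cached_block_stats`; only new blocks
/// are fetched from ZooKeeper.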
void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(zkutil::ZooKeeper & zookeeper, std::vector<NodeWithStat> & timed_blocks)
{
timed_blocks.clear();
Strings blocks;
Coordination::Stat stat;
if (zookeeper.tryGetChildren(storage.zookeeper_path + "/blocks", blocks, &stat))
throw Exception(storage.zookeeper_path + "/blocks doesn't exist", ErrorCodes::NOT_FOUND_NODE);
/// This code seems obsolete, because we delete blocks from the cache
/// when they are deleted from ZooKeeper. But we don't know about all (maybe future) places
/// in the code where they can be removed, so we check here just to be sure that the cache does not leak.
{
NameSet blocks_set(blocks.begin(), blocks.end());
for (auto it = cached_block_stats.begin(); it != cached_block_stats.end();)
{
if (!blocks_set.count(it->first))
it = cached_block_stats.erase(it);
else
++it;
}
}
auto not_cached_blocks = stat.numChildren - cached_block_stats.size();
if (not_cached_blocks)
{
LOG_TRACE(log, "Checking " << stat.numChildren << " blocks (" << not_cached_blocks << " are not cached)"
<< " to clear old ones from ZooKeeper.");
}

zkutil::AsyncResponses<Coordination::ExistsResponse> exists_futures;
for (const String & block : blocks)
{
auto it = cached_block_stats.find(block);
if (it == cached_block_stats.end())
{
/// New block. Fetch its stat asynchronously.
exists_futures.emplace_back(block, zookeeper.asyncExists(storage.zookeeper_path + "/blocks/" + block));
}
else
{
/// Cached block
timed_blocks.emplace_back(block, it->second);
}
}
/// Put fetched stats into the cache
for (auto & elem : exists_futures)
{
auto status = elem.second.get();
if (status.error != Coordination::ZNONODE)
{
cached_block_stats.emplace(elem.first, status.stat.ctime);
timed_blocks.emplace_back(elem.first, status.stat.ctime);
}
}
std::sort(timed_blocks.begin(), timed_blocks.end(), NodeWithStat::greaterByTime);
}
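
/// Removes finished mutation entries from ZooKeeper, keeping the most recent
/// `finished_mutations_to_keep` of the mutations already executed on all replicas.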
void ReplicatedMergeTreeCleanupThread::clearOldMutations()
{
if (!storage.settings.finished_mutations_to_keep)
return;
if (storage.queue.countFinishedMutations() <= storage.settings.finished_mutations_to_keep)
{
/// Not strictly necessary, but helps to avoid unnecessary ZooKeeper requests.
/// If even this replica hasn't finished enough mutations yet, then we don't need to clean anything.
return;
}
auto zookeeper = storage.getZooKeeper();
Coordination::Stat replicas_stat;
Strings replicas = zookeeper->getChildren(storage.zookeeper_path + "/replicas", &replicas_stat);
UInt64 min_pointer = std::numeric_limits<UInt64>::max();
for (const String & replica : replicas)
{
String pointer;
zookeeper->tryGet(storage.zookeeper_path + "/replicas/" + replica + "/mutation_pointer", pointer);
if (pointer.empty())
return; /// One replica hasn't done anything yet so we can't delete any mutations.
min_pointer = std::min(parse<UInt64>(pointer), min_pointer);
}
Strings entries = zookeeper->getChildren(storage.zookeeper_path + "/mutations");
std::sort(entries.begin(), entries.end());
/// Do not remove entries that are greater than `min_pointer` (they are not done yet).
entries.erase(std::upper_bound(entries.begin(), entries.end(), padIndex(min_pointer)), entries.end());
/// Do not remove last `storage.settings.finished_mutations_to_keep` entries.
if (entries.size() <= storage.settings.finished_mutations_to_keep)
return;
entries.erase(entries.end() - storage.settings.finished_mutations_to_keep, entries.end());
if (entries.empty())
return;
Coordination::Requests ops;
size_t batch_start_i = 0;
for (size_t i = 0; i < entries.size(); ++i)
{
ops.emplace_back(zkutil::makeRemoveRequest(storage.zookeeper_path + "/mutations/" + entries[i], -1));
if (ops.size() > 4 * zkutil::MULTI_BATCH_SIZE || i + 1 == entries.size())
{
/// Simultaneously with clearing the mutations, check whether a replica was added since we fetched the list of replicas.
ops.emplace_back(zkutil::makeCheckRequest(storage.zookeeper_path + "/replicas", replicas_stat.version));
zookeeper->multi(ops);
LOG_DEBUG(log, "Removed " << (i + 1 - batch_start_i) << " old mutation entries: " << entries[batch_start_i] << " - " << entries[i]);
batch_start_i = i + 1;
ops.clear();
}
}
}
}