ClickHouse/dbms/src/Interpreters/DDLWorker.cpp

1185 lines
40 KiB
C++
Raw Normal View History

#include <Interpreters/DDLWorker.h>
#include <Parsers/ASTAlterQuery.h>
2017-04-21 12:39:28 +00:00
#include <Parsers/ASTQueryWithOnCluster.h>
2017-04-25 15:21:03 +00:00
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
2017-04-25 15:21:03 +00:00
#include <Storages/IStorage.h>
2017-10-13 17:47:59 +00:00
#include <DataStreams/IProfilingBlockInputStream.h>
#include <Interpreters/executeQuery.h>
#include <Interpreters/Cluster.h>
2018-04-19 13:56:14 +00:00
#include <Common/DNSResolver.h>
#include <Common/Macros.h>
#include <Common/getFQDNOrHostName.h>
#include <Common/setThreadName.h>
#include <Common/Stopwatch.h>
#include <Common/randomSeed.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnArray.h>
#include <Common/ZooKeeper/ZooKeeper.h>
2018-04-03 17:35:48 +00:00
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/Lock.h>
#include <Common/isLocalAddress.h>
2017-04-19 14:21:27 +00:00
#include <Poco/Timestamp.h>
#include <random>
#include <pcg_random.hpp>
#include <Poco/Net/NetException.h>
namespace DB
{
namespace ErrorCodes
{
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
extern const int INVALID_CONFIG_PARAMETER;
extern const int UNKNOWN_FORMAT_VERSION;
2017-04-25 15:21:03 +00:00
extern const int INCONSISTENT_TABLE_ACCROSS_SHARDS;
extern const int INCONSISTENT_CLUSTER_DEFINITION;
extern const int TIMEOUT_EXCEEDED;
extern const int UNKNOWN_TYPE_OF_QUERY;
extern const int UNFINISHED;
extern const int UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK;
extern const int QUERY_IS_PROHIBITED;
}
2017-07-28 16:14:49 +00:00
namespace
{
/// Identifier of a host in a DDL task's host list: host name plus TCP port.
/// Serialized via Cluster::Address::toString ("host:port") and used as a ZooKeeper node name.
struct HostID
{
String host_name;
UInt16 port;
HostID() = default;
explicit HostID(const Cluster::Address & address)
2017-07-28 16:14:49 +00:00
: host_name(address.host_name), port(address.port) {}
/// Inverse of toString(): parses the "host:port" form produced by Cluster::Address::toString.
static HostID fromString(const String & host_port_str)
{
HostID res;
Cluster::Address::fromString(host_port_str, res.host_name, res.port);
return res;
}
/// Canonical serialized form (used as ZK node names and map keys).
String toString() const
{
return Cluster::Address::toString(host_name, port);
}
/// Human-readable form for log and error messages.
String readableString() const
{
return host_name + ":" + DB::toString(port);
}
/// Whether this host id resolves to the local server listening on clickhouse_port.
bool isLocalAddress(UInt16 clickhouse_port) const
2017-07-28 16:14:49 +00:00
{
try
{
2018-04-19 13:56:14 +00:00
return DB::isLocalAddress(DNSResolver::instance().resolveAddress(host_name, port), clickhouse_port);
}
catch (const Poco::Net::NetException &)
{
/// Avoid "Host not found" exceptions: treat unresolvable hosts as non-local.
return false;
}
2017-07-28 16:14:49 +00:00
}
/// Helper for std::transform over vectors of HostID.
static String applyToString(const HostID & host_id)
{
return host_id.toString();
}
};
}
/// Serializable record describing one distributed DDL query stored in a ZooKeeper queue node.
struct DDLLogEntry
{
String query;
2017-07-28 16:14:49 +00:00
std::vector<HostID> hosts;
String initiator; // optional
static constexpr int CURRENT_VERSION = 1;
/// Serializes the entry into the line-oriented text format stored in ZooKeeper.
/// Field order is part of the format and must match parse().
String toString()
{
2017-07-31 21:39:24 +00:00
WriteBufferFromOwnString wb;
2017-07-28 16:14:49 +00:00
Strings host_id_strings(hosts.size());
std::transform(hosts.begin(), hosts.end(), host_id_strings.begin(), HostID::applyToString);
2017-07-31 21:39:24 +00:00
auto version = CURRENT_VERSION;
wb << "version: " << version << "\n";
wb << "query: " << escape << query << "\n";
2017-07-28 16:14:49 +00:00
wb << "hosts: " << host_id_strings << "\n";
2017-07-31 21:39:24 +00:00
wb << "initiator: " << initiator << "\n";
return wb.str();
}
/// Parses the text produced by toString(). Throws UNKNOWN_FORMAT_VERSION on a
/// version mismatch. The "initiator" line is optional for backward compatibility.
void parse(const String & data)
{
ReadBufferFromString rb(data);
int version;
rb >> "version: " >> version >> "\n";
if (version != CURRENT_VERSION)
throw Exception("Unknown DDLLogEntry format version: " + DB::toString(version), ErrorCodes::UNKNOWN_FORMAT_VERSION);
2017-07-28 16:14:49 +00:00
Strings host_id_strings;
2017-05-31 14:01:08 +00:00
rb >> "query: " >> escape >> query >> "\n";
2017-07-28 16:14:49 +00:00
rb >> "hosts: " >> host_id_strings >> "\n";
if (!rb.eof())
rb >> "initiator: " >> initiator >> "\n";
else
initiator.clear();
assertEOF(rb);
2017-07-28 16:14:49 +00:00
hosts.resize(host_id_strings.size());
std::transform(host_id_strings.begin(), host_id_strings.end(), hosts.begin(), HostID::fromString);
}
};
/// In-memory state of a single DDL queue entry while it is processed by this host.
/// Fields are grouped by the lifetime stage at which they become valid.
struct DDLTask
2017-04-25 15:21:03 +00:00
{
2017-07-28 16:14:49 +00:00
/// Stages of task lifetime correspond to the ordering of these data fields:
/// Stage 1: parse entry
String entry_name;
2017-07-28 16:14:49 +00:00
String entry_path;
DDLLogEntry entry;
/// Stage 2: resolve host_id and check that the entry's host list addresses this host
HostID host_id;
String host_id_str;
2017-07-28 16:14:49 +00:00
/// Stage 3.1: parse query
ASTPtr query;
ASTQueryWithOnCluster * query_on_cluster = nullptr;
2017-07-28 16:14:49 +00:00
/// Stage 3.2: check cluster and find the host in cluster
String cluster_name;
ClusterPtr cluster;
2017-07-28 16:14:49 +00:00
/// Position of this host within the resolved cluster definition.
Cluster::Address address_in_cluster;
size_t host_shard_num;
size_t host_replica_num;
2017-07-28 16:14:49 +00:00
/// Stage 3.3: execute query
ExecutionStatus execution_status;
bool was_executed = false;
2017-07-28 16:14:49 +00:00
/// Stage 4: commit results to ZooKeeper
};
2017-04-25 15:21:03 +00:00
/// Wraps an existing ZooKeeper session into a holder and builds a simple distributed
/// lock at lock_prefix/lock_name carrying lock_message as its payload.
static std::unique_ptr<zkutil::Lock> createSimpleZooKeeperLock(
    std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message)
{
    auto holder = std::make_shared<zkutil::ZooKeeperHolder>();
    holder->initFromInstance(zookeeper);
    return std::make_unique<zkutil::Lock>(std::move(holder), lock_prefix, lock_name, lock_message);
}
2017-04-25 15:21:03 +00:00
static bool isSupportedAlterType(int type)
{
static const std::unordered_set<int> supported_alter_types{
ASTAlterCommand::ADD_COLUMN,
ASTAlterCommand::DROP_COLUMN,
ASTAlterCommand::MODIFY_COLUMN,
ASTAlterCommand::MODIFY_PRIMARY_KEY,
ASTAlterCommand::DROP_PARTITION,
ASTAlterCommand::DELETE,
2017-04-25 15:21:03 +00:00
};
return supported_alter_types.count(type) != 0;
2017-04-25 15:21:03 +00:00
}
/// Constructs the distributed DDL worker: normalizes the ZooKeeper queue path,
/// reads tunables from the <distributed_ddl> config section (prefix), optionally
/// applies a dedicated settings profile, and starts the background processing thread.
DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix)
    : context(context_), log(&Logger::get("DDLWorker"))
{
    queue_dir = zk_root_dir;
    if (queue_dir.back() == '/')
        queue_dir.resize(queue_dir.size() - 1);

    if (config)
    {
        task_max_lifetime = config->getUInt64(prefix + ".task_max_lifetime", static_cast<UInt64>(task_max_lifetime));
        cleanup_delay_period = config->getUInt64(prefix + ".cleanup_delay_period", static_cast<UInt64>(cleanup_delay_period));
        /// Keep at least one task in the queue so the last processed entry remains visible.
        max_tasks_in_queue = std::max(static_cast<UInt64>(1), config->getUInt64(prefix + ".max_tasks_in_queue", max_tasks_in_queue));

        if (config->has(prefix + ".profile"))
            context.setSetting("profile", config->getString(prefix + ".profile"));
    }

    if (context.getSettingsRef().readonly)
    {
        /// Fixed message text: "apropriate" -> "appropriate" and added the missing sentence separator.
        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries."
            << " Set appropriate system_profile or distributed_ddl.profile to fix this.");
    }

    host_fqdn = getFQDNOrHostName();
    host_fqdn_id = Cluster::Address::toString(host_fqdn, context.getTCPPort());

    event_queue_updated = std::make_shared<Poco::Event>();

    thread = std::thread(&DDLWorker::run, this);
}
/// Signals the worker thread to stop, wakes it and waits for it to finish.
DDLWorker::~DDLWorker()
{
stop_flag = true;
/// Wake the thread if it is blocked in event_queue_updated->wait().
event_queue_updated->set();
thread.join();
}
/// Loads the queue entry `entry_name` from ZooKeeper, parses it and checks whether the
/// entry's host list contains this server. On success stores the new task in
/// `current_task` and returns true; otherwise fills `out_reason` and returns false.
bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason)
{
2017-07-28 16:14:49 +00:00
String node_data;
String entry_path = queue_dir + "/" + entry_name;
if (!zookeeper->tryGet(entry_path, node_data))
2017-07-28 16:14:49 +00:00
{
/// It is Ok that node could be deleted just now. It means that there are no current host in node's host list.
out_reason = "The task was deleted";
2017-07-28 16:14:49 +00:00
return false;
}
auto task = std::make_unique<DDLTask>();
task->entry_name = entry_name;
task->entry_path = entry_path;
2017-07-28 16:14:49 +00:00
try
{
task->entry.parse(node_data);
2017-07-28 16:14:49 +00:00
}
catch (...)
{
/// What should we do if we even cannot parse host name and therefore cannot properly submit execution status?
/// We can try to create a fail node using the FQDN; if it equals the host name in the cluster config, the attempt will be successful.
/// Otherwise, that node will be ignored by DDLQueryStatusInputSream.
tryLogCurrentException(log, "Cannot parse DDL task " + entry_name + ", will try to send error status");
2017-07-28 16:14:49 +00:00
String status = ExecutionStatus::fromCurrentException().serializeText();
try
{
createStatusDirs(entry_path);
zookeeper->tryCreate(entry_path + "/finished/" + host_fqdn_id, status, zkutil::CreateMode::Persistent);
}
catch (...)
{
/// Best effort only: the cleaner will eventually remove the malformed entry.
tryLogCurrentException(log, "Can't report the task has invalid format");
}
out_reason = "Incorrect task format";
2017-07-28 16:14:49 +00:00
return false;
}
bool host_in_hostlist = false;
for (const HostID & host : task->entry.hosts)
2017-07-28 16:14:49 +00:00
{
if (!host.isLocalAddress(context.getTCPPort()))
continue;
2017-07-28 16:14:49 +00:00
if (host_in_hostlist)
{
2017-07-28 16:14:49 +00:00
/// This check could be slow a little bit
LOG_WARNING(log, "There are two the same ClickHouse instances in task " << entry_name
<< ": " << task->host_id.readableString() << " and " << host.readableString() << ". Will use the first one only.");
}
2017-07-28 16:14:49 +00:00
else
{
2017-07-28 16:14:49 +00:00
host_in_hostlist = true;
task->host_id = host;
task->host_id_str = host.toString();
2017-07-28 16:14:49 +00:00
}
}
if (host_in_hostlist)
current_task = std::move(task);
else
out_reason = "There is no a local address in host list";
2017-07-28 16:14:49 +00:00
return host_in_hostlist;
}
/// Keeps only queue entries named "query-*" and orders them by name
/// (the sequential suffix makes lexicographic order equal to creation order).
static void filterAndSortQueueNodes(Strings & all_nodes)
{
    auto is_not_queue_entry = [](const String & node) { return !startsWith(node, "query-"); };
    all_nodes.erase(std::remove_if(all_nodes.begin(), all_nodes.end(), is_not_queue_entry), all_nodes.end());
    std::sort(all_nodes.begin(), all_nodes.end());
}
2017-07-28 16:14:49 +00:00
/// Iterates over the queue starting after the last processed entry, (re)initializes the
/// current task and executes every entry this host has not yet marked as finished.
void DDLWorker::processTasks()
{
LOG_DEBUG(log, "Processing tasks");
2017-07-28 16:14:49 +00:00
Strings queue_nodes = zookeeper->getChildren(queue_dir, nullptr, event_queue_updated);
filterAndSortQueueNodes(queue_nodes);
2017-07-28 16:14:49 +00:00
if (queue_nodes.empty())
return;
2017-07-28 16:14:49 +00:00
/// On startup nothing is processed yet, so we start from the beginning of the queue.
bool server_startup = last_processed_task_name.empty();
auto begin_node = server_startup
? queue_nodes.begin()
: std::upper_bound(queue_nodes.begin(), queue_nodes.end(), last_processed_task_name);
for (auto it = begin_node; it != queue_nodes.end(); ++it)
{
String entry_name = *it;
/// A task may be left over from a previous iteration that failed before commit.
if (current_task)
{
2017-07-28 16:14:49 +00:00
if (current_task->entry_name == entry_name)
{
2017-07-28 16:14:49 +00:00
LOG_INFO(log, "Trying to process task " << entry_name << " again");
}
else
{
LOG_INFO(log, "Task " << current_task->entry_name << " was deleted from ZooKeeper before current host committed it");
2017-07-28 16:14:49 +00:00
current_task = nullptr;
}
}
2017-07-28 16:14:49 +00:00
if (!current_task)
{
String reason;
if (!initAndCheckTask(entry_name, reason))
2017-07-28 16:14:49 +00:00
{
2018-03-06 11:59:25 +00:00
LOG_DEBUG(log, "Will not execute task " << entry_name << ": " << reason);
last_processed_task_name = entry_name;
continue;
}
}
2017-07-28 16:14:49 +00:00
DDLTask & task = *current_task;
bool already_processed = zookeeper->exists(task.entry_path + "/finished/" + task.host_id_str);
/// Outside of startup, a not-yet-executed task cannot already have a finished node.
if (!server_startup && !task.was_executed && already_processed)
{
throw Exception(
2017-07-28 16:14:49 +00:00
"Server expects that DDL task " + task.entry_name + " should be processed, but it was already processed according to ZK",
ErrorCodes::LOGICAL_ERROR);
}
if (!already_processed)
{
try
{
processTask(task);
}
catch (...)
{
LOG_WARNING(log, "An error occurred while processing task " << task.entry_name << " (" << task.entry.query << ") : "
<< getCurrentExceptionMessage(true));
/// Rethrow so run() can recover the ZooKeeper session and retry this task.
throw;
}
}
else
{
2017-07-28 16:14:49 +00:00
LOG_DEBUG(log, "Task " << task.entry_name << " (" << task.entry.query << ") has been already processed");
}
last_processed_task_name = task.entry_name;
current_task.reset();
if (stop_flag)
break;
2017-07-28 16:14:49 +00:00
}
}
/// Parses the task's query text and resolves the target cluster and this host's
/// position (shard number, replica number, address) inside that cluster.
/// First tries an exact host_name:port match against the cluster definition; if that
/// fails, falls back to DNS resolution of every replica address.
/// Throws INCONSISTENT_CLUSTER_DEFINITION when the host cannot be located unambiguously.
void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
{
    {
        const char * begin = task.entry.query.data();
        const char * end = begin + task.entry.query.size();

        ParserQuery parser_query(end);
        String description;
        task.query = parseQuery(parser_query, begin, end, description, 0);
    }

    /// Fixed typo in the error message: "Recieved" -> "Received".
    if (!task.query || !(task.query_on_cluster = dynamic_cast<ASTQueryWithOnCluster *>(task.query.get())))
        throw Exception("Received unknown DDL query", ErrorCodes::UNKNOWN_TYPE_OF_QUERY);

    task.cluster_name = task.query_on_cluster->cluster;
    task.cluster = context.tryGetCluster(task.cluster_name);
    if (!task.cluster)
    {
        throw Exception("DDL task " + task.entry_name + " contains current host " + task.host_id.readableString()
            + " in cluster " + task.cluster_name + ", but there are no such cluster here.", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }

    /// Try to find host from task host list in cluster
    /// At first, try to find an exact match (host name and ports should be literally equal).
    /// If the attempt fails, try to find it by resolving the host name of each instance.

    const auto & shards = task.cluster->getShardsAddresses();

    bool found_exact_match = false;
    for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)
    {
        for (size_t replica_num = 0; replica_num < shards[shard_num].size(); ++replica_num)
        {
            const Cluster::Address & address = shards[shard_num][replica_num];

            if (address.host_name == task.host_id.host_name && address.port == task.host_id.port)
            {
                /// Duplicates make the "execute on one replica" logic ambiguous — refuse.
                if (found_exact_match)
                {
                    throw Exception("There are two exactly the same ClickHouse instances " + address.readableString()
                        + " in cluster " + task.cluster_name, ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
                }

                found_exact_match = true;
                task.host_shard_num = shard_num;
                task.host_replica_num = replica_num;
                task.address_in_cluster = address;
            }
        }
    }

    if (found_exact_match)
        return;

    LOG_WARNING(log, "Not found the exact match of host " << task.host_id.readableString() << " from task " << task.entry_name
        << " in cluster " << task.cluster_name << " definition. Will try to find it using host name resolving.");

    bool found_via_resolving = false;
    for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)
    {
        for (size_t replica_num = 0; replica_num < shards[shard_num].size(); ++replica_num)
        {
            const Cluster::Address & address = shards[shard_num][replica_num];

            if (isLocalAddress(address.getResolvedAddress(), context.getTCPPort()))
            {
                if (found_via_resolving)
                {
                    throw Exception("There are two the same ClickHouse instances in cluster " + task.cluster_name + " : "
                        + task.address_in_cluster.readableString() + " and " + address.readableString(), ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
                }
                else
                {
                    found_via_resolving = true;
                    task.host_shard_num = shard_num;
                    task.host_replica_num = replica_num;
                    task.address_in_cluster = address;
                }
            }
        }
    }

    if (!found_via_resolving)
    {
        throw Exception("Not found host " + task.host_id.readableString() + " in definition of cluster " + task.cluster_name,
            ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }
    else
    {
        LOG_INFO(log, "Resolved host " << task.host_id.readableString() << " from task " << task.entry_name
            << " as host " << task.address_in_cluster.readableString() << " in definition of cluster " << task.cluster_name);
    }
}
2017-04-25 15:21:03 +00:00
bool DDLWorker::tryExecuteQuery(const String & query, const DDLTask & task, ExecutionStatus & status)
2017-04-25 15:21:03 +00:00
{
2017-07-28 16:14:49 +00:00
/// Add special comment at the start of query to easily identify DDL-produced queries in query_log
2017-08-11 20:20:15 +00:00
String query_prefix = "/* ddl_entry=" + task.entry_name + " */ ";
String query_to_execute = query_prefix + query;
ReadBufferFromString istr(query_to_execute);
String dummy_string;
WriteBufferFromString ostr(dummy_string);
2017-04-25 15:21:03 +00:00
try
{
Context local_context(context);
local_context.setCurrentQueryId(""); // generate random query_id
executeQuery(istr, ostr, false, local_context, nullptr);
2017-04-25 15:21:03 +00:00
}
catch (...)
{
status = ExecutionStatus::fromCurrentException();
tryLogCurrentException(log, "Query " + query + " wasn't finished successfully");
2017-04-25 15:21:03 +00:00
return false;
}
status = ExecutionStatus(0);
LOG_DEBUG(log, "Executed query: " << query);
2017-04-25 15:21:03 +00:00
return true;
}
/// Executes a single DDL task: marks this host as active in ZooKeeper, runs the query
/// at most once (`was_executed` guards against re-execution after a ZK failure), then
/// atomically replaces the "active" flag with a persistent "finished" status node.
void DDLWorker::processTask(DDLTask & task)
{
    LOG_DEBUG(log, "Processing task " << task.entry_name << " (" << task.entry.query << ")");

    String dummy;
    String active_node_path = task.entry_path + "/active/" + task.host_id_str;
    String finished_node_path = task.entry_path + "/finished/" + task.host_id_str;

    auto code = zookeeper->tryCreate(active_node_path, "", zkutil::CreateMode::Ephemeral, dummy);
    if (code == Coordination::ZOK || code == Coordination::ZNODEEXISTS)
    {
        // Ok
    }
    else if (code == Coordination::ZNONODE)
    {
        /// There is no parent: the status dirs were not created yet. Create them and retry.
        createStatusDirs(task.entry_path);

        /// BUG FIX: the original threw with the stale ZNONODE code instead of the retry's
        /// result, and also threw on a benign ZNODEEXISTS returned by the retry.
        code = zookeeper->tryCreate(active_node_path, "", zkutil::CreateMode::Ephemeral, dummy);
        if (code != Coordination::ZOK && code != Coordination::ZNODEEXISTS)
            throw Coordination::Exception(code, active_node_path);
    }
    else
        throw Coordination::Exception(code, active_node_path);

    if (!task.was_executed)
    {
        try
        {
            parseQueryAndResolveHost(task);

            ASTPtr rewritten_ast = task.query_on_cluster->getRewrittenASTWithoutOnCluster(task.address_in_cluster.default_database);
            String rewritten_query = queryToString(rewritten_ast);
            LOG_DEBUG(log, "Executing query: " << rewritten_query);

            if (auto ast_alter = dynamic_cast<const ASTAlterQuery *>(rewritten_ast.get()))
            {
                /// ALTERs of replicated tables need special single-replica coordination.
                processTaskAlter(task, ast_alter, rewritten_query, task.entry_path);
            }
            else
            {
                tryExecuteQuery(rewritten_query, task, task.execution_status);
            }
        }
        catch (const Coordination::Exception &)
        {
            /// ZooKeeper errors are handled by the caller (session recovery + retry).
            throw;
        }
        catch (...)
        {
            task.execution_status = ExecutionStatus::fromCurrentException("An error occurred before execution");
        }

        /// We need to distinguish ZK errors that occurred before and after query execution.
        task.was_executed = true;
    }

    /// FIXME: if server fails right here, the task will be executed twice. We need WAL here.

    /// Delete the active flag and create the finish flag atomically.
    Coordination::Requests ops;
    ops.emplace_back(zkutil::makeRemoveRequest(active_node_path, -1));
    ops.emplace_back(zkutil::makeCreateRequest(finished_node_path, task.execution_status.serializeText(), zkutil::CreateMode::Persistent));
    zookeeper->multi(ops);
}
2017-04-25 15:21:03 +00:00
/// Executes an ALTER task. For replicated tables the query must run on only one replica
/// per shard: replicas race for a per-shard ZooKeeper lock and the winner records its
/// host id in the shard's "executed" node; the others just observe that node.
void DDLWorker::processTaskAlter(
DDLTask & task,
2017-07-28 16:14:49 +00:00
const ASTAlterQuery * ast_alter,
const String & rewritten_query,
const String & node_path)
{
2017-07-28 16:14:49 +00:00
String database = ast_alter->database.empty() ? context.getCurrentDatabase() : ast_alter->database;
StoragePtr storage = context.getTable(database, ast_alter->table);
2017-04-25 15:21:03 +00:00
bool execute_once_on_replica = storage->supportsReplication();
bool execute_on_leader_replica = false;
2017-04-25 15:21:03 +00:00
for (const auto & command : ast_alter->command_list->commands)
2017-04-25 15:21:03 +00:00
{
if (!isSupportedAlterType(command->type))
throw Exception("Unsupported type of ALTER query", ErrorCodes::NOT_IMPLEMENTED);
2017-04-25 15:21:03 +00:00
if (execute_once_on_replica)
execute_on_leader_replica |= command->type == ASTAlterCommand::DROP_PARTITION;
}
2017-04-25 15:21:03 +00:00
/// Sanity check: the table's replication mode must agree with the cluster config.
const auto & shard_info = task.cluster->getShardsInfo().at(task.host_shard_num);
bool config_is_replicated_shard = shard_info.hasInternalReplication();
2017-04-25 15:21:03 +00:00
if (execute_once_on_replica && !config_is_replicated_shard)
{
2017-07-28 16:14:49 +00:00
throw Exception("Table " + ast_alter->table + " is replicated, but shard #" + toString(task.host_shard_num + 1) +
" isn't replicated according to its cluster definition."
" Possibly <internal_replication>true</internal_replication> is forgotten in the cluster config.",
ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
}
if (!execute_once_on_replica && config_is_replicated_shard)
{
2017-07-28 16:14:49 +00:00
throw Exception("Table " + ast_alter->table + " isn't replicated, but shard #" + toString(task.host_shard_num + 1) +
2017-08-11 20:20:15 +00:00
" is replicated according to its cluster definition", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
}
2017-04-25 15:21:03 +00:00
/// Generate unique name for shard node, it will be used to execute the query by only single host
/// Shard node name has format 'replica_name1,replica_name2,...,replica_nameN'
/// Where replica_name is 'replica_config_host_name:replica_port'
auto get_shard_name = [] (const Cluster::Addresses & shard_addresses)
{
Strings replica_names;
for (const Cluster::Address & address : shard_addresses)
replica_names.emplace_back(address.readableString());
std::sort(replica_names.begin(), replica_names.end());
2017-04-25 15:21:03 +00:00
String res;
for (auto it = replica_names.begin(); it != replica_names.end(); ++it)
res += *it + (std::next(it) != replica_names.end() ? "," : "");
return res;
};
2017-04-25 15:21:03 +00:00
if (execute_once_on_replica)
{
String shard_node_name = get_shard_name(task.cluster->getShardsAddresses().at(task.host_shard_num));
String shard_path = node_path + "/shards/" + shard_node_name;
String is_executed_path = shard_path + "/executed";
zookeeper->createAncestors(shard_path + "/");
2017-04-25 15:21:03 +00:00
bool is_executed_by_any_replica = false;
{
auto lock = createSimpleZooKeeperLock(zookeeper, shard_path, "lock", task.host_id_str);
/// Randomized backoff between lock attempts to reduce contention among replicas.
pcg64 rng(randomSeed());
static const size_t max_tries = 20;
for (size_t num_tries = 0; num_tries < max_tries; ++num_tries)
2017-04-25 15:21:03 +00:00
{
/// Another replica already executed the query for this shard.
if (zookeeper->exists(is_executed_path))
{
is_executed_by_any_replica = true;
break;
}
2017-04-25 15:21:03 +00:00
if (lock->tryLock())
2017-04-25 15:21:03 +00:00
{
2017-07-28 16:14:49 +00:00
tryExecuteQuery(rewritten_query, task, task.execution_status);
2017-04-25 15:21:03 +00:00
2017-07-28 16:14:49 +00:00
if (execute_on_leader_replica && task.execution_status.code == ErrorCodes::NOT_IMPLEMENTED)
2017-04-25 15:21:03 +00:00
{
/// TODO: it is ok to receive exception "host is not leader"
2017-04-25 15:21:03 +00:00
}
2017-07-28 16:14:49 +00:00
zookeeper->create(is_executed_path, task.host_id_str, zkutil::CreateMode::Persistent);
lock->unlock();
is_executed_by_any_replica = true;
break;
2017-04-25 15:21:03 +00:00
}
std::this_thread::sleep_for(std::chrono::milliseconds(std::uniform_int_distribution<long>(0, 1000)(rng)));
2017-04-25 15:21:03 +00:00
}
}
if (!is_executed_by_any_replica)
{
task.execution_status = ExecutionStatus(ErrorCodes::NOT_IMPLEMENTED,
"Cannot enqueue replicated DDL query for a replicated shard");
}
2017-04-25 15:21:03 +00:00
}
else
{
2017-07-28 16:14:49 +00:00
tryExecuteQuery(rewritten_query, task, task.execution_status);
2017-04-25 15:21:03 +00:00
}
}
/// Periodically removes queue entries that are expired (older than task_max_lifetime)
/// or push the queue over max_tasks_in_queue. Runs at most once per cleanup_delay_period.
void DDLWorker::cleanupQueue()
2017-04-19 14:21:27 +00:00
{
/// Both ZK and Poco use Unix epoch
Int64 current_time_seconds = Poco::Timestamp().epochTime();
constexpr UInt64 zookeeper_time_resolution = 1000;
2017-04-19 14:21:27 +00:00
2017-08-11 20:20:15 +00:00
/// Too early to check
if (last_cleanup_time_seconds && current_time_seconds < last_cleanup_time_seconds + cleanup_delay_period)
2017-04-19 14:21:27 +00:00
return;
last_cleanup_time_seconds = current_time_seconds;
LOG_DEBUG(log, "Cleaning queue");
Strings queue_nodes = zookeeper->getChildren(queue_dir);
filterAndSortQueueNodes(queue_nodes);
/// Nodes are sorted by creation order, so the first num_outdated_nodes are the oldest.
size_t num_outdated_nodes = (queue_nodes.size() > max_tasks_in_queue) ? queue_nodes.size() - max_tasks_in_queue : 0;
auto first_non_outdated_node = queue_nodes.begin() + num_outdated_nodes;
2017-04-19 14:21:27 +00:00
for (auto it = queue_nodes.cbegin(); it < queue_nodes.cend(); ++it)
2017-04-19 14:21:27 +00:00
{
String node_name = *it;
String node_path = queue_dir + "/" + node_name;
String lock_path = node_path + "/lock";
Coordination::Stat stat;
String dummy;
2017-04-19 14:21:27 +00:00
try
{
/// Already deleted
if (!zookeeper->exists(node_path, &stat))
continue;
/// Delete node if its lifetime is expired (according to task_max_lifetime parameter)
Int64 zookeeper_time_seconds = stat.ctime / zookeeper_time_resolution;
bool node_lifetime_is_expired = zookeeper_time_seconds + task_max_lifetime < current_time_seconds;
/// If too many nodes in task queue (> max_tasks_in_queue), delete oldest one
bool node_is_outside_max_window = it < first_non_outdated_node;
if (!node_lifetime_is_expired && !node_is_outside_max_window)
continue;
/// Skip if there are active nodes (it is weak guard)
if (zookeeper->exists(node_path + "/active", &stat) && stat.numChildren > 0)
{
LOG_INFO(log, "Task " << node_name << " should be deleted, but there are active workers. Skipping it.");
continue;
}
/// Usage of the lock is not necessary now (tryRemoveRecursive correctly removes node in a presence of concurrent cleaners)
/// But the lock will be required to implement system.distributed_ddl_queue table
auto lock = createSimpleZooKeeperLock(zookeeper, node_path, "lock", host_fqdn_id);
if (!lock->tryLock())
{
LOG_INFO(log, "Task " << node_name << " should be deleted, but it is locked. Skipping it.");
continue;
}
if (node_lifetime_is_expired)
LOG_INFO(log, "Lifetime of task " << node_name << " is expired, deleting it");
else if (node_is_outside_max_window)
LOG_INFO(log, "Task " << node_name << " is outdated, deleting it");
/// Deleting
{
/// Remove all children except the lock node first, so the final multi() stays small.
Strings childs = zookeeper->getChildren(node_path);
for (const String & child : childs)
{
if (child != "lock")
zookeeper->tryRemoveRecursive(node_path + "/" + child);
}
2017-04-19 14:21:27 +00:00
/// Remove the lock node and its parent atomically
Coordination::Requests ops;
ops.emplace_back(zkutil::makeRemoveRequest(lock_path, -1));
ops.emplace_back(zkutil::makeRemoveRequest(node_path, -1));
zookeeper->multi(ops);
lock->unlockAssumeLockNodeRemovedManually();
2017-04-19 14:21:27 +00:00
}
}
catch (...)
{
2017-07-28 16:14:49 +00:00
LOG_INFO(log, "An error occured while checking and cleaning task " + node_name + " from queue: " + getCurrentExceptionMessage(false));
2017-04-19 14:21:27 +00:00
}
}
}
2017-08-11 20:20:15 +00:00
/// Try to create nonexisting "status" dirs for a node
void DDLWorker::createStatusDirs(const std::string & node_path)
{
    Coordination::Requests ops;
    for (const char * child : {"/active", "/finished"})
    {
        Coordination::CreateRequest request;
        request.path = node_path + child;
        ops.emplace_back(std::make_shared<Coordination::CreateRequest>(std::move(request)));
    }

    Coordination::Responses responses;
    int code = zookeeper->tryMulti(ops, responses);
    /// ZNODEEXISTS is fine: another host may have created the dirs concurrently.
    if (code && code != Coordination::ZNODEEXISTS)
        throw Coordination::Exception(code);
}
/// Pushes a new DDL entry into the ZooKeeper queue as a persistent sequential node
/// and returns the created node's path.
String DDLWorker::enqueueQuery(DDLLogEntry & entry)
{
    if (entry.hosts.empty())
        throw Exception("Empty host list in a distributed DDL task", ErrorCodes::LOGICAL_ERROR);

    String path_prefix = queue_dir + "/query-";
    zookeeper->createAncestors(path_prefix);

    String created_path = zookeeper->create(path_prefix, entry.toString(), zkutil::CreateMode::PersistentSequential);

    /// Optional step
    try
    {
        createStatusDirs(created_path);
    }
    catch (...)
    {
        LOG_INFO(log, "An error occurred while creating auxiliary ZooKeeper directories in " << created_path << " . They will be created later"
            << ". Error : " << getCurrentExceptionMessage(true));
    }

    return created_path;
}
/// Worker thread entry point: initializes the ZooKeeper queue, then loops processing
/// tasks, waiting on the queue watch and recovering the ZK session on hardware errors.
void DDLWorker::run()
{
setThreadName("DDLWorker");
LOG_DEBUG(log, "Started DDLWorker thread");
/// Retry initialization until it succeeds or shutdown is requested;
/// only transient (hardware) ZooKeeper errors are retried.
bool initialized = false;
do
{
try
{
try
{
zookeeper = context.getZooKeeper();
zookeeper->createAncestors(queue_dir + "/");
initialized = true;
}
catch (const Coordination::Exception & e)
{
if (!Coordination::isHardwareError(e.code))
throw;
}
}
catch (...)
{
tryLogCurrentException(log, "Terminating. Cannot initialize DDL queue.");
return;
}
} while (!initialized && !stop_flag);
while (!stop_flag)
{
try
{
2017-04-25 15:21:03 +00:00
processTasks();
LOG_DEBUG(log, "Waiting a watch");
event_queue_updated->wait();
if (stop_flag)
break;
2017-04-25 15:21:03 +00:00
/// TODO: it might delay the execution, move it to separate thread.
2017-04-25 15:21:03 +00:00
cleanupQueue();
2017-04-19 14:21:27 +00:00
}
catch (Coordination::Exception & e)
{
if (Coordination::isHardwareError(e.code))
2017-07-28 16:14:49 +00:00
{
LOG_DEBUG(log, "Recovering ZooKeeper session after: " << getCurrentExceptionMessage(false));
/// Re-acquire the session, retrying every 5 seconds until shutdown.
while (!stop_flag)
{
try
{
zookeeper = context.getZooKeeper();
break;
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
using namespace std::chrono_literals;
std::this_thread::sleep_for(5s);
}
}
2017-07-28 16:14:49 +00:00
}
else
{
LOG_ERROR(log, "Unexpected ZooKeeper error: " << getCurrentExceptionMessage(true) << ". Terminating.");
return;
2017-07-28 16:14:49 +00:00
}
/// Unlock the processing just in case
event_queue_updated->set();
}
2017-04-19 14:21:27 +00:00
catch (...)
{
LOG_ERROR(log, "Unexpected error: " << getCurrentExceptionMessage(true) << ". Terminating.");
return;
2017-04-19 14:21:27 +00:00
}
}
}
class DDLQueryStatusInputSream : public IProfilingBlockInputStream
{
public:
2017-04-19 14:21:27 +00:00
/// Builds the status stream for a submitted DDL entry: prepares the result header,
/// records the set of hosts to wait for and reads the timeout from settings.
DDLQueryStatusInputSream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context)
: node_path(zk_node_path), context(context), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputSream"))
{
sample = Block{
{std::make_shared<DataTypeString>(), "host"},
{std::make_shared<DataTypeUInt16>(), "port"},
2017-07-27 13:11:16 +00:00
{std::make_shared<DataTypeInt64>(), "status"},
{std::make_shared<DataTypeString>(), "error"},
{std::make_shared<DataTypeUInt64>(), "num_hosts_remaining"},
{std::make_shared<DataTypeUInt64>(), "num_hosts_active"},
};
2017-07-28 16:14:49 +00:00
for (const HostID & host: entry.hosts)
waiting_hosts.emplace(host.toString());
2018-02-23 10:02:29 +00:00
addTotalRowsApprox(entry.hosts.size());
timeout_seconds = context.getSettingsRef().distributed_ddl_task_timeout;
}
/// Stream name; keeps the historical spelling used in logs and the class name.
String getName() const override { return "DDLQueryStatusInputSream"; }
Block getHeader() const override { return sample; }
/// Polls ZooKeeper for newly finished hosts and returns a block with one row per host,
/// until every waited host has reported or distributed_ddl_task_timeout expires.
Block readImpl() override
{
Block res;
2017-07-27 13:11:16 +00:00
if (num_hosts_finished >= waiting_hosts.size())
{
/// All hosts reported: surface the first remote error, if any.
if (first_exception)
throw Exception(*first_exception);
return res;
}
auto zookeeper = context.getZooKeeper();
size_t try_number = 0;
while (res.rows() == 0)
{
if (isCancelled())
{
if (first_exception)
throw Exception(*first_exception);
return res;
}
if (timeout_seconds >= 0 && watch.elapsedSeconds() > timeout_seconds)
{
size_t num_unfinished_hosts = waiting_hosts.size() - num_hosts_finished;
size_t num_active_hosts = current_active_hosts.size();
std::stringstream msg;
msg << "Watching task " << node_path << " is executing longer than distributed_ddl_task_timeout"
<< " (=" << timeout_seconds << ") seconds."
<< " There are " << num_unfinished_hosts << " unfinished hosts"
<< " (" << num_active_hosts << " of them are currently active)"
<< ", they are going to execute the query in background";
throw Exception(msg.str(), ErrorCodes::TIMEOUT_EXCEEDED);
}
/// Back off between polls, growing by 50 ms per attempt up to 1 second.
if (num_hosts_finished != 0 || try_number != 0)
{
auto current_sleep_for = std::chrono::milliseconds(std::min(static_cast<size_t>(1000), 50 * (try_number + 1)));
std::this_thread::sleep_for(current_sleep_for);
}
/// TODO: add shared lock
if (!zookeeper->exists(node_path))
2017-04-21 12:39:28 +00:00
{
throw Exception("Cannot provide query execution status. The query's node " + node_path
+ " has been deleted by the cleaner since it was finished (or its lifetime is expired)",
2017-04-21 12:39:28 +00:00
ErrorCodes::UNFINISHED);
}
Strings new_hosts = getNewAndUpdate(getChildrenAllowNoNode(zookeeper, node_path + "/finished"));
++try_number;
if (new_hosts.empty())
continue;
current_active_hosts = getChildrenAllowNoNode(zookeeper, node_path + "/active");
2017-04-21 12:39:28 +00:00
MutableColumns columns = sample.cloneEmptyColumns();
for (const String & host_id : new_hosts)
{
2017-07-27 13:11:16 +00:00
/// Default status used when the finished node disappeared or cannot be parsed.
ExecutionStatus status(-1, "Cannot obtain error message");
{
String status_data;
if (zookeeper->tryGet(node_path + "/finished/" + host_id, status_data))
2017-07-27 13:11:16 +00:00
status.tryDeserializeText(status_data);
}
String host;
UInt16 port;
Cluster::Address::fromString(host_id, host, port);
/// Remember only the first remote error; it is rethrown when the stream finishes.
if (status.code != 0 && first_exception == nullptr)
first_exception = std::make_unique<Exception>("There was an error on " + host + ": " + status.message, status.code);
++num_hosts_finished;
columns[0]->insert(host);
columns[1]->insert(static_cast<UInt64>(port));
columns[2]->insert(static_cast<Int64>(status.code));
columns[3]->insert(status.message);
columns[4]->insert(static_cast<UInt64>(waiting_hosts.size() - num_hosts_finished));
columns[5]->insert(static_cast<UInt64>(current_active_hosts.size()));
}
res = sample.cloneWithColumns(std::move(columns));
}
return res;
}
/// Empty block with the same column structure as the result stream.
Block getSampleBlock() const { return sample.cloneEmpty(); }

~DDLQueryStatusInputSream() override = default;
private:
/// Like ZooKeeper::getChildren, but a missing node yields an empty list
/// instead of an error. Any other ZooKeeper error code is rethrown.
static Strings getChildrenAllowNoNode(const std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & node_path)
{
    Strings res;
    int code = zookeeper->tryGetChildren(node_path, res);

    if (code && code != Coordination::ZNONODE)
        throw Coordination::Exception(code, node_path);

    return res;
}
/// From the current children of <node_path>/finished, returns the hosts that
/// finished since the previous call and marks them as finished.
/// Hosts not present in the task's host list are logged once and then ignored
/// (they may appear if the cluster definition differs between servers).
Strings getNewAndUpdate(const Strings & current_list_of_finished_hosts)
{
    Strings diff;
    for (const String & host : current_list_of_finished_hosts)
    {
        if (!waiting_hosts.count(host))
        {
            if (!ignoring_hosts.count(host))
            {
                ignoring_hosts.emplace(host);
                LOG_INFO(log, "Unexpected host " << host << " appeared " << " in task " << node_path);
            }
            continue;
        }

        if (!finished_hosts.count(host))
        {
            diff.emplace_back(host);
            finished_hosts.emplace(host);
        }
    }

    return diff;
}
private:
String node_path;
const Context & context;
2017-07-27 13:11:16 +00:00
Stopwatch watch;
Logger * log;
Block sample;
NameSet waiting_hosts; /// hosts from task host list
NameSet finished_hosts; /// finished hosts from host list
NameSet ignoring_hosts; /// appeared hosts that are not in hosts list
Strings current_active_hosts; /// Hosts that were in active state at the last check
size_t num_hosts_finished = 0;
/// Save the first detected error and throw it at the end of excecution
std::unique_ptr<Exception> first_exception;
Int64 timeout_seconds = 120;
};
/// Enqueues an ON CLUSTER DDL query into the distributed DDL queue in ZooKeeper
/// and returns a stream that reports per-host execution status.
/// @param query_ptr_      the parsed query; must implement ASTQueryWithOnCluster
/// @param context         used for settings, cluster lookup, access checks and ZooKeeper
/// @param query_databases databases the query touches (empty string = shard default database)
/// @return BlockIO whose input stream yields host statuses; empty BlockIO when
///         distributed_ddl_task_timeout == 0 (fire-and-forget mode).
/// Throws NOT_IMPLEMENTED for unsupported query/ALTER types and
/// QUERY_IS_PROHIBITED when allow_distributed_ddl is disabled.
BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context, const NameSet & query_databases)
{
    /// Remove FORMAT <fmt> and INTO OUTFILE <file> if exists
    ASTPtr query_ptr = query_ptr_->clone();
    ASTQueryWithOutput::resetOutputASTIfExist(*query_ptr);

    auto query = dynamic_cast<ASTQueryWithOnCluster *>(query_ptr.get());
    if (!query)
    {
        throw Exception("Distributed execution is not supported for such DDL queries", ErrorCodes::NOT_IMPLEMENTED);
    }

    if (!context.getSettingsRef().allow_distributed_ddl)
        throw Exception("Distributed DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED);

    /// Only a subset of ALTER commands can be executed through the DDL queue.
    if (auto query_alter = dynamic_cast<const ASTAlterQuery *>(query_ptr.get()))
    {
        for (const auto & command : query_alter->command_list->commands)
        {
            if (!isSupportedAlterType(command->type))
                throw Exception("Unsupported type of ALTER query", ErrorCodes::NOT_IMPLEMENTED);
        }
    }

    query->cluster = context.getMacros()->expand(query->cluster);
    ClusterPtr cluster = context.getCluster(query->cluster);
    DDLWorker & ddl_worker = context.getDDLWorker();

    DDLLogEntry entry;
    entry.query = queryToString(query_ptr);
    entry.initiator = ddl_worker.getCommonHostID();

    /// Check database access rights, assume that all servers have the same users config
    NameSet databases_to_check_access_rights;
    Cluster::AddressesWithFailover shards = cluster->getShardsAddresses();
    for (const auto & shard : shards)
    {
        for (const auto & addr : shard)
        {
            entry.hosts.emplace_back(addr);

            /// Expand empty database name to shards' default database name
            for (const String & database : query_databases)
                databases_to_check_access_rights.emplace(database.empty() ? addr.default_database : database);
        }
    }

    for (const String & database : databases_to_check_access_rights)
        context.checkDatabaseAccessRights(database.empty() ? context.getCurrentDatabase() : database);

    String node_path = ddl_worker.enqueueQuery(entry);

    BlockIO io;
    if (context.getSettingsRef().distributed_ddl_task_timeout == 0)
        return io;

    auto stream = std::make_shared<DDLQueryStatusInputSream>(node_path, entry, context);
    io.in = std::move(stream);
    return io;
}
}