#include <Interpreters/DDLWorker.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTQueryWithOnCluster.h>
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <Storages/IStorage.h>
#include <DataStreams/IProfilingBlockInputStream.h>
#include <Interpreters/executeQuery.h>
#include <Interpreters/Cluster.h>
#include <Common/DNSResolver.h>
#include <Common/Macros.h>
#include <Common/getFQDNOrHostName.h>
#include <Common/setThreadName.h>
#include <Common/Stopwatch.h>
#include <Common/randomSeed.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnArray.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/Lock.h>
#include <Common/isLocalAddress.h>
#include <Poco/Timestamp.h>
#include <random>
#include <pcg_random.hpp>
#include <Poco/Net/NetException.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int UNKNOWN_ELEMENT_IN_CONFIG;
    extern const int INVALID_CONFIG_PARAMETER;
    extern const int UNKNOWN_FORMAT_VERSION;
    extern const int INCONSISTENT_TABLE_ACCROSS_SHARDS;
    extern const int INCONSISTENT_CLUSTER_DEFINITION;
    extern const int TIMEOUT_EXCEEDED;
    extern const int UNKNOWN_TYPE_OF_QUERY;
    extern const int UNFINISHED;
    extern const int UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK;
    extern const int QUERY_IS_PROHIBITED;
}


namespace
{
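
/// A host (host name and TCP port) from a DDL task's host list, with helpers for
/// (de)serialization and for checking whether the entry refers to the local server.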
struct HostID
{
    String host_name;
    UInt16 port;

    HostID() = default;

    explicit HostID(const Cluster::Address & address)
        : host_name(address.host_name), port(address.port) {}

    static HostID fromString(const String & host_port_str)
    {
        HostID res;
        Cluster::Address::fromString(host_port_str, res.host_name, res.port);
        return res;
    }

    String toString() const
    {
        return Cluster::Address::toString(host_name, port);
    }

    String readableString() const
    {
        return host_name + ":" + DB::toString(port);
    }

    bool isLocalAddress(UInt16 clickhouse_port) const
    {
        try
        {
            return DB::isLocalAddress(DNSResolver::instance().resolveAddress(host_name, port), clickhouse_port);
        }
        catch (const Poco::Net::NetException &)
        {
            /// Avoid "Host not found" exceptions
            return false;
        }
    }

    static String applyToString(const HostID & host_id)
    {
        return host_id.toString();
    }
};

}
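

/// The payload stored in each ZooKeeper queue node (queue_dir + "/query-XXXXXXXXXX").
/// Serialized as a small line-oriented text document; an illustrative example (the exact
/// escaping of the query and the rendering of the hosts list are defined by the IO
/// helpers used below, so treat this as a sketch):
///   version: 1
///   query: CREATE TABLE foo ON CLUSTER test_cluster (...)
///   hosts: ['host1:9000','host2:9000']
///   initiator: host1:9000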
struct DDLLogEntry
{
    String query;
    std::vector<HostID> hosts;
    String initiator; // optional

    static constexpr int CURRENT_VERSION = 1;

    String toString()
    {
        WriteBufferFromOwnString wb;

        Strings host_id_strings(hosts.size());
        std::transform(hosts.begin(), hosts.end(), host_id_strings.begin(), HostID::applyToString);

        auto version = CURRENT_VERSION;
        wb << "version: " << version << "\n";
        wb << "query: " << escape << query << "\n";
        wb << "hosts: " << host_id_strings << "\n";
        wb << "initiator: " << initiator << "\n";

        return wb.str();
    }

    void parse(const String & data)
    {
        ReadBufferFromString rb(data);

        int version;
        rb >> "version: " >> version >> "\n";

        if (version != CURRENT_VERSION)
            throw Exception("Unknown DDLLogEntry format version: " + DB::toString(version), ErrorCodes::UNKNOWN_FORMAT_VERSION);

        Strings host_id_strings;
        rb >> "query: " >> escape >> query >> "\n";
        rb >> "hosts: " >> host_id_strings >> "\n";

        if (!rb.eof())
            rb >> "initiator: " >> initiator >> "\n";
        else
            initiator.clear();

        assertEOF(rb);

        hosts.resize(host_id_strings.size());
        std::transform(host_id_strings.begin(), host_id_strings.end(), hosts.begin(), HostID::fromString);
    }
};
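

/// In-memory state of a single queue entry while this worker processes it.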
struct DDLTask
{
    /// Stages of the task lifetime correspond to the ordering of these data fields:

    /// Stage 1: parse the entry
    String entry_name;
    String entry_path;
    DDLLogEntry entry;

    /// Stage 2: resolve host_id and check that the task is addressed to this host
    HostID host_id;
    String host_id_str;

    /// Stage 3.1: parse the query
    ASTPtr query;
    ASTQueryWithOnCluster * query_on_cluster = nullptr;

    /// Stage 3.2: check the cluster and find this host in the cluster
    String cluster_name;
    ClusterPtr cluster;
    Cluster::Address address_in_cluster;
    size_t host_shard_num;
    size_t host_replica_num;

    /// Stage 3.3: execute the query
    ExecutionStatus execution_status;
    bool was_executed = false;

    /// Stage 4: commit results to ZooKeeper
};
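

/// Wraps an existing ZooKeeper session into a zkutil::Lock over lock_prefix/lock_name.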
static std::unique_ptr<zkutil::Lock> createSimpleZooKeeperLock(
    std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message)
{
    auto zookeeper_holder = std::make_shared<zkutil::ZooKeeperHolder>();
    zookeeper_holder->initFromInstance(zookeeper);
    return std::make_unique<zkutil::Lock>(std::move(zookeeper_holder), lock_prefix, lock_name, lock_message);
}
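

/// Only these ALTER command types are allowed through ON CLUSTER; anything else is
/// rejected both on the initiator (executeDDLQueryOnCluster) and on the workers
/// (processTaskAlter).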
static bool isSupportedAlterType(int type)
{
    static const std::unordered_set<int> supported_alter_types{
        ASTAlterCommand::ADD_COLUMN,
        ASTAlterCommand::DROP_COLUMN,
        ASTAlterCommand::MODIFY_COLUMN,
        ASTAlterCommand::MODIFY_PRIMARY_KEY,
        ASTAlterCommand::DROP_PARTITION,
        ASTAlterCommand::DELETE,
        ASTAlterCommand::UPDATE,
    };

    return supported_alter_types.count(type) != 0;
}
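

/// zk_root_dir is the queue root in ZooKeeper; the optional config section (pointed to
/// by `prefix`, commonly "distributed_ddl") tunes the worker. An illustrative fragment,
/// assuming the usual section name — key names match the reads below, values are
/// examples rather than defaults:
///   <distributed_ddl>
///       <path>/clickhouse/task_queue/ddl</path>
///       <task_max_lifetime>604800</task_max_lifetime>
///       <cleanup_delay_period>60</cleanup_delay_period>
///       <max_tasks_in_queue>1000</max_tasks_in_queue>
///       <profile>default</profile>
///   </distributed_ddl>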
DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix)
    : context(context_), log(&Logger::get("DDLWorker"))
{
    queue_dir = zk_root_dir;
    if (queue_dir.back() == '/')
        queue_dir.resize(queue_dir.size() - 1);

    if (config)
    {
        task_max_lifetime = config->getUInt64(prefix + ".task_max_lifetime", static_cast<UInt64>(task_max_lifetime));
        cleanup_delay_period = config->getUInt64(prefix + ".cleanup_delay_period", static_cast<UInt64>(cleanup_delay_period));
        max_tasks_in_queue = std::max(static_cast<UInt64>(1), config->getUInt64(prefix + ".max_tasks_in_queue", max_tasks_in_queue));

        if (config->has(prefix + ".profile"))
            context.setSetting("profile", config->getString(prefix + ".profile"));
    }

    if (context.getSettingsRef().readonly)
    {
        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries."
            << " Set an appropriate system_profile or distributed_ddl.profile to fix this.");
    }

    host_fqdn = getFQDNOrHostName();
    host_fqdn_id = Cluster::Address::toString(host_fqdn, context.getTCPPort());

    event_queue_updated = std::make_shared<Poco::Event>();

    thread = std::thread(&DDLWorker::run, this);
}


DDLWorker::~DDLWorker()
{
    stop_flag = true;
    event_queue_updated->set();
    thread.join();
}
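

/// Reads the queue entry `entry_name` from ZooKeeper and, if this host is in its host
/// list, sets up `current_task` and returns true. Otherwise returns false and fills
/// `out_reason`.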
bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason)
{
    String node_data;
    String entry_path = queue_dir + "/" + entry_name;

    if (!zookeeper->tryGet(entry_path, node_data))
    {
        /// It is OK if the node was deleted just now: it means that the current host was not in the node's host list.
        out_reason = "The task was deleted";
        return false;
    }

    auto task = std::make_unique<DDLTask>();
    task->entry_name = entry_name;
    task->entry_path = entry_path;

    try
    {
        task->entry.parse(node_data);
    }
    catch (...)
    {
        /// What should we do if we even cannot parse the host name and therefore cannot properly submit an execution status?
        /// We can try to create a fail node using the FQDN: if it equals the host name in the cluster config, the attempt will be successful.
        /// Otherwise, that node will be ignored by DDLQueryStatusInputSream.

        tryLogCurrentException(log, "Cannot parse DDL task " + entry_name + ", will try to send error status");

        String status = ExecutionStatus::fromCurrentException().serializeText();
        try
        {
            createStatusDirs(entry_path);
            zookeeper->tryCreate(entry_path + "/finished/" + host_fqdn_id, status, zkutil::CreateMode::Persistent);
        }
        catch (...)
        {
            tryLogCurrentException(log, "Can't report that the task has an invalid format");
        }

        out_reason = "Incorrect task format";
        return false;
    }

    bool host_in_hostlist = false;
    for (const HostID & host : task->entry.hosts)
    {
        if (!host.isLocalAddress(context.getTCPPort()))
            continue;

        if (host_in_hostlist)
        {
            /// This check may be a bit slow
            LOG_WARNING(log, "There are two identical ClickHouse instances in task " << entry_name
                << ": " << task->host_id.readableString() << " and " << host.readableString() << ". Will use the first one only.");
        }
        else
        {
            host_in_hostlist = true;
            task->host_id = host;
            task->host_id_str = host.toString();
        }
    }

    if (host_in_hostlist)
        current_task = std::move(task);
    else
        out_reason = "There is no local address in the host list";

    return host_in_hostlist;
}


static void filterAndSortQueueNodes(Strings & all_nodes)
{
    all_nodes.erase(std::remove_if(all_nodes.begin(), all_nodes.end(), [] (const String & s) { return !startsWith(s, "query-"); }), all_nodes.end());
    std::sort(all_nodes.begin(), all_nodes.end());
}
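

/// One pass over the queue: pick up every entry after last_processed_task_name
/// (or all entries at server startup), skip those not addressed to this host or
/// already finished, and execute the rest.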
void DDLWorker::processTasks()
{
    LOG_DEBUG(log, "Processing tasks");

    Strings queue_nodes = zookeeper->getChildren(queue_dir, nullptr, event_queue_updated);
    filterAndSortQueueNodes(queue_nodes);
    if (queue_nodes.empty())
        return;

    bool server_startup = last_processed_task_name.empty();

    auto begin_node = server_startup
        ? queue_nodes.begin()
        : std::upper_bound(queue_nodes.begin(), queue_nodes.end(), last_processed_task_name);

    for (auto it = begin_node; it != queue_nodes.end(); ++it)
    {
        String entry_name = *it;

        if (current_task)
        {
            if (current_task->entry_name == entry_name)
            {
                LOG_INFO(log, "Trying to process task " << entry_name << " again");
            }
            else
            {
                LOG_INFO(log, "Task " << current_task->entry_name << " was deleted from ZooKeeper before current host committed it");
                current_task = nullptr;
            }
        }

        if (!current_task)
        {
            String reason;
            if (!initAndCheckTask(entry_name, reason))
            {
                LOG_DEBUG(log, "Will not execute task " << entry_name << ": " << reason);
                last_processed_task_name = entry_name;
                continue;
            }
        }

        DDLTask & task = *current_task;

        bool already_processed = zookeeper->exists(task.entry_path + "/finished/" + task.host_id_str);
        if (!server_startup && !task.was_executed && already_processed)
        {
            throw Exception(
                "Server expects that DDL task " + task.entry_name + " should be processed, but it was already processed according to ZK",
                ErrorCodes::LOGICAL_ERROR);
        }

        if (!already_processed)
        {
            try
            {
                processTask(task);
            }
            catch (...)
            {
                LOG_WARNING(log, "An error occurred while processing task " << task.entry_name << " (" << task.entry.query << ") : "
                    << getCurrentExceptionMessage(true));
                throw;
            }
        }
        else
        {
            LOG_DEBUG(log, "Task " << task.entry_name << " (" << task.entry.query << ") has already been processed");
        }

        last_processed_task_name = task.entry_name;
        current_task.reset();

        if (stop_flag)
            break;
    }
}


/// Parses the query and resolves the cluster and this host's position in the cluster
void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
{
    {
        const char * begin = task.entry.query.data();
        const char * end = begin + task.entry.query.size();

        ParserQuery parser_query(end);
        String description;
        task.query = parseQuery(parser_query, begin, end, description, 0);
    }

    if (!task.query || !(task.query_on_cluster = dynamic_cast<ASTQueryWithOnCluster *>(task.query.get())))
        throw Exception("Received unknown DDL query", ErrorCodes::UNKNOWN_TYPE_OF_QUERY);

    task.cluster_name = task.query_on_cluster->cluster;
    task.cluster = context.tryGetCluster(task.cluster_name);
    if (!task.cluster)
    {
        throw Exception("DDL task " + task.entry_name + " contains current host " + task.host_id.readableString()
            + " in cluster " + task.cluster_name + ", but there is no such cluster here.", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }

    /// Try to find the host from the task's host list in the cluster.
    /// First, try to find an exact match (host name and port should be literally equal).
    /// If that attempt fails, try to find the host by resolving the host name of each instance.
    const auto & shards = task.cluster->getShardsAddresses();

    bool found_exact_match = false;
    for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)
    {
        for (size_t replica_num = 0; replica_num < shards[shard_num].size(); ++replica_num)
        {
            const Cluster::Address & address = shards[shard_num][replica_num];

            if (address.host_name == task.host_id.host_name && address.port == task.host_id.port)
            {
                if (found_exact_match)
                {
                    throw Exception("There are two exactly the same ClickHouse instances " + address.readableString()
                        + " in cluster " + task.cluster_name, ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
                }

                found_exact_match = true;
                task.host_shard_num = shard_num;
                task.host_replica_num = replica_num;
                task.address_in_cluster = address;
            }
        }
    }

    if (found_exact_match)
        return;

    LOG_WARNING(log, "An exact match of host " << task.host_id.readableString() << " from task " << task.entry_name
        << " was not found in the cluster " << task.cluster_name << " definition. Will try to find it using host name resolution.");

    bool found_via_resolving = false;
    for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)
    {
        for (size_t replica_num = 0; replica_num < shards[shard_num].size(); ++replica_num)
        {
            const Cluster::Address & address = shards[shard_num][replica_num];

            if (isLocalAddress(address.getResolvedAddress(), context.getTCPPort()))
            {
                if (found_via_resolving)
                {
                    throw Exception("There are two identical ClickHouse instances in cluster " + task.cluster_name + " : "
                        + task.address_in_cluster.readableString() + " and " + address.readableString(), ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
                }
                else
                {
                    found_via_resolving = true;
                    task.host_shard_num = shard_num;
                    task.host_replica_num = replica_num;
                    task.address_in_cluster = address;
                }
            }
        }
    }

    if (!found_via_resolving)
    {
        throw Exception("Host " + task.host_id.readableString() + " was not found in the definition of cluster " + task.cluster_name,
            ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }
    else
    {
        LOG_INFO(log, "Resolved host " << task.host_id.readableString() << " from task " << task.entry_name
            << " as host " << task.address_in_cluster.readableString() << " in definition of cluster " << task.cluster_name);
    }
}
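

/// Executes the rewritten query locally and records the outcome in `status`;
/// returns false (instead of rethrowing) if the query failed.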
bool DDLWorker::tryExecuteQuery(const String & query, const DDLTask & task, ExecutionStatus & status)
{
    /// Add a special comment at the start of the query to easily identify DDL-produced queries in query_log
    String query_prefix = "/* ddl_entry=" + task.entry_name + " */ ";
    String query_to_execute = query_prefix + query;

    ReadBufferFromString istr(query_to_execute);
    String dummy_string;
    WriteBufferFromString ostr(dummy_string);

    try
    {
        current_context = std::make_unique<Context>(context);
        current_context->setCurrentQueryId(""); // generate a random query_id
        executeQuery(istr, ostr, false, *current_context, nullptr);
    }
    catch (...)
    {
        status = ExecutionStatus::fromCurrentException();
        tryLogCurrentException(log, "Query " + query + " wasn't finished successfully");

        return false;
    }

    status = ExecutionStatus(0);
    LOG_DEBUG(log, "Executed query: " << query);

    return true;
}
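

/// Keeps all DDLWorker activity under a single thread group for resource accounting.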
void DDLWorker::attachToThreadGroup()
{
    if (thread_group)
    {
        /// Put all threads into one thread group
        CurrentThread::attachToIfDetached(thread_group);
    }
    else
    {
        CurrentThread::initializeQuery();
        thread_group = CurrentThread::getGroup();
    }
}
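

/// Executes a single task: marks this host "active" in ZooKeeper, runs the query
/// (once per shard for ALTERs on replicated tables), then atomically removes the
/// "active" flag and writes the execution status under "finished".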
void DDLWorker::processTask(DDLTask & task)
{
    LOG_DEBUG(log, "Processing task " << task.entry_name << " (" << task.entry.query << ")");

    String dummy;
    String active_node_path = task.entry_path + "/active/" + task.host_id_str;
    String finished_node_path = task.entry_path + "/finished/" + task.host_id_str;

    auto code = zookeeper->tryCreate(active_node_path, "", zkutil::CreateMode::Ephemeral, dummy);
    if (code == Coordination::ZOK || code == Coordination::ZNODEEXISTS)
    {
        // Ok
    }
    else if (code == Coordination::ZNONODE)
    {
        /// There is no parent: create the status dirs and retry, reporting the actual error of the retry
        createStatusDirs(task.entry_path);
        code = zookeeper->tryCreate(active_node_path, "", zkutil::CreateMode::Ephemeral, dummy);
        if (code != Coordination::ZOK)
            throw Coordination::Exception(code, active_node_path);
    }
    else
        throw Coordination::Exception(code, active_node_path);

    if (!task.was_executed)
    {
        try
        {
            parseQueryAndResolveHost(task);

            ASTPtr rewritten_ast = task.query_on_cluster->getRewrittenASTWithoutOnCluster(task.address_in_cluster.default_database);
            String rewritten_query = queryToString(rewritten_ast);
            LOG_DEBUG(log, "Executing query: " << rewritten_query);

            if (auto ast_alter = dynamic_cast<const ASTAlterQuery *>(rewritten_ast.get()))
            {
                processTaskAlter(task, ast_alter, rewritten_query, task.entry_path);
            }
            else
            {
                tryExecuteQuery(rewritten_query, task, task.execution_status);
            }
        }
        catch (const Coordination::Exception &)
        {
            throw;
        }
        catch (...)
        {
            task.execution_status = ExecutionStatus::fromCurrentException("An error occurred before execution");
        }

        /// We need to distinguish ZK errors that occurred before and after query execution
        task.was_executed = true;
    }

    /// FIXME: if the server fails right here, the task will be executed twice. We need a WAL here.

    /// Delete the active flag and create the finish flag atomically
    Coordination::Requests ops;
    ops.emplace_back(zkutil::makeRemoveRequest(active_node_path, -1));
    ops.emplace_back(zkutil::makeCreateRequest(finished_node_path, task.execution_status.serializeText(), zkutil::CreateMode::Persistent));
    zookeeper->multi(ops);
}
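

/// For an ALTER on a replicated table, only one replica of each shard should run the
/// query: replicas race for a ZooKeeper lock, and the winner publishes an "executed"
/// marker under the shard node so the others can skip the work.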
void DDLWorker::processTaskAlter(
    DDLTask & task,
    const ASTAlterQuery * ast_alter,
    const String & rewritten_query,
    const String & node_path)
{
    String database = ast_alter->database.empty() ? context.getCurrentDatabase() : ast_alter->database;
    StoragePtr storage = context.getTable(database, ast_alter->table);

    bool execute_once_on_replica = storage->supportsReplication();
    bool execute_on_leader_replica = false;

    for (const auto & command : ast_alter->command_list->commands)
    {
        if (!isSupportedAlterType(command->type))
            throw Exception("Unsupported type of ALTER query", ErrorCodes::NOT_IMPLEMENTED);

        if (execute_once_on_replica)
            execute_on_leader_replica |= command->type == ASTAlterCommand::DROP_PARTITION;
    }

    const auto & shard_info = task.cluster->getShardsInfo().at(task.host_shard_num);
    bool config_is_replicated_shard = shard_info.hasInternalReplication();

    if (execute_once_on_replica && !config_is_replicated_shard)
    {
        throw Exception("Table " + ast_alter->table + " is replicated, but shard #" + toString(task.host_shard_num + 1) +
            " isn't replicated according to its cluster definition."
            " Possibly <internal_replication>true</internal_replication> is forgotten in the cluster config.",
            ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }
    if (!execute_once_on_replica && config_is_replicated_shard)
    {
        throw Exception("Table " + ast_alter->table + " isn't replicated, but shard #" + toString(task.host_shard_num + 1) +
            " is replicated according to its cluster definition", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION);
    }

    /// Generate a unique name for the shard node; it will be used so that the query is executed by only a single host.
    /// The shard node name has the format 'replica_name1,replica_name2,...,replica_nameN',
    /// where replica_name is 'replica_config_host_name:replica_port'.
    auto get_shard_name = [] (const Cluster::Addresses & shard_addresses)
    {
        Strings replica_names;
        for (const Cluster::Address & address : shard_addresses)
            replica_names.emplace_back(address.readableString());
        std::sort(replica_names.begin(), replica_names.end());

        String res;
        for (auto it = replica_names.begin(); it != replica_names.end(); ++it)
            res += *it + (std::next(it) != replica_names.end() ? "," : "");

        return res;
    };

    if (execute_once_on_replica)
    {
        String shard_node_name = get_shard_name(task.cluster->getShardsAddresses().at(task.host_shard_num));
        String shard_path = node_path + "/shards/" + shard_node_name;
        String is_executed_path = shard_path + "/executed";
        zookeeper->createAncestors(shard_path + "/");

        bool is_executed_by_any_replica = false;
        {
            auto lock = createSimpleZooKeeperLock(zookeeper, shard_path, "lock", task.host_id_str);
            pcg64 rng(randomSeed());

            static const size_t max_tries = 20;
            for (size_t num_tries = 0; num_tries < max_tries; ++num_tries)
            {
                if (zookeeper->exists(is_executed_path))
                {
                    is_executed_by_any_replica = true;
                    break;
                }

                if (lock->tryLock())
                {
                    tryExecuteQuery(rewritten_query, task, task.execution_status);

                    if (execute_on_leader_replica && task.execution_status.code == ErrorCodes::NOT_IMPLEMENTED)
                    {
                        /// TODO: it is OK to receive the exception "host is not leader"
                    }

                    zookeeper->create(is_executed_path, task.host_id_str, zkutil::CreateMode::Persistent);
                    lock->unlock();
                    is_executed_by_any_replica = true;
                    break;
                }

                std::this_thread::sleep_for(std::chrono::milliseconds(std::uniform_int_distribution<long>(0, 1000)(rng)));
            }
        }

        if (!is_executed_by_any_replica)
        {
            task.execution_status = ExecutionStatus(ErrorCodes::NOT_IMPLEMENTED,
                "Cannot enqueue replicated DDL query for a replicated shard");
        }
    }
    else
    {
        tryExecuteQuery(rewritten_query, task, task.execution_status);
    }
}


/// Deletes finished queue entries on two criteria: the entry is older than
/// task_max_lifetime, or the queue holds more than max_tasks_in_queue entries
/// (then the oldest surplus entries go). Runs at most once per cleanup_delay_period.
void DDLWorker::cleanupQueue()
{
    /// Both ZK and Poco use the Unix epoch
    Int64 current_time_seconds = Poco::Timestamp().epochTime();
    constexpr UInt64 zookeeper_time_resolution = 1000;

    /// Too early to check
    if (last_cleanup_time_seconds && current_time_seconds < last_cleanup_time_seconds + cleanup_delay_period)
        return;

    last_cleanup_time_seconds = current_time_seconds;

    LOG_DEBUG(log, "Cleaning queue");

    Strings queue_nodes = zookeeper->getChildren(queue_dir);
    filterAndSortQueueNodes(queue_nodes);

    size_t num_outdated_nodes = (queue_nodes.size() > max_tasks_in_queue) ? queue_nodes.size() - max_tasks_in_queue : 0;
    auto first_non_outdated_node = queue_nodes.begin() + num_outdated_nodes;

    for (auto it = queue_nodes.cbegin(); it < queue_nodes.cend(); ++it)
    {
        String node_name = *it;
        String node_path = queue_dir + "/" + node_name;
        String lock_path = node_path + "/lock";

        Coordination::Stat stat;
        String dummy;

        try
        {
            /// Already deleted
            if (!zookeeper->exists(node_path, &stat))
                continue;

            /// Delete the node if its lifetime has expired (according to the task_max_lifetime parameter)
            Int64 zookeeper_time_seconds = stat.ctime / zookeeper_time_resolution;
            bool node_lifetime_is_expired = zookeeper_time_seconds + task_max_lifetime < current_time_seconds;

            /// If there are too many nodes in the task queue (> max_tasks_in_queue), delete the oldest ones
            bool node_is_outside_max_window = it < first_non_outdated_node;

            if (!node_lifetime_is_expired && !node_is_outside_max_window)
                continue;

            /// Skip if there are active nodes (it is a weak guard)
            if (zookeeper->exists(node_path + "/active", &stat) && stat.numChildren > 0)
            {
                LOG_INFO(log, "Task " << node_name << " should be deleted, but there are active workers. Skipping it.");
                continue;
            }

            /// Usage of the lock is not necessary now (tryRemoveRecursive correctly removes the node in the presence of concurrent cleaners),
            /// but the lock will be required to implement the system.distributed_ddl_queue table
            auto lock = createSimpleZooKeeperLock(zookeeper, node_path, "lock", host_fqdn_id);
            if (!lock->tryLock())
            {
                LOG_INFO(log, "Task " << node_name << " should be deleted, but it is locked. Skipping it.");
                continue;
            }

            if (node_lifetime_is_expired)
                LOG_INFO(log, "Lifetime of task " << node_name << " has expired, deleting it");
            else if (node_is_outside_max_window)
                LOG_INFO(log, "Task " << node_name << " is outdated, deleting it");

            /// Deleting
            {
                Strings children = zookeeper->getChildren(node_path);
                for (const String & child : children)
                {
                    if (child != "lock")
                        zookeeper->tryRemoveRecursive(node_path + "/" + child);
                }

                /// Remove the lock node and its parent atomically
                Coordination::Requests ops;
                ops.emplace_back(zkutil::makeRemoveRequest(lock_path, -1));
                ops.emplace_back(zkutil::makeRemoveRequest(node_path, -1));
                zookeeper->multi(ops);

                lock->unlockAssumeLockNodeRemovedManually();
            }
        }
        catch (...)
        {
            LOG_INFO(log, "An error occurred while checking and cleaning task " + node_name + " from queue: " + getCurrentExceptionMessage(false));
        }
    }
}


/// Try to create the "status" dirs for a node if they don't exist
void DDLWorker::createStatusDirs(const std::string & node_path)
{
    Coordination::Requests ops;
    {
        Coordination::CreateRequest request;
        request.path = node_path + "/active";
        ops.emplace_back(std::make_shared<Coordination::CreateRequest>(std::move(request)));
    }
    {
        Coordination::CreateRequest request;
        request.path = node_path + "/finished";
        ops.emplace_back(std::make_shared<Coordination::CreateRequest>(std::move(request)));
    }
    Coordination::Responses responses;
    int code = zookeeper->tryMulti(ops, responses);
    if (code && code != Coordination::ZNODEEXISTS)
        throw Coordination::Exception(code);
}
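

/// Called on the initiator: appends the serialized entry as a persistent sequential
/// node under the queue root and returns the created node's path.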
String DDLWorker::enqueueQuery(DDLLogEntry & entry)
{
    if (entry.hosts.empty())
        throw Exception("Empty host list in a distributed DDL task", ErrorCodes::LOGICAL_ERROR);

    String query_path_prefix = queue_dir + "/query-";
    zookeeper->createAncestors(query_path_prefix);

    String node_path = zookeeper->create(query_path_prefix, entry.toString(), zkutil::CreateMode::PersistentSequential);

    /// Optional step
    try
    {
        createStatusDirs(node_path);
    }
    catch (...)
    {
        LOG_INFO(log, "An error occurred while creating auxiliary ZooKeeper directories in " << node_path << ". They will be created later"
            << ". Error: " << getCurrentExceptionMessage(true));
    }

    return node_path;
}
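

/// Worker thread body: initialize the ZooKeeper session and queue root, then loop on
/// "process tasks, wait for a queue watch, clean up". The session is recovered on
/// hardware (connection-level) ZooKeeper errors; any unexpected error terminates the thread.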
void DDLWorker::run()
{
    setThreadName("DDLWorker");
    LOG_DEBUG(log, "Started DDLWorker thread");

    bool initialized = false;
    do
    {
        try
        {
            try
            {
                zookeeper = context.getZooKeeper();
                zookeeper->createAncestors(queue_dir + "/");
                initialized = true;
            }
            catch (const Coordination::Exception & e)
            {
                if (!Coordination::isHardwareError(e.code))
                    throw;
                tryLogCurrentException(__PRETTY_FUNCTION__);
            }
        }
        catch (...)
        {
            tryLogCurrentException(log, "Terminating. Cannot initialize DDL queue.");
            return;
        }
    } while (!initialized && !stop_flag);

    while (!stop_flag)
    {
        try
        {
            attachToThreadGroup();

            processTasks();

            LOG_DEBUG(log, "Waiting for a watch");
            event_queue_updated->wait();

            if (stop_flag)
                break;

            /// TODO: it might delay the execution, move it to a separate thread.
            cleanupQueue();
        }
        catch (const Coordination::Exception & e)
        {
            if (Coordination::isHardwareError(e.code))
            {
                LOG_DEBUG(log, "Recovering ZooKeeper session after: " << getCurrentExceptionMessage(false));

                while (!stop_flag)
                {
                    try
                    {
                        zookeeper = context.getZooKeeper();
                        break;
                    }
                    catch (...)
                    {
                        tryLogCurrentException(__PRETTY_FUNCTION__);

                        using namespace std::chrono_literals;
                        std::this_thread::sleep_for(5s);
                    }
                }
            }
            else
            {
                LOG_ERROR(log, "Unexpected ZooKeeper error: " << getCurrentExceptionMessage(true) << ". Terminating.");
                return;
            }

            /// Unlock the processing just in case
            event_queue_updated->set();
        }
        catch (...)
        {
            LOG_ERROR(log, "Unexpected error: " << getCurrentExceptionMessage(true) << ". Terminating.");
            return;
        }
    }
}
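

/// Streams per-host execution results back to the initiator: one row per finished host
/// with columns (host, port, status, error, num_hosts_remaining, num_hosts_active),
/// polling the task's "finished" children until all hosts report or the timeout expires.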
class DDLQueryStatusInputSream : public IProfilingBlockInputStream
{
public:

    DDLQueryStatusInputSream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context)
        : node_path(zk_node_path), context(context), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputSream"))
    {
        sample = Block{
            {std::make_shared<DataTypeString>(), "host"},
            {std::make_shared<DataTypeUInt16>(), "port"},
            {std::make_shared<DataTypeInt64>(), "status"},
            {std::make_shared<DataTypeString>(), "error"},
            {std::make_shared<DataTypeUInt64>(), "num_hosts_remaining"},
            {std::make_shared<DataTypeUInt64>(), "num_hosts_active"},
        };

        for (const HostID & host : entry.hosts)
            waiting_hosts.emplace(host.toString());

        addTotalRowsApprox(entry.hosts.size());

        timeout_seconds = context.getSettingsRef().distributed_ddl_task_timeout;
    }

    String getName() const override
    {
        return "DDLQueryStatusInputSream";
    }

    Block getHeader() const override { return sample; }

    Block readImpl() override
    {
        Block res;
        if (num_hosts_finished >= waiting_hosts.size())
        {
            if (first_exception)
                throw Exception(*first_exception);

            return res;
        }

        auto zookeeper = context.getZooKeeper();
        size_t try_number = 0;

        while (res.rows() == 0)
        {
            if (isCancelled())
            {
                if (first_exception)
                    throw Exception(*first_exception);

                return res;
            }

            if (timeout_seconds >= 0 && watch.elapsedSeconds() > timeout_seconds)
            {
                size_t num_unfinished_hosts = waiting_hosts.size() - num_hosts_finished;
                size_t num_active_hosts = current_active_hosts.size();

                std::stringstream msg;
                msg << "Watching task " << node_path << " is executing longer than distributed_ddl_task_timeout"
                    << " (=" << timeout_seconds << ") seconds."
                    << " There are " << num_unfinished_hosts << " unfinished hosts"
                    << " (" << num_active_hosts << " of them are currently active)"
                    << ", they are going to execute the query in background";

                throw Exception(msg.str(), ErrorCodes::TIMEOUT_EXCEEDED);
            }

            if (num_hosts_finished != 0 || try_number != 0)
            {
                auto current_sleep_for = std::chrono::milliseconds(std::min(static_cast<size_t>(1000), 50 * (try_number + 1)));
                std::this_thread::sleep_for(current_sleep_for);
            }

            /// TODO: add a shared lock
            if (!zookeeper->exists(node_path))
            {
                throw Exception("Cannot provide query execution status. The query's node " + node_path
                    + " has been deleted by the cleaner since it was finished (or its lifetime has expired)",
                    ErrorCodes::UNFINISHED);
            }

            Strings new_hosts = getNewAndUpdate(getChildrenAllowNoNode(zookeeper, node_path + "/finished"));
            ++try_number;
            if (new_hosts.empty())
                continue;

            current_active_hosts = getChildrenAllowNoNode(zookeeper, node_path + "/active");

            MutableColumns columns = sample.cloneEmptyColumns();
            for (const String & host_id : new_hosts)
            {
                ExecutionStatus status(-1, "Cannot obtain error message");
                {
                    String status_data;
                    if (zookeeper->tryGet(node_path + "/finished/" + host_id, status_data))
                        status.tryDeserializeText(status_data);
                }

                String host;
                UInt16 port;
                Cluster::Address::fromString(host_id, host, port);

                if (status.code != 0 && first_exception == nullptr)
                    first_exception = std::make_unique<Exception>("There was an error on " + host + ": " + status.message, status.code);

                ++num_hosts_finished;

                columns[0]->insert(host);
                columns[1]->insert(static_cast<UInt64>(port));
                columns[2]->insert(static_cast<Int64>(status.code));
                columns[3]->insert(status.message);
                columns[4]->insert(static_cast<UInt64>(waiting_hosts.size() - num_hosts_finished));
                columns[5]->insert(static_cast<UInt64>(current_active_hosts.size()));
            }
            res = sample.cloneWithColumns(std::move(columns));
        }

        return res;
    }

    Block getSampleBlock() const
    {
        return sample.cloneEmpty();
    }

    ~DDLQueryStatusInputSream() override = default;

private:

    static Strings getChildrenAllowNoNode(const std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & node_path)
    {
        Strings res;
        int code = zookeeper->tryGetChildren(node_path, res);
        if (code && code != Coordination::ZNONODE)
            throw Coordination::Exception(code, node_path);
        return res;
    }

    Strings getNewAndUpdate(const Strings & current_list_of_finished_hosts)
    {
        Strings diff;
        for (const String & host : current_list_of_finished_hosts)
        {
            if (!waiting_hosts.count(host))
            {
                if (!ignoring_hosts.count(host))
                {
                    ignoring_hosts.emplace(host);
                    LOG_INFO(log, "Unexpected host " << host << " appeared in task " << node_path);
                }
                continue;
            }

            if (!finished_hosts.count(host))
            {
                diff.emplace_back(host);
                finished_hosts.emplace(host);
            }
        }

        return diff;
    }

private:
    String node_path;
    const Context & context;
    Stopwatch watch;
    Logger * log;

    Block sample;

    NameSet waiting_hosts;  /// hosts from the task's host list
    NameSet finished_hosts; /// finished hosts from the host list
    NameSet ignoring_hosts; /// hosts that appeared but are not in the host list
    Strings current_active_hosts; /// hosts that were in the active state at the last check
    size_t num_hosts_finished = 0;

    /// Save the first detected error and throw it at the end of execution
    std::unique_ptr<Exception> first_exception;

    Int64 timeout_seconds = 120;
};
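

/// Entry point for ON CLUSTER queries: validates the query and access rights, builds a
/// DDLLogEntry with the addresses of every cluster replica, enqueues it, and (unless
/// distributed_ddl_task_timeout == 0) returns a stream that waits for per-host results.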
BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context, const NameSet & query_databases)
{
    /// Remove FORMAT <fmt> and INTO OUTFILE <file> if they exist
    ASTPtr query_ptr = query_ptr_->clone();
    ASTQueryWithOutput::resetOutputASTIfExist(*query_ptr);

    auto query = dynamic_cast<ASTQueryWithOnCluster *>(query_ptr.get());
    if (!query)
    {
        throw Exception("Distributed execution is not supported for such DDL queries", ErrorCodes::NOT_IMPLEMENTED);
    }

    if (!context.getSettingsRef().allow_distributed_ddl)
        throw Exception("Distributed DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED);

    if (auto query_alter = dynamic_cast<const ASTAlterQuery *>(query_ptr.get()))
    {
        for (const auto & command : query_alter->command_list->commands)
        {
            if (!isSupportedAlterType(command->type))
                throw Exception("Unsupported type of ALTER query", ErrorCodes::NOT_IMPLEMENTED);
        }
    }

    query->cluster = context.getMacros()->expand(query->cluster);
    ClusterPtr cluster = context.getCluster(query->cluster);
    DDLWorker & ddl_worker = context.getDDLWorker();

    DDLLogEntry entry;
    entry.query = queryToString(query_ptr);
    entry.initiator = ddl_worker.getCommonHostID();

    /// Check database access rights, assuming that all servers have the same users config
    NameSet databases_to_check_access_rights;

    Cluster::AddressesWithFailover shards = cluster->getShardsAddresses();

    for (const auto & shard : shards)
    {
        for (const auto & addr : shard)
        {
            entry.hosts.emplace_back(addr);

            /// Expand an empty database name to the shard's default database name
            for (const String & database : query_databases)
                databases_to_check_access_rights.emplace(database.empty() ? addr.default_database : database);
        }
    }

    for (const String & database : databases_to_check_access_rights)
        context.checkDatabaseAccessRights(database.empty() ? context.getCurrentDatabase() : database);

    String node_path = ddl_worker.enqueueQuery(entry);

    BlockIO io;
    if (context.getSettingsRef().distributed_ddl_task_timeout == 0)
        return io;

    auto stream = std::make_shared<DDLQueryStatusInputSream>(node_path, entry, context);
    io.in = std::move(stream);
    return io;
}


}