2017-04-01 09:19:00 +00:00
|
|
|
#include <Interpreters/Cluster.h>
|
2018-04-19 13:56:14 +00:00
|
|
|
#include <Common/DNSResolver.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Common/escapeForFileName.h>
|
|
|
|
#include <Common/isLocalAddress.h>
|
2018-01-15 19:07:47 +00:00
|
|
|
#include <Common/StringUtils/StringUtils.h>
|
2017-12-28 04:28:05 +00:00
|
|
|
#include <Common/parseAddress.h>
|
2021-03-12 21:17:19 +00:00
|
|
|
#include <Common/Config/AbstractConfigurationComparison.h>
|
2021-12-30 15:34:11 +00:00
|
|
|
#include <Common/Config/ConfigHelper.h>
|
2020-12-10 22:05:02 +00:00
|
|
|
#include <Core/Settings.h>
|
2017-04-13 16:12:56 +00:00
|
|
|
#include <IO/WriteHelpers.h>
|
2017-07-26 19:31:32 +00:00
|
|
|
#include <IO/ReadHelpers.h>
|
2013-12-07 16:51:29 +00:00
|
|
|
#include <Poco/Util/AbstractConfiguration.h>
|
|
|
|
#include <Poco/Util/Application.h>
|
2021-10-02 07:13:14 +00:00
|
|
|
#include <base/range.h>
|
2022-01-30 19:49:48 +00:00
|
|
|
#include <base/sort.h>
|
2020-09-12 22:27:44 +00:00
|
|
|
#include <boost/range/algorithm_ext/erase.hpp>
|
2013-12-07 16:51:29 +00:00
|
|
|
|
2022-01-30 19:49:48 +00:00
|
|
|
|
2013-12-07 16:51:29 +00:00
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-01-12 02:21:15 +00:00
|
|
|
namespace ErrorCodes
|
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
|
|
|
|
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
|
|
|
|
extern const int LOGICAL_ERROR;
|
|
|
|
extern const int SHARD_HAS_NO_CONNECTIONS;
|
2022-02-20 18:27:14 +00:00
|
|
|
extern const int NO_ELEMENTS_IN_CONFIG;
|
2017-07-26 19:31:32 +00:00
|
|
|
extern const int SYNTAX_ERROR;
|
2022-06-18 22:01:08 +00:00
|
|
|
extern const int INVALID_SHARD_ID;
|
|
|
|
extern const int NO_SUCH_REPLICA;
|
2016-01-12 02:21:15 +00:00
|
|
|
}
|
|
|
|
|
2015-10-20 14:59:29 +00:00
|
|
|
namespace
|
|
|
|
{
|
|
|
|
|
2016-08-22 20:34:21 +00:00
|
|
|
/// Default shard weight.
|
2020-03-08 23:48:08 +00:00
|
|
|
constexpr UInt32 default_weight = 1;
|
2015-10-20 14:59:29 +00:00
|
|
|
|
2019-07-08 01:43:41 +00:00
|
|
|
/// Decides whether a replica must be treated as the local server itself,
/// given its already-resolved socket address and the port this server listens on.
inline bool isLocalImpl(const Cluster::Address & address, const Poco::Net::SocketAddress & resolved_address, UInt16 clickhouse_port)
{
    /// If there is replica, for which:
    /// - its port is the same that the server is listening;
    /// - its host is resolved to set of addresses, one of which is the same as one of addresses of network interfaces of the server machine*;
    /// then we must go to this shard without any inter-process communication.
    ///
    /// * - this criteria is somewhat approximate.
    ///
    /// Also, replica is considered non-local, if it has default database set
    /// (only reason is to avoid query rewrite).
    if (!address.default_database.empty())
        return false;

    return isLocalAddress(resolved_address, clickhouse_port);
}
|
|
|
|
|
2020-11-07 23:44:35 +00:00
|
|
|
/// Appends `dir_name` to a comma-separated list of Distributed insert paths,
/// inserting the ',' separator only when the list is already non-empty.
void concatInsertPath(std::string & insert_path, const std::string & dir_name)
{
    if (!insert_path.empty())
        insert_path += ',';
    insert_path += dir_name;
}
|
|
|
|
|
2015-10-20 14:59:29 +00:00
|
|
|
}
|
|
|
|
|
2016-08-22 20:34:21 +00:00
|
|
|
/// Implementation of Cluster::Address class
|
2015-10-16 16:10:10 +00:00
|
|
|
|
2019-07-08 01:43:41 +00:00
|
|
|
/// Resolves host_name:port via the process-wide DNSResolver.
/// Returns an empty optional (after logging the exception) when resolution fails:
/// a DNS failure during cluster initialization is tolerated by design.
std::optional<Poco::Net::SocketAddress> Cluster::Address::getResolvedAddress() const
{
    try
    {
        return DNSResolver::instance().resolveAddress(host_name, port);
    }
    catch (...)
    {
        /// Failure in DNS resolution in cluster initialization is Ok.
        tryLogCurrentException("Cluster");
        return {};
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
/// True when this address resolves to the current server listening on `clickhouse_port`.
/// An address whose host cannot be resolved is never considered local.
bool Cluster::Address::isLocal(UInt16 clickhouse_port) const
{
    const auto resolved = getResolvedAddress();
    return resolved.has_value() && isLocalImpl(*this, *resolved, clickhouse_port);
}
|
|
|
|
|
2017-09-07 14:38:35 +00:00
|
|
|
|
2020-07-05 00:35:57 +00:00
|
|
|
/// Constructs a replica address from a <node>/<replica> section of the server config.
/// Reads host/port/credentials and derives `is_local` by comparing against the
/// server's own listening port (tcp_port or tcp_port_secure).
Cluster::Address::Address(
    const Poco::Util::AbstractConfiguration & config,
    const String & config_prefix,
    const String & cluster_,
    const String & cluster_secret_,
    UInt32 shard_index_,
    UInt32 replica_index_)
    : cluster(cluster_)
    , cluster_secret(cluster_secret_)
    , shard_index(shard_index_)
    , replica_index(replica_index_)
{
    host_name = config.getString(config_prefix + ".host");
    /// Remember whether the user was given explicitly (vs. falling back to "default").
    if (config.has(config_prefix + ".user"))
        user_specified = true;

    user = config.getString(config_prefix + ".user", "default");
    password = config.getString(config_prefix + ".password", "");
    default_database = config.getString(config_prefix + ".default_database", "");
    /// An empty <secure/> tag counts as enabled (empty_as = true).
    secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable;
    priority = config.getInt(config_prefix + ".priority", 1);

    /// Default the replica's port to the server's own port of the matching protocol.
    const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port";
    auto default_port = config.getInt(port_type, 0);

    port = static_cast<UInt16>(config.getInt(config_prefix + ".port", default_port));
    if (!port)
        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Port is not specified in cluster configuration: {}", config_prefix + ".port");

    is_local = isLocal(config.getInt(port_type, 0));

    /// By default compression is disabled if address looks like localhost.
    /// NOTE: it's still enabled when interacting with servers on different port, but we don't want to complicate the logic.
    compression = config.getBool(config_prefix + ".compression", !is_local)
        ? Protocol::Compression::Enable : Protocol::Compression::Disable;
}
|
2014-02-22 21:50:27 +00:00
|
|
|
|
2015-10-16 16:10:10 +00:00
|
|
|
|
2020-09-14 21:55:43 +00:00
|
|
|
/// Constructs a replica address from an explicit "host[:port]" string
/// (used e.g. by the clusterAllReplicas/remote table functions and clickhouse-local).
/// `treat_local_port_as_remote` changes how a missing port is interpreted — see below.
Cluster::Address::Address(
    const String & host_port_,
    const String & user_,
    const String & password_,
    UInt16 clickhouse_port,
    bool treat_local_port_as_remote,
    bool secure_,
    Int64 priority_,
    UInt32 shard_index_,
    UInt32 replica_index_,
    String cluster_name_,
    String cluster_secret_)
    : user(user_), password(password_)
{
    bool can_be_local = true;
    std::pair<std::string, UInt16> parsed_host_port;
    if (!treat_local_port_as_remote)
    {
        parsed_host_port = parseAddress(host_port_, clickhouse_port);
    }
    else
    {
        /// For clickhouse-local (treat_local_port_as_remote) try to read the address without passing a default port
        /// If it works we have a full address that includes a port, which means it won't be local
        /// since clickhouse-local doesn't listen in any port
        /// If it doesn't include a port then use the default one and it could be local (if the address is)
        try
        {
            parsed_host_port = parseAddress(host_port_, 0);
            can_be_local = false;
        }
        catch (...)
        {
            parsed_host_port = parseAddress(host_port_, clickhouse_port);
        }
    }
    host_name = parsed_host_port.first;
    port = parsed_host_port.second;
    secure = secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable;
    priority = priority_;
    is_local = can_be_local && isLocal(clickhouse_port);
    shard_index = shard_index_;
    replica_index = replica_index_;
    cluster = cluster_name_;
    cluster_secret = cluster_secret_;
}
|
|
|
|
|
2017-05-30 11:49:17 +00:00
|
|
|
|
2017-04-13 16:12:56 +00:00
|
|
|
/// Renders this address as "<escaped_host>:<port>" via the static overload.
String Cluster::Address::toString() const
{
    return Address::toString(host_name, port);
}
|
|
|
|
|
|
|
|
/// Builds the "<escaped_host>:<port>" representation; the host is escaped so the
/// result is safe to use as (part of) a file name.
String Cluster::Address::toString(const String & host_name, UInt16 port)
{
    String result = escapeForFileName(host_name);
    result += ':';
    result += DB::toString(port);
    return result;
}
|
|
|
|
|
2017-07-28 16:14:49 +00:00
|
|
|
/// Human-readable "host:port". IPv6 literals are wrapped in brackets
/// ("[::1]:9000") to keep the host:port notation unambiguous.
String Cluster::Address::readableString() const
{
    String res;

    /// If it looks like IPv6 address add braces to avoid ambiguity in ipv6_host:port notation.
    /// A host containing ':' is necessarily non-empty, so the empty() check of the
    /// old condition was redundant; hosts already ending in ']' are left as-is.
    if (host_name.find_first_of(':') != std::string::npos && host_name.back() != ']')
        res += '[' + host_name + ']';
    else
        res += host_name;

    res += ':' + DB::toString(port);
    return res;
}
|
|
|
|
|
2019-01-21 19:45:26 +00:00
|
|
|
std::pair<String, UInt16> Cluster::Address::fromString(const String & host_port_string)
|
2017-07-26 19:31:32 +00:00
|
|
|
{
|
|
|
|
auto pos = host_port_string.find_last_of(':');
|
|
|
|
if (pos == std::string::npos)
|
2017-07-27 18:44:55 +00:00
|
|
|
throw Exception("Incorrect <host>:<port> format " + host_port_string, ErrorCodes::SYNTAX_ERROR);
|
2017-07-26 19:31:32 +00:00
|
|
|
|
2019-01-21 19:45:26 +00:00
|
|
|
return {unescapeForFileName(host_port_string.substr(0, pos)), parse<UInt16>(host_port_string.substr(pos + 1))};
|
2017-07-26 19:31:32 +00:00
|
|
|
}
|
|
|
|
|
2017-05-30 11:49:17 +00:00
|
|
|
|
2020-03-13 18:49:46 +00:00
|
|
|
/// Serializes the address for use as a directory name of a Distributed table's
/// async INSERT queue.
/// Compact format: "shard{N}_replica{M}" (indices as in system.clusters);
/// legacy format: "user[:password]@host:port[#default_database][+secure]".
/// The legacy format exposes credentials in the file name and can get very long,
/// which is why the compact format exists.
String Cluster::Address::toFullString(bool use_compact_format) const
{
    if (use_compact_format)
    {
        if (shard_index == 0 || replica_index == 0)
            // shard_num/replica_num like in system.clusters table
            throw Exception("shard_num/replica_num cannot be zero", ErrorCodes::LOGICAL_ERROR);

        return fmt::format("shard{}_replica{}", shard_index, replica_index);
    }
    else
    {
        return
            escapeForFileName(user)
            + (password.empty() ? "" : (':' + escapeForFileName(password))) + '@'
            + escapeForFileName(host_name) + ':' + std::to_string(port)
            + (default_database.empty() ? "" : ('#' + escapeForFileName(default_database)))
            + ((secure == Protocol::Secure::Enable) ? "+secure" : "");
    }
}
|
|
|
|
|
2019-01-21 19:45:26 +00:00
|
|
|
/// Parses a directory name produced by toFullString() back into an Address.
/// Accepts both the compact "shard{N}[_replica{M}]" format (detected by the
/// absence of '@' and the "shard" prefix) and the legacy
/// "user[:password]@host:port[#default_database][+secure]" format.
Cluster::Address Cluster::Address::fromFullString(const String & full_string)
{
    const char * address_begin = full_string.data();
    const char * address_end = address_begin + full_string.size();

    const char * user_pw_end = strchr(full_string.data(), '@');

    /// parsing with the new shard{shard_index}[_replica{replica_index}] format
    if (!user_pw_end && startsWith(full_string, "shard"))
    {
        const char * underscore = strchr(full_string.data(), '_');

        Address address;
        address.shard_index = parse<UInt32>(address_begin + strlen("shard"));
        /// Without the "_replica{M}" suffix the replica index stays 0
        /// (the "_all_replicas" marker form, for example, carries no replica index).
        address.replica_index = underscore ? parse<UInt32>(underscore + strlen("_replica")) : 0;

        return address;
    }
    else
    {
        /// parsing with the old user[:password]@host:port#default_database format
        /// This format is appeared to be inconvenient for the following reasons:
        /// - credentials are exposed in file name;
        /// - the file name can be too long.

        Protocol::Secure secure = Protocol::Secure::Disable;
        const char * secure_tag = "+secure";
        if (endsWith(full_string, secure_tag))
        {
            /// Strip the trailing "+secure" marker before locating host/port boundaries.
            address_end -= strlen(secure_tag);
            secure = Protocol::Secure::Enable;
        }

        const char * colon = strchr(full_string.data(), ':');
        if (!user_pw_end || !colon)
            throw Exception("Incorrect user[:password]@host:port#default_database format " + full_string, ErrorCodes::SYNTAX_ERROR);

        /// A ':' before '@' separates user from password; otherwise the first ':'
        /// already separates host from port.
        const bool has_pw = colon < user_pw_end;
        const char * host_end = has_pw ? strchr(user_pw_end + 1, ':') : colon;
        if (!host_end)
            throw Exception("Incorrect address '" + full_string + "', it does not contain port", ErrorCodes::SYNTAX_ERROR);

        const char * has_db = strchr(full_string.data(), '#');
        const char * port_end = has_db ? has_db : address_end;

        Address address;
        address.secure = secure;
        address.port = parse<UInt16>(host_end + 1, port_end - (host_end + 1));
        address.host_name = unescapeForFileName(std::string(user_pw_end + 1, host_end));
        address.user = unescapeForFileName(std::string(address_begin, has_pw ? colon : user_pw_end));
        address.password = has_pw ? unescapeForFileName(std::string(colon + 1, user_pw_end)) : std::string();
        address.default_database = has_db ? unescapeForFileName(std::string(has_db + 1, address_end)) : std::string();
        // address.priority ignored
        return address;
    }
}
|
|
|
|
|
2017-05-30 11:49:17 +00:00
|
|
|
|
2016-08-22 20:34:21 +00:00
|
|
|
/// Implementation of Clusters class
|
2014-02-22 21:50:27 +00:00
|
|
|
|
2022-04-27 23:32:49 +00:00
|
|
|
/// Builds the cluster map from the given configuration section.
/// Uses the member initializer list instead of an assignment in the body
/// (and drops the redundant `this->`); macros are moved since the parameter
/// is taken by value.
Clusters::Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, MultiVersion<Macros>::Version macros, const String & config_prefix)
    : macros_(std::move(macros))
{
    updateClusters(config, settings, config_prefix);
}
|
|
|
|
|
2016-10-14 15:06:46 +00:00
|
|
|
|
|
|
|
/// Looks up a cluster by name (macro substitutions in the name are expanded first).
/// Returns nullptr when no such cluster is known.
ClusterPtr Clusters::getCluster(const std::string & cluster_name) const
{
    std::lock_guard guard(mutex);

    const auto expanded_name = macros_->expand(cluster_name);
    if (const auto it = impl.find(expanded_name); it != impl.end())
        return it->second;
    return nullptr;
}
|
|
|
|
|
|
|
|
|
2017-11-03 19:53:10 +00:00
|
|
|
void Clusters::setCluster(const String & cluster_name, const std::shared_ptr<Cluster> & cluster)
|
|
|
|
{
|
2018-01-25 12:18:27 +00:00
|
|
|
std::lock_guard lock(mutex);
|
2017-11-03 19:53:10 +00:00
|
|
|
impl[cluster_name] = cluster;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-03-12 21:17:19 +00:00
|
|
|
/// Rebuilds the cluster map from `new_config`.
/// When `old_config` is supplied, only clusters whose configuration actually
/// changed are re-created, and clusters removed from the config are erased;
/// otherwise the map is rebuilt from scratch. Clusters managed by
/// ClusterDiscovery (keys with a ".discovery" subsection) are tracked in
/// `automatic_clusters` and are not (re)built here.
void Clusters::updateClusters(const Poco::Util::AbstractConfiguration & new_config, const Settings & settings, const String & config_prefix, Poco::Util::AbstractConfiguration * old_config)
{
    Poco::Util::AbstractConfiguration::Keys new_config_keys;
    new_config.keys(config_prefix, new_config_keys);

    /// If old config is set, we will update only clusters with updated config.
    /// In this case, we first need to find clusters that were deleted from config.
    Poco::Util::AbstractConfiguration::Keys deleted_keys;
    if (old_config)
    {
        /// set_difference below requires both key lists to be sorted.
        ::sort(new_config_keys.begin(), new_config_keys.end());

        Poco::Util::AbstractConfiguration::Keys old_config_keys;
        old_config->keys(config_prefix, old_config_keys);
        ::sort(old_config_keys.begin(), old_config_keys.end());

        std::set_difference(
            old_config_keys.begin(), old_config_keys.end(), new_config_keys.begin(), new_config_keys.end(), std::back_inserter(deleted_keys));
    }

    std::lock_guard lock(mutex);

    /// If old config is set, remove deleted clusters from impl, otherwise just clear it.
    if (old_config)
    {
        for (const auto & key : deleted_keys)
        {
            /// Discovery-managed clusters are owned by ClusterDiscovery — keep them.
            if (!automatic_clusters.contains(key))
                impl.erase(key);
        }
    }
    else
    {
        /// NOTE(review): this predicate erases exactly the discovery-managed clusters
        /// and keeps the manually configured ones, which is the opposite of the
        /// deleted_keys branch above (there automatic clusters are preserved).
        /// Looks like a missing '!' — confirm intended semantics against ClusterDiscovery.
        if (!automatic_clusters.empty())
            std::erase_if(impl, [this](const auto & e) { return automatic_clusters.contains(e.first); });
        else
            impl.clear();
    }

    for (const auto & key : new_config_keys)
    {
        if (new_config.has(config_prefix + "." + key + ".discovery"))
        {
            /// Handled in ClusterDiscovery
            automatic_clusters.insert(key);
            continue;
        }

        /// A dot in the cluster name would break the Poco configuration key syntax.
        if (key.find('.') != String::npos)
            throw Exception("Cluster names with dots are not supported: '" + key + "'", ErrorCodes::SYNTAX_ERROR);

        /// If old config is set and cluster config wasn't changed, don't update this cluster.
        if (!old_config || !isSameConfiguration(new_config, *old_config, config_prefix + "." + key))
            impl[key] = std::make_shared<Cluster>(new_config, settings, config_prefix, key);
    }
}
|
|
|
|
|
2016-10-14 15:06:46 +00:00
|
|
|
/// Returns a snapshot of the cluster map.
Clusters::Impl Clusters::getContainer() const
{
    std::lock_guard guard(mutex);
    /// The container of shared_ptrs is copied while the lock is held,
    /// then the copy is handed to the caller.
    Impl snapshot = impl;
    return snapshot;
}
|
|
|
|
|
2017-11-03 19:53:10 +00:00
|
|
|
|
2017-04-02 17:37:49 +00:00
|
|
|
/// Implementation of `Cluster` class
|
2013-12-07 16:51:29 +00:00
|
|
|
|
2020-09-14 21:55:43 +00:00
|
|
|
/// Builds a Cluster from a <remote_servers> configuration subsection.
/// Supports two layouts per entry: "node" (a shard with a single replica) and
/// "shard" (a shard with a list of "replica" subsections). For every shard a
/// connection pool (with failover across its replicas) is created, local
/// replicas are recorded separately, and `slot_to_shard` is filled according
/// to shard weights for the sharding-key distribution.
Cluster::Cluster(const Poco::Util::AbstractConfiguration & config,
                 const Settings & settings,
                 const String & config_prefix_,
                 const String & cluster_name) : name(cluster_name)
{
    auto config_prefix = config_prefix_ + "." + cluster_name;

    Poco::Util::AbstractConfiguration::Keys config_keys;
    config.keys(config_prefix, config_keys);

    config_prefix += ".";

    /// The optional inter-server secret is not a shard — remove it from the key list.
    secret = config.getString(config_prefix + "secret", "");
    boost::range::remove_erase(config_keys, "secret");

    if (config_keys.empty())
        throw Exception("No cluster elements (shard, node) specified in config at path " + config_prefix, ErrorCodes::SHARD_HAS_NO_CONNECTIONS);

    UInt32 current_shard_num = 1;
    for (const auto & key : config_keys)
    {
        if (startsWith(key, "node"))
        {
            /// Shard without replicas.

            Addresses addresses;

            const auto & prefix = config_prefix + key;
            const auto weight = config.getInt(prefix + ".weight", default_weight);

            addresses.emplace_back(config, prefix, cluster_name, secret, current_shard_num, 1);
            const auto & address = addresses.back();

            ShardInfo info;
            info.shard_num = current_shard_num;
            info.weight = weight;

            if (address.is_local)
                info.local_addresses.push_back(address);

            info.all_addresses.push_back(address);

            auto pool = ConnectionPoolFactory::instance().get(
                settings.distributed_connections_pool_size,
                address.host_name, address.port,
                address.default_database, address.user, address.password,
                address.cluster, address.cluster_secret,
                "server", address.compression,
                address.secure, address.priority);

            /// Even a single-replica shard is wrapped in a failover pool so all
            /// shards expose the same pool interface.
            info.pool = std::make_shared<ConnectionPoolWithFailover>(
                ConnectionPoolPtrs{pool}, settings.load_balancing);
            info.per_replica_pools = {std::move(pool)};

            /// A zero-weight shard receives no slots and thus no sharded writes.
            if (weight)
                slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());

            shards_info.emplace_back(std::move(info));
            addresses_with_failover.emplace_back(std::move(addresses));
        }
        else if (startsWith(key, "shard"))
        {
            /// Shard with replicas.

            Poco::Util::AbstractConfiguration::Keys replica_keys;
            config.keys(config_prefix + key, replica_keys);

            addresses_with_failover.emplace_back();
            Addresses & replica_addresses = addresses_with_failover.back();
            UInt32 current_replica_num = 1;

            const auto & partial_prefix = config_prefix + key + ".";
            /// NOTE(review): partial_prefix already ends with '.', so these lookups use a
            /// doubled dot ("shardN..weight") — confirm Poco tolerates the empty segment.
            const auto weight = config.getUInt(partial_prefix + ".weight", default_weight);

            bool internal_replication = config.getBool(partial_prefix + ".internal_replication", false);

            ShardInfoInsertPathForInternalReplication insert_paths;
            /// "_all_replicas" is a marker that will be replaced with all replicas
            /// (for creating connections in the Distributed engine)
            insert_paths.compact = fmt::format("shard{}_all_replicas", current_shard_num);

            for (const auto & replica_key : replica_keys)
            {
                /// "weight" and "internal_replication" are shard attributes, not replicas.
                if (startsWith(replica_key, "weight") || startsWith(replica_key, "internal_replication"))
                    continue;

                if (startsWith(replica_key, "replica"))
                {
                    replica_addresses.emplace_back(config,
                        partial_prefix + replica_key,
                        cluster_name,
                        secret,
                        current_shard_num,
                        current_replica_num);
                    ++current_replica_num;

                    if (internal_replication)
                    {
                        /// Accumulate per-replica directory names for the async INSERT
                        /// queue; the prefer_localhost path omits local replicas.
                        auto dir_name = replica_addresses.back().toFullString(/* use_compact_format= */ false);
                        if (!replica_addresses.back().is_local)
                            concatInsertPath(insert_paths.prefer_localhost_replica, dir_name);
                        concatInsertPath(insert_paths.no_prefer_localhost_replica, dir_name);
                    }
                }
                else
                    throw Exception("Unknown element in config: " + replica_key, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
            }

            Addresses shard_local_addresses;
            Addresses shard_all_addresses;

            ConnectionPoolPtrs all_replicas_pools;
            all_replicas_pools.reserve(replica_addresses.size());

            for (const auto & replica : replica_addresses)
            {
                auto replica_pool = ConnectionPoolFactory::instance().get(
                    settings.distributed_connections_pool_size,
                    replica.host_name, replica.port,
                    replica.default_database, replica.user, replica.password,
                    replica.cluster, replica.cluster_secret,
                    "server", replica.compression,
                    replica.secure, replica.priority);

                all_replicas_pools.emplace_back(replica_pool);
                if (replica.is_local)
                    shard_local_addresses.push_back(replica);
                shard_all_addresses.push_back(replica);
            }

            ConnectionPoolWithFailoverPtr shard_pool = std::make_shared<ConnectionPoolWithFailover>(
                all_replicas_pools, settings.load_balancing,
                settings.distributed_replica_error_half_life.totalSeconds(), settings.distributed_replica_error_cap);

            /// A zero-weight shard receives no slots and thus no sharded writes.
            if (weight)
                slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());

            shards_info.push_back({
                std::move(insert_paths),
                current_shard_num,
                weight,
                std::move(shard_local_addresses),
                std::move(shard_all_addresses),
                std::move(shard_pool),
                std::move(all_replicas_pools),
                internal_replication
            });
        }
        else
            throw Exception("Unknown element in config: " + key, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);

        ++current_shard_num;
    }

    if (addresses_with_failover.empty())
        throw Exception("There must be either 'node' or 'shard' elements in config", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

    initMisc();
}
|
|
|
|
|
2014-02-22 21:50:27 +00:00
|
|
|
|
2021-07-23 11:16:35 +00:00
|
|
|
/// Builds a cluster from a flat list of host names: each element of `names` is one shard,
/// each string inside it is one replica of that shard. All replicas share the same
/// credentials, port and connection options given here.
Cluster::Cluster(
    const Settings & settings,
    const std::vector<std::vector<String>> & names,
    const String & username,
    const String & password,
    UInt16 clickhouse_port,
    bool treat_local_as_remote,
    bool treat_local_port_as_remote,
    bool secure,
    Int64 priority,
    String cluster_name,
    String cluster_secret)
{
    /// Shard numbers are 1-based.
    UInt32 current_shard_num = 1;

    secret = cluster_secret;

    for (const auto & shard : names)
    {
        Addresses current;
        for (const auto & replica : shard)
            current.emplace_back(
                replica,
                username,
                password,
                clickhouse_port,
                treat_local_port_as_remote,
                secure,
                priority,
                current_shard_num,
                current.size() + 1, /// 1-based replica number; size() is read before the insertion happens.
                cluster_name,
                cluster_secret);

        addresses_with_failover.emplace_back(current);

        Addresses shard_local_addresses;
        Addresses all_addresses;
        ConnectionPoolPtrs all_replicas;
        all_replicas.reserve(current.size());

        for (const auto & replica : current)
        {
            auto replica_pool = ConnectionPoolFactory::instance().get(
                settings.distributed_connections_pool_size,
                replica.host_name, replica.port,
                replica.default_database, replica.user, replica.password,
                replica.cluster, replica.cluster_secret,
                "server", replica.compression, replica.secure, replica.priority);
            all_replicas.emplace_back(replica_pool);
            /// With treat_local_as_remote, even local replicas go only through connection pools.
            if (replica.is_local && !treat_local_as_remote)
                shard_local_addresses.push_back(replica);
            all_addresses.push_back(replica);
        }

        ConnectionPoolWithFailoverPtr shard_pool = std::make_shared<ConnectionPoolWithFailover>(
            all_replicas, settings.load_balancing,
            settings.distributed_replica_error_half_life.totalSeconds(), settings.distributed_replica_error_cap);

        /// Every shard gets the default weight: append `default_weight` slots pointing at this shard.
        slot_to_shard.insert(std::end(slot_to_shard), default_weight, shards_info.size());
        shards_info.push_back({
            {}, // insert_path_for_internal_replication
            current_shard_num,
            default_weight,
            std::move(shard_local_addresses),
            std::move(all_addresses),
            std::move(shard_pool),
            std::move(all_replicas),
            false // has_internal_replication
        });
        ++current_shard_num;
    }

    initMisc();
}
|
|
|
|
|
2014-02-22 21:50:27 +00:00
|
|
|
|
2021-04-29 16:11:20 +00:00
|
|
|
/// Clamps `v` to `limit`; a zero limit means "no limit" and `v` is returned unchanged.
Poco::Timespan Cluster::saturate(Poco::Timespan v, Poco::Timespan limit)
{
    if (limit.totalMicroseconds() == 0)
        return v;

    return (v > limit) ? limit : v;
}
|
|
|
|
|
|
|
|
|
2015-10-20 14:59:29 +00:00
|
|
|
void Cluster::initMisc()
|
2013-12-07 16:51:29 +00:00
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
for (const auto & shard_info : shards_info)
|
|
|
|
{
|
|
|
|
if (!shard_info.isLocal() && !shard_info.hasRemoteConnections())
|
|
|
|
throw Exception("Found shard without any specified connection",
|
|
|
|
ErrorCodes::SHARD_HAS_NO_CONNECTIONS);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (const auto & shard_info : shards_info)
|
|
|
|
{
|
|
|
|
if (shard_info.isLocal())
|
|
|
|
++local_shard_count;
|
|
|
|
else
|
|
|
|
++remote_shard_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto & shard_info : shards_info)
|
|
|
|
{
|
|
|
|
if (!shard_info.isLocal())
|
|
|
|
{
|
|
|
|
any_remote_shard_info = &shard_info;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2013-12-07 16:51:29 +00:00
|
|
|
}
|
|
|
|
|
2020-01-07 10:26:16 +00:00
|
|
|
std::unique_ptr<Cluster> Cluster::getClusterWithReplicasAsShards(const Settings & settings) const
|
|
|
|
{
|
2020-01-10 17:44:34 +00:00
|
|
|
return std::unique_ptr<Cluster>{ new Cluster(ReplicasAsShardsTag{}, *this, settings)};
|
2020-01-07 10:26:16 +00:00
|
|
|
}
|
2016-05-13 03:22:16 +00:00
|
|
|
|
|
|
|
std::unique_ptr<Cluster> Cluster::getClusterWithSingleShard(size_t index) const
|
|
|
|
{
|
2020-01-10 17:44:34 +00:00
|
|
|
return std::unique_ptr<Cluster>{ new Cluster(SubclusterTag{}, *this, {index}) };
|
2016-05-13 03:22:16 +00:00
|
|
|
}
|
|
|
|
|
2018-11-21 04:04:05 +00:00
|
|
|
std::unique_ptr<Cluster> Cluster::getClusterWithMultipleShards(const std::vector<size_t> & indices) const
|
2016-05-13 03:22:16 +00:00
|
|
|
{
|
2020-01-10 17:44:34 +00:00
|
|
|
return std::unique_ptr<Cluster>{ new Cluster(SubclusterTag{}, *this, indices) };
|
2016-05-13 03:22:16 +00:00
|
|
|
}
|
|
|
|
|
2020-01-10 17:44:34 +00:00
|
|
|
/// Builds a cluster in which every distinct (host, port) replica of `from` becomes a
/// separate single-replica shard. A host appearing in several shards is added only once.
Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Settings & settings)
{
    if (from.addresses_with_failover.empty())
        throw Exception("Cluster is empty", ErrorCodes::LOGICAL_ERROR);

    /// Shard numbers are 1-based; pre-increment below yields 1 for the first shard.
    UInt32 shard_num = 0;
    std::set<std::pair<String, int>> unique_hosts;
    for (size_t shard_index : collections::range(0, from.shards_info.size()))
    {
        const auto & replicas = from.addresses_with_failover[shard_index];
        for (const auto & address : replicas)
        {
            if (!unique_hosts.emplace(address.host_name, address.port).second)
                continue; /// Duplicate host, skip.

            ShardInfo info;
            info.shard_num = ++shard_num;

            if (address.is_local)
                info.local_addresses.push_back(address);

            info.all_addresses.push_back(address);

            auto pool = ConnectionPoolFactory::instance().get(
                settings.distributed_connections_pool_size,
                address.host_name,
                address.port,
                address.default_database,
                address.user,
                address.password,
                address.cluster,
                address.cluster_secret,
                "server",
                address.compression,
                address.secure,
                address.priority);

            /// Each new shard has exactly one replica, so the failover pool wraps a single pool.
            info.pool = std::make_shared<ConnectionPoolWithFailover>(ConnectionPoolPtrs{pool}, settings.load_balancing);
            info.per_replica_pools = {std::move(pool)};

            addresses_with_failover.emplace_back(Addresses{address});
            shards_info.emplace_back(std::move(info));
        }
    }

    secret = from.secret;
    name = from.name;

    initMisc();
}
|
|
|
|
|
2020-01-10 17:44:34 +00:00
|
|
|
|
|
|
|
/// Builds a subcluster consisting of the shards of `from` selected by `indices`.
/// Out-of-range indices throw (via .at()).
Cluster::Cluster(Cluster::SubclusterTag, const Cluster & from, const std::vector<size_t> & indices)
{
    /// The number of copied shards is known up front — reserve to avoid reallocations.
    shards_info.reserve(indices.size());

    /// `from` may have been built without an address list; check once, not per iteration.
    const bool copy_addresses = !from.addresses_with_failover.empty();
    if (copy_addresses)
        addresses_with_failover.reserve(indices.size());

    for (size_t index : indices)
    {
        shards_info.emplace_back(from.shards_info.at(index));

        if (copy_addresses)
            addresses_with_failover.emplace_back(from.addresses_with_failover.at(index));
    }

    secret = from.secret;
    name = from.name;

    initMisc();
}
|
|
|
|
|
2022-05-01 13:36:32 +00:00
|
|
|
std::vector<Strings> Cluster::getHostIDs() const
|
|
|
|
{
|
|
|
|
std::vector<Strings> host_ids;
|
|
|
|
host_ids.resize(addresses_with_failover.size());
|
|
|
|
for (size_t i = 0; i != addresses_with_failover.size(); ++i)
|
|
|
|
{
|
|
|
|
const auto & addresses = addresses_with_failover[i];
|
|
|
|
host_ids[i].resize(addresses.size());
|
|
|
|
for (size_t j = 0; j != addresses.size(); ++j)
|
|
|
|
host_ids[i][j] = addresses[j].toString();
|
|
|
|
}
|
|
|
|
return host_ids;
|
|
|
|
}
|
|
|
|
|
2022-06-18 22:01:08 +00:00
|
|
|
/// Returns pointers into addresses_with_failover selected by 1-based shard/replica numbers.
/// A zero value means "no filter": only_shard_num == 0 selects all shards,
/// only_replica_num == 0 selects all replicas of each selected shard.
/// Throws INVALID_SHARD_ID / NO_SUCH_REPLICA for out-of-range numbers.
std::vector<const Cluster::Address *> Cluster::filterAddressesByShardOrReplica(size_t only_shard_num, size_t only_replica_num) const
{
    std::vector<const Address *> res;

    /// Appends to `res` the selected replicas of the shard with the given 1-based index.
    auto enumerate_replicas = [&](size_t shard_index)
    {
        if (shard_index > addresses_with_failover.size())
            throw Exception(ErrorCodes::INVALID_SHARD_ID, "Cluster {} doesn't have shard #{}", name, shard_index);
        const auto & replicas = addresses_with_failover[shard_index - 1];
        if (only_replica_num)
        {
            if (only_replica_num > replicas.size())
                throw Exception(ErrorCodes::NO_SUCH_REPLICA, "Cluster {} doesn't have replica #{} in shard #{}", name, only_replica_num, shard_index);
            res.emplace_back(&replicas[only_replica_num - 1]);
        }
        else
        {
            for (const auto & addr : replicas)
                res.emplace_back(&addr);
        }
    };

    if (only_shard_num)
    {
        enumerate_replicas(only_shard_num);
    }
    else
    {
        for (size_t shard_index = 1; shard_index <= addresses_with_failover.size(); ++shard_index)
            enumerate_replicas(shard_index);
    }

    return res;
}
|
|
|
|
|
2020-11-07 23:44:35 +00:00
|
|
|
/// Returns the directory name used for asynchronous INSERT into Distributed for this shard.
/// Only valid when internal_replication is enabled for the shard.
const std::string & Cluster::ShardInfo::insertPathForInternalReplication(bool prefer_localhost_replica, bool use_compact_format) const
{
    if (!has_internal_replication)
        throw Exception("internal_replication is not set", ErrorCodes::LOGICAL_ERROR);

    const auto & paths = insert_path_for_internal_replication;
    if (!use_compact_format)
    {
        /// The non-compact format encodes replicas into the directory name, so with many
        /// replicas the name can overflow the filesystem NAME_MAX limit and directory
        /// creation would fail — detect that here and report it explicitly.
        const auto & path = prefer_localhost_replica ? paths.prefer_localhost_replica : paths.no_prefer_localhost_replica;
        if (path.size() > NAME_MAX)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR,
                "Path '{}' for async distributed INSERT is too long (exceed {} limit)", path, NAME_MAX);
        }
        return path;
    }
    else
    {
        /// Compact format: replicas are not listed individually in the directory name,
        /// so the NAME_MAX overflow cannot happen here.
        return paths.compact;
    }
}
|
|
|
|
|
2020-09-23 18:28:59 +00:00
|
|
|
bool Cluster::maybeCrossReplication() const
|
|
|
|
{
|
|
|
|
/// Cluster can be used for cross-replication if some replicas have different default database names,
|
|
|
|
/// so one clickhouse-server instance can contain multiple replicas.
|
|
|
|
|
|
|
|
if (addresses_with_failover.empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const String & database_name = addresses_with_failover.front().front().default_database;
|
|
|
|
for (const auto & shard : addresses_with_failover)
|
|
|
|
for (const auto & replica : shard)
|
|
|
|
if (replica.default_database != database_name)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-12-07 16:51:29 +00:00
|
|
|
}
|