#include <Interpreters/Cluster.h>
#include <Common/DNSResolver.h>
#include <Common/escapeForFileName.h>
#include <Common/isLocalAddress.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/parseAddress.h>
#include <Common/Config/AbstractConfigurationComparison.h>
#include <Common/Config/ConfigHelper.h>
#include <Core/Settings.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Poco/Util/Application.h>
#include <base/range.h>
#include <base/sort.h>
#include <boost/range/algorithm_ext/erase.hpp>

#include <span>

namespace DB
{

namespace ErrorCodes
{
    extern const int UNKNOWN_ELEMENT_IN_CONFIG;
    extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
    extern const int LOGICAL_ERROR;
    extern const int SHARD_HAS_NO_CONNECTIONS;
    extern const int NO_ELEMENTS_IN_CONFIG;
    extern const int SYNTAX_ERROR;
    extern const int INVALID_SHARD_ID;
    extern const int NO_SUCH_REPLICA;
    extern const int BAD_ARGUMENTS;
}

namespace
{

/// Default shard weight.
constexpr UInt32 default_weight = 1;

inline bool isLocalImpl(const Cluster::Address & address, const Poco::Net::SocketAddress & resolved_address, UInt16 clickhouse_port)
{
    /// If there is a replica for which:
    /// - its port is the same as the one the server is listening on;
    /// - its host resolves to a set of addresses, one of which matches an address of one of the server machine's network interfaces*;
    /// then we must go to this shard without any inter-process communication.
    ///
    /// * - this criterion is somewhat approximate.
    ///
    /// Also, a replica is considered non-local if it has a default database set
    /// (the only reason is to avoid query rewrite).

    return address.default_database.empty() && isLocalAddress(resolved_address, clickhouse_port);
}

void concatInsertPath(std::string & insert_path, const std::string & dir_name)
{
    if (insert_path.empty())
        insert_path = dir_name;
    else
        insert_path += "," + dir_name;
}
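
/// Illustrative use of concatInsertPath (directory names are hypothetical): starting from an
/// empty path, appending "shard1_replica1" and then "shard1_replica2" yields the
/// comma-separated list "shard1_replica1,shard1_replica2".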

}

/// Implementation of Cluster::Address class

std::optional<Poco::Net::SocketAddress> Cluster::Address::getResolvedAddress() const
{
    try
    {
        return DNSResolver::instance().resolveAddress(host_name, port);
    }
    catch (...)
    {
        /// Failure of DNS resolution during cluster initialization is OK.
        tryLogCurrentException("Cluster");
        return {};
    }
}

bool Cluster::Address::isLocal(UInt16 clickhouse_port) const
{
    if (auto resolved = getResolvedAddress())
        return isLocalImpl(*this, *resolved, clickhouse_port);
    return false;
}

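/// A minimal sketch of the <replica> (or <node>) entry this constructor reads; hostnames and
/// values are hypothetical, and only the keys actually parsed below are shown:
///     <replica>
///         <host>example01</host>
///         <port>9000</port>
///         <user>default</user>
///         <password></password>
///         <default_database></default_database>
///         <secure>0</secure>
///         <priority>1</priority>
///         <compression>1</compression>
///     </replica>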
Cluster::Address::Address(
    const Poco::Util::AbstractConfiguration & config,
    const String & config_prefix,
    const String & cluster_,
    const String & cluster_secret_,
    UInt32 shard_index_,
    UInt32 replica_index_)
    : cluster(cluster_)
    , cluster_secret(cluster_secret_)
    , shard_index(shard_index_)
    , replica_index(replica_index_)
{
    host_name = config.getString(config_prefix + ".host");
    if (config.has(config_prefix + ".user"))
        user_specified = true;

    user = config.getString(config_prefix + ".user", "default");
    password = config.getString(config_prefix + ".password", "");
    default_database = config.getString(config_prefix + ".default_database", "");
    secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */ true) ? Protocol::Secure::Enable : Protocol::Secure::Disable;
    priority = Priority{config.getInt(config_prefix + ".priority", 1)};

    const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port";
    auto default_port = config.getInt(port_type, 0);

    port = static_cast<UInt16>(config.getInt(config_prefix + ".port", default_port));
    if (!port)
        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Port is not specified in cluster configuration: {}.port", config_prefix);

    is_local = isLocal(config.getInt(port_type, 0));

    /// By default compression is disabled if the address looks like localhost.
    /// NOTE: it's still enabled when interacting with servers on a different port, but we don't want to complicate the logic.
    compression = config.getBool(config_prefix + ".compression", !is_local)
        ? Protocol::Compression::Enable : Protocol::Compression::Disable;
}

Cluster::Address::Address(
    const DatabaseReplicaInfo & info,
    const ClusterConnectionParameters & params,
    UInt32 shard_index_,
    UInt32 replica_index_)
    : user(params.username), password(params.password)
{
    bool can_be_local = true;
    std::pair<std::string, UInt16> parsed_host_port;
    if (!params.treat_local_port_as_remote)
    {
        parsed_host_port = parseAddress(info.hostname, params.clickhouse_port);
    }
    else
    {
        /// For clickhouse-local (treat_local_port_as_remote), try to read the address without passing a default port.
        /// If that works, we have a full address that includes a port, which means it won't be local
        /// since clickhouse-local doesn't listen on any port.
        /// If it doesn't include a port, then use the default one and it could be local (if the address is).
        try
        {
            parsed_host_port = parseAddress(info.hostname, 0);
            can_be_local = false;
        }
        catch (...)
        {
            parsed_host_port = parseAddress(info.hostname, params.clickhouse_port);
        }
    }
    host_name = parsed_host_port.first;
    database_shard_name = info.shard_name;
    database_replica_name = info.replica_name;
    port = parsed_host_port.second;
    secure = params.secure ? Protocol::Secure::Enable : Protocol::Secure::Disable;
    priority = params.priority;
    is_local = can_be_local && isLocal(params.clickhouse_port);
    shard_index = shard_index_;
    replica_index = replica_index_;
    cluster = params.cluster_name;
    cluster_secret = params.cluster_secret;
}

String Cluster::Address::toString() const
{
    return toString(host_name, port);
}

String Cluster::Address::toString(const String & host_name, UInt16 port)
{
    return escapeForFileName(host_name) + ':' + DB::toString(port);
}

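/// e.g. readableString() renders a hypothetical IPv6 replica "::1" on port 9000 as
/// "[::1]:9000", while a plain hostname stays unbracketed: "example01:9000".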
String Cluster::Address::readableString() const
{
    String res;

    /// If it looks like an IPv6 address, add brackets to avoid ambiguity in the ipv6_host:port notation
    if (host_name.find_first_of(':') != std::string::npos && !host_name.empty() && host_name.back() != ']')
        res += '[' + host_name + ']';
    else
        res += host_name;

    res += ':' + DB::toString(port);
    return res;
}

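/// e.g. fromString("example01:9000") returns {"example01", 9000}; the host part is
/// unescaped from its file-name form produced by toString above (hostname is hypothetical).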
std::pair<String, UInt16> Cluster::Address::fromString(const String & host_port_string)
{
    auto pos = host_port_string.find_last_of(':');
    if (pos == std::string::npos)
        throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect <host>:<port> format {}", host_port_string);

    return {unescapeForFileName(host_port_string.substr(0, pos)), parse<UInt16>(host_port_string.substr(pos + 1))};
}

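/// Sketch of the two formats produced below, with hypothetical values:
///     compact:  "shard2_replica1"
///     full:     "default:secret@example01:9000#db+secure"
/// (the password, database and "+secure" parts are omitted when empty/disabled).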
String Cluster::Address::toFullString(bool use_compact_format) const
{
    if (use_compact_format)
    {
        if (shard_index == 0 || replica_index == 0)
            // shard_num/replica_num like in system.clusters table
            throw Exception(ErrorCodes::LOGICAL_ERROR, "shard_num/replica_num cannot be zero");

        return fmt::format("shard{}_replica{}", shard_index, replica_index);
    }
    else
    {
        return
            escapeForFileName(user)
            + (password.empty() ? "" : (':' + escapeForFileName(password))) + '@'
            + escapeForFileName(host_name) + ':' + std::to_string(port)
            + (default_database.empty() ? "" : ('#' + escapeForFileName(default_database)))
            + ((secure == Protocol::Secure::Enable) ? "+secure" : "");
    }
}

Cluster::Address Cluster::Address::fromFullString(const String & full_string)
{
    const char * address_begin = full_string.data();
    const char * address_end = address_begin + full_string.size();

    const char * user_pw_end = strchr(full_string.data(), '@');

    /// Parsing with the new shard{shard_index}[_replica{replica_index}] format
    if (!user_pw_end && startsWith(full_string, "shard"))
    {
        const char * underscore = strchr(full_string.data(), '_');

        Address address;
        address.shard_index = parse<UInt32>(address_begin + strlen("shard"));
        address.replica_index = underscore ? parse<UInt32>(underscore + strlen("_replica")) : 0;

        return address;
    }
    else
    {
        /// Parsing with the old user[:password]@host:port#default_database format.
        /// This format turned out to be inconvenient for the following reasons:
        /// - credentials are exposed in the file name;
        /// - the file name can be too long.

        Protocol::Secure secure = Protocol::Secure::Disable;
        const char * secure_tag = "+secure";
        if (endsWith(full_string, secure_tag))
        {
            address_end -= strlen(secure_tag);
            secure = Protocol::Secure::Enable;
        }

        const char * colon = strchr(full_string.data(), ':');
        if (!user_pw_end || !colon)
            throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect user[:password]@host:port#default_database format {}", full_string);

        const bool has_pw = colon < user_pw_end;
        const char * host_end = has_pw ? strchr(user_pw_end + 1, ':') : colon;
        if (!host_end)
            throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect address '{}', it does not contain port", full_string);

        const char * has_db = strchr(full_string.data(), '#');
        const char * port_end = has_db ? has_db : address_end;

        Address address;
        address.secure = secure;
        address.port = parse<UInt16>(host_end + 1, port_end - (host_end + 1));
        address.host_name = unescapeForFileName(std::string(user_pw_end + 1, host_end));
        address.user = unescapeForFileName(std::string(address_begin, has_pw ? colon : user_pw_end));
        address.password = has_pw ? unescapeForFileName(std::string(colon + 1, user_pw_end)) : std::string();
        address.default_database = has_db ? unescapeForFileName(std::string(has_db + 1, address_end)) : std::string();
        // address.priority ignored
        return address;
    }
}

/// Implementation of Clusters class

Clusters::Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, MultiVersion<Macros>::Version macros, const String & config_prefix)
{
    this->macros_ = macros;
    updateClusters(config, settings, config_prefix);
}

ClusterPtr Clusters::getCluster(const std::string & cluster_name) const
{
    std::lock_guard lock(mutex);

    auto expanded_cluster_name = macros_->expand(cluster_name);
    auto it = impl.find(expanded_cluster_name);
    return (it != impl.end()) ? it->second : nullptr;
}

void Clusters::setCluster(const String & cluster_name, const std::shared_ptr<Cluster> & cluster)
{
    std::lock_guard lock(mutex);
    impl[cluster_name] = cluster;
}

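/// A short sketch of the reload flow implemented below: both key lists are sorted so that
/// std::set_difference can collect clusters present in the old config but missing from the
/// new one; those get dropped, and only clusters whose config subtree actually changed are rebuilt.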
void Clusters::updateClusters(const Poco::Util::AbstractConfiguration & new_config, const Settings & settings, const String & config_prefix, Poco::Util::AbstractConfiguration * old_config)
{
    Poco::Util::AbstractConfiguration::Keys new_config_keys;
    new_config.keys(config_prefix, new_config_keys);

    /// If the old config is set, we will update only clusters with an updated config.
    /// In this case, we first need to find clusters that were deleted from the config.
    Poco::Util::AbstractConfiguration::Keys deleted_keys;
    if (old_config)
    {
        ::sort(new_config_keys.begin(), new_config_keys.end());

        Poco::Util::AbstractConfiguration::Keys old_config_keys;
        old_config->keys(config_prefix, old_config_keys);
        ::sort(old_config_keys.begin(), old_config_keys.end());

        std::set_difference(
            old_config_keys.begin(), old_config_keys.end(), new_config_keys.begin(), new_config_keys.end(), std::back_inserter(deleted_keys));
    }

    std::lock_guard lock(mutex);

    /// If the old config is set, remove deleted clusters from impl, otherwise just clear it.
    if (old_config)
    {
        for (const auto & key : deleted_keys)
        {
            if (!automatic_clusters.contains(key))
                impl.erase(key);
        }
    }
    else
    {
        if (!automatic_clusters.empty())
            std::erase_if(impl, [this](const auto & e) { return automatic_clusters.contains(e.first); });
        else
            impl.clear();
    }

    for (const auto & key : new_config_keys)
    {
        if (new_config.has(config_prefix + "." + key + ".discovery"))
        {
            /// Handled in ClusterDiscovery
            automatic_clusters.insert(key);
            continue;
        }

        if (key.find('.') != String::npos)
            throw Exception(ErrorCodes::SYNTAX_ERROR, "Cluster names with dots are not supported: '{}'", key);

        /// If the old config is set and the cluster config wasn't changed, don't update this cluster.
        if (!old_config || !isSameConfiguration(new_config, *old_config, config_prefix + "." + key))
            impl[key] = std::make_shared<Cluster>(new_config, settings, config_prefix, key);
    }
}

Clusters::Impl Clusters::getContainer() const
{
    std::lock_guard lock(mutex);
    /// The following line copies the container of shared_ptrs to the return value under the lock
    return impl;
}

/// Implementation of `Cluster` class

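/// A minimal sketch of a cluster definition this constructor parses (names and hosts are
/// hypothetical): a cluster is a list of <node> or <shard> elements, and a <shard> in turn
/// holds <replica> entries plus optional <weight> and <internal_replication>:
///     <test_cluster>
///         <shard>
///             <weight>1</weight>
///             <internal_replication>true</internal_replication>
///             <replica><host>example01</host><port>9000</port></replica>
///             <replica><host>example02</host><port>9000</port></replica>
///         </shard>
///     </test_cluster>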
Cluster::Cluster(const Poco::Util::AbstractConfiguration & config,
                 const Settings & settings,
                 const String & config_prefix_,
                 const String & cluster_name) : name(cluster_name)
{
    auto config_prefix = config_prefix_ + "." + cluster_name;

    Poco::Util::AbstractConfiguration::Keys config_keys;
    config.keys(config_prefix, config_keys);

    config_prefix += ".";

    secret = config.getString(config_prefix + "secret", "");
    boost::range::remove_erase(config_keys, "secret");

    allow_distributed_ddl_queries = config.getBool(config_prefix + "allow_distributed_ddl_queries", true);
    boost::range::remove_erase(config_keys, "allow_distributed_ddl_queries");

    if (config_keys.empty())
        throw Exception(ErrorCodes::SHARD_HAS_NO_CONNECTIONS, "No cluster elements (shard, node) specified in config at path {}", config_prefix);

    UInt32 current_shard_num = 1;
    for (const auto & key : config_keys)
    {
        if (startsWith(key, "node"))
        {
            /// Shard without replicas.

            Addresses addresses;

            const auto & prefix = config_prefix + key;
            const auto weight = config.getInt(prefix + ".weight", default_weight);

            addresses.emplace_back(config, prefix, cluster_name, secret, current_shard_num, 1);
            const auto & address = addresses.back();

            ShardInfo info;
            info.shard_num = current_shard_num;
            info.weight = weight;

            if (address.is_local)
                info.local_addresses.push_back(address);

            info.all_addresses.push_back(address);

            auto pool = ConnectionPoolFactory::instance().get(
                static_cast<unsigned>(settings.distributed_connections_pool_size),
                address.host_name, address.port,
                address.default_database, address.user, address.password, address.quota_key,
                address.cluster, address.cluster_secret,
                "server", address.compression,
                address.secure, address.priority);

            info.pool = std::make_shared<ConnectionPoolWithFailover>(
                ConnectionPoolPtrs{pool}, settings.load_balancing);
            info.per_replica_pools = {std::move(pool)};

            if (weight)
                slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());

            shards_info.emplace_back(std::move(info));
            addresses_with_failover.emplace_back(std::move(addresses));
        }
        else if (startsWith(key, "shard"))
        {
            /// Shard with replicas.

            Poco::Util::AbstractConfiguration::Keys replica_keys;
            config.keys(config_prefix + key, replica_keys);

            addresses_with_failover.emplace_back();
            Addresses & replica_addresses = addresses_with_failover.back();
            UInt32 current_replica_num = 1;

            const auto & partial_prefix = config_prefix + key + ".";
            const auto weight = config.getUInt(partial_prefix + ".weight", default_weight);

            bool internal_replication = config.getBool(partial_prefix + ".internal_replication", false);

            ShardInfoInsertPathForInternalReplication insert_paths;
            /// "_all_replicas" is a marker that will be replaced with all replicas
            /// (for creating connections in the Distributed engine)
            insert_paths.compact = fmt::format("shard{}_all_replicas", current_shard_num);

            for (const auto & replica_key : replica_keys)
            {
                if (startsWith(replica_key, "weight") || startsWith(replica_key, "internal_replication"))
                    continue;

                if (startsWith(replica_key, "replica"))
                {
                    replica_addresses.emplace_back(config,
                        partial_prefix + replica_key,
                        cluster_name,
                        secret,
                        current_shard_num,
                        current_replica_num);
                    ++current_replica_num;

                    if (internal_replication)
                    {
                        auto dir_name = replica_addresses.back().toFullString(/* use_compact_format= */ false);
                        if (!replica_addresses.back().is_local)
                            concatInsertPath(insert_paths.prefer_localhost_replica, dir_name);
                        concatInsertPath(insert_paths.no_prefer_localhost_replica, dir_name);
                    }
                }
                else
                    throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown element in config: {}", replica_key);
            }

            addShard(settings, std::move(replica_addresses), /* treat_local_as_remote = */ false, current_shard_num,
                std::move(insert_paths), weight, internal_replication);
        }
        else
            throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown element in config: {}", key);

        ++current_shard_num;
    }

    if (addresses_with_failover.empty())
        throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "There must be either 'node' or 'shard' elements in config");

    initMisc();
}

Cluster::Cluster(
    const Settings & settings,
    const std::vector<std::vector<String>> & names,
    const ClusterConnectionParameters & params)
{
    UInt32 current_shard_num = 1;

    secret = params.cluster_secret;

    for (const auto & shard : names)
    {
        Addresses current;
        for (const auto & replica : shard)
            current.emplace_back(
                DatabaseReplicaInfo{replica, "", ""},
                params,
                current_shard_num,
                current.size() + 1);

        addresses_with_failover.emplace_back(current);

        addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num);
        ++current_shard_num;
    }

    initMisc();
}

Cluster::Cluster(
    const Settings & settings,
    const std::vector<std::vector<DatabaseReplicaInfo>> & infos,
    const ClusterConnectionParameters & params)
{
    UInt32 current_shard_num = 1;

    secret = params.cluster_secret;

    for (const auto & shard : infos)
    {
        Addresses current;
        for (const auto & replica : shard)
            current.emplace_back(
                replica,
                params,
                current_shard_num,
                current.size() + 1);

        addresses_with_failover.emplace_back(current);

        addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num);
        ++current_shard_num;
    }

    initMisc();
}

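/// A sketch of how shard weights become slots in addShard below (weights are hypothetical):
/// for two shards with weights 2 and 1, slot_to_shard ends up as [0, 0, 1], so sharding-key
/// values that map to slots 0 or 1 go to the first shard and slot 2 goes to the second.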
void Cluster::addShard(const Settings & settings, Addresses && addresses, bool treat_local_as_remote, UInt32 current_shard_num,
                       ShardInfoInsertPathForInternalReplication && insert_paths, UInt32 weight, bool internal_replication)
{
    Addresses shard_local_addresses;
    Addresses shard_all_addresses;

    ConnectionPoolPtrs all_replicas_pools;
    all_replicas_pools.reserve(addresses.size());

    for (const auto & replica : addresses)
    {
        auto replica_pool = ConnectionPoolFactory::instance().get(
            static_cast<unsigned>(settings.distributed_connections_pool_size),
            replica.host_name, replica.port,
            replica.default_database, replica.user, replica.password, replica.quota_key,
            replica.cluster, replica.cluster_secret,
            "server", replica.compression,
            replica.secure, replica.priority);

        all_replicas_pools.emplace_back(replica_pool);
        if (replica.is_local && !treat_local_as_remote)
            shard_local_addresses.push_back(replica);
        shard_all_addresses.push_back(replica);
    }
    ConnectionPoolWithFailoverPtr shard_pool = std::make_shared<ConnectionPoolWithFailover>(
        all_replicas_pools, settings.load_balancing,
        settings.distributed_replica_error_half_life.totalSeconds(), settings.distributed_replica_error_cap);

    if (weight)
        slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());

    shards_info.push_back({
        std::move(insert_paths),
        current_shard_num,
        weight,
        std::move(shard_local_addresses),
        std::move(shard_all_addresses),
        std::move(shard_pool),
        std::move(all_replicas_pools),
        internal_replication
    });
}

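/// e.g. saturate(5s, 3s) == 3s, while saturate(5s, 0) == 5s: a zero limit means "no limit".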
Poco::Timespan Cluster::saturate(Poco::Timespan v, Poco::Timespan limit)
{
    if (limit.totalMicroseconds() == 0)
        return v;
    else
        return (v > limit) ? limit : v;
}

void Cluster::initMisc()
{
    /// NOTE: It is possible to have a cluster w/o shards for
    /// optimize_skip_unused_shards (i.e. WHERE 0 expression), so check the
    /// slots only if shards_info is not empty.
    if (!shards_info.empty() && slot_to_shard.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cluster with zero weight on all shards is prohibited");

    for (const auto & shard_info : shards_info)
    {
        if (!shard_info.isLocal() && !shard_info.hasRemoteConnections())
            throw Exception(ErrorCodes::SHARD_HAS_NO_CONNECTIONS, "Found shard without any specified connection");
    }

    for (const auto & shard_info : shards_info)
    {
        if (shard_info.isLocal())
            ++local_shard_count;
        else
            ++remote_shard_count;
    }

    for (auto & shard_info : shards_info)
    {
        if (!shard_info.isLocal())
        {
            any_remote_shard_info = &shard_info;
            break;
        }
    }
}

std::unique_ptr<Cluster> Cluster::getClusterWithReplicasAsShards(const Settings & settings, size_t max_replicas_from_shard) const
{
    return std::unique_ptr<Cluster>{ new Cluster(ReplicasAsShardsTag{}, *this, settings, max_replicas_from_shard) };
}

std::unique_ptr<Cluster> Cluster::getClusterWithSingleShard(size_t index) const
{
    return std::unique_ptr<Cluster>{ new Cluster(SubclusterTag{}, *this, {index}) };
}

std::unique_ptr<Cluster> Cluster::getClusterWithMultipleShards(const std::vector<size_t> & indices) const
{
    return std::unique_ptr<Cluster>{ new Cluster(SubclusterTag{}, *this, indices) };
}

namespace
{

void shuffleReplicas(std::vector<Cluster::Address> & replicas, const Settings & settings, size_t replicas_needed)
{
    std::random_device rd;
    std::mt19937 gen{rd()};

    if (settings.prefer_localhost_replica)
    {
        // Force local replicas to always be included
        auto first_non_local_replica = std::partition(replicas.begin(), replicas.end(), [](const auto & replica) { return replica.is_local; });
        size_t local_replicas_count = first_non_local_replica - replicas.begin();

        if (local_replicas_count == replicas_needed)
        {
            /// We have exactly as many local replicas as needed, nothing else to do
            return;
        }

        if (local_replicas_count > replicas_needed)
        {
            /// We can use only local replicas, shuffle them
            std::shuffle(replicas.begin(), first_non_local_replica, gen);
            return;
        }

        /// Shuffle just the non-local replicas
        std::shuffle(first_non_local_replica, replicas.end(), gen);
        return;
    }

    std::shuffle(replicas.begin(), replicas.end(), gen);
}

}

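/// In the constructor below, every distinct replica endpoint of the source cluster becomes its
/// own single-replica shard; duplicate host:port pairs are collapsed via unique_hosts, and
/// max_replicas_from_shard (0 = unlimited) caps how many replicas each source shard contributes.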
Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Settings & settings, size_t max_replicas_from_shard)
{
    if (from.addresses_with_failover.empty())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cluster is empty");

    UInt32 shard_num = 0;
    std::set<std::pair<String, int>> unique_hosts;
    for (size_t shard_index : collections::range(0, from.shards_info.size()))
    {
        auto create_shards_from_replicas = [&](std::span<const Address> replicas)
        {
            for (const auto & address : replicas)
            {
                if (!unique_hosts.emplace(address.host_name, address.port).second)
                    continue; /// Duplicate host, skip.

                ShardInfo info;
                info.shard_num = ++shard_num;

                if (address.is_local)
                    info.local_addresses.push_back(address);

                info.all_addresses.push_back(address);

                auto pool = ConnectionPoolFactory::instance().get(
                    static_cast<unsigned>(settings.distributed_connections_pool_size),
                    address.host_name,
                    address.port,
                    address.default_database,
                    address.user,
                    address.password,
                    address.quota_key,
                    address.cluster,
                    address.cluster_secret,
                    "server",
                    address.compression,
                    address.secure,
                    address.priority);

                info.pool = std::make_shared<ConnectionPoolWithFailover>(ConnectionPoolPtrs{pool}, settings.load_balancing);
                info.per_replica_pools = {std::move(pool)};

                addresses_with_failover.emplace_back(Addresses{address});
                shards_info.emplace_back(std::move(info));
            }
        };

        const auto & replicas = from.addresses_with_failover[shard_index];
        if (!max_replicas_from_shard || replicas.size() <= max_replicas_from_shard)
        {
            create_shards_from_replicas(replicas);
        }
        else
        {
            auto shuffled_replicas = replicas;
            // Shuffle replicas so we don't always pick the same subset
            shuffleReplicas(shuffled_replicas, settings, max_replicas_from_shard);
            create_shards_from_replicas(std::span{shuffled_replicas.begin(), max_replicas_from_shard});
        }
    }

    secret = from.secret;
    name = from.name;

    initMisc();
}

Cluster::Cluster(Cluster::SubclusterTag, const Cluster & from, const std::vector<size_t> & indices)
{
    for (size_t index : indices)
    {
        shards_info.emplace_back(from.shards_info.at(index));

        if (!from.addresses_with_failover.empty())
            addresses_with_failover.emplace_back(from.addresses_with_failover.at(index));
    }

    secret = from.secret;
    name = from.name;

    initMisc();
}

std::vector<Strings> Cluster::getHostIDs() const
{
    std::vector<Strings> host_ids;
    host_ids.resize(addresses_with_failover.size());
    for (size_t i = 0; i != addresses_with_failover.size(); ++i)
    {
        const auto & addresses = addresses_with_failover[i];
        host_ids[i].resize(addresses.size());
        for (size_t j = 0; j != addresses.size(); ++j)
            host_ids[i][j] = addresses[j].toString();
    }
    return host_ids;
}

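/// e.g. filterAddressesByShardOrReplica(0, 0) returns every address in the cluster,
/// (2, 0) returns all replicas of shard #2, and (2, 1) only the first replica of shard #2
/// (both numbers are 1-based; 0 means "no filter").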
std::vector<const Cluster::Address *> Cluster::filterAddressesByShardOrReplica(size_t only_shard_num, size_t only_replica_num) const
{
    std::vector<const Address *> res;

    auto enumerate_replicas = [&](size_t shard_index)
    {
        if (shard_index > addresses_with_failover.size())
            throw Exception(ErrorCodes::INVALID_SHARD_ID, "Cluster {} doesn't have shard #{}", name, shard_index);
        const auto & replicas = addresses_with_failover[shard_index - 1];
        if (only_replica_num)
        {
            if (only_replica_num > replicas.size())
                throw Exception(ErrorCodes::NO_SUCH_REPLICA, "Cluster {} doesn't have replica #{} in shard #{}", name, only_replica_num, shard_index);
            res.emplace_back(&replicas[only_replica_num - 1]);
        }
        else
        {
            for (const auto & addr : replicas)
                res.emplace_back(&addr);
        }
    };

    if (only_shard_num)
    {
        enumerate_replicas(only_shard_num);
    }
    else
    {
        for (size_t shard_index = 1; shard_index <= addresses_with_failover.size(); ++shard_index)
            enumerate_replicas(shard_index);
    }

    return res;
}

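/// Sketch of the two path flavours returned below (values are hypothetical): the compact form
/// is "shard2_all_replicas", while the full form concatenates one directory name per replica,
/// e.g. "default@example01:9000#db,default@example02:9000#db" (see Address::toFullString).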
const std::string & Cluster::ShardInfo::insertPathForInternalReplication(bool prefer_localhost_replica, bool use_compact_format) const
{
    if (!has_internal_replication)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "internal_replication is not set");

    const auto & paths = insert_path_for_internal_replication;
    if (!use_compact_format)
    {
        const auto & path = prefer_localhost_replica ? paths.prefer_localhost_replica : paths.no_prefer_localhost_replica;
        if (path.size() > NAME_MAX)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR,
                "Path '{}' for async distributed INSERT is too long (exceeds the {} limit)", path, NAME_MAX);
        }
        return path;
    }
    else
    {
        return paths.compact;
    }
}

bool Cluster::maybeCrossReplication() const
{
    /// Cluster can be used for cross-replication if some replicas have different default database names,
    /// so one clickhouse-server instance can contain multiple replicas.

    if (addresses_with_failover.empty())
        return false;

    const String & database_name = addresses_with_failover.front().front().default_database;
    for (const auto & shard : addresses_with_failover)
        for (const auto & replica : shard)
            if (replica.default_database != database_name)
                return true;

    return false;
}

}