ClickHouse/programs/server/Server.cpp


#include "Server.h"
#include <memory>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <pwd.h>
#include <unistd.h>
#include <Poco/Net/HTTPServer.h>
#include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <Poco/Config.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/phdr_cache.h>
#include <Common/ErrorHandlers.h>
#include <base/getMemoryAmount.h>
#include <base/getAvailableMemoryAmount.h>
#include <base/errnoToString.h>
#include <base/coverage.h>
#include <base/getFQDNOrHostName.h>
#include <base/safeExit.h>
#include <base/Numa.h>
#include <Common/PoolId.h>
#include <Common/MemoryTracker.h>
#include <Common/MemoryWorker.h>
#include <Common/ClickHouseRevision.h>
#include <Common/DNSResolver.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/CurrentMetrics.h>
#include <Common/ConcurrencyControl.h>
#include <Common/Macros.h>
#include <Common/ShellCommand.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/ZooKeeper/ZooKeeperNodeCache.h>
#include <Common/formatReadable.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <Common/getNumberOfCPUCoresToUse.h>
#include <Common/getExecutablePath.h>
#include <Common/ProfileEvents.h>
#include <Common/Scheduler/IResourceManager.h>
#include <Common/ThreadProfileEvents.h>
#include <Common/ThreadStatus.h>
#include <Common/getMappedArea.h>
#include <Common/remapExecutable.h>
#include <Common/TLDListsHolder.h>
#include <Common/Config/AbstractConfigurationComparison.h>
#include <Common/assertProcessUserMatchesDataOwner.h>
#include <Common/makeSocketAddress.h>
#include <Common/FailPoint.h>
#include <Common/CPUID.h>
#include <Common/HTTPConnectionPool.h>
#include <Common/NamedCollections/NamedCollectionsFactory.h>
#include <Server/waitServersToFinish.h>
#include <Interpreters/Cache/FileCacheFactory.h>
#include <Core/ServerUUID.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/SharedThreadPools.h>
#include <IO/UseSSL.h>
#include <Interpreters/ServerAsynchronousMetrics.h>
#include <Interpreters/DDLWorker.h>
#include <Interpreters/DNSCacheUpdater.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/MaterializedView/RefreshSet.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>
#include <Storages/registerStorages.h>
#include <Databases/registerDatabases.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Common/Scheduler/Nodes/registerSchedulerNodes.h>
#include <Common/Scheduler/Workload/IWorkloadEntityStorage.h>
#include <Common/Config/ConfigReloader.h>
#include <Server/HTTPHandlerFactory.h>
#include "MetricsTransmitter.h"
#include <Common/StatusFile.h>
#include <Server/TCPHandlerFactory.h>
#include <Server/TCPServer.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/ThreadFuzzer.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/filesystemHelpers.h>
#include <Compression/CompressionCodecEncrypted.h>
#include <Server/HTTP/HTTPServerConnectionFactory.h>
#include <Server/MySQLHandlerFactory.h>
#include <Server/PostgreSQLHandlerFactory.h>
#include <Server/ProxyV1HandlerFactory.h>
#include <Server/TLSHandlerFactory.h>
#include <Server/ProtocolServerAdapter.h>
#include <Server/KeeperReadinessHandler.h>
#include <Server/HTTP/HTTPServer.h>
#include <Server/CloudPlacementInfo.h>
#include <Interpreters/AsynchronousInsertQueue.h>
#include <Core/ServerSettings.h>
#include <filesystem>
#include <unordered_set>
#include <Common/Jemalloc.h>
#include "config.h"
#include <Common/config_version.h>
#if defined(OS_LINUX)
# include <cstdlib>
# include <sys/un.h>
# include <sys/mman.h>
# include <sys/ptrace.h>
# include <Common/hasLinuxCapability.h>
#endif
#if USE_SSL
# include <Poco/Net/SecureServerSocket.h>
# include <Server/CertificateReloader.h>
#endif
#if USE_GRPC
# include <Server/GRPCServer.h>
#endif
#if USE_NURAFT
# include <Coordination/FourLetterCommand.h>
# include <Server/KeeperTCPHandlerFactory.h>
#endif
#if USE_AZURE_BLOB_STORAGE
# include <azure/storage/common/internal/xml_wrapper.hpp>
# include <azure/core/diagnostics/logger.hpp>
#endif
#include <incbin.h>
/// A minimal file used when the server is run without installation
INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml");
namespace DB
{
namespace Setting
{
extern const SettingsSeconds http_receive_timeout;
extern const SettingsSeconds http_send_timeout;
extern const SettingsSeconds receive_timeout;
extern const SettingsSeconds send_timeout;
}
namespace MergeTreeSetting
{
extern const MergeTreeSettingsBool allow_remote_fs_zero_copy_replication;
}
namespace ServerSetting
{
extern const ServerSettingsUInt32 allowed_feature_tier;
extern const ServerSettingsUInt32 asynchronous_heavy_metrics_update_period_s;
extern const ServerSettingsUInt32 asynchronous_metrics_update_period_s;
extern const ServerSettingsBool asynchronous_metrics_enable_heavy_metrics;
extern const ServerSettingsBool async_insert_queue_flush_on_shutdown;
extern const ServerSettingsUInt64 async_insert_threads;
extern const ServerSettingsBool async_load_databases;
extern const ServerSettingsBool async_load_system_database;
extern const ServerSettingsUInt64 background_buffer_flush_schedule_pool_size;
extern const ServerSettingsUInt64 background_common_pool_size;
extern const ServerSettingsUInt64 background_distributed_schedule_pool_size;
extern const ServerSettingsUInt64 background_fetches_pool_size;
extern const ServerSettingsFloat background_merges_mutations_concurrency_ratio;
extern const ServerSettingsString background_merges_mutations_scheduling_policy;
extern const ServerSettingsUInt64 background_message_broker_schedule_pool_size;
extern const ServerSettingsUInt64 background_move_pool_size;
extern const ServerSettingsUInt64 background_pool_size;
extern const ServerSettingsUInt64 background_schedule_pool_size;
extern const ServerSettingsUInt64 backups_io_thread_pool_queue_size;
extern const ServerSettingsDouble cache_size_to_ram_max_ratio;
extern const ServerSettingsDouble cannot_allocate_thread_fault_injection_probability;
extern const ServerSettingsUInt64 cgroups_memory_usage_observer_wait_time;
extern const ServerSettingsUInt64 compiled_expression_cache_elements_size;
extern const ServerSettingsUInt64 compiled_expression_cache_size;
extern const ServerSettingsUInt64 concurrent_threads_soft_limit_num;
extern const ServerSettingsUInt64 concurrent_threads_soft_limit_ratio_to_cores;
extern const ServerSettingsUInt64 config_reload_interval_ms;
extern const ServerSettingsUInt64 database_catalog_drop_table_concurrency;
extern const ServerSettingsString default_database;
extern const ServerSettingsBool disable_internal_dns_cache;
extern const ServerSettingsUInt64 disk_connections_soft_limit;
extern const ServerSettingsUInt64 disk_connections_store_limit;
extern const ServerSettingsUInt64 disk_connections_warn_limit;
extern const ServerSettingsBool dns_allow_resolve_names_to_ipv4;
extern const ServerSettingsBool dns_allow_resolve_names_to_ipv6;
extern const ServerSettingsUInt64 dns_cache_max_entries;
extern const ServerSettingsInt32 dns_cache_update_period;
extern const ServerSettingsUInt32 dns_max_consecutive_failures;
extern const ServerSettingsBool enable_azure_sdk_logging;
extern const ServerSettingsBool format_alter_operations_with_parentheses;
extern const ServerSettingsUInt64 global_profiler_cpu_time_period_ns;
extern const ServerSettingsUInt64 global_profiler_real_time_period_ns;
extern const ServerSettingsUInt64 http_connections_soft_limit;
extern const ServerSettingsUInt64 http_connections_store_limit;
extern const ServerSettingsUInt64 http_connections_warn_limit;
extern const ServerSettingsString index_mark_cache_policy;
extern const ServerSettingsUInt64 index_mark_cache_size;
extern const ServerSettingsDouble index_mark_cache_size_ratio;
extern const ServerSettingsString index_uncompressed_cache_policy;
extern const ServerSettingsUInt64 index_uncompressed_cache_size;
extern const ServerSettingsDouble index_uncompressed_cache_size_ratio;
extern const ServerSettingsUInt64 io_thread_pool_queue_size;
extern const ServerSettingsSeconds keep_alive_timeout;
extern const ServerSettingsString mark_cache_policy;
extern const ServerSettingsUInt64 mark_cache_size;
extern const ServerSettingsDouble mark_cache_size_ratio;
extern const ServerSettingsUInt64 max_active_parts_loading_thread_pool_size;
extern const ServerSettingsUInt64 max_backups_io_thread_pool_free_size;
extern const ServerSettingsUInt64 max_backups_io_thread_pool_size;
extern const ServerSettingsUInt64 max_concurrent_insert_queries;
extern const ServerSettingsUInt64 max_concurrent_queries;
extern const ServerSettingsUInt64 max_concurrent_select_queries;
extern const ServerSettingsInt32 max_connections;
extern const ServerSettingsUInt64 max_database_num_to_warn;
extern const ServerSettingsUInt32 max_database_replicated_create_table_thread_pool_size;
extern const ServerSettingsUInt64 max_dictionary_num_to_warn;
extern const ServerSettingsUInt64 max_io_thread_pool_free_size;
extern const ServerSettingsUInt64 max_io_thread_pool_size;
extern const ServerSettingsUInt64 max_keep_alive_requests;
extern const ServerSettingsUInt64 max_outdated_parts_loading_thread_pool_size;
extern const ServerSettingsUInt64 max_partition_size_to_drop;
extern const ServerSettingsUInt64 max_part_num_to_warn;
extern const ServerSettingsUInt64 max_parts_cleaning_thread_pool_size;
extern const ServerSettingsUInt64 max_server_memory_usage;
extern const ServerSettingsDouble max_server_memory_usage_to_ram_ratio;
extern const ServerSettingsUInt64 max_table_num_to_warn;
extern const ServerSettingsUInt64 max_table_size_to_drop;
extern const ServerSettingsUInt64 max_temporary_data_on_disk_size;
extern const ServerSettingsUInt64 max_thread_pool_free_size;
extern const ServerSettingsUInt64 max_thread_pool_size;
extern const ServerSettingsUInt64 max_unexpected_parts_loading_thread_pool_size;
extern const ServerSettingsUInt64 max_view_num_to_warn;
extern const ServerSettingsUInt64 max_waiting_queries;
extern const ServerSettingsUInt64 memory_worker_period_ms;
extern const ServerSettingsUInt64 merges_mutations_memory_usage_soft_limit;
extern const ServerSettingsDouble merges_mutations_memory_usage_to_ram_ratio;
extern const ServerSettingsString merge_workload;
extern const ServerSettingsUInt64 mmap_cache_size;
extern const ServerSettingsString mutation_workload;
extern const ServerSettingsUInt64 page_cache_chunk_size;
extern const ServerSettingsUInt64 page_cache_mmap_size;
extern const ServerSettingsUInt64 page_cache_size;
extern const ServerSettingsBool page_cache_use_madv_free;
extern const ServerSettingsBool page_cache_use_transparent_huge_pages;
extern const ServerSettingsBool prepare_system_log_tables_on_startup;
extern const ServerSettingsBool show_addresses_in_stack_traces;
extern const ServerSettingsBool shutdown_wait_backups_and_restores;
extern const ServerSettingsUInt64 shutdown_wait_unfinished;
extern const ServerSettingsBool shutdown_wait_unfinished_queries;
extern const ServerSettingsUInt64 storage_connections_soft_limit;
extern const ServerSettingsUInt64 storage_connections_store_limit;
extern const ServerSettingsUInt64 storage_connections_warn_limit;
extern const ServerSettingsUInt64 tables_loader_background_pool_size;
extern const ServerSettingsUInt64 tables_loader_foreground_pool_size;
extern const ServerSettingsString temporary_data_in_cache;
extern const ServerSettingsUInt64 thread_pool_queue_size;
extern const ServerSettingsString tmp_policy;
extern const ServerSettingsUInt64 total_memory_profiler_sample_max_allocation_size;
extern const ServerSettingsUInt64 total_memory_profiler_sample_min_allocation_size;
extern const ServerSettingsUInt64 total_memory_profiler_step;
extern const ServerSettingsDouble total_memory_tracker_sample_probability;
extern const ServerSettingsString uncompressed_cache_policy;
extern const ServerSettingsUInt64 uncompressed_cache_size;
extern const ServerSettingsDouble uncompressed_cache_size_ratio;
extern const ServerSettingsBool use_legacy_mongodb_integration;
}
}
namespace CurrentMetrics
{
extern const Metric Revision;
extern const Metric VersionInteger;
extern const Metric MemoryTracking;
extern const Metric MergesMutationsMemoryTracking;
extern const Metric MaxDDLEntryID;
extern const Metric MaxPushedDDLEntryID;
}
namespace ProfileEvents
{
extern const Event MainConfigLoads;
extern const Event ServerStartupMilliseconds;
extern const Event InterfaceNativeSendBytes;
extern const Event InterfaceNativeReceiveBytes;
extern const Event InterfaceHTTPSendBytes;
extern const Event InterfaceHTTPReceiveBytes;
extern const Event InterfacePrometheusSendBytes;
extern const Event InterfacePrometheusReceiveBytes;
extern const Event InterfaceInterserverSendBytes;
extern const Event InterfaceInterserverReceiveBytes;
extern const Event InterfaceMySQLSendBytes;
extern const Event InterfaceMySQLReceiveBytes;
extern const Event InterfacePostgreSQLSendBytes;
extern const Event InterfacePostgreSQLReceiveBytes;
}
namespace fs = std::filesystem;
int mainEntryClickHouseServer(int argc, char ** argv)
{
DB::Server app;
/// Do not fork a separate watchdog process if we are attached to a terminal.
/// Otherwise it breaks gdb usage.
/// Can be overridden by environment variable (cannot use server config at this moment).
if (argc > 0)
{
const char * env_watchdog = getenv("CLICKHOUSE_WATCHDOG_ENABLE"); // NOLINT(concurrency-mt-unsafe)
if (env_watchdog)
{
if (0 == strcmp(env_watchdog, "1"))
app.shouldSetupWatchdog(argv[0]);
/// Other values disable watchdog explicitly.
}
else if (!isatty(STDIN_FILENO) && !isatty(STDOUT_FILENO) && !isatty(STDERR_FILENO))
app.shouldSetupWatchdog(argv[0]);
}
try
{
return app.run(argc, argv);
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}
namespace DB
{
namespace ErrorCodes
{
extern const int NO_ELEMENTS_IN_CONFIG;
extern const int SUPPORT_IS_DISABLED;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int INVALID_CONFIG_PARAMETER;
extern const int NETWORK_ERROR;
extern const int CORRUPTED_DATA;
}
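/// Normalize the 'path' configuration value: trim surrounding whitespace and guarantee a trailing slash,
/// so later path concatenations are well-formed. Throws if the value is empty.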
static std::string getCanonicalPath(std::string && path)
{
Poco::trimInPlace(path);
if (path.empty())
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "path configuration parameter is empty");
if (path.back() != '/')
path += '/';
return std::move(path);
}
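/// Bind the server socket to host:port, honouring the 'listen_reuse_port' and 'listen_backlog' config keys,
/// and start listening. When port 0 is requested, the port actually assigned by the OS is discovered after binding.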
Poco::Net::SocketAddress Server::socketBindListen(
const Poco::Util::AbstractConfiguration & config,
Poco::Net::ServerSocket & socket,
const std::string & host,
UInt16 port,
[[maybe_unused]] bool secure) const
{
auto address = makeSocketAddress(host, port, &logger());
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config.getBool("listen_reuse_port", false));
/// If caller requests any available port from the OS, discover it after binding.
if (port == 0)
{
address = socket.address();
LOG_DEBUG(&logger(), "Requested any available port (port == 0), actual port is {:d}", address.port());
}
socket.listen(/* backlog = */ config.getUInt("listen_backlog", 4096));
return address;
}
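/// Hosts to listen on; when <listen_host> is not configured, only the loopback addresses ::1 and 127.0.0.1 are used.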
Strings getListenHosts(const Poco::Util::AbstractConfiguration & config)
{
auto listen_hosts = DB::getMultipleValuesFromConfig(config, "", "listen_host");
if (listen_hosts.empty())
{
listen_hosts.emplace_back("::1");
listen_hosts.emplace_back("127.0.0.1");
}
return listen_hosts;
}
Strings getInterserverListenHosts(const Poco::Util::AbstractConfiguration & config)
{
auto interserver_listen_hosts = DB::getMultipleValuesFromConfig(config, "", "interserver_listen_host");
if (!interserver_listen_hosts.empty())
return interserver_listen_hosts;
/// Use more general restriction in case of emptiness
return getListenHosts(config);
}
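/// Whether failures to bind a listening socket are tolerated (logged as warnings) instead of aborting startup.
/// Defaults to true when neither <listen_host> nor any per-protocol host/port pair is configured explicitly,
/// i.e. when only the implicit loopback defaults are in play.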
bool getListenTry(const Poco::Util::AbstractConfiguration & config)
{
bool listen_try = config.getBool("listen_try", false);
if (!listen_try)
{
Poco::Util::AbstractConfiguration::Keys protocols;
config.keys("protocols", protocols);
listen_try =
DB::getMultipleValuesFromConfig(config, "", "listen_host").empty() &&
std::none_of(protocols.begin(), protocols.end(), [&](const auto & protocol)
{
return config.has("protocols." + protocol + ".host") && config.has("protocols." + protocol + ".port");
});
}
return listen_try;
}
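/// Create (and optionally start) a protocol server for the given listen host and port config key.
/// Skipped if the port is not configured or a non-stopping server for the same host/port already exists;
/// bind errors are fatal unless listen_try is in effect.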
void Server::createServer(
Poco::Util::AbstractConfiguration & config,
const std::string & listen_host,
const char * port_name,
bool listen_try,
bool start_server,
std::vector<ProtocolServerAdapter> & servers,
CreateServerFunc && func) const
{
/// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
if (config.getString(port_name, "").empty())
return;
/// If we already have an active server for this listen_host/port_name, don't create it again
for (const auto & server : servers)
{
if (!server.isStopping() && server.getListenHost() == listen_host && server.getPortName() == port_name)
return;
}
auto port = config.getInt(port_name);
try
{
servers.push_back(func(port));
if (start_server)
{
servers.back().start();
LOG_INFO(&logger(), "Listening for {}", servers.back().getDescription());
}
global_context->registerServerPort(port_name, port);
}
catch (const Poco::Exception &)
{
if (listen_try)
{
LOG_WARNING(&logger(), "Listen [{}]:{} failed: {}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, "
"consider specifying an address of the protocol family that is not disabled in the <listen_host> element of the configuration file."
" Example for disabled IPv6: <listen_host>0.0.0.0</listen_host>."
" Example for disabled IPv4: <listen_host>::</listen_host>",
listen_host, port, getCurrentExceptionMessage(false));
}
else
{
throw Exception(ErrorCodes::NETWORK_ERROR, "Listen [{}]:{} failed: {}", listen_host, port, getCurrentExceptionMessage(false));
}
}
}
#if defined(OS_LINUX)
namespace
{
void setOOMScore(int value, LoggerRawPtr log)
{
try
{
std::string value_string = std::to_string(value);
DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
buf.write(value_string.c_str(), value_string.size());
buf.next();
buf.close();
}
catch (const Poco::Exception & e)
{
LOG_WARNING(log, "Failed to adjust OOM score: '{}'.", e.displayText());
return;
}
LOG_INFO(log, "Set OOM score adjustment to {}", value);
}
}
#endif
void Server::uninitialize()
{
logger().information("shutting down");
BaseDaemon::uninitialize();
}
int Server::run()
{
if (config().hasOption("help"))
{
Poco::Util::HelpFormatter help_formatter(Server::options());
auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
"positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
commandName());
help_formatter.setHeader(header_str);
help_formatter.format(std::cout);
return 0;
}
if (config().hasOption("version"))
{
std::cout << VERSION_NAME << " server version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
return 0;
}
return Application::run(); // NOLINT
}
void Server::initialize(Poco::Util::Application & self)
{
ConfigProcessor::registerEmbeddedConfig("config.xml", std::string_view(reinterpret_cast<const char *>(gresource_embedded_xmlData), gresource_embedded_xmlSize));
BaseDaemon::initialize(self);
logger().information("starting up");
LOG_INFO(&logger(), "OS name: {}, version: {}, architecture: {}",
Poco::Environment::osName(),
Poco::Environment::osVersion(),
Poco::Environment::osArchitecture());
}
std::string Server::getDefaultCorePath() const
{
return getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)) + "cores";
}
void Server::defineOptions(Poco::Util::OptionSet & options)
{
options.addOption(
Poco::Util::Option("help", "h", "show help and exit")
.required(false)
.repeatable(false)
.binding("help"));
options.addOption(
Poco::Util::Option("version", "V", "show version and exit")
.required(false)
.repeatable(false)
.binding("version"));
BaseDaemon::defineOptions(options);
}
void checkForUsersNotInMainConfig(
const Poco::Util::AbstractConfiguration & config,
const std::string & config_path,
const std::string & users_config_path,
LoggerPtr log)
{
if (config.getBool("skip_check_for_incorrect_settings", false))
return;
if (config.has("users") || config.has("profiles") || config.has("quotas"))
{
/// We cannot throw an exception here, because we still support the obsolete 'conf.d' directory
/// (which corresponds to neither config.d nor users.d) and substitute its configuration into both of them.
LOG_ERROR(log, "The <users>, <profiles> and <quotas> elements should be located in users config file: {} not in main config {}."
" Also note that you should place configuration changes to the appropriate *.d directory like 'users.d'.",
users_config_path, config_path);
}
}
namespace
{
/// Unused in other builds
#if defined(OS_LINUX)
String readLine(const String & path)
{
ReadBufferFromFile in(path);
String contents;
readStringUntilNewlineInto(contents, in);
return contents;
}
int readNumber(const String & path)
{
ReadBufferFromFile in(path);
int result;
readText(result, in);
return result;
}
#endif
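/// Emit startup warnings for common host-level misconfigurations: a slow clock source, disabled memory overcommit,
/// transparent hugepages forced to "always", low pid/thread limits, missing delay accounting, rotational disks
/// without readahead, and low available memory or disk space.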
void sanityChecks(Server & server)
{
std::string data_path = getCanonicalPath(server.config().getString("path", DBMS_DEFAULT_PATH));
std::string logs_path = server.config().getString("logger.log", "");
if (server.logger().is(Poco::Message::PRIO_TEST))
server.context()->addWarningMessage("Server logging level is set to 'test' and performance is degraded. This cannot be used in production.");
#if defined(OS_LINUX)
try
{
const std::unordered_set<std::string> fast_clock_sources = {
// ARM clock
"arch_sys_counter",
// KVM guest clock
"kvm-clock",
// X86 clock
"tsc",
};
const char * filename = "/sys/devices/system/clocksource/clocksource0/current_clocksource";
if (!fast_clock_sources.contains(readLine(filename)))
server.context()->addWarningMessage("Linux is not using a fast clock source. Performance can be degraded. Check " + String(filename));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
const char * filename = "/proc/sys/vm/overcommit_memory";
if (readNumber(filename) == 2)
server.context()->addWarningMessage("Linux memory overcommit is disabled. Check " + String(filename));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
const char * filename = "/sys/kernel/mm/transparent_hugepage/enabled";
if (readLine(filename).find("[always]") != std::string::npos)
server.context()->addWarningMessage("Linux transparent hugepages are set to \"always\". Check " + String(filename));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
const char * filename = "/proc/sys/kernel/pid_max";
if (readNumber(filename) < 30000)
server.context()->addWarningMessage("Linux max PID is too low. Check " + String(filename));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
const char * filename = "/proc/sys/kernel/threads-max";
if (readNumber(filename) < 30000)
server.context()->addWarningMessage("Linux threads max count is too low. Check " + String(filename));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
const char * filename = "/proc/sys/kernel/task_delayacct";
if (readNumber(filename) == 0)
server.context()->addWarningMessage("Delay accounting is not enabled, OSIOWaitMicroseconds will not be gathered. You can enable it using `echo 1 > " + String(filename) + "` or by using sysctl.");
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
std::string dev_id = getBlockDeviceId(data_path);
if (getBlockDeviceType(dev_id) == BlockDeviceType::ROT && getBlockDeviceReadAheadBytes(dev_id) == 0)
server.context()->addWarningMessage("Rotational disk with disabled readahead is in use. Performance can be degraded. Used for data: " + String(data_path));
#endif
try
{
if (getAvailableMemoryAmount() < (2l << 30))
server.context()->addWarningMessage("Available memory at server startup is too low (less than 2 GiB).");
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
if (!enoughSpaceInDirectory(data_path, 1ull << 30))
server.context()->addWarningMessage("Available disk space for data at server startup is too low (less than 1 GiB): " + String(data_path));
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
try
{
if (!logs_path.empty())
{
auto logs_parent = fs::path(logs_path).parent_path();
if (!enoughSpaceInDirectory(logs_parent, 1ull << 30))
server.context()->addWarningMessage("Available disk space for logs at server startup is too low (less than 1 GiB): " + String(logs_parent));
}
}
catch (...) // NOLINT(bugprone-empty-catch)
{
}
if (server.context()->getMergeTreeSettings()[MergeTreeSetting::allow_remote_fs_zero_copy_replication])
{
server.context()->addWarningMessage("The setting 'allow_remote_fs_zero_copy_replication' is enabled for MergeTree tables."
" But the feature of 'zero-copy replication' is under development and is not ready for production."
" The usage of this feature can lead to data corruption and loss. The setting should be disabled in production.");
}
}
}
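/// Run the queries listed under <startup_scripts> in the server config. Each entry may carry an optional
/// <condition> query; its <query> is executed only if the condition evaluates to 1/true. Both run in a freshly
/// created internal query context. A hypothetical config sketch (child element names are arbitrary keys):
///
/// <startup_scripts>
///     <initialize>
///         <condition>SELECT count() = 0 FROM system.tables WHERE database = 'default' AND name = 'startup_marker'</condition>
///         <query>CREATE TABLE default.startup_marker (d Date) ENGINE = MergeTree ORDER BY d</query>
///     </initialize>
/// </startup_scripts>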
void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log)
{
try
{
Poco::Util::AbstractConfiguration::Keys keys;
config.keys("startup_scripts", keys);
SetResultDetailsFunc callback;
for (const auto & key : keys)
{
std::string full_prefix = "startup_scripts." + key;
if (config.has(full_prefix + ".condition"))
{
auto condition = config.getString(full_prefix + ".condition");
auto condition_read_buffer = ReadBufferFromString(condition);
auto condition_write_buffer = WriteBufferFromOwnString();
LOG_DEBUG(log, "Checking startup query condition `{}`", condition);
auto startup_context = Context::createCopy(context);
startup_context->makeQueryContext();
executeQuery(condition_read_buffer, condition_write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
auto result = condition_write_buffer.str();
if (result != "1\n" && result != "true\n")
{
if (result != "0\n" && result != "false\n")
context->addWarningMessage(fmt::format("The condition query returned `{}`, which can't be interpreted as a boolean (`0`, `false`, `1`, `true`). Will skip this query.", result));
continue;
}
LOG_DEBUG(log, "Condition is true, will execute the query next");
}
auto query = config.getString(full_prefix + ".query");
auto read_buffer = ReadBufferFromString(query);
auto write_buffer = WriteBufferFromOwnString();
LOG_DEBUG(log, "Executing query `{}`", query);
auto startup_context = Context::createCopy(context);
startup_context->makeQueryContext();
executeQuery(read_buffer, write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
}
}
catch (...)
{
tryLogCurrentException(log, "Failed to parse startup scripts file");
}
}
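/// Bridge Azure SDK diagnostics into the ClickHouse log (guarded by the enable_azure_sdk_logging server setting).
/// The server log level determines how verbose a level is requested from the Azure SDK, and captured messages are
/// re-emitted through the 'AzureSDK' logger, mapped down to debug/trace/test priorities.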
static void initializeAzureSDKLogger(
[[ maybe_unused ]] const ServerSettings & server_settings,
[[ maybe_unused ]] int server_logs_level)
{
#if USE_AZURE_BLOB_STORAGE
if (!server_settings[ServerSetting::enable_azure_sdk_logging])
return;
using AzureLogsLevel = Azure::Core::Diagnostics::Logger::Level;
static const std::unordered_map<AzureLogsLevel, std::pair<Poco::Message::Priority, DB::LogsLevel>> azure_to_server_mapping =
{
{AzureLogsLevel::Error, {Poco::Message::PRIO_DEBUG, LogsLevel::debug}},
{AzureLogsLevel::Warning, {Poco::Message::PRIO_DEBUG, LogsLevel::debug}},
{AzureLogsLevel::Informational, {Poco::Message::PRIO_TRACE, LogsLevel::trace}},
{AzureLogsLevel::Verbose, {Poco::Message::PRIO_TEST, LogsLevel::test}},
};
static const std::map<Poco::Message::Priority, AzureLogsLevel> server_to_azure_mapping =
{
{Poco::Message::PRIO_DEBUG, AzureLogsLevel::Warning},
{Poco::Message::PRIO_TRACE, AzureLogsLevel::Informational},
{Poco::Message::PRIO_TEST, AzureLogsLevel::Verbose},
};
static const LoggerPtr azure_sdk_logger = getLogger("AzureSDK");
auto it = server_to_azure_mapping.lower_bound(static_cast<Poco::Message::Priority>(server_logs_level));
chassert(it != server_to_azure_mapping.end());
Azure::Core::Diagnostics::Logger::SetLevel(it->second);
Azure::Core::Diagnostics::Logger::SetListener([](AzureLogsLevel level, const std::string & message)
{
auto [poco_level, db_level] = azure_to_server_mapping.at(level);
LOG_IMPL(azure_sdk_logger, db_level, poco_level, fmt::runtime(message));
});
#endif
}
#if defined(SANITIZER)
static std::vector<String> getSanitizerNames()
{
std::vector<String> names;
#if defined(ADDRESS_SANITIZER)
names.push_back("address");
#endif
#if defined(THREAD_SANITIZER)
names.push_back("thread");
#endif
#if defined(MEMORY_SANITIZER)
names.push_back("memory");
#endif
#if defined(UNDEFINED_BEHAVIOR_SANITIZER)
names.push_back("undefined behavior");
#endif
return names;
}
#endif
int Server::main(const std::vector<std::string> & /*args*/)
try
{
#if USE_JEMALLOC
setJemallocBackgroundThreads(true);
#endif
Stopwatch startup_watch;
Poco::Logger * log = &logger();
UseSSL use_ssl;
MainThreadStatus::getInstance();
ServerSettings server_settings;
server_settings.loadSettingsFromConfig(config());
ASTAlterCommand::setFormatAlterCommandsWithParentheses(server_settings[ServerSetting::format_alter_operations_with_parentheses]);
StackTrace::setShowAddresses(server_settings[ServerSetting::show_addresses_in_stack_traces]);
#if USE_HDFS
/// This will point libhdfs3 to the right location for its config.
/// Note: this has to be done once at server initialization, because 'setenv' is not thread-safe.
String libhdfs3_conf = config().getString("hdfs.libhdfs3_conf", "");
if (!libhdfs3_conf.empty())
{
if (std::filesystem::path{libhdfs3_conf}.is_relative() && !std::filesystem::exists(libhdfs3_conf))
{
const String config_path = config().getString("config-file", "config.xml");
const auto config_dir = std::filesystem::path{config_path}.remove_filename();
if (std::filesystem::exists(config_dir / libhdfs3_conf))
libhdfs3_conf = std::filesystem::absolute(config_dir / libhdfs3_conf);
}
setenv("LIBHDFS3_CONF", libhdfs3_conf.c_str(), true /* overwrite */); // NOLINT
}
#endif
/// When building openssl into clickhouse, clickhouse owns the configuration
/// Therefore, the clickhouse openssl configuration should be kept separate from
/// the OS. Default to the one in the standard config directory, unless overridden
/// by a key in the config.
/// Note: this has to be done once at server initialization, because 'setenv' is not thread-safe.
if (config().has("opensslconf"))
{
std::string opensslconf_path = config().getString("opensslconf");
setenv("OPENSSL_CONF", opensslconf_path.c_str(), true); /// NOLINT
}
else
{
const String config_path = config().getString("config-file", "config.xml");
const auto config_dir = std::filesystem::path{config_path}.replace_filename("openssl.conf");
setenv("OPENSSL_CONF", config_dir.c_str(), true); /// NOLINT
}
if (auto total_numa_memory = getNumaNodesTotalMemory(); total_numa_memory.has_value())
{
LOG_INFO(
log, "ClickHouse is bound to a subset of NUMA nodes. Total memory of all available nodes: {}", ReadableSize(*total_numa_memory));
}
registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions(server_settings[ServerSetting::use_legacy_mongodb_integration]);
registerDatabases();
registerStorages(server_settings[ServerSetting::use_legacy_mongodb_integration]);
registerDictionaries(server_settings[ServerSetting::use_legacy_mongodb_integration]);
registerDisks(/* global_skip_access_check= */ false);
registerFormats();
registerRemoteFileMetadatas();
registerSchedulerNodes();
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
/** Context contains everything that query execution depends on:
* settings, available functions, data types, aggregate functions, databases, ...
*/
auto shared_context = Context::createShared();
global_context = Context::createGlobal(shared_context.get());
global_context->makeGlobalContext();
global_context->setApplicationType(Context::ApplicationType::SERVER);
#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
global_context->addWarningMessage("Server was built in debug mode. It will work slowly.");
#endif
if (ThreadFuzzer::instance().isEffective())
global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstably.");
#if defined(SANITIZER)
auto sanitizers = getSanitizerNames();
String log_message;
if (sanitizers.empty())
log_message = "sanitizer";
else if (sanitizers.size() == 1)
log_message = fmt::format("{} sanitizer", sanitizers.front());
else
log_message = fmt::format("sanitizers ({})", fmt::join(sanitizers, ", "));
global_context->addWarningMessage(fmt::format("Server was built with {}. It will work slowly.", log_message));
#endif
#if defined(SANITIZE_COVERAGE) || WITH_COVERAGE
global_context->addWarningMessage("Server was built with code coverage. It will work slowly.");
#endif
const size_t physical_server_memory = getMemoryAmount();
LOG_INFO(
log,
"Available RAM: {}; logical cores: {}; used cores: {}.",
formatReadableSizeWithBinarySuffix(physical_server_memory),
std::thread::hardware_concurrency(),
getNumberOfCPUCoresToUse() // on ARM processors this may count only the cores that are currently enabled
);
#if defined(__x86_64__)
String cpu_info;
#define COLLECT_FLAG(X) \
if (CPU::have##X()) \
{ \
if (!cpu_info.empty()) \
cpu_info += ", "; \
cpu_info += #X; \
}
CPU_ID_ENUMERATE(COLLECT_FLAG)
#undef COLLECT_FLAG
LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info);
#endif
bool has_trace_collector = false;
/// Disable it if we collect test coverage information, because it will work extremely slow.
#if !WITH_COVERAGE
/// Profilers cannot work reliably with any other libunwind or without PHDR cache.
has_trace_collector = hasPHDRCache() && config().has("trace_log");
#endif
/// Describe multiple reasons when query profiler cannot work.
#if WITH_COVERAGE
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage.");
#endif
#if defined(SANITIZER)
LOG_INFO(log, "Query Profiler is disabled because it cannot work under sanitizers"
" when two different stack unwinding methods will interfere with each other.");
#endif
if (!hasPHDRCache())
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created"
" (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe).");
// Initialize global thread pool. Do it before we fetch configs from zookeeper
// nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
// ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
GlobalThreadPool::initialize(
server_settings[ServerSetting::max_thread_pool_size],
server_settings[ServerSetting::max_thread_pool_free_size],
server_settings[ServerSetting::thread_pool_queue_size],
has_trace_collector ? server_settings[ServerSetting::global_profiler_real_time_period_ns] : 0,
has_trace_collector ? server_settings[ServerSetting::global_profiler_cpu_time_period_ns] : 0);
if (has_trace_collector)
{
global_context->createTraceCollector();
/// Set up server-wide memory profiler (for total memory tracker).
if (server_settings[ServerSetting::total_memory_profiler_step])
total_memory_tracker.setProfilerStep(server_settings[ServerSetting::total_memory_profiler_step]);
if (server_settings[ServerSetting::total_memory_tracker_sample_probability] > 0.0)
total_memory_tracker.setSampleProbability(server_settings[ServerSetting::total_memory_tracker_sample_probability]);
if (server_settings[ServerSetting::total_memory_profiler_sample_min_allocation_size])
total_memory_tracker.setSampleMinAllocationSize(server_settings[ServerSetting::total_memory_profiler_sample_min_allocation_size]);
if (server_settings[ServerSetting::total_memory_profiler_sample_max_allocation_size])
total_memory_tracker.setSampleMaxAllocationSize(server_settings[ServerSetting::total_memory_profiler_sample_max_allocation_size]);
}
Poco::ThreadPool server_pool(
/* minCapacity */3,
/* maxCapacity */server_settings[ServerSetting::max_connections],
/* idleTime */60,
/* stackSize */POCO_THREAD_STACK_SIZE,
server_settings[ServerSetting::global_profiler_real_time_period_ns],
server_settings[ServerSetting::global_profiler_cpu_time_period_ns]);
std::mutex servers_lock;
std::vector<ProtocolServerAdapter> servers;
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
/// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
SCOPE_EXIT({
Stopwatch watch;
LOG_INFO(log, "Waiting for background threads");
GlobalThreadPool::instance().shutdown();
LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
});
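/// Background worker that periodically refreshes server-wide memory accounting (the total memory tracker and the
/// MemoryTracking metric) from an external source such as jemalloc or cgroups statistics, depending on availability.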
MemoryWorker memory_worker(global_context->getServerSettings()[ServerSetting::memory_worker_period_ms]);
/// This object will periodically calculate some metrics.
ServerAsynchronousMetrics async_metrics(
global_context,
server_settings[ServerSetting::asynchronous_metrics_update_period_s],
server_settings[ServerSetting::asynchronous_metrics_enable_heavy_metrics],
server_settings[ServerSetting::asynchronous_heavy_metrics_update_period_s],
[&]() -> std::vector<ProtocolServerMetrics>
{
std::vector<ProtocolServerMetrics> metrics;
std::lock_guard lock(servers_lock);
metrics.reserve(servers_to_start_before_tables.size() + servers.size());
for (const auto & server : servers_to_start_before_tables)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
for (const auto & server : servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
return metrics;
},
/*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
/*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);
/// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown()
/// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads.
SCOPE_EXIT({
async_metrics.stop();
/** Ask to cancel background jobs all table engines,
* and also query_log.
* It is important to do early, not in destructor of Context, because
* table engines could use Context on destroy.
*/
LOG_INFO(log, "Shutting down storages.");
global_context->shutdown();
LOG_DEBUG(log, "Shut down storages.");
if (!servers_to_start_before_tables.empty())
{
LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
size_t current_connections = 0;
{
std::lock_guard lock(servers_lock);
for (auto & server : servers_to_start_before_tables)
{
server.stop();
current_connections += server.currentConnections();
}
}
if (current_connections)
LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
else
LOG_INFO(log, "Closed all listening sockets.");
if (current_connections > 0)
current_connections = waitServersToFinish(servers_to_start_before_tables, servers_lock, server_settings[ServerSetting::shutdown_wait_unfinished]);
if (current_connections)
LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections);
else
LOG_INFO(log, "Closed connections to servers for tables.");
}
global_context->shutdownKeeperDispatcher();
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
server_pool.joinAll();
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
* At this moment, no one could own shared part of Context.
*/
global_context->resetSharedContext();
global_context.reset();
shared_context.reset();
LOG_DEBUG(log, "Destroyed global context.");
});
#if USE_AZURE_BLOB_STORAGE
/// It makes sense to deinitialize libxml after joining of all threads
/// in global pool because libxml uses thread-local memory allocations via
/// 'pthread_key_create' and 'pthread_setspecific' which should be deallocated
/// at 'pthread_exit'. Deinitialization of libxml leads to call of 'pthread_key_delete'
/// and if it is done before joining of threads, allocated memory will not be freed
/// and there may be memory leaks in threads that used libxml.
GlobalThreadPool::instance().addOnDestroyCallback([]
{
Azure::Storage::_internal::XmlGlobalDeinitialize();
});
#endif
getIOThreadPool().initialize(
server_settings[ServerSetting::max_io_thread_pool_size],
server_settings[ServerSetting::max_io_thread_pool_free_size],
server_settings[ServerSetting::io_thread_pool_queue_size]);
getBackupsIOThreadPool().initialize(
server_settings[ServerSetting::max_backups_io_thread_pool_size],
server_settings[ServerSetting::max_backups_io_thread_pool_free_size],
server_settings[ServerSetting::backups_io_thread_pool_queue_size]);
getActivePartsLoadingThreadPool().initialize(
server_settings[ServerSetting::max_active_parts_loading_thread_pool_size],
0, // We don't need any threads once all the parts are loaded
server_settings[ServerSetting::max_active_parts_loading_thread_pool_size]);
getOutdatedPartsLoadingThreadPool().initialize(
server_settings[ServerSetting::max_outdated_parts_loading_thread_pool_size],
0, // We don't need any threads once all the parts are loaded
server_settings[ServerSetting::max_outdated_parts_loading_thread_pool_size]);
/// It could grow if we need to synchronously wait until all the data parts are loaded.
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
server_settings[ServerSetting::max_active_parts_loading_thread_pool_size]
);
getUnexpectedPartsLoadingThreadPool().initialize(
server_settings[ServerSetting::max_unexpected_parts_loading_thread_pool_size],
0, // We don't need any threads once all the parts are loaded
server_settings[ServerSetting::max_unexpected_parts_loading_thread_pool_size]);
/// It could grow if we need to synchronously wait until all the data parts are loaded.
getUnexpectedPartsLoadingThreadPool().setMaxTurboThreads(
server_settings[ServerSetting::max_active_parts_loading_thread_pool_size]
);
getPartsCleaningThreadPool().initialize(
server_settings[ServerSetting::max_parts_cleaning_thread_pool_size],
0, // We don't need any threads once all the parts are deleted
server_settings[ServerSetting::max_parts_cleaning_thread_pool_size]);
auto max_database_replicated_create_table_thread_pool_size = server_settings[ServerSetting::max_database_replicated_create_table_thread_pool_size]
? server_settings[ServerSetting::max_database_replicated_create_table_thread_pool_size]
: getNumberOfCPUCoresToUse();
getDatabaseReplicatedCreateTablesThreadPool().initialize(
max_database_replicated_create_table_thread_pool_size,
0, // We don't need any threads once all the tables are created
max_database_replicated_create_table_thread_pool_size);
getDatabaseCatalogDropTablesThreadPool().initialize(
server_settings[ServerSetting::database_catalog_drop_table_concurrency],
0, // We don't need any threads if there are no DROP queries.
server_settings[ServerSetting::database_catalog_drop_table_concurrency]);
/// Initialize global local cache for remote filesystem.
if (config().has("local_cache_for_remote_fs"))
{
bool enable = config().getBool("local_cache_for_remote_fs.enable", false);
if (enable)
{
String root_dir = config().getString("local_cache_for_remote_fs.root_dir");
UInt64 limit_size = config().getUInt64("local_cache_for_remote_fs.limit_size");
UInt64 bytes_read_before_flush
= config().getUInt64("local_cache_for_remote_fs.bytes_read_before_flush", DBMS_DEFAULT_BUFFER_SIZE);
ExternalDataSourceCache::instance().initOnce(global_context, root_dir, limit_size, bytes_read_before_flush);
}
}
std::string path_str = getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH));
fs::path path = path_str;
/// Check that the process user id matches the owner of the data.
assertProcessUserMatchesDataOwner(path_str, [&](const std::string & message){ global_context->addWarningMessage(message); });
global_context->setPath(path_str);
StatusFile status{path / "status", StatusFile::write_full_info};
ServerUUID::load(path / "uuid", log);
PlacementInfo::PlacementInfo::instance().initialize(config());
zkutil::validateZooKeeperConfig(config());
bool has_zookeeper = zkutil::hasZooKeeperConfig(config());
zkutil::ZooKeeperNodeCache main_config_zk_node_cache([&] { return global_context->getZooKeeper(); });
zkutil::EventPtr main_config_zk_changed_event = std::make_shared<Poco::Event>();
if (loaded_config.has_zk_includes)
{
auto old_configuration = loaded_config.configuration;
ConfigProcessor config_processor(config_path);
loaded_config = config_processor.loadConfigWithZooKeeperIncludes(
main_config_zk_node_cache, main_config_zk_changed_event, /* fallback_to_preprocessed = */ true);
config_processor.savePreprocessedConfig(loaded_config, path_str);
config().removeConfiguration(old_configuration.get());
config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false);
global_context->setConfig(loaded_config.configuration);
}
Settings::checkNoSettingNamesAtTopLevel(config(), config_path);
/// We need to reload server settings because config could be updated via zookeeper.
server_settings.loadSettingsFromConfig(config());
#if defined(OS_LINUX)
std::string executable_path = getExecutablePath();
if (!executable_path.empty())
{
/// Integrity check based on checksum of the executable code.
/// Note: it is not intended to protect from malicious party,
/// because the reference checksum can be easily modified as well.
/// And we don't involve asymmetric encryption with PKI yet.
/// It's only intended to protect from faulty hardware.
/// Note: it is only based on machine code.
/// But there are other sections of the binary (e.g. exception handling tables)
/// that are interpreted (not executed) but can alter the behaviour of the program as well.
/// Please keep the below log messages in-sync with the ones in daemon/BaseDaemon.cpp
if (stored_binary_hash.empty())
{
LOG_WARNING(log, "Integrity check of the executable skipped because the reference checksum could not be read.");
}
else
{
String calculated_binary_hash = getHashOfLoadedBinaryHex();
if (calculated_binary_hash == stored_binary_hash)
{
LOG_INFO(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
}
else
{
/// If program is run under debugger, ptrace will fail.
if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
{
/// The program is run under a debugger. Modification of its binary image is OK for breakpoints.
global_context->addWarningMessage(fmt::format(
"Server is run under debugger and its binary image is modified (most likely with breakpoints).",
calculated_binary_hash));
}
else
{
throw Exception(
ErrorCodes::CORRUPTED_DATA,
"Calculated checksum of the executable ({0}) does not correspond"
" to the reference checksum stored in the executable ({1})."
" This may indicate one of the following:"
" - the executable {2} was changed just after startup;"
" - the executable {2} was corrupted on disk due to faulty hardware;"
" - the loaded executable was corrupted in memory due to faulty hardware;"
" - the file {2} was intentionally modified;"
" - a logical error in the code.",
calculated_binary_hash,
stored_binary_hash,
executable_path);
}
}
}
}
else
executable_path = "/usr/bin/clickhouse"; /// It is used for information messages.
/// After full config loaded
{
if (config().getBool("remap_executable", false))
{
LOG_DEBUG(log, "Will remap executable in memory.");
size_t size = remapExecutable();
LOG_DEBUG(log, "The code ({}) in memory has been successfully remapped.", ReadableSize(size));
}
if (config().getBool("mlock_executable", false))
{
if (hasLinuxCapability(CAP_IPC_LOCK))
{
try
{
/// Get the memory area with (current) code segment.
/// It's better to lock only the code segment instead of calling "mlockall",
/// because otherwise debug info will be also locked in memory, and it can be huge.
auto [addr, len] = getMappedArea(reinterpret_cast<void *>(mainEntryClickHouseServer));
LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlock(addr, len))
LOG_WARNING(log, "Failed mlock: {}", errnoToString());
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
}
catch (...)
{
LOG_WARNING(log, "Cannot mlock: {}", getCurrentExceptionMessage(false));
}
}
else
{
LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled."
" It could happen due to incorrect ClickHouse package installation."
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems.", executable_path);
}
}
}
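/// For reference: both knobs above ("remap_executable" and "mlock_executable") are plain boolean keys in the
/// top-level server config. A minimal illustrative snippet (values are examples, not recommendations):
///     <remap_executable>false</remap_executable>
///     <mlock_executable>true</mlock_executable>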
FailPointInjection::enableFromGlobalConfig(config());
#endif
memory_worker.start();
#if defined(OS_LINUX)
int default_oom_score = 0;
#if !defined(NDEBUG)
/// In debug version on Linux, increase oom score so that clickhouse is killed
/// first, instead of some service. Use a carefully chosen random score of 555:
/// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
/// whatever errors that occur, because it's just a debugging aid and we don't
/// care if it breaks.
default_oom_score = 555;
#endif
int oom_score = config().getInt("oom_score", default_oom_score);
if (oom_score)
setOOMScore(oom_score, log);
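/// The OOM score adjustment above is Linux-only and can be overridden from the server config;
/// the value below is purely illustrative:
///     <oom_score>300</oom_score>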
#endif
global_context->setRemoteHostFilter(config());
global_context->setHTTPHeaderFilter(config());
/// Try to increase limit on number of open files.
{
rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim))
throw Poco::Exception("Cannot getrlimit");
if (rlim.rlim_cur == rlim.rlim_max)
{
LOG_DEBUG(log, "rlimit on number of file descriptors is {}", rlim.rlim_cur);
}
else
{
rlim_t old = rlim.rlim_cur;
rlim.rlim_cur = config().getUInt("max_open_files", static_cast<unsigned>(rlim.rlim_max));
int rc = setrlimit(RLIMIT_NOFILE, &rlim);
if (rc != 0)
LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, errnoToString());
else
LOG_DEBUG(log, "Set max number of file descriptors to {} (was {}).", rlim.rlim_cur, old);
}
}
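/// To raise the soft limit on open files explicitly, set "max_open_files" in the server config;
/// otherwise the current hard limit is used. Illustrative snippet (the number is an example):
///     <max_open_files>1048576</max_open_files>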
/// Try to increase limit on number of threads.
{
rlimit rlim;
if (getrlimit(RLIMIT_NPROC, &rlim))
throw Poco::Exception("Cannot getrlimit");
if (rlim.rlim_cur == rlim.rlim_max)
{
LOG_DEBUG(log, "rlimit on number of threads is {}", rlim.rlim_cur);
}
else
{
rlim_t old = rlim.rlim_cur;
rlim.rlim_cur = rlim.rlim_max;
int rc = setrlimit(RLIMIT_NPROC, &rlim);
if (rc != 0)
{
LOG_WARNING(log, "Cannot set max number of threads to {}. error: {}", rlim.rlim_cur, errnoToString());
rlim.rlim_cur = old;
}
else
{
LOG_DEBUG(log, "Set max number of threads to {} (was {}).", rlim.rlim_cur, old);
}
}
if (rlim.rlim_cur < 30000)
{
global_context->addWarningMessage("Maximum number of threads is lower than 30000. There could be problems with handling a lot of simultaneous queries.");
}
}
static ServerErrorHandler error_handler;
Poco::ErrorHandler::set(&error_handler);
/// Initialize DateLUT early, to not interfere with running time of first query.
LOG_DEBUG(log, "Initializing DateLUT.");
DateLUT::serverTimezoneInstance();
LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::serverTimezoneInstance().getTimeZone());
/// Storage with temporary data for processing of heavy queries.
if (!server_settings[ServerSetting::tmp_policy].value.empty())
{
global_context->setTemporaryStoragePolicy(server_settings[ServerSetting::tmp_policy], server_settings[ServerSetting::max_temporary_data_on_disk_size]);
}
else if (!server_settings[ServerSetting::temporary_data_in_cache].value.empty())
{
global_context->setTemporaryStorageInCache(server_settings[ServerSetting::temporary_data_in_cache], server_settings[ServerSetting::max_temporary_data_on_disk_size]);
}
else
{
std::string temporary_path = config().getString("tmp_path", path / "tmp/");
global_context->setTemporaryStoragePath(temporary_path, server_settings[ServerSetting::max_temporary_data_on_disk_size]);
}
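/// Temporary data location is chosen in this priority order: "tmp_policy", then "temporary_data_in_cache",
/// then "tmp_path" (which defaults to the "tmp/" subdirectory of the server path). An illustrative override
/// (the path is an example):
///     <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>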
/** Directory with 'flags': files indicating temporary settings for the server set by system administrator.
* Flags may be cleared automatically after being applied by the server.
* Examples: do repair of local data; clone all replicated tables from replica.
*/
{
auto flags_path = path / "flags/";
fs::create_directories(flags_path);
global_context->setFlagsPath(flags_path);
}
/** Directory with user provided files that are usable by 'file' table function.
*/
{
std::string user_files_path = config().getString("user_files_path", path / "user_files/");
global_context->setUserFilesPath(user_files_path);
fs::create_directories(user_files_path);
}
{
std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", path / "dictionaries_lib/");
global_context->setDictionariesLibPath(dictionaries_lib_path);
fs::create_directories(dictionaries_lib_path);
}
{
std::string user_scripts_path = config().getString("user_scripts_path", path / "user_scripts/");
global_context->setUserScriptsPath(user_scripts_path);
fs::create_directories(user_scripts_path);
}
/// top_level_domains_lists
{
const std::string & top_level_domains_path = config().getString("top_level_domains_path", path / "top_level_domains/");
TLDListsHolder::getInstance().parseConfig(fs::path(top_level_domains_path) / "", config());
}
{
fs::create_directories(path / "data");
fs::create_directories(path / "metadata");
/// Directory with metadata of tables which were marked as dropped by the Atomic database
fs::create_directories(path / "metadata_dropped");
}
if (config().has("interserver_http_port") && config().has("interserver_https_port"))
throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both http and https interserver ports are specified");
static const auto interserver_tags =
{
std::make_tuple("interserver_http_host", "interserver_http_port", "http"),
std::make_tuple("interserver_https_host", "interserver_https_port", "https")
};
for (auto [host_tag, port_tag, scheme] : interserver_tags)
{
if (config().has(port_tag))
{
String this_host = config().getString(host_tag, "");
if (this_host.empty())
{
this_host = getFQDNOrHostName();
LOG_DEBUG(log, "Configuration parameter '{}' doesn't exist or is empty. Will use '{}' as the replica host.",
host_tag, this_host);
}
String port_str = config().getString(port_tag);
int port = parse<int>(port_str);
if (port < 0 || port > 0xFFFF)
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Out of range '{}': {}", String(port_tag), port);
global_context->setInterserverIOAddress(this_host, port);
global_context->setInterserverScheme(scheme);
}
}
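/// Illustrative interserver replication endpoint configuration matching the tags above
/// (the host defaults to the FQDN when omitted; host and port below are examples):
///     <interserver_http_host>replica-1.internal</interserver_http_host>
///     <interserver_http_port>9009</interserver_http_port>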
LOG_DEBUG(log, "Initializing interserver credentials.");
global_context->updateInterserverCredentials(config());
if (config().has("macros"))
global_context->setMacros(std::make_unique<Macros>(config(), "macros", log));
/// Set up caches.
const size_t max_cache_size = static_cast<size_t>(physical_server_memory * server_settings[ServerSetting::cache_size_to_ram_max_ratio]);
String uncompressed_cache_policy = server_settings[ServerSetting::uncompressed_cache_policy];
size_t uncompressed_cache_size = server_settings[ServerSetting::uncompressed_cache_size];
double uncompressed_cache_size_ratio = server_settings[ServerSetting::uncompressed_cache_size_ratio];
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size, uncompressed_cache_size_ratio);
String mark_cache_policy = server_settings[ServerSetting::mark_cache_policy];
size_t mark_cache_size = server_settings[ServerSetting::mark_cache_size];
double mark_cache_size_ratio = server_settings[ServerSetting::mark_cache_size_ratio];
if (mark_cache_size > max_cache_size)
{
mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mark_cache_size));
}
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
size_t page_cache_size = server_settings[ServerSetting::page_cache_size];
if (page_cache_size != 0)
global_context->setPageCache(
server_settings[ServerSetting::page_cache_chunk_size], server_settings[ServerSetting::page_cache_mmap_size],
page_cache_size, server_settings[ServerSetting::page_cache_use_madv_free],
server_settings[ServerSetting::page_cache_use_transparent_huge_pages]);
String index_uncompressed_cache_policy = server_settings[ServerSetting::index_uncompressed_cache_policy];
size_t index_uncompressed_cache_size = server_settings[ServerSetting::index_uncompressed_cache_size];
double index_uncompressed_cache_size_ratio = server_settings[ServerSetting::index_uncompressed_cache_size_ratio];
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
String index_mark_cache_policy = server_settings[ServerSetting::index_mark_cache_policy];
size_t index_mark_cache_size = server_settings[ServerSetting::index_mark_cache_size];
double index_mark_cache_size_ratio = server_settings[ServerSetting::index_mark_cache_size_ratio];
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
}
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
size_t mmap_cache_size = server_settings[ServerSetting::mmap_cache_size];
if (mmap_cache_size > max_cache_size)
{
mmap_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
}
global_context->setMMappedFileCache(mmap_cache_size);
size_t query_cache_max_size_in_bytes = config().getUInt64("query_cache.max_size_in_bytes", DEFAULT_QUERY_CACHE_MAX_SIZE);
size_t query_cache_max_entries = config().getUInt64("query_cache.max_entries", DEFAULT_QUERY_CACHE_MAX_ENTRIES);
size_t query_cache_max_entry_size_in_bytes = config().getUInt64("query_cache.max_entry_size_in_bytes", DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_BYTES);
size_t query_cache_max_entry_size_in_rows = config().getUInt64("query_cache.max_entry_size_in_rows", DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_ROWS);
if (query_cache_max_size_in_bytes > max_cache_size)
{
query_cache_max_size_in_bytes = max_cache_size;
LOG_INFO(log, "Lowered query cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(query_cache_max_size_in_bytes));
}
global_context->setQueryCache(query_cache_max_size_in_bytes, query_cache_max_entries, query_cache_max_entry_size_in_bytes, query_cache_max_entry_size_in_rows);
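/// The query cache limits above map to the following config keys; the numbers below are illustrative only:
///     <query_cache>
///         <max_size_in_bytes>1073741824</max_size_in_bytes>
///         <max_entries>1024</max_entries>
///         <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
///         <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
///     </query_cache>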
#if USE_EMBEDDED_COMPILER
size_t compiled_expression_cache_max_size_in_bytes = server_settings[ServerSetting::compiled_expression_cache_size];
size_t compiled_expression_cache_max_elements = server_settings[ServerSetting::compiled_expression_cache_elements_size];
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements);
#endif
NamedCollectionFactory::instance().loadIfNot();
FileCacheFactory::instance().loadDefaultCaches(config());
/// Initialize main config reloader.
std::string include_from_path = config().getString("include_from", "/etc/metrika.xml");
if (config().has("query_masking_rules"))
{
SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
}
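/// A sketch of a masking rule, assuming the usual <rule>/<regexp>/<replace> layout of "query_masking_rules"
/// (the rule name and pattern below are hypothetical):
///     <query_masking_rules>
///         <rule>
///             <name>hide card numbers</name>
///             <regexp>[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}</regexp>
///             <replace>****</replace>
///         </rule>
///     </query_masking_rules>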
std::optional<CgroupsMemoryUsageObserver> cgroups_memory_usage_observer;
try
{
auto wait_time = server_settings[ServerSetting::cgroups_memory_usage_observer_wait_time];
if (wait_time != 0)
cgroups_memory_usage_observer.emplace(std::chrono::seconds(wait_time));
}
catch (Exception &)
{
tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization");
}
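/// Note: when the observer is enabled, it periodically re-reads the amount of memory available to the
/// cgroup; a callback attached further below (after access control setup) triggers a config reload, so
/// memory-dependent settings such as max_server_memory_usage are recalculated with the new amount.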
std::string cert_path = config().getString("openSSL.server.certificateFile", "");
std::string key_path = config().getString("openSSL.server.privateKeyFile", "");
std::vector<std::string> extra_paths = {include_from_path};
if (!cert_path.empty())
extra_paths.emplace_back(cert_path);
if (!key_path.empty())
extra_paths.emplace_back(key_path);
Poco::Util::AbstractConfiguration::Keys protocols;
config().keys("protocols", protocols);
for (const auto & protocol : protocols)
{
cert_path = config().getString("protocols." + protocol + ".certificateFile", "");
key_path = config().getString("protocols." + protocol + ".privateKeyFile", "");
if (!cert_path.empty())
extra_paths.emplace_back(cert_path);
if (!key_path.empty())
extra_paths.emplace_back(key_path);
}
auto main_config_reloader = std::make_unique<ConfigReloader>(
config_path,
extra_paths,
config().getString("path", DBMS_DEFAULT_PATH),
std::move(main_config_zk_node_cache),
main_config_zk_changed_event,
[&, config_file = config().getString("config-file", "config.xml")](ConfigurationPtr config, bool initial_loading)
{
if (!initial_loading)
{
/// Add back "config-file" key which is absent in the reloaded config.
config->setString("config-file", config_file);
/// Apply config updates in global context.
global_context->setConfig(config);
}
Settings::checkNoSettingNamesAtTopLevel(*config, config_path);
ServerSettings new_server_settings;
new_server_settings.loadSettingsFromConfig(*config);
size_t max_server_memory_usage = new_server_settings[ServerSetting::max_server_memory_usage];
double max_server_memory_usage_to_ram_ratio = new_server_settings[ServerSetting::max_server_memory_usage_to_ram_ratio];
size_t current_physical_server_memory = getMemoryAmount(); /// With cgroups, the amount of memory available to the server can be changed dynamically.
size_t default_max_server_memory_usage = static_cast<size_t>(current_physical_server_memory * max_server_memory_usage_to_ram_ratio);
if (max_server_memory_usage == 0)
{
max_server_memory_usage = default_max_server_memory_usage;
LOG_INFO(log, "Setting max_server_memory_usage was set to {}"
" ({} available * {:.2f} max_server_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(max_server_memory_usage),
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
max_server_memory_usage_to_ram_ratio);
}
else if (max_server_memory_usage > default_max_server_memory_usage)
{
max_server_memory_usage = default_max_server_memory_usage;
LOG_INFO(log, "Setting max_server_memory_usage was lowered to {}"
" because the system has low amount of memory. The amount was"
" calculated as {} available"
" * {:.2f} max_server_memory_usage_to_ram_ratio",
formatReadableSizeWithBinarySuffix(max_server_memory_usage),
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
max_server_memory_usage_to_ram_ratio);
}
total_memory_tracker.setHardLimit(max_server_memory_usage);
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
size_t merges_mutations_memory_usage_soft_limit = new_server_settings[ServerSetting::merges_mutations_memory_usage_soft_limit];
size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings[ServerSetting::merges_mutations_memory_usage_to_ram_ratio]);
if (merges_mutations_memory_usage_soft_limit == 0)
{
merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
LOG_INFO(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
new_server_settings[ServerSetting::merges_mutations_memory_usage_to_ram_ratio]);
}
else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
{
merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
LOG_WARNING(log, "Setting merges_mutations_memory_usage_soft_limit was lowered to {}"
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
new_server_settings[ServerSetting::merges_mutations_memory_usage_to_ram_ratio]);
}
LOG_INFO(log, "Merges and mutations memory limit is set to {}",
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit));
background_memory_tracker.setSoftLimit(merges_mutations_memory_usage_soft_limit);
background_memory_tracker.setDescription("(background)");
background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
// FIXME logging-related things need synchronization -- see the 'Logger * log' saved
// in a lot of places. For now, disable updating log configuration without server restart.
//setTextLog(global_context->getTextLog());
updateLevels(*config, logger());
global_context->setClustersConfig(config, has_zookeeper);
global_context->setMacros(std::make_unique<Macros>(*config, "macros", log));
global_context->setExternalAuthenticatorsConfig(*config);
global_context->setDashboardsConfig(config);
if (global_context->isServerCompletelyStarted())
{
/// It does not make sense to reload anything before server has started.
/// Moreover, it may break initialization order.
global_context->loadOrReloadDictionaries(*config);
global_context->loadOrReloadUserDefinedExecutableFunctions(*config);
}
global_context->setRemoteHostFilter(*config);
global_context->setHTTPHeaderFilter(*config);
global_context->setMaxTableSizeToDrop(new_server_settings[ServerSetting::max_table_size_to_drop]);
global_context->setMaxPartitionSizeToDrop(new_server_settings[ServerSetting::max_partition_size_to_drop]);
global_context->setMaxTableNumToWarn(new_server_settings[ServerSetting::max_table_num_to_warn]);
global_context->setMaxViewNumToWarn(new_server_settings[ServerSetting::max_view_num_to_warn]);
global_context->setMaxDictionaryNumToWarn(new_server_settings[ServerSetting::max_dictionary_num_to_warn]);
global_context->setMaxDatabaseNumToWarn(new_server_settings[ServerSetting::max_database_num_to_warn]);
global_context->setMaxPartNumToWarn(new_server_settings[ServerSetting::max_part_num_to_warn]);
global_context->getAccessControl().setAllowTierSettings(new_server_settings[ServerSetting::allowed_feature_tier]);
/// Only for system.server_settings
global_context->setConfigReloaderInterval(new_server_settings[ServerSetting::config_reload_interval_ms]);
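/// The effective ConcurrencyControl limit below is the smaller of concurrent_threads_soft_limit_num and
/// concurrent_threads_soft_limit_ratio_to_cores * number of usable CPU cores (considering only non-zero
/// values); if both are zero, the limit stays unlimited.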
SlotCount concurrent_threads_soft_limit = UnlimitedSlots;
if (new_server_settings[ServerSetting::concurrent_threads_soft_limit_num] > 0 && new_server_settings[ServerSetting::concurrent_threads_soft_limit_num] < concurrent_threads_soft_limit)
concurrent_threads_soft_limit = new_server_settings[ServerSetting::concurrent_threads_soft_limit_num];
if (new_server_settings[ServerSetting::concurrent_threads_soft_limit_ratio_to_cores] > 0)
{
auto value = new_server_settings[ServerSetting::concurrent_threads_soft_limit_ratio_to_cores] * getNumberOfCPUCoresToUse();
if (value > 0 && value < concurrent_threads_soft_limit)
concurrent_threads_soft_limit = value;
}
ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit);
LOG_INFO(log, "ConcurrencyControl limit is set to {}", concurrent_threads_soft_limit);
global_context->getProcessList().setMaxSize(new_server_settings[ServerSetting::max_concurrent_queries]);
global_context->getProcessList().setMaxInsertQueriesAmount(new_server_settings[ServerSetting::max_concurrent_insert_queries]);
global_context->getProcessList().setMaxSelectQueriesAmount(new_server_settings[ServerSetting::max_concurrent_select_queries]);
global_context->getProcessList().setMaxWaitingQueriesAmount(new_server_settings[ServerSetting::max_waiting_queries]);
if (config->has("keeper_server"))
global_context->updateKeeperConfiguration(*config);
/// Reload the number of threads for global pools.
/// Note: If you specified it in the top level config (not in the config of the default profile)
/// then ClickHouse will use it exactly.
/// This is done for backward compatibility.
if (global_context->areBackgroundExecutorsInitialized())
{
auto new_pool_size = new_server_settings[ServerSetting::background_pool_size];
auto new_ratio = new_server_settings[ServerSetting::background_merges_mutations_concurrency_ratio];
global_context->getMergeMutateExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, static_cast<size_t>(new_pool_size * new_ratio));
global_context->getMergeMutateExecutor()->updateSchedulingPolicy(new_server_settings[ServerSetting::background_merges_mutations_scheduling_policy].toString());
}
if (global_context->areBackgroundExecutorsInitialized())
{
auto new_pool_size = new_server_settings[ServerSetting::background_move_pool_size];
global_context->getMovesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}
if (global_context->areBackgroundExecutorsInitialized())
{
auto new_pool_size = new_server_settings[ServerSetting::background_fetches_pool_size];
global_context->getFetchesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}
if (global_context->areBackgroundExecutorsInitialized())
{
auto new_pool_size = new_server_settings[ServerSetting::background_common_pool_size];
global_context->getCommonExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}
global_context->getBufferFlushSchedulePool().increaseThreadsCount(new_server_settings[ServerSetting::background_buffer_flush_schedule_pool_size]);
global_context->getSchedulePool().increaseThreadsCount(new_server_settings[ServerSetting::background_schedule_pool_size]);
global_context->getMessageBrokerSchedulePool().increaseThreadsCount(new_server_settings[ServerSetting::background_message_broker_schedule_pool_size]);
global_context->getDistributedSchedulePool().increaseThreadsCount(new_server_settings[ServerSetting::background_distributed_schedule_pool_size]);
global_context->getAsyncLoader().setMaxThreads(TablesLoaderForegroundPoolId, new_server_settings[ServerSetting::tables_loader_foreground_pool_size]);
global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundLoadPoolId, new_server_settings[ServerSetting::tables_loader_background_pool_size]);
global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundStartupPoolId, new_server_settings[ServerSetting::tables_loader_background_pool_size]);
getIOThreadPool().reloadConfiguration(
new_server_settings[ServerSetting::max_io_thread_pool_size],
new_server_settings[ServerSetting::max_io_thread_pool_free_size],
new_server_settings[ServerSetting::io_thread_pool_queue_size]);
getBackupsIOThreadPool().reloadConfiguration(
new_server_settings[ServerSetting::max_backups_io_thread_pool_size],
new_server_settings[ServerSetting::max_backups_io_thread_pool_free_size],
new_server_settings[ServerSetting::backups_io_thread_pool_queue_size]);
getActivePartsLoadingThreadPool().reloadConfiguration(
new_server_settings[ServerSetting::max_active_parts_loading_thread_pool_size],
0, // We don't need any threads once all the parts are loaded
new_server_settings[ServerSetting::max_active_parts_loading_thread_pool_size]);
getOutdatedPartsLoadingThreadPool().reloadConfiguration(
new_server_settings[ServerSetting::max_outdated_parts_loading_thread_pool_size],
0, // We don't need any threads once all the parts are loaded
new_server_settings[ServerSetting::max_outdated_parts_loading_thread_pool_size]);
/// It could grow if we need to synchronously wait until all the data parts are loaded.
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
new_server_settings[ServerSetting::max_active_parts_loading_thread_pool_size]
);
getPartsCleaningThreadPool().reloadConfiguration(
new_server_settings[ServerSetting::max_parts_cleaning_thread_pool_size],
0, // We don't need any threads once all the parts are deleted
new_server_settings[ServerSetting::max_parts_cleaning_thread_pool_size]);
global_context->setMergeWorkload(new_server_settings[ServerSetting::merge_workload]);
global_context->setMutationWorkload(new_server_settings[ServerSetting::mutation_workload]);
if (config->has("resources"))
{
global_context->getResourceManager()->updateConfiguration(*config);
}
if (!initial_loading)
{
/// We do not load ZooKeeper configuration on the first config loading
/// because TestKeeper server is not started yet.
if (zkutil::hasZooKeeperConfig(*config))
global_context->reloadZooKeeperIfChanged(config);
global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config);
global_context->reloadQueryMaskingRulesIfChanged(config);
if (global_context->isServerCompletelyStarted())
{
std::lock_guard lock(servers_lock);
updateServers(*config, server_pool, async_metrics, servers, servers_to_start_before_tables);
}
}
global_context->updateStorageConfiguration(*config);
global_context->updateInterserverCredentials(*config);
global_context->updateUncompressedCacheConfiguration(*config);
global_context->updateMarkCacheConfiguration(*config);
global_context->updateIndexUncompressedCacheConfiguration(*config);
global_context->updateIndexMarkCacheConfiguration(*config);
global_context->updateMMappedFileCacheConfiguration(*config);
global_context->updateQueryCacheConfiguration(*config);
CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs");
#if USE_SSL
CertificateReloader::instance().tryReloadAll(*config);
#endif
NamedCollectionFactory::instance().reloadFromConfig(*config);
FileCacheFactory::instance().updateSettingsFromConfig(*config);
HTTPConnectionPools::instance().setLimits(
HTTPConnectionPools::Limits{
new_server_settings[ServerSetting::disk_connections_soft_limit],
new_server_settings[ServerSetting::disk_connections_warn_limit],
new_server_settings[ServerSetting::disk_connections_store_limit],
},
HTTPConnectionPools::Limits{
new_server_settings[ServerSetting::storage_connections_soft_limit],
new_server_settings[ServerSetting::storage_connections_warn_limit],
new_server_settings[ServerSetting::storage_connections_store_limit],
},
HTTPConnectionPools::Limits{
new_server_settings[ServerSetting::http_connections_soft_limit],
new_server_settings[ServerSetting::http_connections_warn_limit],
new_server_settings[ServerSetting::http_connections_store_limit],
});
DNSResolver::instance().setFilterSettings(new_server_settings[ServerSetting::dns_allow_resolve_names_to_ipv4], new_server_settings[ServerSetting::dns_allow_resolve_names_to_ipv6]);
if (global_context->isServerCompletelyStarted())
CannotAllocateThreadFaultInjector::setFaultProbability(new_server_settings[ServerSetting::cannot_allocate_thread_fault_injection_probability]);
ProfileEvents::increment(ProfileEvents::MainConfigLoads);
/// Must be the last.
latest_config = config;
});
const auto listen_hosts = getListenHosts(config());
const auto interserver_listen_hosts = getInterserverListenHosts(config());
const auto listen_try = getListenTry(config());
if (config().has("keeper_server.server_id"))
{
#if USE_NURAFT
/// If we don't have a configured connection, probably someone is trying to use clickhouse-server
/// instead of clickhouse-keeper, so start synchronously.
bool can_initialize_keeper_async = false;
if (has_zookeeper) /// We have configured connection to some zookeeper cluster
{
/// If we cannot connect to some other node from our cluster, then we have to wait for our Keeper to start
/// synchronously.
can_initialize_keeper_async = global_context->tryCheckClientConnectionToMyKeeperCluster();
}
/// Initialize keeper RAFT.
global_context->initializeKeeperDispatcher(can_initialize_keeper_async);
FourLetterCommandFactory::registerCommands(*global_context->getKeeperDispatcher());
auto config_getter = [this] () -> const Poco::Util::AbstractConfiguration &
{
return global_context->getConfigRef();
};
for (const auto & listen_host : listen_hosts)
{
/// TCP Keeper
const char * port_name = "keeper_server.tcp_port";
createServer(
config(), listen_host, port_name, listen_try, /* start_server: */ false,
servers_to_start_before_tables,
[&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config(), socket, listen_host, port);
socket.setReceiveTimeout(Poco::Timespan(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0));
socket.setSendTimeout(Poco::Timespan(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));
return ProtocolServerAdapter(
listen_host,
port_name,
"Keeper (tcp): " + address.toString(),
std::make_unique<TCPServer>(
new KeeperTCPHandlerFactory(
config_getter,
global_context->getKeeperDispatcher(),
global_context->getSettingsRef()[Setting::receive_timeout].totalSeconds(),
global_context->getSettingsRef()[Setting::send_timeout].totalSeconds(),
false),
server_pool,
socket));
});
const char * secure_port_name = "keeper_server.tcp_port_secure";
createServer(
config(), listen_host, secure_port_name, listen_try, /* start_server: */ false,
servers_to_start_before_tables,
[&](UInt16 port) -> ProtocolServerAdapter
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socketBindListen(config(), socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(Poco::Timespan(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0));
socket.setSendTimeout(Poco::Timespan(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));
return ProtocolServerAdapter(
listen_host,
secure_port_name,
"Keeper with secure protocol (tcp_secure): " + address.toString(),
std::make_unique<TCPServer>(
new KeeperTCPHandlerFactory(
config_getter,
global_context->getKeeperDispatcher(),
global_context->getSettingsRef()[Setting::receive_timeout].totalSeconds(),
global_context->getSettingsRef()[Setting::send_timeout].totalSeconds(),
true),
server_pool,
socket));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
/// HTTP control endpoints
port_name = "keeper_server.http_control.port";
createServer(config(), listen_host, port_name, listen_try, /* start_server: */ false,
servers_to_start_before_tables,
[&](UInt16 port) -> ProtocolServerAdapter
{
auto http_context = httpContext();
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
http_params->setTimeout(http_context->getReceiveTimeout());
http_params->setKeepAliveTimeout(keep_alive_timeout);
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config(), socket, listen_host, port);
socket.setReceiveTimeout(http_context->getReceiveTimeout());
socket.setSendTimeout(http_context->getSendTimeout());
return ProtocolServerAdapter(
listen_host,
port_name,
"HTTP Control: http://" + address.toString(),
std::make_unique<HTTPServer>(
std::move(http_context),
createKeeperHTTPControlMainHandlerFactory(
config_getter(),
global_context->getKeeperDispatcher(),
"KeeperHTTPControlHandler-factory"), server_pool, socket, http_params));
});
}
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "ClickHouse server built without NuRaft library. Cannot use internal coordination.");
#endif
}
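/// For reference, the Keeper listeners above correspond to these config keys; a minimal hypothetical
/// example (the port values are illustrative):
///     <keeper_server>
///         <server_id>1</server_id>
///         <tcp_port>9181</tcp_port>
///         <!-- optional: <tcp_port_secure>, <http_control><port>...</port></http_control> -->
///     </keeper_server>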
{
std::lock_guard lock(servers_lock);
/// We should start interserver communications before (and, more importantly, shut them down after) tables.
/// Because the server can wait for long-running queries (for example in tcp_handler) after the interserver handler was already shut down.
/// In this case we will have replicated tables which are unable to send any parts to other replicas, but still can
/// communicate with zookeeper, execute merges, etc.
createInterserverServers(
config(),
interserver_listen_hosts,
listen_try,
server_pool,
async_metrics,
servers_to_start_before_tables,
/* start_servers= */ false);
for (auto & server : servers_to_start_before_tables)
{
server.start();
LOG_INFO(log, "Listening for {}", server.getDescription());
}
}
/// Initialize access storages.
auto & access_control = global_context->getAccessControl();
try
{
access_control.setupFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); });
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while setting up access control.");
throw;
}
if (cgroups_memory_usage_observer)
{
cgroups_memory_usage_observer->setOnMemoryAmountAvailableChangedFn([&]() { main_config_reloader->reload(); });
cgroups_memory_usage_observer->startThread();
}
/// Reload config in SYSTEM RELOAD CONFIG query.
global_context->setConfigReloadCallback([&]()
{
main_config_reloader->reload();
access_control.reload(AccessControl::ReloadMode::USERS_CONFIG_ONLY);
});
global_context->setStopServersCallback([&](const ServerType & server_type)
{
std::lock_guard lock(servers_lock);
stopServers(servers, server_type);
});
global_context->setStartServersCallback([&](const ServerType & server_type)
{
std::lock_guard lock(servers_lock);
createServers(
config(),
listen_hosts,
listen_try,
server_pool,
async_metrics,
servers,
/* start_servers= */ true,
server_type);
});
/// Limit on total number of concurrently executed queries.
global_context->getProcessList().setMaxSize(server_settings[ServerSetting::max_concurrent_queries]);
/// Load global settings from default_profile and system_profile.
global_context->setDefaultProfiles(config());
/// Initialize background executors after we load default_profile config.
/// This is needed to load proper values of background_pool_size etc.
global_context->initializeBackgroundExecutorsIfNeeded();
if (server_settings[ServerSetting::async_insert_threads])
{
global_context->setAsynchronousInsertQueue(std::make_shared<AsynchronousInsertQueue>(
global_context,
server_settings[ServerSetting::async_insert_threads],
server_settings[ServerSetting::async_insert_queue_flush_on_shutdown]));
}
/// Set path for format schema files
fs::path format_schema_path(config().getString("format_schema_path", path / "format_schemas/"));
global_context->setFormatSchemaPath(format_schema_path);
fs::create_directories(format_schema_path);
/// Set the path for google proto files
if (config().has("google_protos_path"))
global_context->setGoogleProtosPath(fs::weakly_canonical(config().getString("google_protos_path")));
/// Set path for filesystem caches
fs::path filesystem_caches_path(config().getString("filesystem_caches_path", ""));
if (!filesystem_caches_path.empty())
global_context->setFilesystemCachesPath(filesystem_caches_path);
/// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
/// Additionally, making the check after the default profile is initialized.
/// It is important to initialize MergeTreeSettings after Settings, to support compatibility for MergeTreeSettings.
sanityChecks(*this);
/// Check sanity of MergeTreeSettings on server startup
{
/// All settings can be changed in the global config
bool allowed_experimental = true;
bool allowed_beta = true;
size_t background_pool_tasks = global_context->getMergeMutateExecutor()->getMaxTasksCount();
global_context->getMergeTreeSettings().sanityCheck(background_pool_tasks, allowed_experimental, allowed_beta);
global_context->getReplicatedMergeTreeSettings().sanityCheck(background_pool_tasks, allowed_experimental, allowed_beta);
}
/// Try to set up encryption. If there are errors in the config, the error will be printed and the server won't start.
CompressionCodecEncrypted::Configuration::instance().load(config(), "encryption_codecs");
/// DNSCacheUpdater uses BackgroundSchedulePool which lives in shared context
/// and thus this object must be created after the SCOPE_EXIT object where shared
/// context is destroyed.
/// In addition this object has to be created before the loading of the tables.
std::unique_ptr<DNSCacheUpdater> dns_cache_updater;
if (server_settings[ServerSetting::disable_internal_dns_cache])
{
/// Disable DNS caching at all
DNSResolver::instance().setDisableCacheFlag();
LOG_DEBUG(log, "DNS caching disabled");
}
else
{
DNSResolver::instance().setCacheMaxEntries(server_settings[ServerSetting::dns_cache_max_entries]);
/// Initialize a watcher periodically updating DNS cache
dns_cache_updater = std::make_unique<DNSCacheUpdater>(
global_context, server_settings[ServerSetting::dns_cache_update_period], server_settings[ServerSetting::dns_max_consecutive_failures]);
}
if (dns_cache_updater)
dns_cache_updater->start();
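/// Internal DNS caching can be disabled entirely, or its refresh period adjusted, via the server settings
/// used above; illustrative overrides (values are examples, the two options are alternatives):
///     <disable_internal_dns_cache>1</disable_internal_dns_cache>
///     <dns_cache_update_period>15</dns_cache_update_period>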
/// Set current database name before loading tables and databases because
/// system logs may copy global context.
std::string default_database = server_settings[ServerSetting::default_database].toString();
global_context->setCurrentDatabaseNameInGlobalContext(default_database);
LOG_INFO(log, "Loading metadata from {}", path_str);
LoadTaskPtrs load_system_metadata_tasks;
LoadTaskPtrs load_metadata_tasks;
// Make sure that if an exception is thrown during async startup, new async loading jobs are not going to be started.
// This is important for the case when an exception is thrown from metadata loading with `async_load_databases = false`,
// to avoid simultaneously running table startups and destructing databases.
SCOPE_EXIT_SAFE(
LOG_INFO(log, "Stopping AsyncLoader.");
// Waits for all currently running jobs to finish and do not run any other pending jobs.
global_context->getAsyncLoader().stop();
);
try
{
/// Don't run background queries until we loaded tables.
/// (In particular things would break if a background drop query happens before the
/// loadMarkedAsDroppedTables() call below - it'll see dropped table metadata and try to
/// drop the table a second time and throw an exception.)
global_context->getRefreshSet().setRefreshesStopped(true);
auto & database_catalog = DatabaseCatalog::instance();
/// We load temporary database first, because projections need it.
database_catalog.initializeAndLoadTemporaryDatabase();
load_system_metadata_tasks = loadMetadataSystem(global_context, server_settings[ServerSetting::async_load_system_database]);
maybeConvertSystemDatabase(global_context, load_system_metadata_tasks);
/// Startup scripts can depend on the system log tables.
if (config().has("startup_scripts") && !server_settings[ServerSetting::prepare_system_log_tables_on_startup].changed)
global_context->setServerSetting("prepare_system_log_tables_on_startup", true);
/// After attaching system databases we can initialize system log.
global_context->initializeSystemLogs();
global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
/// Build loggers before table startup so that log messages produced while tables
/// are attached become available in system.text_log
buildLoggers(config(), logger());
initializeAzureSDKLogger(server_settings, logger().getLevel());
/// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper);
attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA));
attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
/// Firstly remove partially dropped databases, to avoid race with MaterializedMySQLSyncThread,
/// that may execute DROP before loadMarkedAsDroppedTables() in background,
/// and so loadMarkedAsDroppedTables() will find it and try to add, and UUID will overlap.
database_catalog.loadMarkedAsDroppedTables();
database_catalog.createBackgroundTasks();
/// Then, load remaining databases (some of them may be loaded asynchronously)
load_metadata_tasks = loadMetadata(global_context, default_database, server_settings[ServerSetting::async_load_databases]);
/// If we need to convert database engines, disable async tables loading
convertDatabasesEnginesIfNeed(load_metadata_tasks, global_context);
database_catalog.startupBackgroundTasks();
/// After loading validate that default database exists
database_catalog.assertDatabaseExists(default_database);
/// Load user-defined SQL functions.
global_context->getUserDefinedSQLObjectsStorage().loadObjects();
/// Load WORKLOADs and RESOURCEs.
global_context->getWorkloadEntityStorage().loadEntities();
global_context->getRefreshSet().setRefreshesStopped(false);
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while loading metadata");
throw;
}
bool found_stop_flag = false;
if (has_zookeeper && global_context->getMacros()->getMacroMap().contains("replica"))
{
try
{
auto zookeeper = global_context->getZooKeeper();
String stop_flag_path = "/clickhouse/stop_replicated_ddl_queries/{replica}";
stop_flag_path = global_context->getMacros()->expand(stop_flag_path);
found_stop_flag = zookeeper->exists(stop_flag_path);
}
catch (const Coordination::Exception & e)
{
if (e.code != Coordination::Error::ZCONNECTIONLOSS)
throw;
tryLogCurrentException(log);
}
}
if (found_stop_flag)
LOG_INFO(log, "Found a stop flag for replicated DDL queries. They will be disabled");
else
DatabaseCatalog::instance().startReplicatedDDLQueries();
LOG_DEBUG(log, "Loaded metadata.");
if (has_trace_collector)
2019-07-25 22:35:47 +00:00
global_context->initializeTraceCollector();
#if defined(OS_LINUX)
auto tasks_stats_provider = TasksStatsCounters::findBestAvailableProvider();
if (tasks_stats_provider == TasksStatsCounters::MetricsProvider::None)
{
LOG_INFO(log, "It looks like this system does not have procfs mounted at /proc location,"
" nor does the clickhouse-server process have the CAP_NET_ADMIN capability."
" 'taskstats' performance statistics will be disabled."
" It could happen due to incorrect ClickHouse package installation."
" You can try to resolve the problem manually with 'sudo setcap cap_net_admin=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems."
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.",
executable_path);
}
else
{
LOG_INFO(log, "Tasks stats provider: {}", TasksStatsCounters::metricsProviderString(tasks_stats_provider));
}
if (!hasLinuxCapability(CAP_SYS_NICE))
{
LOG_INFO(log, "It looks like the process has no CAP_SYS_NICE capability, the setting 'os_thread_priority' will have no effect."
" It could happen due to incorrect ClickHouse package installation."
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems.",
executable_path);
}
#else
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");
#endif
{
attachSystemTablesAsync(global_context, *DatabaseCatalog::instance().getSystemDatabase(), async_metrics);
{
std::lock_guard lock(servers_lock);
createServers(config(), listen_hosts, listen_try, server_pool, async_metrics, servers);
if (servers.empty())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
}
if (servers.empty())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
#if USE_SSL
CertificateReloader::instance().tryLoad(config());
CertificateReloader::instance().tryLoadClient(config());
#endif
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
async_metrics.start();
global_context->setAsynchronousMetrics(&async_metrics);
main_config_reloader->start();
access_control.startPeriodicReloading();
/// try to load dictionaries immediately, throw on error and die
try
{
global_context->loadOrReloadDictionaries(config());
if (!config().getBool("dictionaries_lazy_load", true) && config().getBool("wait_dictionaries_load_at_startup", true))
global_context->waitForDictionariesLoad();
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while loading dictionaries.");
throw;
}
/// try to load embedded dictionaries immediately, throw on error and die
try
{
global_context->tryCreateEmbeddedDictionaries(config());
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while loading embedded dictionaries.");
throw;
}
/// try to load user defined executable functions, throw on error and die
try
{
global_context->loadOrReloadUserDefinedExecutableFunctions(config());
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while loading user defined executable functions.");
throw;
}
if (has_zookeeper && config().has("distributed_ddl"))
{
/// DDL worker should be started after all tables were loaded
String ddl_queue_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/");
String ddl_replicas_path = config().getString("distributed_ddl.replicas_path", "/clickhouse/task_queue/replicas/");
int pool_size = config().getInt("distributed_ddl.pool_size", 1);
if (pool_size < 1)
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "distributed_ddl.pool_size should be greater than 0");
global_context->setDDLWorker(
std::make_unique<DDLWorker>(
pool_size,
ddl_queue_path,
ddl_replicas_path,
global_context,
&config(),
"distributed_ddl",
"DDLWorker",
&CurrentMetrics::MaxDDLEntryID,
&CurrentMetrics::MaxPushedDDLEntryID),
joinTasks(load_system_metadata_tasks, load_metadata_tasks));
}
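/// Illustrative distributed DDL configuration matching the keys read above (values are examples):
///     <distributed_ddl>
///         <path>/clickhouse/task_queue/ddl/</path>
///         <pool_size>2</pool_size>
///     </distributed_ddl>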
/// Do not keep tasks in server, they should be kept inside databases. Used here to make dependent tasks only.
load_system_metadata_tasks.clear();
load_system_metadata_tasks.shrink_to_fit();
load_metadata_tasks.clear();
load_metadata_tasks.shrink_to_fit();
if (config().has("startup_scripts"))
loadStartupScripts(config(), global_context, log);
{
std::lock_guard lock(servers_lock);
for (auto & server : servers)
{
server.start();
LOG_INFO(log, "Listening for {}", server.getDescription());
}
global_context->setServerCompletelyStarted();
LOG_INFO(log, "Ready for connections.");
}
startup_watch.stop();
ProfileEvents::increment(ProfileEvents::ServerStartupMilliseconds, startup_watch.elapsedMilliseconds());
CannotAllocateThreadFaultInjector::setFaultProbability(server_settings[ServerSetting::cannot_allocate_thread_fault_injection_probability]);
#if USE_GWP_ASAN
GWPAsan::initFinished();
#endif
try
{
global_context->startClusterDiscovery();
}
catch (...)
{
tryLogCurrentException(log, "Caught exception while starting cluster discovery");
}
#if defined(OS_LINUX)
/// Tell the service manager that service startup is finished.
/// NOTE: the parent clickhouse-watchdog process must do systemdNotify("MAINPID={}\n", child_pid); before
/// the child process notifies 'READY=1'.
systemdNotify("READY=1\n");
#endif
SCOPE_EXIT_SAFE({
LOG_DEBUG(log, "Received termination signal.");
/// Stop reloading of the main config. This must be done before everything else because it
/// can try to access/modify already deleted objects.
/// E.g. it can recreate new servers or it may pass a changed config to some destroyed parts of ContextSharedPart.
main_config_reloader.reset();
access_control.stopPeriodicReloading();
is_cancelled = true;
LOG_DEBUG(log, "Waiting for current connections to close.");
size_t current_connections = 0;
{
std::lock_guard lock(servers_lock);
for (auto & server : servers)
{
server.stop();
current_connections += server.currentConnections();
}
}
if (current_connections)
LOG_WARNING(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
else
LOG_INFO(log, "Closed all listening sockets.");
/// Wait for unfinished backups and restores.
/// This must be done after closing listening sockets (so that no new backups/restores can be started) but before ProcessList::killAllQueries
/// (because killAllQueries() will cancel all running backups/restores).
if (server_settings[ServerSetting::shutdown_wait_backups_and_restores])
global_context->waitAllBackupsAndRestores();
/// Killing remaining queries.
if (!server_settings[ServerSetting::shutdown_wait_unfinished_queries])
global_context->getProcessList().killAllQueries();
if (current_connections)
current_connections = waitServersToFinish(servers, servers_lock, server_settings[ServerSetting::shutdown_wait_unfinished]);
if (current_connections)
LOG_WARNING(log, "Closed connections. But {} remain."
" Tip: To increase wait time add to config: <shutdown_wait_unfinished>60</shutdown_wait_unfinished>", current_connections);
else
LOG_INFO(log, "Closed connections.");
dns_cache_updater.reset();
if (current_connections)
{
/// There is no better way to force connections to close in Poco.
/// Otherwise connection handlers will continue to live
/// (they are effectively dangling objects, but they use the global thread pool,
/// whose destructor will wait for their threads, preventing server shutdown).
/// Dump coverage here, because std::atexit callback would not be called.
dumpCoverageReportIfPossible();
LOG_WARNING(log, "Will shutdown forcefully.");
safeExit(0);
}
});
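/// The settings used in the shutdown sequence above map to server-level config keys of the same
/// names. A minimal sketch (illustrative values, not the defaults):
///
///     <shutdown_wait_unfinished>60</shutdown_wait_unfinished>                    <!-- seconds to wait for remaining connections -->
///     <shutdown_wait_unfinished_queries>1</shutdown_wait_unfinished_queries>     <!-- wait for running queries instead of killing them -->
///     <shutdown_wait_backups_and_restores>1</shutdown_wait_backups_and_restores> <!-- wait for unfinished BACKUP/RESTORE commands -->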
std::vector<std::unique_ptr<MetricsTransmitter>> metrics_transmitters;
for (const auto & graphite_key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
{
metrics_transmitters.emplace_back(std::make_unique<MetricsTransmitter>(
global_context->getConfigRef(), graphite_key, async_metrics));
}
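/// One MetricsTransmitter is created per <graphite> section found in the server config. An
/// illustrative section (key names follow the sample config.xml and may vary; values are examples only):
///
///     <graphite>
///         <host>localhost</host>
///         <port>42000</port>
///         <interval>60</interval>
///         <root_path>one_min</root_path>
///         <metrics>true</metrics>
///         <events>true</events>
///         <asynchronous_metrics>true</asynchronous_metrics>
///     </graphite>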
waitForTerminationRequest();
}
return Application::EXIT_OK;
}
catch (...)
{
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
auto code = getCurrentExceptionCode();
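/// Process exit statuses are truncated to 8 bits; if the exception code happens to map to 0,
/// return -1 so the failure is not mistaken for success.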
return static_cast<UInt8>(code) ? code : -1;
}
std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
const Poco::Util::AbstractConfiguration & config,
const std::string & protocol,
Poco::Net::HTTPServerParams::Ptr http_params,
AsynchronousMetrics & async_metrics,
bool & is_secure)
{
auto create_factory = [&](const std::string & type, const std::string & conf_name) -> TCPServerConnectionFactory::Ptr
{
if (type == "tcp")
return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes));
if (type == "tls")
#if USE_SSL
return TCPServerConnectionFactory::Ptr(new TLSHandlerFactory(*this, conf_name));
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
if (type == "proxy1")
return TCPServerConnectionFactory::Ptr(new ProxyV1HandlerFactory(*this, conf_name));
if (type == "mysql")
return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes));
if (type == "postgres")
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes));
if (type == "http")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes)
);
if (type == "prometheus")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes)
);
if (type == "interserver")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"), ProfileEvents::InterfaceInterserverReceiveBytes, ProfileEvents::InterfaceInterserverSendBytes)
);
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
};
std::string conf_name = "protocols." + protocol;
std::string prefix = conf_name + ".";
std::unordered_set<std::string> pset {conf_name};
auto stack = std::make_unique<TCPProtocolStackFactory>(*this, conf_name);
while (true)
{
// If there is no "type", this entry is just an endpoint that references another protocol.
if (config.has(prefix + "type"))
{
std::string type = config.getString(prefix + "type");
if (type == "tls")
{
if (is_secure)
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' contains more than one TLS layer", protocol);
is_secure = true;
}
stack->append(create_factory(type, conf_name));
}
if (!config.has(prefix + "impl"))
break;
conf_name = "protocols." + config.getString(prefix + "impl");
prefix = conf_name + ".";
if (!pset.insert(conf_name).second)
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' configuration contains a loop on '{}'", protocol, conf_name);
}
return stack;
}
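/// The loop above follows "impl" references under <protocols>, appending one connection factory
/// per "type" it encounters (endpoint first, innermost transport last). A hypothetical stack that
/// would resolve to PROXYv1 -> TLS -> native TCP:
///
///     <protocols>
///         <tcp_base>
///             <type>tcp</type>
///         </tcp_base>
///         <tls_layer>
///             <type>tls</type>
///             <impl>tcp_base</impl>
///         </tls_layer>
///         <proxy_endpoint>
///             <type>proxy1</type>
///             <impl>tls_layer</impl>
///             <port>9441</port>
///             <description>secure native protocol behind PROXYv1</description>
///         </proxy_endpoint>
///     </protocols>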
HTTPContextPtr Server::httpContext() const
{
return std::make_shared<HTTPContext>(context());
}
void Server::createServers(
Poco::Util::AbstractConfiguration & config,
const Strings & listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers,
bool start_servers,
const ServerType & server_type)
{
const Settings & settings = global_context->getSettingsRef();
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
http_params->setTimeout(settings[Setting::http_receive_timeout]);
http_params->setKeepAliveTimeout(global_context->getServerSettings()[ServerSetting::keep_alive_timeout]);
http_params->setMaxKeepAliveRequests(static_cast<int>(global_context->getServerSettings()[ServerSetting::max_keep_alive_requests]));
Poco::Util::AbstractConfiguration::Keys protocols;
config.keys("protocols", protocols);
for (const auto & protocol : protocols)
{
if (!server_type.shouldStart(ServerType::Type::CUSTOM, protocol))
continue;
std::string prefix = "protocols." + protocol + ".";
std::string port_name = prefix + "port";
std::string description {"<undefined> protocol"};
if (config.has(prefix + "description"))
description = config.getString(prefix + "description");
if (!config.has(prefix + "port"))
continue;
std::vector<std::string> hosts;
if (config.has(prefix + "host"))
hosts.push_back(config.getString(prefix + "host"));
else
hosts = listen_hosts;
for (const auto & host : hosts)
{
bool is_secure = false;
auto stack = buildProtocolStackFromConfig(config, protocol, http_params, async_metrics, is_secure);
if (stack->empty())
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' stack empty", protocol);
createServer(config, host, port_name.c_str(), listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, host, port, is_secure);
socket.setReceiveTimeout(settings[Setting::receive_timeout]);
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
host,
port_name.c_str(),
description + ": " + address.toString(),
std::make_unique<TCPServer>(
stack.release(),
server_pool,
socket,
new Poco::Net::TCPServerParams));
});
}
}
for (const auto & listen_host : listen_hosts)
{
const char * port_name;
if (server_type.shouldStart(ServerType::Type::HTTP))
{
/// HTTP
port_name = "http_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port);
socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
socket.setSendTimeout(settings[Setting::http_send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"http://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
});
}
if (server_type.shouldStart(ServerType::Type::HTTPS))
{
/// HTTPS
port_name = "https_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
socket.setSendTimeout(settings[Setting::http_send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"https://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
if (server_type.shouldStart(ServerType::Type::TCP))
{
/// TCP
port_name = "tcp_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port);
socket.setReceiveTimeout(settings[Setting::receive_timeout]);
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"native protocol (tcp): " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));
});
}
if (server_type.shouldStart(ServerType::Type::TCP_WITH_PROXY))
{
/// TCP with PROXY protocol, see https://github.com/wolfeidau/proxyv2/blob/master/docs/proxy-protocol.txt
port_name = "tcp_with_proxy_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port);
socket.setReceiveTimeout(settings[Setting::receive_timeout]);
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"native protocol (tcp) with PROXY: " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));
});
}
if (server_type.shouldStart(ServerType::Type::TCP_SECURE))
{
/// TCP with SSL
port_name = "tcp_port_secure";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings[Setting::receive_timeout]);
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"secure native protocol (tcp_secure): " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
if (server_type.shouldStart(ServerType::Type::MYSQL))
{
port_name = "mysql_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(Poco::Timespan());
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"MySQL compatibility protocol: " + address.toString(),
std::make_unique<TCPServer>(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
});
}
if (server_type.shouldStart(ServerType::Type::POSTGRESQL))
{
port_name = "postgresql_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(Poco::Timespan());
socket.setSendTimeout(settings[Setting::send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"PostgreSQL compatibility protocol: " + address.toString(),
std::make_unique<TCPServer>(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
});
}
#if USE_GRPC
if (server_type.shouldStart(ServerType::Type::GRPC))
{
port_name = "grpc_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::SocketAddress server_address(listen_host, port);
return ProtocolServerAdapter(
listen_host,
port_name,
"gRPC protocol: " + server_address.toString(),
std::make_unique<GRPCServer>(*this, makeSocketAddress(listen_host, port, &logger())));
});
}
#endif
if (server_type.shouldStart(ServerType::Type::PROMETHEUS))
{
/// Prometheus (if defined and not already set up via http_port)
port_name = "prometheus.port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, listen_host, port);
socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
socket.setSendTimeout(settings[Setting::http_send_timeout]);
return ProtocolServerAdapter(
listen_host,
port_name,
"Prometheus: http://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes));
});
}
}
}
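/// The port names above ("http_port", "tcp_port", "mysql_port", "prometheus.port", ...) are plain
/// top-level keys of the server config. A minimal illustrative listener setup (example values):
///
///     <listen_host>0.0.0.0</listen_host>
///     <http_port>8123</http_port>
///     <tcp_port>9000</tcp_port>
///     <mysql_port>9004</mysql_port>
///     <prometheus>
///         <port>9363</port>
///     </prometheus>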
void Server::createInterserverServers(
Poco::Util::AbstractConfiguration & config,
const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers,
bool start_servers,
const ServerType & server_type)
{
const Settings & settings = global_context->getSettingsRef();
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
http_params->setTimeout(settings[Setting::http_receive_timeout]);
http_params->setKeepAliveTimeout(global_context->getServerSettings()[ServerSetting::keep_alive_timeout]);
/// Now iterate over interserver_listen_hosts
for (const auto & interserver_listen_host : interserver_listen_hosts)
{
const char * port_name;
if (server_type.shouldStart(ServerType::Type::INTERSERVER_HTTP))
{
/// Interserver IO HTTP
port_name = "interserver_http_port";
createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
Poco::Net::ServerSocket socket;
auto address = socketBindListen(config, socket, interserver_listen_host, port);
socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
socket.setSendTimeout(settings[Setting::http_send_timeout]);
return ProtocolServerAdapter(
interserver_listen_host,
port_name,
"replica communication (interserver): http://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(),
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
server_pool,
socket,
http_params,
ProfileEvents::InterfaceInterserverReceiveBytes,
ProfileEvents::InterfaceInterserverSendBytes));
});
}
if (server_type.shouldStart(ServerType::Type::INTERSERVER_HTTPS))
{
port_name = "interserver_https_port";
createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socketBindListen(config, socket, interserver_listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
socket.setSendTimeout(settings[Setting::http_send_timeout]);
return ProtocolServerAdapter(
interserver_listen_host,
port_name,
"secure replica communication (interserver): https://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(),
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
server_pool,
socket,
http_params,
ProfileEvents::InterfaceInterserverReceiveBytes,
ProfileEvents::InterfaceInterserverSendBytes));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
}
}
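/// Interserver (replica-to-replica) listeners are configured analogously, e.g. (example values):
///
///     <interserver_listen_host>0.0.0.0</interserver_listen_host>
///     <interserver_http_port>9009</interserver_http_port>
///     <!-- with NetSSL support built in, <interserver_https_port> can be used instead -->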
void Server::stopServers(
std::vector<ProtocolServerAdapter> & servers,
const ServerType & server_type) const
{
LoggerRawPtr log = &logger();
/// Remove servers once all their connections are closed
auto check_server = [&log](const char prefix[], auto & server)
{
if (!server.isStopping())
return false;
size_t current_connections = server.currentConnections();
LOG_DEBUG(log, "Server {}{}: {} ({} connections)",
server.getDescription(),
prefix,
!current_connections ? "finished" : "waiting",
current_connections);
return !current_connections;
};
std::erase_if(servers, std::bind_front(check_server, " (from one of previous remove)"));
for (auto & server : servers)
{
if (!server.isStopping())
{
const std::string server_port_name = server.getPortName();
if (server_type.shouldStop(server_port_name))
server.stop();
}
}
std::erase_if(servers, std::bind_front(check_server, ""));
}
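/// stopServers() filters by ServerType, which is how listeners can be stopped selectively at
/// runtime, e.g. (illustrative SQL):
///
///     SYSTEM STOP LISTEN TCP;                -- stop only the native protocol listener
///     SYSTEM STOP LISTEN CUSTOM 'my_proto';  -- stop one <protocols> endpoint
///     SYSTEM START LISTEN TCP;               -- listeners are re-created via createServers()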
void Server::updateServers(
Poco::Util::AbstractConfiguration & config,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers,
std::vector<ProtocolServerAdapter> & servers_to_start_before_tables)
{
LoggerRawPtr log = &logger();
const auto listen_hosts = getListenHosts(config);
const auto interserver_listen_hosts = getInterserverListenHosts(config);
const auto listen_try = getListenTry(config);
/// Remove servers once all their connections are closed
auto check_server = [&log](const char prefix[], auto & server)
{
if (!server.isStopping())
return false;
size_t current_connections = server.currentConnections();
LOG_DEBUG(log, "Server {}{}: {} ({} connections)",
server.getDescription(),
prefix,
!current_connections ? "finished" : "waiting",
current_connections);
return !current_connections;
};
std::erase_if(servers, std::bind_front(check_server, " (from one of previous reload)"));
Poco::Util::AbstractConfiguration & previous_config = latest_config ? *latest_config : this->config();
std::vector<ProtocolServerAdapter *> all_servers;
all_servers.reserve(servers.size() + servers_to_start_before_tables.size());
for (auto & server : servers)
all_servers.push_back(&server);
for (auto & server : servers_to_start_before_tables)
all_servers.push_back(&server);
for (auto * server : all_servers)
{
if (server->supportsRuntimeReconfiguration() && !server->isStopping())
{
std::string port_name = server->getPortName();
bool has_host = false;
bool is_http = false;
if (port_name.starts_with("protocols."))
{
std::string protocol = port_name.substr(0, port_name.find_last_of('.'));
has_host = config.has(protocol + ".host");
std::string conf_name = protocol;
std::string prefix = protocol + ".";
std::unordered_set<std::string> pset {conf_name};
while (true)
{
if (config.has(prefix + "type"))
{
std::string type = config.getString(prefix + "type");
if (type == "http")
{
is_http = true;
break;
}
}
if (!config.has(prefix + "impl"))
break;
conf_name = "protocols." + config.getString(prefix + "impl");
prefix = conf_name + ".";
if (!pset.insert(conf_name).second)
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' configuration contains a loop on '{}'", protocol, conf_name);
}
}
else
{
/// NOTE: it is better to compare using getPortName() rather than dynamic_cast<>,
/// since HTTPServer is also used for Prometheus and for internal replication
/// (interserver) communication.
is_http = server->getPortName() == "http_port" || server->getPortName() == "https_port";
}
if (!has_host)
has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server->getListenHost()) != listen_hosts.end();
bool has_port = !config.getString(port_name, "").empty();
bool force_restart = is_http && !isSameConfiguration(previous_config, config, "http_handlers");
if (force_restart)
LOG_TRACE(log, "<http_handlers> has been changed, will reload {}", server->getDescription());
if (!has_host || !has_port || config.getInt(server->getPortName()) != server->portNumber() || force_restart)
{
server->stop();
LOG_INFO(log, "Stopped listening for {}", server->getDescription());
}
}
}
createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
createInterserverServers(config, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers_to_start_before_tables, /* start_servers= */ true);
std::erase_if(servers, std::bind_front(check_server, ""));
std::erase_if(servers_to_start_before_tables, std::bind_front(check_server, ""));
}
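/// Note on force_restart above: HTTP(S) listeners are restarted whenever the <http_handlers>
/// section changes, since the handler factory is fixed when the HTTPServer is created. An
/// illustrative <http_handlers> fragment whose modification would trigger such a restart:
///
///     <http_handlers>
///         <rule>
///             <url>/ping</url>
///             <methods>GET</methods>
///             <handler>
///                 <type>static</type>
///                 <response_content>Ok.</response_content>
///             </handler>
///         </rule>
///         <defaults/>
///     </http_handlers>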
}