Commit f54435e7fd: Fix clang-tidy
Parent: b0a5ce7743
Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)
@@ -289,7 +289,7 @@ private:
 connection_entries.emplace_back(std::make_shared<Entry>(
     connection->get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings))));

-pool.scheduleOrThrowOnError(std::bind(&Benchmark::thread, this, connection_entries));
+pool.scheduleOrThrowOnError([this, connection_entries]() mutable { thread(connection_entries); });
 }
 }
 catch (...)
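Note: the hunk above replaces std::bind with an equivalent lambda, which is the rewrite clang-tidy's modernize-avoid-bind check suggests. A minimal self-contained sketch of the same transformation (Worker and its process method are invented for illustration; ClickHouse's ThreadPool API is not reproduced here):

    #include <functional>
    #include <iostream>
    #include <vector>

    struct Worker
    {
        void process(std::vector<int> items) const { std::cout << items.size() << " items\n"; }
    };

    int main()
    {
        Worker worker;
        std::vector<int> items{1, 2, 3};

        // Old style, flagged by clang-tidy (modernize-avoid-bind): the bound arguments are hidden.
        std::function<void()> task_bind = std::bind(&Worker::process, &worker, items);

        // Equivalent lambda: the captures are explicit, and `mutable` lets the captured copy be moved out.
        std::function<void()> task_lambda = [&worker, items]() mutable { worker.process(std::move(items)); };

        task_bind();
        task_lambda();
    }
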
@@ -485,7 +485,7 @@ private:
 history_file = config().getString("history_file");
 else
 {
-auto history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE");
+auto * history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE");
 if (history_file_from_env)
 history_file = history_file_from_env;
 else if (!home_path.empty())
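Note: many hunks in this commit (getenv, readpassphrase, &logger(), typeid_cast, createRequestHandler) only add a `*` to `auto`. clang-tidy's readability-qualified-auto check wants a deduced pointer spelled `auto *` (or `const auto *` for a pointer to const) so the pointer-ness stays visible at the declaration. A small sketch, independent of the ClickHouse sources:

    #include <cstdlib>
    #include <string>

    int main()
    {
        // `auto` would still deduce `char *` here, but clang-tidy (readability-qualified-auto)
        // asks for the pointer to be spelled out explicitly.
        auto * history_file_from_env = std::getenv("CLICKHOUSE_HISTORY_FILE");

        std::string history_file = history_file_from_env ? history_file_from_env : "default-history";
        return history_file.empty() ? 1 : 0;
    }
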
@@ -1480,7 +1480,7 @@ private:
 "\033[1m↗\033[0m",
 };

-auto indicator = indicators[increment % 8];
+const char * indicator = indicators[increment % 8];

 if (!send_logs && written_progress_chars)
 message << '\r';
@@ -51,7 +51,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
 {
 std::string prompt{"Password for user (" + user + "): "};
 char buf[1000] = {};
-if (auto result = readpassphrase(prompt.c_str(), buf, sizeof(buf), 0))
+if (auto * result = readpassphrase(prompt.c_str(), buf, sizeof(buf), 0))
 password = result;
 }

@@ -442,7 +442,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons

 /// Collect all shards that contain partition piece number piece_number.
 Strings piece_status_paths;
-for (auto & shard : shards_with_partition)
+for (const auto & shard : shards_with_partition)
 {
 ShardPartition & task_shard_partition = shard->partition_tasks.find(partition_name)->second;
 ShardPartitionPiece & shard_partition_piece = task_shard_partition.pieces[piece_number];
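Note: the loop changes in this commit only add `const` to the range-for reference; the loop bodies never modify the element, and clang-tidy prefers `const auto &` for read-only iteration. A minimal sketch with made-up types:

    #include <cstddef>
    #include <memory>
    #include <string>
    #include <vector>

    struct Shard { std::string host_name; };

    int main()
    {
        std::vector<std::shared_ptr<Shard>> shards;
        shards.push_back(std::make_shared<Shard>(Shard{"host-1"}));
        shards.push_back(std::make_shared<Shard>(Shard{"host-2"}));

        std::size_t total_length = 0;
        // `const auto &` documents that each element is only read, and avoids copying
        // the shared_ptr (which would bump the reference count on every iteration).
        for (const auto & shard : shards)
            total_length += shard->host_name.size();

        return total_length == 0 ? 1 : 0;
    }
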
@@ -702,7 +702,7 @@ ASTPtr ClusterCopier::removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast

 auto new_columns_list = std::make_shared<ASTColumns>();
 new_columns_list->set(new_columns_list->columns, new_columns);
-if (auto indices = query_ast->as<ASTCreateQuery>()->columns_list->indices)
+if (const auto * indices = query_ast->as<ASTCreateQuery>()->columns_list->indices)
 new_columns_list->set(new_columns_list->indices, indices->clone());

 new_query.replace(new_query.columns_list, new_columns_list);
@@ -94,7 +94,7 @@ void ClusterCopierApp::mainImpl()
 StatusFile status_file(process_path + "/status");
 ThreadStatus thread_status;

-auto log = &logger();
+auto * log = &logger();
 LOG_INFO(log, "Starting clickhouse-copier ("
     << "id " << process_id << ", "
     << "host_id " << host_id << ", "
@@ -260,7 +260,7 @@ ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std
 return res;

 res.is_remote = 1;
-for (auto & replica : replicas)
+for (const auto & replica : replicas)
 {
 if (isLocalAddress(DNSResolver::instance().resolveHost(replica.host_name)))
 {
@@ -270,7 +270,7 @@ ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std
 }

 res.hostname_difference = std::numeric_limits<size_t>::max();
-for (auto & replica : replicas)
+for (const auto & replica : replicas)
 {
 size_t difference = getHostNameDifference(local_hostname, replica.host_name);
 res.hostname_difference = std::min(difference, res.hostname_difference);
@@ -937,10 +937,10 @@ public:
 if (typeid_cast<const DataTypeFixedString *>(&data_type))
 return std::make_unique<FixedStringModel>(seed);

-if (auto type = typeid_cast<const DataTypeArray *>(&data_type))
+if (const auto * type = typeid_cast<const DataTypeArray *>(&data_type))
 return std::make_unique<ArrayModel>(get(*type->getNestedType(), seed, markov_model_params));

-if (auto type = typeid_cast<const DataTypeNullable *>(&data_type))
+if (const auto * type = typeid_cast<const DataTypeNullable *>(&data_type))
 return std::make_unique<NullableModel>(get(*type->getNestedType(), seed, markov_model_params));

 throw Exception("Unsupported data type", ErrorCodes::NOT_IMPLEMENTED);
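Note: typeid_cast is ClickHouse's checked cast helper and returns a raw pointer, so the same readability-qualified-auto rule applies: the result should be declared `const auto *`. The sketch below uses plain dynamic_cast and invented type names to show the pattern in an if-initializer:

    #include <iostream>
    #include <string>

    struct DataType { virtual ~DataType() = default; };
    struct ArrayType : DataType { std::string nested = "UInt64"; };
    struct NullableType : DataType { std::string nested = "String"; };

    static std::string describe(const DataType & type)
    {
        // Each cast yields a pointer, so clang-tidy asks for `const auto *` rather than plain `auto`.
        if (const auto * array = dynamic_cast<const ArrayType *>(&type))
            return "Array(" + array->nested + ")";
        if (const auto * nullable = dynamic_cast<const NullableType *>(&type))
            return "Nullable(" + nullable->nested + ")";
        return "other";
    }

    int main()
    {
        ArrayType array_type;
        std::cout << describe(array_type) << '\n';   // prints: Array(UInt64)
    }
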
@@ -195,7 +195,7 @@ void HTTPHandler::pushDelayedResults(Output & used_output)
 std::vector<ReadBufferPtr> read_buffers;
 std::vector<ReadBuffer *> read_buffers_raw_ptr;

-auto cascade_buffer = typeid_cast<CascadeWriteBuffer *>(used_output.out_maybe_delayed_and_compressed.get());
+auto * cascade_buffer = typeid_cast<CascadeWriteBuffer *>(used_output.out_maybe_delayed_and_compressed.get());
 if (!cascade_buffer)
 throw Exception("Expected CascadeWriteBuffer", ErrorCodes::LOGICAL_ERROR);

@@ -383,7 +383,7 @@ void HTTPHandler::processQuery(
 {
 auto push_memory_buffer_and_continue = [next_buffer = used_output.out_maybe_compressed] (const WriteBufferPtr & prev_buf)
 {
-auto prev_memory_buffer = typeid_cast<MemoryWriteBuffer *>(prev_buf.get());
+auto * prev_memory_buffer = typeid_cast<MemoryWriteBuffer *>(prev_buf.get());
 if (!prev_memory_buffer)
 throw Exception("Expected MemoryWriteBuffer", ErrorCodes::LOGICAL_ERROR);

@@ -28,7 +28,7 @@ HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string &
 {
 }

-Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler(const Poco::Net::HTTPServerRequest & request) // override
+Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler(const Poco::Net::HTTPServerRequest & request)
 {
 LOG_TRACE(log, "HTTP Request for " << name << ". "
     << "Method: " << request.getMethod()
@@ -40,7 +40,7 @@ Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHand

 for (auto & handler_factory : child_factories)
 {
-auto handler = handler_factory->createRequestHandler(request);
+auto * handler = handler_factory->createRequestHandler(request);
 if (handler != nullptr)
 return handler;
 }
@@ -72,10 +72,8 @@ HTTPRequestHandlerFactoryMain::TThis * HTTPRequestHandlerFactoryMain::addHandler

 static inline auto createHandlersFactoryFromConfig(IServer & server, const std::string & name, const String & prefix)
 {
-auto main_handler_factory = new HTTPRequestHandlerFactoryMain(name);
+auto main_handler_factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);

-try
-{
 Poco::Util::AbstractConfiguration::Keys keys;
 server.config().keys(prefix, keys);

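Note: this hunk and the large one that follows replace a raw `new` guarded by a hand-written try/catch/delete with std::make_unique, so the object is freed automatically on any exception path and release() only hands ownership back on success. A stripped-down sketch of the before/after shape (Factory and configure() are placeholders, not the ClickHouse classes):

    #include <memory>
    #include <stdexcept>

    struct Factory { void configure() { throw std::runtime_error("bad config"); } };

    // Before: every exception path needs explicit cleanup.
    Factory * create_old()
    {
        auto * factory = new Factory;
        try
        {
            factory->configure();
            return factory;
        }
        catch (...)
        {
            delete factory;
            throw;
        }
    }

    // After: the unique_ptr frees the object if configure() throws;
    // release() transfers ownership to the caller only when everything succeeded.
    Factory * create_new()
    {
        auto factory = std::make_unique<Factory>();
        factory->configure();
        return factory.release();
    }

    int main()
    {
        try { delete create_new(); }
        catch (const std::exception &) { /* nothing leaks in either version */ }
    }
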
@@ -100,52 +98,70 @@ static inline auto createHandlersFactoryFromConfig(IServer & server, const std::
 prefix + "." + key + ".handler.type",ErrorCodes::INVALID_CONFIG_PARAMETER);
 }

-return main_handler_factory;
-}
-catch (...)
-{
-delete main_handler_factory;
-throw;
-}
+return main_handler_factory.release();
 }

 static const auto ping_response_expression = "Ok.\n";
 static const auto root_response_expression = "config://http_server_default_response";

-static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IServer & server, const std::string & name, AsynchronousMetrics & async_metrics)
+static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(
+    IServer & server, const std::string & name, AsynchronousMetrics & async_metrics)
 {
 if (server.config().has("http_handlers"))
 return createHandlersFactoryFromConfig(server, name, "http_handlers");
 else
 {
-auto factory = (new HTTPRequestHandlerFactoryMain(name))
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<StaticRequestHandler>(server, root_response_expression))
-        ->attachStrictPath("/")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<StaticRequestHandler>(server, ping_response_expression))
-        ->attachStrictPath("/ping")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>(server))
-        ->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<DynamicQueryHandler>(server, "query"))->allowPostAndGetParamsRequest());
+auto factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);
+
+auto root_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, root_response_expression);
+root_handler->attachStrictPath("/")->allowGetAndHeadRequest();
+factory->addHandler(root_handler.release());
+
+auto ping_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, ping_response_expression);
+ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest();
+factory->addHandler(ping_handler.release());
+
+auto replicas_status_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>>(server);
+replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest();
+factory->addHandler(replicas_status_handler.release());
+
+auto query_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<DynamicQueryHandler>>(server, "query");
+query_handler->allowPostAndGetParamsRequest();
+factory->addHandler(query_handler.release());

 if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0)
-factory->addHandler((new HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>(
-    server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)))
-    ->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest());
+{
+auto prometheus_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>>(
+    server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics));
+prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest();
+factory->addHandler(prometheus_handler.release());
+}

-return factory;
+return factory.release();
 }
 }

 static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandlerFactory(IServer & server, const std::string & name)
 {
-return (new HTTPRequestHandlerFactoryMain(name))
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<StaticRequestHandler>(server, root_response_expression))
-        ->attachStrictPath("/")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<StaticRequestHandler>(server, ping_response_expression))
-        ->attachStrictPath("/ping")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>(server))
-        ->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest())
-    ->addHandler((new HandlingRuleHTTPHandlerFactory<InterserverIOHTTPHandler>(server))->allowPostAndGetParamsRequest());
+auto factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);
+
+auto root_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, root_response_expression);
+root_handler->attachStrictPath("/")->allowGetAndHeadRequest();
+factory->addHandler(root_handler.release());
+
+auto ping_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, ping_response_expression);
+ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest();
+factory->addHandler(ping_handler.release());
+
+auto replicas_status_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>>(server);
+replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest();
+factory->addHandler(replicas_status_handler.release());
+
+auto main_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<InterserverIOHTTPHandler>>(server);
+main_handler->allowPostAndGetParamsRequest();
+factory->addHandler(main_handler.release());
+
+return factory.release();
 }

 Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & name)
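Note: in the rewritten factory functions above, each handler is built and configured through a named unique_ptr and handed over with release() only at the moment addHandler takes it, because the parent factory still expects a raw owning pointer (the Poco-style interface is unchanged). A sketch of that hand-off pattern with invented stand-in types:

    #include <memory>
    #include <string>
    #include <vector>

    struct Handler
    {
        std::string path;
        Handler * attachPath(std::string p) { path = std::move(p); return this; }
    };

    // Stand-in for a factory whose addHandler() takes ownership of a raw pointer.
    struct MainFactory
    {
        std::vector<std::unique_ptr<Handler>> children;
        MainFactory * addHandler(Handler * child)
        {
            children.emplace_back(child);   // adopt the raw pointer
            return this;
        }
    };

    MainFactory * createFactory()
    {
        auto factory = std::make_unique<MainFactory>();

        // Build and configure each handler while a unique_ptr still owns it,
        // then release() exactly when ownership moves into the factory.
        auto root_handler = std::make_unique<Handler>();
        root_handler->attachPath("/");
        factory->addHandler(root_handler.release());

        auto ping_handler = std::make_unique<Handler>();
        ping_handler->attachPath("/ping");
        factory->addHandler(ping_handler.release());

        return factory.release();   // the caller owns the fully built factory
    }

    int main()
    {
        std::unique_ptr<MainFactory> factory(createFactory());
        return factory->children.size() == 2 ? 0 : 1;
    }
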
@@ -155,9 +171,14 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As
 else if (name == "InterserverIOHTTPHandler-factory" || name == "InterserverIOHTTPSHandler-factory")
 return createInterserverHTTPHandlerFactory(server, name);
 else if (name == "PrometheusHandler-factory")
-return (new HTTPRequestHandlerFactoryMain(name))->addHandler((new HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>(
-    server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)))
-    ->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest());
+{
+auto factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);
+auto handler = std::make_unique<HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>>(
+    server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics));
+handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest();
+factory->addHandler(handler.release());
+return factory.release();
+}

 throw Exception("LOGICAL ERROR: Unknown HTTP handler factory name.", ErrorCodes::LOGICAL_ERROR);
 }
|
@ -46,7 +46,7 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request
|
|||||||
|
|
||||||
for (auto iterator = db.second->getTablesIterator(); iterator->isValid(); iterator->next())
|
for (auto iterator = db.second->getTablesIterator(); iterator->isValid(); iterator->next())
|
||||||
{
|
{
|
||||||
auto & table = iterator->table();
|
const auto & table = iterator->table();
|
||||||
StorageReplicatedMergeTree * table_replicated = dynamic_cast<StorageReplicatedMergeTree *>(table.get());
|
StorageReplicatedMergeTree * table_replicated = dynamic_cast<StorageReplicatedMergeTree *>(table.get());
|
||||||
|
|
||||||
if (!table_replicated)
|
if (!table_replicated)
|
||||||
|
@@ -331,18 +331,13 @@ void ReplicatedMergeTreeQueue::updateTimesInZooKeeper(

 void ReplicatedMergeTreeQueue::removeProcessedEntry(zkutil::ZooKeeperPtr zookeeper, LogEntryPtr & entry)
 {
-auto code = zookeeper->tryRemove(replica_path + "/queue/" + entry->znode_name);
-
-if (code)
-    LOG_ERROR(log, "Couldn't remove " << replica_path << "/queue/" << entry->znode_name << ": "
-        << zkutil::ZooKeeper::error2string(code) << ". This shouldn't happen often.");
-
 std::optional<time_t> min_unprocessed_insert_time_changed;
 std::optional<time_t> max_processed_insert_time_changed;

 bool found = false;
 size_t queue_size = 0;

+/// First remove from memory then from ZooKeeper
 {
 std::unique_lock lock(state_mutex);

@@ -372,6 +367,11 @@ void ReplicatedMergeTreeQueue::removeProcessedEntry(zkutil::ZooKeeperPtr zookeep

 notifySubscribers(queue_size);

+auto code = zookeeper->tryRemove(replica_path + "/queue/" + entry->znode_name);
+if (code)
+    LOG_ERROR(log, "Couldn't remove " << replica_path << "/queue/" << entry->znode_name << ": "
+        << zkutil::ZooKeeper::error2string(code) << ". This shouldn't happen often.");
+
 updateTimesInZooKeeper(zookeeper, min_unprocessed_insert_time_changed, max_processed_insert_time_changed);
 }

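Note: the two queue hunks above do more than silence a warning: the ZooKeeper tryRemove is moved after the in-memory removal, matching the new comment "First remove from memory then from ZooKeeper". A condensed sketch of that ordering with placeholder members (Entry, removeFromStorage and the layout are illustrative, not the actual ReplicatedMergeTreeQueue):

    #include <iostream>
    #include <list>
    #include <mutex>
    #include <string>

    struct Entry { std::string znode_name; };

    struct Queue
    {
        std::mutex state_mutex;
        std::list<Entry> queue;

        // Stand-in for the remote removal; returns a non-zero "error code" on failure.
        int removeFromStorage(const std::string & name) { return name.empty() ? 1 : 0; }

        void removeProcessedEntry(const Entry & entry)
        {
            /// First remove from memory (under the state lock) ...
            {
                std::unique_lock lock(state_mutex);
                queue.remove_if([&](const Entry & e) { return e.znode_name == entry.znode_name; });
            }

            /// ... then from the external storage; a failure here is only logged.
            if (int code = removeFromStorage(entry.znode_name))
                std::cerr << "Couldn't remove " << entry.znode_name << ": code " << code << '\n';
        }
    };

    int main()
    {
        Queue q;
        q.queue.push_back({"queue-0000000001"});
        q.removeProcessedEntry({"queue-0000000001"});
        return static_cast<int>(q.queue.size());
    }
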
@@ -199,7 +199,7 @@ void setCurrentBlockNumber(zkutil::ZooKeeper & zk, const std::string & path, Int
 create_ephemeral_nodes(1); /// Firstly try to create just a single node.

 /// Create other nodes in batches of 50 nodes.
-while (current_block_number + 50 <= new_current_block_number)
+while (current_block_number + 50 <= new_current_block_number) // NOLINT: clang-tidy thinks that the loop is infinite
 create_ephemeral_nodes(50);

 create_ephemeral_nodes(new_current_block_number - current_block_number);
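Note: where a diagnostic is a false positive, the commit suppresses it locally with a // NOLINT comment instead of rewriting correct code; clang-tidy skips diagnostics on a line carrying that marker. A trivial sketch that only loosely mirrors the batching loop above:

    #include <cstddef>

    // Pretend to allocate `count` block numbers and report how many were created.
    static std::size_t create_nodes(std::size_t count) { return count; }

    int main()
    {
        std::size_t current = 0;
        const std::size_t target = 137;

        current += create_nodes(1);   /// First try a single node.

        /// Then create the rest in batches of 50.
        while (current + 50 <= target) // NOLINT: the loop terminates, the "infinite loop" warning would be a false positive
            current += create_nodes(50);

        current += create_nodes(target - current);
        return current == target ? 0 : 1;
    }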