From 29762240de60e9f2cb2b547b8b211c0500913429 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 23 May 2020 22:31:54 +0300
Subject: [PATCH] Remove duplicate whitespaces (preparation)

---
 base/daemon/BaseDaemon.cpp                    |  2 +-
 base/daemon/GraphiteWriter.h                  |  2 +-
 programs/copier/ClusterCopier.cpp             |  4 ++--
 programs/server/Server.cpp                    |  2 +-
 src/Common/Config/ConfigProcessor.cpp         |  2 +-
 src/Common/PoolWithFailoverBase.h             |  2 +-
 .../CollapsingFinalBlockInputStream.cpp       |  2 +-
 src/Databases/DatabaseOnDisk.cpp              |  2 +-
 src/Disks/VolumeJBOD.cpp                      |  4 ++--
 .../ClusterProxy/SelectStreamFactory.cpp      |  6 +++---
 src/Interpreters/DDLWorker.cpp                | 10 +++++-----
 src/Interpreters/DatabaseCatalog.cpp          |  2 +-
 src/Interpreters/ExternalLoader.cpp           |  2 +-
 src/Storages/Distributed/DirectoryMonitor.cpp |  2 +-
 src/Storages/MergeTree/IMergeTreeDataPart.cpp |  6 +++---
 src/Storages/MergeTree/MergeTreeData.cpp      | 10 +++++-----
 .../MergeTree/MergeTreeDataMergerMutator.cpp  |  4 ++--
 .../ReplicatedMergeTreeCleanupThread.cpp      |  2 +-
 .../ReplicatedMergeTreePartCheckThread.cpp    |  4 ++--
 .../MergeTree/ReplicatedMergeTreeQueue.cpp    |  6 +++---
 .../ReplicatedMergeTreeRestartingThread.cpp   |  2 +-
 src/Storages/StorageBuffer.cpp                | 12 ++++++------
 src/Storages/StorageReplicatedMergeTree.cpp   | 18 +++++++++---------
 23 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index 818721b9e6d..d5b7d1a0f6c 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -498,7 +498,7 @@ void debugIncreaseOOMScore()
     }
     catch (const Poco::Exception & e)
     {
-        LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '" + e.displayText() + "'.");
+        LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '" + e.displayText() + "'.");
         return;
     }
     LOG_INFO_FORMATTED(&Logger::root(), "Set OOM score adjustment to {}", new_score);
diff --git a/base/daemon/GraphiteWriter.h b/base/daemon/GraphiteWriter.h
index d368470e5e6..400c99066b2 100644
--- a/base/daemon/GraphiteWriter.h
+++ b/base/daemon/GraphiteWriter.h
@@ -52,7 +52,7 @@ private:
         }
         catch (const Poco::Exception & e)
         {
-            LOG_WARNING(&Poco::Util::Application::instance().logger(), "Fail to write to Graphite " << host << ":" << port << ". e.what() = " << e.what() << ", e.message() = " << e.message());
+            LOG_WARNING(&Poco::Util::Application::instance().logger(), "Fail to write to Graphite " << host << ":" << port << ". e.what() = " << e.what() << ", e.message() = " << e.message());
         }
     }
diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp
index 50b32676bce..eb35768b955 100644
--- a/programs/copier/ClusterCopier.cpp
+++ b/programs/copier/ClusterCopier.cpp
@@ -164,7 +164,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
         for (const String & missing_partition : missing_partitions)
             ss << " " << missing_partition;
-        LOG_WARNING(log, "There are no " << missing_partitions.size() << " partitions from enabled_partitions in shard " << task_shard->getDescription() << " :" << ss.str());
+        LOG_WARNING(log, "There are no " << missing_partitions.size() << " partitions from enabled_partitions in shard " << task_shard->getDescription() << " :" << ss.str());
     }
     LOG_DEBUG_FORMATTED(log, "Will copy {} partitions from shard {}", task_shard->partition_tasks.size(), task_shard->getDescription());
@@ -1297,7 +1297,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
             /// NOTE: partition is still fresh if dirt discovery happens before cleaning
             if (stat_shards.numChildren == 0)
             {
-                LOG_WARNING(log, "There are no workers for partition " << task_partition.name << " piece " << toString(current_piece_number) << ", but destination table contains " << count << " rows" << ". Partition will be dropped and refilled.");
+                LOG_WARNING(log, "There are no workers for partition " << task_partition.name << " piece " << toString(current_piece_number) << ", but destination table contains " << count << " rows" << ". Partition will be dropped and refilled.");
                 create_is_dirty_node(clean_state_clock);
                 return TaskStatus::Error;
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index fa24b106a09..78ca90a4280 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -354,7 +354,7 @@ int Server::main(const std::vector & /*args*/)
             rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max);
             int rc = setrlimit(RLIMIT_NOFILE, &rlim);
             if (rc != 0)
-                LOG_WARNING(log, "Cannot set max number of file descriptors to " << rlim.rlim_cur << ". Try to specify max_open_files according to your system limits. error: " << strerror(errno));
+                LOG_WARNING(log, "Cannot set max number of file descriptors to " << rlim.rlim_cur << ". Try to specify max_open_files according to your system limits. error: " << strerror(errno));
             else
                 LOG_DEBUG_FORMATTED(log, "Set max number of file descriptors to {} (was {}).", rlim.rlim_cur, old);
         }
diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index 5e9f7f066ca..8ee26226405 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -568,7 +568,7 @@ ConfigProcessor::LoadedConfig ConfigProcessor::loadConfigWithZooKeeperIncludes(
         if (!zk_exception)
             throw;
-        LOG_WARNING(log, "Error while processing from_zk config includes: " + zk_exception->message() + ". Config will be loaded from preprocessed file: " + preprocessed_path);
+        LOG_WARNING(log, "Error while processing from_zk config includes: " + zk_exception->message() + ". Config will be loaded from preprocessed file: " + preprocessed_path);
         config_xml = dom_parser.parse(preprocessed_path);
     }
diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h
index b83648f6d0e..10ab3885be6 100644
--- a/src/Common/PoolWithFailoverBase.h
+++ b/src/Common/PoolWithFailoverBase.h
@@ -239,7 +239,7 @@ PoolWithFailoverBase::getMany(
         }
         else
         {
-            LOG_WARNING(log, "Connection failed at try №" << (shuffled_pool.error_count + 1) << ", reason: " << fail_message);
+            LOG_WARNING(log, "Connection failed at try №" << (shuffled_pool.error_count + 1) << ", reason: " << fail_message);
             ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
             shuffled_pool.error_count = std::min(max_error_cap, shuffled_pool.error_count + 1);
diff --git a/src/DataStreams/CollapsingFinalBlockInputStream.cpp b/src/DataStreams/CollapsingFinalBlockInputStream.cpp
index 2ac1c46f0bc..73d3bda5860 100644
--- a/src/DataStreams/CollapsingFinalBlockInputStream.cpp
+++ b/src/DataStreams/CollapsingFinalBlockInputStream.cpp
@@ -18,7 +18,7 @@ CollapsingFinalBlockInputStream::~CollapsingFinalBlockInputStream()
 void CollapsingFinalBlockInputStream::reportBadCounts()
 {
     /// With inconsistent data, this is an unavoidable error that can not be easily fixed by admins. Therefore Warning.
-    LOG_WARNING(log, "Incorrect data: number of rows with sign = 1 (" << count_positive << ") differs with number of rows with sign = -1 (" << count_negative << ") by more than one");
+    LOG_WARNING(log, "Incorrect data: number of rows with sign = 1 (" << count_positive << ") differs with number of rows with sign = -1 (" << count_negative << ") by more than one");
 }
 void CollapsingFinalBlockInputStream::reportBadSign(Int8 sign)
diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
index 3d563b09e0d..27d0d9a7a10 100644
--- a/src/Databases/DatabaseOnDisk.cpp
+++ b/src/Databases/DatabaseOnDisk.cpp
@@ -466,7 +466,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * loger, const Contex
         table_name = unescapeForFileName(table_name);
         if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER)
-            LOG_WARNING(loger, "File " << metadata_file_path << " contains both UUID and table name. " "Will use name `" << table_name << "` instead of `" << create.table << "`");
+            LOG_WARNING(loger, "File " << metadata_file_path << " contains both UUID and table name. " "Will use name `" << table_name << "` instead of `" << create.table << "`");
         create.table = table_name;
     }
diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp
index 59ab3087499..986d69111b3 100644
--- a/src/Disks/VolumeJBOD.cpp
+++ b/src/Disks/VolumeJBOD.cpp
@@ -48,11 +48,11 @@ VolumeJBOD::VolumeJBOD(
         max_data_part_size = static_cast(sum_size * ratio / disks.size());
         for (size_t i = 0; i < disks.size(); ++i)
             if (sizes[i] < max_data_part_size)
-                LOG_WARNING(logger, "Disk " << backQuote(disks[i]->getName()) << " on volume " << backQuote(config_prefix) << " have not enough space (" << formatReadableSizeWithBinarySuffix(sizes[i]) << ") for containing part the size of max_data_part_size (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << ")");
+                LOG_WARNING(logger, "Disk " << backQuote(disks[i]->getName()) << " on volume " << backQuote(config_prefix) << " have not enough space (" << formatReadableSizeWithBinarySuffix(sizes[i]) << ") for containing part the size of max_data_part_size (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << ")");
     }
     static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u;
     if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE)
-        LOG_WARNING(logger, "Volume " << backQuote(name) << " max_data_part_size is too low (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << " < " << formatReadableSizeWithBinarySuffix(MIN_PART_SIZE) << ")");
+        LOG_WARNING(logger, "Volume " << backQuote(name) << " max_data_part_size is too low (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << " < " << formatReadableSizeWithBinarySuffix(MIN_PART_SIZE) << ")");
 }
 DiskPtr VolumeJBOD::getNextDisk()
diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
index 5eceac625a2..00c1356d002 100644
--- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
+++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
@@ -200,7 +200,7 @@ void SelectStreamFactory::createForShard(
         ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
         if (shard_info.hasRemoteConnections())
         {
-            LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.getNameForLogs() << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
+            LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.getNameForLogs() << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
             emplace_remote_stream();
         }
         else
@@ -236,7 +236,7 @@ void SelectStreamFactory::createForShard(
         /// If we reached this point, local replica is stale.
         ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica);
-        LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard " << shard_info.shard_num << " is stale (delay: " << local_delay << "s.)");
+        LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard " << shard_info.shard_num << " is stale (delay: " << local_delay << "s.)");
         if (!settings.fallback_to_stale_replicas_for_distributed_queries)
         {
@@ -284,7 +284,7 @@ void SelectStreamFactory::createForShard(
             catch (const Exception & ex)
             {
                 if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
-                    LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard " << shard_num << " failed, will use stale local replica");
+                    LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard " << shard_num << " failed, will use stale local replica");
                 else
                     throw;
             }
diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp
index 53be380e029..0ba1c184545 100644
--- a/src/Interpreters/DDLWorker.cpp
+++ b/src/Interpreters/DDLWorker.cpp
@@ -238,7 +238,7 @@ DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const
     if (context.getSettingsRef().readonly)
     {
-        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries" << " Set appropriate system_profile or distributed_ddl.profile to fix this.");
+        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries" << " Set appropriate system_profile or distributed_ddl.profile to fix this.");
     }
     host_fqdn = getFQDNOrHostName();
@@ -334,7 +334,7 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason,
         if (host_in_hostlist)
         {
             /// This check could be slow a little bit
-            LOG_WARNING(log, "There are two the same ClickHouse instances in task " << entry_name << ": " << task->host_id.readableString() << " and " << host.readableString() << ". Will use the first one only.");
+            LOG_WARNING(log, "There are two the same ClickHouse instances in task " << entry_name << ": " << task->host_id.readableString() << " and " << host.readableString() << ". Will use the first one only.");
         }
         else
         {
@@ -424,7 +424,7 @@ void DDLWorker::processTasks()
             {
                 if (server_startup && e.code == Coordination::ZNONODE)
                 {
-                    LOG_WARNING(log, "ZooKeeper NONODE error during startup. Ignoring entry " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
+                    LOG_WARNING(log, "ZooKeeper NONODE error during startup. Ignoring entry " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
                 }
                 else
                 {
@@ -433,7 +433,7 @@
             }
             catch (...)
             {
-                LOG_WARNING(log, "An error occurred while processing task " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
+                LOG_WARNING(log, "An error occurred while processing task " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
                 throw;
             }
         }
@@ -506,7 +506,7 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
     if (found_exact_match)
         return;
-    LOG_WARNING(log, "Not found the exact match of host " << task.host_id.readableString() << " from task " << task.entry_name << " in cluster " << task.cluster_name << " definition. Will try to find it using host name resolving.");
+    LOG_WARNING(log, "Not found the exact match of host " << task.host_id.readableString() << " from task " << task.entry_name << " in cluster " << task.cluster_name << " definition. Will try to find it using host name resolving.");
     bool found_via_resolving = false;
     for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)
diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
index 40ae756f48d..6c1d00df3bc 100644
--- a/src/Interpreters/DatabaseCatalog.cpp
+++ b/src/Interpreters/DatabaseCatalog.cpp
@@ -597,7 +597,7 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
         }
         else
         {
-            LOG_WARNING(log, "Cannot parse metadata of partially dropped table " << table_id.getNameForLogs() << " from " << dropped_metadata_path << ". Will remove metadata file and data directory. Garbage may be left in /store directory and ZooKeeper.");
+            LOG_WARNING(log, "Cannot parse metadata of partially dropped table " << table_id.getNameForLogs() << " from " << dropped_metadata_path << ". Will remove metadata file and data directory. Garbage may be left in /store directory and ZooKeeper.");
         }
         drop_time = Poco::File(dropped_metadata_path).getLastModified().epochTime();
diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp
index 721f4812afe..7157fcd58e9 100644
--- a/src/Interpreters/ExternalLoader.cpp
+++ b/src/Interpreters/ExternalLoader.cpp
@@ -350,7 +350,7 @@ private:
                 const auto & already_added = already_added_it->second;
                 if (!already_added->from_temp_repository && !repository->isTemporary())
                 {
-                    LOG_WARNING(log, type_name << " '" << object_name << "' is found " << (((path == already_added->path) && (repository->getName() == already_added->repository_name)) ? ("twice in the same file '" + path + "'") : ("both in file '" + already_added->path + "' and '" + path + "'")));
+                    LOG_WARNING(log, type_name << " '" << object_name << "' is found " << (((path == already_added->path) && (repository->getName() == already_added->repository_name)) ? ("twice in the same file '" + path + "'") : ("both in file '" + already_added->path + "' and '" + path + "'")));
                 }
             }
         }
diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp
index b6742157335..30879433024 100644
--- a/src/Storages/Distributed/DirectoryMonitor.cpp
+++ b/src/Storages/Distributed/DirectoryMonitor.cpp
@@ -317,7 +317,7 @@ void StorageDistributedDirectoryMonitor::readHeader(
         readVarUInt(initiator_revision, header_buf);
         if (ClickHouseRevision::get() < initiator_revision)
         {
-            LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
+            LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
         }
         readStringBinary(insert_query, header_buf);
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index 0ea1dfddabc..494e5809683 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -668,7 +668,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
             Names files;
             volume->getDisk()->listFiles(to, files);
-            LOG_WARNING(storage.log, "Part directory " << fullPath(volume->getDisk(), to) << " already exists" << " and contains " << files.size() << " files. Removing it.");
+            LOG_WARNING(storage.log, "Part directory " << fullPath(volume->getDisk(), to) << " already exists" << " and contains " << files.size() << " files. Removing it.");
             volume->getDisk()->removeRecursive(to);
         }
@@ -710,7 +710,7 @@ void IMergeTreeDataPart::remove() const
     if (volume->getDisk()->exists(to))
     {
-        LOG_WARNING(storage.log, "Directory " << fullPath(volume->getDisk(), to) << " (to which part must be renamed before removing) already exists." " Most likely this is due to unclean restart. Removing it.");
+        LOG_WARNING(storage.log, "Directory " << fullPath(volume->getDisk(), to) << " (to which part must be renamed before removing) already exists." " Most likely this is due to unclean restart. Removing it.");
         try
         {
@@ -791,7 +791,7 @@ String IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix)
         if (!volume->getDisk()->exists(getFullRelativePath() + res))
             return res;
-        LOG_WARNING(storage.log, "Directory " << res << " (to detach to) already exists." " Will detach to directory with '_tryN' suffix.");
+        LOG_WARNING(storage.log, "Directory " << res << " (to detach to) already exists." " Will detach to directory with '_tryN' suffix.");
     }
     return res;
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 3e57a48b135..c35cb29ca62 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -2914,9 +2914,9 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_
         if (!destination_ptr)
         {
             if (ttl_entry->destination_type == PartDestinationType::VOLUME)
-                LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but volume was not found");
+                LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but volume was not found");
             else if (ttl_entry->destination_type == PartDestinationType::DISK)
-                LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but disk was not found");
+                LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but disk was not found");
         }
         else
         {
@@ -2925,9 +2925,9 @@
                 return reservation;
             else if (ttl_entry->destination_type == PartDestinationType::VOLUME)
-                LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
+                LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
             else if (ttl_entry->destination_type == PartDestinationType::DISK)
-                LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
+                LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
         }
     }
@@ -3053,7 +3053,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
             DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock);
             if (covering_part)
             {
-                LOG_WARNING(data.log, "Tried to commit obsolete part " << part->name << " covered by " << covering_part->getNameWithState());
+                LOG_WARNING(data.log, "Tried to commit obsolete part " << part->name << " covered by " << covering_part->getNameWithState());
                 part->remove_time.store(0, std::memory_order_relaxed); /// The part will be removed without waiting for old_parts_lifetime seconds.
                 data.modifyPartState(part, DataPartState::Outdated);
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
index f56ef85015a..8eee53cde4a 100644
--- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
@@ -366,7 +366,7 @@ bool MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition(
             if (now - disk_space_warning_time > 3600)
             {
                 disk_space_warning_time = now;
-                LOG_WARNING(log, "Won't merge parts from " << parts.front()->name << " to " << (*prev_it)->name << " because not enough free space: " << formatReadableSizeWithBinarySuffix(available_disk_space) << " free and unreserved, " << formatReadableSizeWithBinarySuffix(sum_bytes) << " required now (+" << static_cast((DISK_USAGE_COEFFICIENT_TO_SELECT - 1.0) * 100) << "% on overhead); suppressing similar warnings for the next hour");
+                LOG_WARNING(log, "Won't merge parts from " << parts.front()->name << " to " << (*prev_it)->name << " because not enough free space: " << formatReadableSizeWithBinarySuffix(available_disk_space) << " free and unreserved, " << formatReadableSizeWithBinarySuffix(sum_bytes) << " required now (+" << static_cast((DISK_USAGE_COEFFICIENT_TO_SELECT - 1.0) * 100) << "% on overhead); suppressing similar warnings for the next hour");
             }
             if (out_disable_reason)
@@ -1214,7 +1214,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
              * (NOTE: Merging with part that is not in ZK is not possible, see checks in 'createLogEntryToMergeParts'.)
              * - and after merge, this part will be removed in addition to parts that was merged.
              */
-            LOG_WARNING(log, "Unexpected number of parts removed when adding " << new_data_part->name << ": " << replaced_parts.size() << " instead of " << parts.size());
+            LOG_WARNING(log, "Unexpected number of parts removed when adding " << new_data_part->name << ": " << replaced_parts.size() << " instead of " << parts.size());
         }
         else
         {
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
index 80577bd1b20..17dc1a508d2 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
@@ -327,7 +327,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks()
             cached_block_stats.erase(first_outdated_block->node);
         }
         else if (rc)
-            LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
+            LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
         else
         {
             /// Successfully removed blocks have to be removed from cache
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
index 383a1e5e485..8ec073282c9 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
@@ -82,7 +82,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
     /// If the part is in ZooKeeper, remove it from there and add the task to download it to the queue.
     if (zookeeper->exists(part_path))
     {
-        LOG_WARNING(log, "Part " << part_name << " exists in ZooKeeper but not locally. " "Removing from ZooKeeper and queueing a fetch.");
+        LOG_WARNING(log, "Part " << part_name << " exists in ZooKeeper but not locally. " "Removing from ZooKeeper and queueing a fetch.");
         ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
         storage.removePartAndEnqueueFetch(part_name);
@@ -143,7 +143,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
         if (found_part_with_the_same_min_block && found_part_with_the_same_max_block)
         {
-            LOG_WARNING(log, "Found parts with the same min block and with the same max block as the missing part " << part_name << ". Hoping that it will eventually appear as a result of a merge.");
+            LOG_WARNING(log, "Found parts with the same min block and with the same max block as the missing part " << part_name << ". Hoping that it will eventually appear as a result of a merge.");
             return;
         }
     }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 027e33a1373..9ee4f7d3f9a 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -1328,7 +1328,7 @@ MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(
     if (part->info.getDataVersion() > desired_mutation_version)
     {
-        LOG_WARNING(log, "Data version of part " << part->name << " is already greater than " "desired mutation version " << desired_mutation_version);
+        LOG_WARNING(log, "Data version of part " << part->name << " is already greater than " "desired mutation version " << desired_mutation_version);
         return MutationCommands{};
     }
@@ -1337,7 +1337,7 @@
     auto in_partition = mutations_by_partition.find(part->info.partition_id);
     if (in_partition == mutations_by_partition.end())
     {
-        LOG_WARNING(log, "There are no mutations for partition ID " << part->info.partition_id << " (trying to mutate part " << part->name << " to " << toString(desired_mutation_version) << ")");
+        LOG_WARNING(log, "There are no mutations for partition ID " << part->info.partition_id << " (trying to mutate part " << part->name << " to " << toString(desired_mutation_version) << ")");
         return MutationCommands{};
     }
@@ -1345,7 +1345,7 @@
     auto end = in_partition->second.lower_bound(desired_mutation_version);
     if (end == in_partition->second.end() || end->first != desired_mutation_version)
-        LOG_WARNING(log, "Mutation with version " << desired_mutation_version << " not found in partition ID " << part->info.partition_id << " (trying to mutate part " << part->name + ")");
+        LOG_WARNING(log, "Mutation with version " << desired_mutation_version << " not found in partition ID " << part->info.partition_id << " (trying to mutate part " << part->name + ")");
     else
         ++end;
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
index c60c6953edf..519c063f088 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
@@ -268,7 +268,7 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart()
         if (!quorum_entry.replicas.count(storage.replica_name) && zookeeper->exists(storage.replica_path + "/parts/" + quorum_entry.part_name))
         {
-            LOG_WARNING(log, "We have part " << quorum_entry.part_name << " but we is not in quorum. Updating quorum. This shouldn't happen often.");
+            LOG_WARNING(log, "We have part " << quorum_entry.part_name << " but we is not in quorum. Updating quorum. This shouldn't happen often.");
             storage.updateQuorum(quorum_entry.part_name);
         }
     }
diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp
index 82f781b3193..1bb30201e82 100644
--- a/src/Storages/StorageBuffer.cpp
+++ b/src/Storages/StorageBuffer.cpp
@@ -189,7 +189,7 @@ Pipes StorageBuffer::read(
             {
                 if (!dest_columns.hasPhysical(column_name))
                 {
-                    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " doesn't have column " << backQuoteIfNeed(column_name) << ". The default values are used.");
+                    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " doesn't have column " << backQuoteIfNeed(column_name) << ". The default values are used.");
                     boost::range::remove_erase(columns_intersection, column_name);
                     continue;
                 }
@@ -197,14 +197,14 @@ Pipes StorageBuffer::read(
                 const auto & col = our_columns.getPhysical(column_name);
                 if (!dst_col.type->equals(*col.type))
                 {
-                    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has different type of column " << backQuoteIfNeed(column_name) << " (" << dst_col.type->getName() << " != " << col.type->getName() << "). Data from destination table are converted.");
+                    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has different type of column " << backQuoteIfNeed(column_name) << " (" << dst_col.type->getName() << " != " << col.type->getName() << "). Data from destination table are converted.");
                     header_after_adding_defaults.getByName(column_name) = ColumnWithTypeAndName(dst_col.type, column_name);
                 }
             }
             if (columns_intersection.empty())
             {
-                LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has no common columns with block in buffer. Block of data is skipped.");
+                LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has no common columns with block in buffer. Block of data is skipped.");
             }
             else
             {
@@ -447,7 +447,7 @@ void StorageBuffer::startup()
     if (global_context.getSettingsRef().readonly)
     {
-        LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data." << " Set appropriate system_profile to fix this.");
+        LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data." << " Set appropriate system_profile to fix this.");
     }
@@ -653,7 +653,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl
         auto column = block.getByName(dst_col.name);
         if (!column.type->equals(*dst_col.type))
         {
-            LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " have different type of column " << backQuoteIfNeed(column.name) << " (" << dst_col.type->getName() << " != " << column.type->getName() << "). Block of data is converted.");
+            LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " have different type of column " << backQuoteIfNeed(column.name) << " (" << dst_col.type->getName() << " != " << column.type->getName() << "). Block of data is converted.");
             column.column = castColumn(column, dst_col.type);
             column.type = dst_col.type;
         }
@@ -669,7 +669,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl
     }
     if (block_to_write.columns() != block.columns())
-        LOG_WARNING(log, "Not all columns from block in buffer exist in destination table " << destination_id.getNameForLogs() << ". Some columns are discarded.");
+        LOG_WARNING(log, "Not all columns from block in buffer exist in destination table " << destination_id.getNameForLogs() << ". Some columns are discarded.");
     auto list_of_columns = std::make_shared();
     insert->columns = list_of_columns;
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index d539bd53a11..6adadc6ac72 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -214,7 +214,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
         skip_sanity_checks = true;
         current_zookeeper->remove(replica_path + "/flags/force_restore_data");
-        LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag " << replica_path << "/flags/force_restore_data).");
+        LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag " << replica_path << "/flags/force_restore_data).");
     }
     else if (has_force_restore_data_flag)
     {
@@ -342,7 +342,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
             /// Replica could be inactive.
             if (!zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))
             {
-                LOG_WARNING(log, "Replica " << replica << " is not active during mutation. " "Mutation will be done asynchronously when replica becomes active.");
+                LOG_WARNING(log, "Replica " << replica << " is not active during mutation. " "Mutation will be done asynchronously when replica becomes active.");
                 inactive_replicas.emplace(replica);
                 break;
@@ -830,7 +830,7 @@ void StorageReplicatedMergeTree::checkPartChecksumsAndAddCommitOps(const zkutil:
         }
         else
         {
-            LOG_WARNING(log, "checkPartAndAddToZooKeeper: node " << replica_path + "/parts/" + part_name << " already exists." << " Will not commit any nodes.");
+            LOG_WARNING(log, "checkPartAndAddToZooKeeper: node " << replica_path + "/parts/" + part_name << " already exists." << " Will not commit any nodes.");
         }
     }
@@ -1003,7 +1003,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)
         }
         if (part->name != name)
         {
-            LOG_WARNING(log, "Part " << name << " is covered by " << part->name << " but should be merged into " << entry.new_part_name << ". This shouldn't happen often.");
+            LOG_WARNING(log, "Part " << name << " is covered by " << part->name << " but should be merged into " << entry.new_part_name << ". This shouldn't happen often.");
             have_all_parts = false;
             break;
         }
@@ -1370,7 +1370,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry)
                 }
                 else
                 {
-                    LOG_WARNING(log, "No active replica has part " << entry.new_part_name << ", but that part needs quorum and /quorum/status contains entry about another part " << quorum_entry.part_name << ". It means that part was successfully written to " << entry.quorum << " replicas, but then all of them goes offline." << " Or it is a bug.");
+                    LOG_WARNING(log, "No active replica has part " << entry.new_part_name << ", but that part needs quorum and /quorum/status contains entry about another part " << quorum_entry.part_name << ". It means that part was successfully written to " << entry.quorum << " replicas, but then all of them goes offline." << " Or it is a bug.");
                 }
             }
         }
@@ -2297,7 +2297,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMergeParts(
         const auto & part = parts[i];
         if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
         {
-            LOG_WARNING(log, "Part " << part->name << " (that was selected for merge)" << " with age " << (time(nullptr) - part->modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't do merge with that part and will check it.");
+            LOG_WARNING(log, "Part " << part->name << " (that was selected for merge)" << " with age " << (time(nullptr) - part->modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't do merge with that part and will check it.");
             enqueuePartForCheck(part->name);
         }
     }
@@ -2339,7 +2339,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMutatePart(const IMergeTreeData
     {
         if (part.modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
         {
-            LOG_WARNING(log, "Part " << part.name << " (that was selected for mutation)" << " with age " << (time(nullptr) - part.modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't mutate that part and will check it.");
+            LOG_WARNING(log, "Part " << part.name << " (that was selected for mutation)" << " with age " << (time(nullptr) - part.modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't mutate that part and will check it.");
             enqueuePartForCheck(part.name);
         }
@@ -4775,7 +4775,7 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeper(
                 continue;
             }
             else
-                LOG_WARNING(log, "Cannot remove part " << part_names[i] << " from ZooKeeper: " << zkutil::ZooKeeper::error2string(response.error));
+                LOG_WARNING(log, "Cannot remove part " << part_names[i] << " from ZooKeeper: " << zkutil::ZooKeeper::error2string(response.error));
         }
     }
@@ -4824,7 +4824,7 @@ void StorageReplicatedMergeTree::clearBlocksInPartition(
             zookeeper.removeRecursive(path);
         }
         else if (rc)
-            LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
+            LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
     }
     LOG_TRACE_FORMATTED(log, "Deleted {} deduplication block IDs in partition ID {}", to_delete_futures.size(), partition_id);