Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Remove duplicate whitespaces (preparation)

This commit is contained in:
parent 7fed65cbe2
commit 29762240de
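The hunks below collapse multi-line, stream-style log statements onto single lines; the *_FORMATTED calls that already appear here (LOG_INFO_FORMATTED, LOG_DEBUG_FORMATTED, LOG_TRACE_FORMATTED) take a format string with {} placeholders instead of operator<< chains, which this preparation step appears to lead up to. A minimal sketch of the two styles, using hypothetical names (part_dir, files_count) rather than code taken from this diff:

// Stream style: the message is assembled with operator<<. Adjacent string
// literals concatenate at compile time, so a message wrapped across several
// source lines can end up with a doubled space once the pieces are joined,
// e.g. "already exists. " " Most likely ..." becomes "already exists.  Most likely ...".
// part_dir and files_count are hypothetical variables used only for illustration.
LOG_WARNING(log, "Part directory " << part_dir << " already exists" << " and contains " << files_count << " files. Removing it.");

// fmt style: a single format string plus arguments, as in the *_FORMATTED calls below.
LOG_DEBUG_FORMATTED(log, "Part directory {} already exists and contains {} files. Removing it.", part_dir, files_count);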
@@ -498,7 +498,7 @@ void debugIncreaseOOMScore()
    }
    catch (const Poco::Exception & e)
    {
        LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '" + e.displayText() + "'.");
        return;
    }
    LOG_INFO_FORMATTED(&Logger::root(), "Set OOM score adjustment to {}", new_score);

@@ -52,7 +52,7 @@ private:
    }
    catch (const Poco::Exception & e)
    {
        LOG_WARNING(&Poco::Util::Application::instance().logger(), "Fail to write to Graphite " << host << ":" << port << ". e.what() = " << e.what() << ", e.message() = " << e.message());
    }
}
@@ -164,7 +164,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
for (const String & missing_partition : missing_partitions)
    ss << " " << missing_partition;

LOG_WARNING(log, "There are no " << missing_partitions.size() << " partitions from enabled_partitions in shard " << task_shard->getDescription() << " :" << ss.str());
}

LOG_DEBUG_FORMATTED(log, "Will copy {} partitions from shard {}", task_shard->partition_tasks.size(), task_shard->getDescription());

@@ -1297,7 +1297,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
/// NOTE: partition is still fresh if dirt discovery happens before cleaning
if (stat_shards.numChildren == 0)
{
    LOG_WARNING(log, "There are no workers for partition " << task_partition.name << " piece " << toString(current_piece_number) << ", but destination table contains " << count << " rows" << ". Partition will be dropped and refilled.");

    create_is_dirty_node(clean_state_clock);
    return TaskStatus::Error;

@@ -354,7 +354,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max);
int rc = setrlimit(RLIMIT_NOFILE, &rlim);
if (rc != 0)
    LOG_WARNING(log, "Cannot set max number of file descriptors to " << rlim.rlim_cur << ". Try to specify max_open_files according to your system limits. error: " << strerror(errno));
else
    LOG_DEBUG_FORMATTED(log, "Set max number of file descriptors to {} (was {}).", rlim.rlim_cur, old);
}
@@ -568,7 +568,7 @@ ConfigProcessor::LoadedConfig ConfigProcessor::loadConfigWithZooKeeperIncludes(
if (!zk_exception)
    throw;

LOG_WARNING(log, "Error while processing from_zk config includes: " + zk_exception->message() + ". Config will be loaded from preprocessed file: " + preprocessed_path);

config_xml = dom_parser.parse(preprocessed_path);
}

@@ -239,7 +239,7 @@ PoolWithFailoverBase<TNestedPool>::getMany(
}
else
{
    LOG_WARNING(log, "Connection failed at try №" << (shuffled_pool.error_count + 1) << ", reason: " << fail_message);
    ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);

    shuffled_pool.error_count = std::min(max_error_cap, shuffled_pool.error_count + 1);

@@ -18,7 +18,7 @@ CollapsingFinalBlockInputStream::~CollapsingFinalBlockInputStream()
void CollapsingFinalBlockInputStream::reportBadCounts()
{
    /// With inconsistent data, this is an unavoidable error that can not be easily fixed by admins. Therefore Warning.
    LOG_WARNING(log, "Incorrect data: number of rows with sign = 1 (" << count_positive << ") differs with number of rows with sign = -1 (" << count_negative << ") by more than one");
}

void CollapsingFinalBlockInputStream::reportBadSign(Int8 sign)
@@ -466,7 +466,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * loger, const Contex
table_name = unescapeForFileName(table_name);

if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER)
    LOG_WARNING(loger, "File " << metadata_file_path << " contains both UUID and table name. " "Will use name `" << table_name << "` instead of `" << create.table << "`");
create.table = table_name;
}

@@ -48,11 +48,11 @@ VolumeJBOD::VolumeJBOD(
    max_data_part_size = static_cast<decltype(max_data_part_size)>(sum_size * ratio / disks.size());
    for (size_t i = 0; i < disks.size(); ++i)
        if (sizes[i] < max_data_part_size)
            LOG_WARNING(logger, "Disk " << backQuote(disks[i]->getName()) << " on volume " << backQuote(config_prefix) << " have not enough space (" << formatReadableSizeWithBinarySuffix(sizes[i]) << ") for containing part the size of max_data_part_size (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << ")");
}
static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u;
if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE)
    LOG_WARNING(logger, "Volume " << backQuote(name) << " max_data_part_size is too low (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << " < " << formatReadableSizeWithBinarySuffix(MIN_PART_SIZE) << ")");
}

DiskPtr VolumeJBOD::getNextDisk()
@@ -200,7 +200,7 @@ void SelectStreamFactory::createForShard(
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
if (shard_info.hasRemoteConnections())
{
    LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.getNameForLogs() << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
    emplace_remote_stream();
}
else

@@ -236,7 +236,7 @@ void SelectStreamFactory::createForShard(

/// If we reached this point, local replica is stale.
ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica);
LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard " << shard_info.shard_num << " is stale (delay: " << local_delay << "s.)");

if (!settings.fallback_to_stale_replicas_for_distributed_queries)
{

@@ -284,7 +284,7 @@ void SelectStreamFactory::createForShard(
catch (const Exception & ex)
{
    if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
        LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard " << shard_num << " failed, will use stale local replica");
    else
        throw;
}
@@ -238,7 +238,7 @@ DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const

if (context.getSettingsRef().readonly)
{
    LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries" << " Set appropriate system_profile or distributed_ddl.profile to fix this.");
}

host_fqdn = getFQDNOrHostName();

@@ -334,7 +334,7 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason,
if (host_in_hostlist)
{
    /// This check could be slow a little bit
    LOG_WARNING(log, "There are two the same ClickHouse instances in task " << entry_name << ": " << task->host_id.readableString() << " and " << host.readableString() << ". Will use the first one only.");
}
else
{

@@ -424,7 +424,7 @@ void DDLWorker::processTasks()
{
    if (server_startup && e.code == Coordination::ZNONODE)
    {
        LOG_WARNING(log, "ZooKeeper NONODE error during startup. Ignoring entry " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
    }
    else
    {

@@ -433,7 +433,7 @@ void DDLWorker::processTasks()
    }
    catch (...)
    {
        LOG_WARNING(log, "An error occurred while processing task " << task.entry_name << " (" << task.entry.query << ") : " << getCurrentExceptionMessage(true));
        throw;
    }
}
@@ -506,7 +506,7 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
if (found_exact_match)
    return;

LOG_WARNING(log, "Not found the exact match of host " << task.host_id.readableString() << " from task " << task.entry_name << " in cluster " << task.cluster_name << " definition. Will try to find it using host name resolving.");

bool found_via_resolving = false;
for (size_t shard_num = 0; shard_num < shards.size(); ++shard_num)

@@ -597,7 +597,7 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
}
else
{
    LOG_WARNING(log, "Cannot parse metadata of partially dropped table " << table_id.getNameForLogs() << " from " << dropped_metadata_path << ". Will remove metadata file and data directory. Garbage may be left in /store directory and ZooKeeper.");
}

drop_time = Poco::File(dropped_metadata_path).getLastModified().epochTime();

@@ -350,7 +350,7 @@ private:
const auto & already_added = already_added_it->second;
if (!already_added->from_temp_repository && !repository->isTemporary())
{
    LOG_WARNING(log, type_name << " '" << object_name << "' is found " << (((path == already_added->path) && (repository->getName() == already_added->repository_name)) ? ("twice in the same file '" + path + "'") : ("both in file '" + already_added->path + "' and '" + path + "'")));
}
}
}
@@ -317,7 +317,7 @@ void StorageDistributedDirectoryMonitor::readHeader(
readVarUInt(initiator_revision, header_buf);
if (ClickHouseRevision::get() < initiator_revision)
{
    LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
}

readStringBinary(insert_query, header_buf);

@@ -668,7 +668,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
Names files;
volume->getDisk()->listFiles(to, files);

LOG_WARNING(storage.log, "Part directory " << fullPath(volume->getDisk(), to) << " already exists" << " and contains " << files.size() << " files. Removing it.");

volume->getDisk()->removeRecursive(to);
}

@@ -710,7 +710,7 @@ void IMergeTreeDataPart::remove() const

if (volume->getDisk()->exists(to))
{
    LOG_WARNING(storage.log, "Directory " << fullPath(volume->getDisk(), to) << " (to which part must be renamed before removing) already exists." " Most likely this is due to unclean restart. Removing it.");

    try
    {

@@ -791,7 +791,7 @@ String IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix)
if (!volume->getDisk()->exists(getFullRelativePath() + res))
    return res;

LOG_WARNING(storage.log, "Directory " << res << " (to detach to) already exists." " Will detach to directory with '_tryN' suffix.");
}

return res;
@@ -2914,9 +2914,9 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_
if (!destination_ptr)
{
    if (ttl_entry->destination_type == PartDestinationType::VOLUME)
        LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but volume was not found");
    else if (ttl_entry->destination_type == PartDestinationType::DISK)
        LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but disk was not found");
}
else
{

@@ -2925,9 +2925,9 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_
    return reservation;
else
    if (ttl_entry->destination_type == PartDestinationType::VOLUME)
        LOG_WARNING(log, "Would like to reserve space on volume '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
    else if (ttl_entry->destination_type == PartDestinationType::DISK)
        LOG_WARNING(log, "Would like to reserve space on disk '" << ttl_entry->destination_name << "' by TTL rule of table '" << log_name << "' but there is not enough space");
}
}

@@ -3053,7 +3053,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock);
if (covering_part)
{
    LOG_WARNING(data.log, "Tried to commit obsolete part " << part->name << " covered by " << covering_part->getNameWithState());

    part->remove_time.store(0, std::memory_order_relaxed); /// The part will be removed without waiting for old_parts_lifetime seconds.
    data.modifyPartState(part, DataPartState::Outdated);
@@ -366,7 +366,7 @@ bool MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition(
if (now - disk_space_warning_time > 3600)
{
    disk_space_warning_time = now;
    LOG_WARNING(log, "Won't merge parts from " << parts.front()->name << " to " << (*prev_it)->name << " because not enough free space: " << formatReadableSizeWithBinarySuffix(available_disk_space) << " free and unreserved, " << formatReadableSizeWithBinarySuffix(sum_bytes) << " required now (+" << static_cast<int>((DISK_USAGE_COEFFICIENT_TO_SELECT - 1.0) * 100) << "% on overhead); suppressing similar warnings for the next hour");
}

if (out_disable_reason)

@@ -1214,7 +1214,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
 * (NOTE: Merging with part that is not in ZK is not possible, see checks in 'createLogEntryToMergeParts'.)
 * - and after merge, this part will be removed in addition to parts that was merged.
 */
LOG_WARNING(log, "Unexpected number of parts removed when adding " << new_data_part->name << ": " << replaced_parts.size() << " instead of " << parts.size());
}
else
{

@@ -327,7 +327,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks()
    cached_block_stats.erase(first_outdated_block->node);
}
else if (rc)
    LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
else
{
    /// Successfully removed blocks have to be removed from cache
@@ -82,7 +82,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
/// If the part is in ZooKeeper, remove it from there and add the task to download it to the queue.
if (zookeeper->exists(part_path))
{
    LOG_WARNING(log, "Part " << part_name << " exists in ZooKeeper but not locally. " "Removing from ZooKeeper and queueing a fetch.");
    ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);

    storage.removePartAndEnqueueFetch(part_name);

@@ -143,7 +143,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par

if (found_part_with_the_same_min_block && found_part_with_the_same_max_block)
{
    LOG_WARNING(log, "Found parts with the same min block and with the same max block as the missing part " << part_name << ". Hoping that it will eventually appear as a result of a merge.");
    return;
}
}

@@ -1328,7 +1328,7 @@ MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(

if (part->info.getDataVersion() > desired_mutation_version)
{
    LOG_WARNING(log, "Data version of part " << part->name << " is already greater than " "desired mutation version " << desired_mutation_version);
    return MutationCommands{};
}

@@ -1337,7 +1337,7 @@ MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(
auto in_partition = mutations_by_partition.find(part->info.partition_id);
if (in_partition == mutations_by_partition.end())
{
    LOG_WARNING(log, "There are no mutations for partition ID " << part->info.partition_id << " (trying to mutate part " << part->name << " to " << toString(desired_mutation_version) << ")");
    return MutationCommands{};
}
@@ -1345,7 +1345,7 @@ MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(

auto end = in_partition->second.lower_bound(desired_mutation_version);
if (end == in_partition->second.end() || end->first != desired_mutation_version)
    LOG_WARNING(log, "Mutation with version " << desired_mutation_version << " not found in partition ID " << part->info.partition_id << " (trying to mutate part " << part->name + ")");
else
    ++end;

@@ -268,7 +268,7 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart()
if (!quorum_entry.replicas.count(storage.replica_name)
    && zookeeper->exists(storage.replica_path + "/parts/" + quorum_entry.part_name))
{
    LOG_WARNING(log, "We have part " << quorum_entry.part_name << " but we is not in quorum. Updating quorum. This shouldn't happen often.");
    storage.updateQuorum(quorum_entry.part_name);
}
}

@@ -189,7 +189,7 @@ Pipes StorageBuffer::read(
{
    if (!dest_columns.hasPhysical(column_name))
    {
        LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " doesn't have column " << backQuoteIfNeed(column_name) << ". The default values are used.");
        boost::range::remove_erase(columns_intersection, column_name);
        continue;
    }
@@ -197,14 +197,14 @@ Pipes StorageBuffer::read(
const auto & col = our_columns.getPhysical(column_name);
if (!dst_col.type->equals(*col.type))
{
    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has different type of column " << backQuoteIfNeed(column_name) << " (" << dst_col.type->getName() << " != " << col.type->getName() << "). Data from destination table are converted.");
    header_after_adding_defaults.getByName(column_name) = ColumnWithTypeAndName(dst_col.type, column_name);
}
}

if (columns_intersection.empty())
{
    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " has no common columns with block in buffer. Block of data is skipped.");
}
else
{

@@ -447,7 +447,7 @@ void StorageBuffer::startup()
{
    if (global_context.getSettingsRef().readonly)
    {
        LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data." << " Set appropriate system_profile to fix this.");
    }
@@ -653,7 +653,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl
auto column = block.getByName(dst_col.name);
if (!column.type->equals(*dst_col.type))
{
    LOG_WARNING(log, "Destination table " << destination_id.getNameForLogs() << " have different type of column " << backQuoteIfNeed(column.name) << " (" << dst_col.type->getName() << " != " << column.type->getName() << "). Block of data is converted.");
    column.column = castColumn(column, dst_col.type);
    column.type = dst_col.type;
}

@@ -669,7 +669,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl
}

if (block_to_write.columns() != block.columns())
    LOG_WARNING(log, "Not all columns from block in buffer exist in destination table " << destination_id.getNameForLogs() << ". Some columns are discarded.");

auto list_of_columns = std::make_shared<ASTExpressionList>();
insert->columns = list_of_columns;

@@ -214,7 +214,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
skip_sanity_checks = true;
current_zookeeper->remove(replica_path + "/flags/force_restore_data");

LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag " << replica_path << "/flags/force_restore_data).");
}
else if (has_force_restore_data_flag)
{
@@ -342,7 +342,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
/// Replica could be inactive.
if (!zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))
{
    LOG_WARNING(log, "Replica " << replica << " is not active during mutation. " "Mutation will be done asynchronously when replica becomes active.");

    inactive_replicas.emplace(replica);
    break;

@@ -830,7 +830,7 @@ void StorageReplicatedMergeTree::checkPartChecksumsAndAddCommitOps(const zkutil:
}
else
{
    LOG_WARNING(log, "checkPartAndAddToZooKeeper: node " << replica_path + "/parts/" + part_name << " already exists." << " Will not commit any nodes.");
}
}

@@ -1003,7 +1003,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)
}
if (part->name != name)
{
    LOG_WARNING(log, "Part " << name << " is covered by " << part->name << " but should be merged into " << entry.new_part_name << ". This shouldn't happen often.");
    have_all_parts = false;
    break;
}
@@ -1370,7 +1370,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry)
}
else
{
    LOG_WARNING(log, "No active replica has part " << entry.new_part_name << ", but that part needs quorum and /quorum/status contains entry about another part " << quorum_entry.part_name << ". It means that part was successfully written to " << entry.quorum << " replicas, but then all of them goes offline." << " Or it is a bug.");
}
}
}

@@ -2297,7 +2297,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMergeParts(
const auto & part = parts[i];
if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
{
    LOG_WARNING(log, "Part " << part->name << " (that was selected for merge)" << " with age " << (time(nullptr) - part->modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't do merge with that part and will check it.");
    enqueuePartForCheck(part->name);
}
}

@@ -2339,7 +2339,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMutatePart(const IMergeTreeData
{
    if (part.modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
    {
        LOG_WARNING(log, "Part " << part.name << " (that was selected for mutation)" << " with age " << (time(nullptr) - part.modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't mutate that part and will check it.");
        enqueuePartForCheck(part.name);
    }
@@ -4775,7 +4775,7 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeper(
    continue;
}
else
    LOG_WARNING(log, "Cannot remove part " << part_names[i] << " from ZooKeeper: " << zkutil::ZooKeeper::error2string(response.error));
}
}

@@ -4824,7 +4824,7 @@ void StorageReplicatedMergeTree::clearBlocksInPartition(
    zookeeper.removeRecursive(path);
}
else if (rc)
    LOG_WARNING(log, "Error while deleting ZooKeeper path `" << path << "`: " + zkutil::ZooKeeper::error2string(rc) << ", ignoring.");
}

LOG_TRACE_FORMATTED(log, "Deleted {} deduplication block IDs in partition ID {}", to_delete_futures.size(), partition_id);