Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)
Remove duplicate whitespaces (preparation)
This commit is contained in:
parent 86b4d5a86f
commit 7fed65cbe2
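Every hunk below makes the same one-character change: the stray space after the opening parenthesis of a LOG_WARNING call is dropped, so each call site reads LOG_WARNING(logger, ...) instead of LOG_WARNING( logger, ...). The "(preparation)" in the title suggests the goal is a single, uniform call shape, which would make a later mechanical rewrite of these logging calls easier to script. Below is a rough, self-contained C++ sketch of the stream-style logging pattern these call sites follow; the Logger struct and the macro body are hypothetical stand-ins, not ClickHouse's actual definitions, and the example values are made up.

#include <iostream>
#include <sstream>
#include <string>

/// Hypothetical stand-in for a stream-style logging macro: the first argument
/// is a logger, everything after the comma is a chain of `operator<<` pieces
/// that gets assembled into one string and printed at WARNING level.
struct Logger { std::string name; };

#define LOG_WARNING(logger, message) \
    do \
    { \
        std::ostringstream oss; \
        oss << message; /* `message` may itself contain `<<` chains */ \
        std::cerr << "<Warning> " << (logger)->name << ": " << oss.str() << '\n'; \
    } while (false)

int main()
{
    Logger log{"VolumeJBOD"};

    /// Before this commit some call sites had a space after the '(':
    ///     LOG_WARNING( &log, ...);
    /// After it, every call site has exactly this shape:
    LOG_WARNING(&log, "max_data_part_size is too low (" << 4 << " MiB < " << 8 << " MiB)");
    return 0;
}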
@@ -568,7 +568,7 @@ ConfigProcessor::LoadedConfig ConfigProcessor::loadConfigWithZooKeeperIncludes(
         if (!zk_exception)
             throw;
 
-        LOG_WARNING( log, "Error while processing from_zk config includes: " + zk_exception->message() + ". Config will be loaded from preprocessed file: " + preprocessed_path);
+        LOG_WARNING(log, "Error while processing from_zk config includes: " + zk_exception->message() + ". Config will be loaded from preprocessed file: " + preprocessed_path);
 
         config_xml = dom_parser.parse(preprocessed_path);
     }
@@ -48,11 +48,11 @@ VolumeJBOD::VolumeJBOD(
         max_data_part_size = static_cast<decltype(max_data_part_size)>(sum_size * ratio / disks.size());
         for (size_t i = 0; i < disks.size(); ++i)
             if (sizes[i] < max_data_part_size)
-                LOG_WARNING( logger, "Disk " << backQuote(disks[i]->getName()) << " on volume " << backQuote(config_prefix) << " have not enough space (" << formatReadableSizeWithBinarySuffix(sizes[i]) << ") for containing part the size of max_data_part_size (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << ")");
+                LOG_WARNING(logger, "Disk " << backQuote(disks[i]->getName()) << " on volume " << backQuote(config_prefix) << " have not enough space (" << formatReadableSizeWithBinarySuffix(sizes[i]) << ") for containing part the size of max_data_part_size (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << ")");
     }
     static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u;
     if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE)
-        LOG_WARNING( logger, "Volume " << backQuote(name) << " max_data_part_size is too low (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << " < " << formatReadableSizeWithBinarySuffix(MIN_PART_SIZE) << ")");
+        LOG_WARNING(logger, "Volume " << backQuote(name) << " max_data_part_size is too low (" << formatReadableSizeWithBinarySuffix(max_data_part_size) << " < " << formatReadableSizeWithBinarySuffix(MIN_PART_SIZE) << ")");
 }
 
 DiskPtr VolumeJBOD::getNextDisk()
@@ -200,7 +200,7 @@ void SelectStreamFactory::createForShard(
         ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
         if (shard_info.hasRemoteConnections())
         {
-            LOG_WARNING( &Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.getNameForLogs() << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
+            LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.getNameForLogs() << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
             emplace_remote_stream();
         }
         else
@@ -236,7 +236,7 @@ void SelectStreamFactory::createForShard(
 
         /// If we reached this point, local replica is stale.
         ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica);
-        LOG_WARNING( &Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard " << shard_info.shard_num << " is stale (delay: " << local_delay << "s.)");
+        LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard " << shard_info.shard_num << " is stale (delay: " << local_delay << "s.)");
 
         if (!settings.fallback_to_stale_replicas_for_distributed_queries)
         {
@@ -284,7 +284,7 @@ void SelectStreamFactory::createForShard(
         catch (const Exception & ex)
         {
             if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
-                LOG_WARNING( &Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard " << shard_num << " failed, will use stale local replica");
+                LOG_WARNING(&Logger::get("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard " << shard_num << " failed, will use stale local replica");
             else
                 throw;
         }
@@ -350,7 +350,7 @@ private:
                 const auto & already_added = already_added_it->second;
                 if (!already_added->from_temp_repository && !repository->isTemporary())
                 {
-                    LOG_WARNING( log, type_name << " '" << object_name << "' is found " << (((path == already_added->path) && (repository->getName() == already_added->repository_name)) ? ("twice in the same file '" + path + "'") : ("both in file '" + already_added->path + "' and '" + path + "'")));
+                    LOG_WARNING(log, type_name << " '" << object_name << "' is found " << (((path == already_added->path) && (repository->getName() == already_added->repository_name)) ? ("twice in the same file '" + path + "'") : ("both in file '" + already_added->path + "' and '" + path + "'")));
                 }
             }
         }
@@ -317,7 +317,7 @@ void StorageDistributedDirectoryMonitor::readHeader(
         readVarUInt(initiator_revision, header_buf);
         if (ClickHouseRevision::get() < initiator_revision)
         {
-            LOG_WARNING( log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
+            LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
         }
 
         readStringBinary(insert_query, header_buf);