Activate clang-tidy warning "readability-container-contains"

This check suggests replacing <Container>.count() with
<Container>.contains(), which is more expressive and, in the case of
multimaps/multisets, also faster.
This commit is contained in:
Robert Schulze 2022-04-18 12:18:43 +02:00
parent f5e270b2f8
commit 118e94523c
No known key found for this signature in database
GPG Key ID: 15C08E29800B665D
130 changed files with 382 additions and 381 deletions

View File

@ -37,6 +37,7 @@ Checks: '-*,
readability-avoid-const-params-in-decls,
readability-const-return-type,
readability-container-contains,
readability-container-size-empty,
readability-convert-member-functions-to-static,
readability-delete-null-pointer,

View File

@ -145,7 +145,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
for (const String & partition_name : existing_partitions_names)
{
if (!task_table.enabled_partitions_set.count(partition_name))
if (!task_table.enabled_partitions_set.contains(partition_name))
{
LOG_INFO(log, "Partition {} will not be processed, since it is not in enabled_partitions of {}", partition_name, task_table.table_id);
}
@ -933,7 +933,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
/// Process each partition that is present in cluster
for (const String & partition_name : task_table.ordered_partition_names)
{
if (!task_table.cluster_partitions.count(partition_name))
if (!task_table.cluster_partitions.contains(partition_name))
throw Exception("There are no expected partition " + partition_name + ". It is a bug", ErrorCodes::LOGICAL_ERROR);
ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name];
@ -953,10 +953,10 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
for (const TaskShardPtr & shard : task_table.all_shards)
{
/// Does shard have a node with current partition?
if (shard->partition_tasks.count(partition_name) == 0)
if (!shard->partition_tasks.contains(partition_name))
{
/// If not, did we check existence of that partition previously?
if (shard->checked_partitions.count(partition_name) == 0)
if (!shard->checked_partitions.contains(partition_name))
{
auto check_shard_has_partition = [&] () { return checkShardHasPartition(timeouts, *shard, partition_name); };
bool has_partition = retry(check_shard_has_partition);

View File

@ -145,7 +145,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
|| TableFunctionFactory::instance().isTableFunctionName(what)
|| FormatFactory::instance().isOutputFormat(what)
|| FormatFactory::instance().isInputFormat(what)
|| additional_names.count(what);
|| additional_names.contains(what);
};
WriteBufferFromFileDescriptor out(STDOUT_FILENO);

View File

@ -715,7 +715,7 @@ void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & fil
uint32_t this_line_in_prev_commit = line_change.hunk_start_line_number_old
+ (line_change.line_number_new - line_change.hunk_start_line_number_new);
if (deleted_lines.count(this_line_in_prev_commit))
if (deleted_lines.contains(this_line_in_prev_commit))
{
const auto & prev_commit = deleted_lines[this_line_in_prev_commit];
if (prev_commit.time <= commit.time)
@ -1150,7 +1150,7 @@ void processLog(const Options & options)
readString(hash, in);
assertChar('\n', in);
if (!options.skip_commits.count(hash))
if (!options.skip_commits.contains(hash))
hashes.emplace_back(std::move(hash));
}

View File

@ -24,7 +24,7 @@ void SharedLibraryHandlerFactory::create(
const std::vector<std::string> & attributes_names)
{
std::lock_guard lock(mutex);
if (!library_handlers.count(dictionary_id))
if (!library_handlers.contains(dictionary_id))
library_handlers.emplace(std::make_pair(dictionary_id, std::make_shared<SharedLibraryHandler>(library_path, library_settings, sample_block, attributes_names)));
else
LOG_WARNING(&Poco::Logger::get("SharedLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id);

View File

@ -178,7 +178,7 @@ namespace
}
else
{
if (nodes.count(keyword))
if (nodes.contains(keyword))
throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR);
node = std::make_unique<Node>(keyword, node_type);
nodes[node->keyword] = node.get();
@ -204,7 +204,7 @@ namespace
{
auto parent_node = std::make_unique<Node>(parent_keyword);
it_parent = nodes.emplace(parent_node->keyword, parent_node.get()).first;
assert(!owned_nodes.count(parent_node->keyword));
assert(!owned_nodes.contains(parent_node->keyword));
std::string_view parent_keyword_as_string_view = parent_node->keyword;
owned_nodes[parent_keyword_as_string_view] = std::move(parent_node);
}
@ -224,9 +224,9 @@ namespace
# undef MAKE_ACCESS_FLAGS_NODE
if (!owned_nodes.count("NONE"))
if (!owned_nodes.contains("NONE"))
throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR);
if (!owned_nodes.count("ALL"))
if (!owned_nodes.contains("ALL"))
throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR);
all_node = std::move(owned_nodes["ALL"]);

View File

@ -430,7 +430,7 @@ std::vector<UUID> DiskAccessStorage::findAllImpl(AccessEntityType type) const
bool DiskAccessStorage::exists(const UUID & id) const
{
std::lock_guard lock{mutex};
return entries_by_id.count(id);
return entries_by_id.contains(id);
}
@ -611,7 +611,7 @@ bool DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_
bool name_changed = (new_name != old_name);
if (name_changed)
{
if (entries_by_name.count(new_name))
if (entries_by_name.contains(new_name))
throwNameCollisionCannotRename(type, old_name, new_name);
scheduleWriteLists(type);
}

View File

@ -279,7 +279,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
if (bracket_pos != std::string::npos)
ldap_server_name.resize(bracket_pos);
if (ldap_client_params_blueprint.count(ldap_server_name) > 0)
if (ldap_client_params_blueprint.contains(ldap_server_name))
throw Exception("Multiple LDAP servers with the same name are not allowed", ErrorCodes::BAD_ARGUMENTS);
LDAPClient::Params ldap_client_params_tmp;

View File

@ -140,7 +140,7 @@ void LDAPAccessStorage::applyRoleChangeNoLock(bool grant, const UUID & role_id,
std::vector<UUID> user_ids;
// Build a list of ids of the relevant users.
if (common_role_names.count(role_name))
if (common_role_names.contains(role_name))
{
user_ids = memory_storage.findAll<User>();
}
@ -254,7 +254,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR
// Cleanup users_per_roles and granted_role_* mappings.
for (const auto & old_role_name : old_role_names)
{
if (local_role_names.count(old_role_name))
if (local_role_names.contains(old_role_name))
continue;
const auto rit = users_per_roles.find(old_role_name);
@ -269,7 +269,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR
users_per_roles.erase(rit);
if (common_role_names.count(old_role_name))
if (common_role_names.contains(old_role_name))
continue;
const auto iit = granted_role_ids.find(old_role_name);

View File

@ -41,7 +41,7 @@ std::vector<UUID> MemoryAccessStorage::findAllImpl(AccessEntityType type) const
bool MemoryAccessStorage::exists(const UUID & id) const
{
std::lock_guard lock{mutex};
return entries_by_id.count(id);
return entries_by_id.contains(id);
}

View File

@ -398,7 +398,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
for (const auto & storage : *nested_storages)
{
if (!subscriptions.count(storage))
if (!subscriptions.contains(storage))
added_subscriptions[static_cast<size_t>(type)].push_back({storage, nullptr});
}
}
@ -436,7 +436,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
auto & subscriptions = subscriptions_to_nested_storages[static_cast<size_t>(type)];
for (auto & [storage, subscription] : added_subscriptions[static_cast<size_t>(type)])
{
if (!subscriptions.count(storage) && (boost::range::find(*nested_storages, storage) != nested_storages->end())
if (!subscriptions.contains(storage) && (boost::range::find(*nested_storages, storage) != nested_storages->end())
&& !handlers_by_type[static_cast<size_t>(type)].empty())
{
subscriptions.emplace(std::move(storage), std::move(subscription));

View File

@ -575,7 +575,7 @@ std::vector<UUID> ReplicatedAccessStorage::findAllImpl(AccessEntityType type) co
bool ReplicatedAccessStorage::exists(const UUID & id) const
{
std::lock_guard lock{mutex};
return entries_by_id.count(id);
return entries_by_id.contains(id);
}

View File

@ -266,11 +266,11 @@ std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPrope
bool AggregateFunctionFactory::isAggregateFunctionName(const String & name) const
{
if (aggregate_functions.count(name) || isAlias(name))
if (aggregate_functions.contains(name) || isAlias(name))
return true;
String name_lowercase = Poco::toLower(name);
if (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase))
if (case_insensitive_aggregate_functions.contains(name_lowercase) || isAlias(name_lowercase))
return true;
if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name))

View File

@ -300,7 +300,7 @@ Strings BackupImpl::listFiles(const String & prefix, const String & terminator)
bool BackupImpl::fileExists(const String & file_name) const
{
std::lock_guard lock{mutex};
return file_infos.count(file_name) != 0;
return file_infos.contains(file_name);
}
size_t BackupImpl::getFileSize(const String & file_name) const

View File

@ -86,7 +86,7 @@ AvailableCollationLocales::LocalesVector AvailableCollationLocales::getAvailable
bool AvailableCollationLocales::isCollationSupported(const std::string & locale_name) const
{
/// We support locale names in any case, so we have to convert all to lower case
return locales_map.count(Poco::toLower(locale_name));
return locales_map.contains(Poco::toLower(locale_name));
}
Collator::Collator(const std::string & locale_)

View File

@ -29,7 +29,7 @@ TEST(ColumnUnique, InsertRange)
String str = toString(i % mod_to);
column_string->insertData(str.data(), str.size());
if (ref_map.count(str) == 0)
if (!ref_map.contains(str))
ref_map[str] = ref_map.size();
indexes[i]= ref_map[str];
@ -68,7 +68,7 @@ TEST(ColumnUnique, InsertRangeWithOverflow)
String str = toString(i % mod_to);
column_string->insertData(str.data(), str.size());
if (ref_map.count(str) == 0)
if (!ref_map.contains(str))
ref_map[str] = ref_map.size();
indexes[i]= ref_map[str];

View File

@ -77,7 +77,7 @@ bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const S
if ((left_subkeys.size() != right_subkeys.size()) || (left_subkeys.size() != subkeys.size()))
return false;
for (const auto & right_subkey : right_subkeys)
if (!left_subkeys.count(right_subkey))
if (!left_subkeys.contains(right_subkey))
return false;
}

View File

@ -179,7 +179,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
CreateResponse response;
Undo undo;
if (container.count(path))
if (container.contains(path))
{
response.error = Error::ZNODEEXISTS;
}

View File

@ -71,7 +71,7 @@ std::string toString(OpNum op_num)
OpNum getOpNum(int32_t raw_op_num)
{
if (!VALID_OPERATIONS.count(raw_op_num))
if (!VALID_OPERATIONS.contains(raw_op_num))
throw Exception("Operation " + std::to_string(raw_op_num) + " is unknown", Error::ZUNIMPLEMENTED);
return static_cast<OpNum>(raw_op_num);
}

View File

@ -45,7 +45,7 @@ uint64_t ACLMap::convertACLs(const Coordination::ACLs & acls)
if (acls.empty())
return 0;
if (acl_to_num.count(acls))
if (acl_to_num.contains(acls))
return acl_to_num[acls];
/// Start from one
@ -62,7 +62,7 @@ Coordination::ACLs ACLMap::convertNumber(uint64_t acls_id) const
if (acls_id == 0)
return Coordination::ACLs{};
if (!num_to_acl.count(acls_id))
if (!num_to_acl.contains(acls_id))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown ACL id {}. It's a bug", acls_id);
return num_to_acl.at(acls_id);
@ -82,7 +82,7 @@ void ACLMap::addUsage(uint64_t acl_id)
void ACLMap::removeUsage(uint64_t acl_id)
{
if (usage_counter.count(acl_id) == 0)
if (!usage_counter.contains(acl_id))
return;
usage_counter[acl_id]--;

View File

@ -201,7 +201,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
const Coordination::ZooKeeperSessionIDResponse & session_id_resp = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
/// Nobody waits for this session id
if (session_id_resp.server_id != server->getServerID() || !new_session_id_response_callback.count(session_id_resp.internal_id))
if (session_id_resp.server_id != server->getServerID() || !new_session_id_response_callback.contains(session_id_resp.internal_id))
return;
auto callback = new_session_id_response_callback[session_id_resp.internal_id];
@ -234,7 +234,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
{
/// If session was already disconnected than we will ignore requests
std::lock_guard lock(session_to_response_callback_mutex);
if (session_to_response_callback.count(session_id) == 0)
if (!session_to_response_callback.contains(session_id))
return false;
}

View File

@ -196,7 +196,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
writeBinary(timeout, out);
KeeperStorage::AuthIDs ids;
if (snapshot.session_and_auth.count(session_id))
if (snapshot.session_and_auth.contains(session_id))
ids = snapshot.session_and_auth.at(session_id);
writeBinary(ids.size(), out);

View File

@ -109,7 +109,7 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
int32_t priority = config.getInt(full_prefix + ".priority", 1);
bool start_as_follower = config.getBool(full_prefix + ".start_as_follower", false);
if (client_ports.count(port) != 0)
if (client_ports.contains(port))
{
throw Exception(ErrorCodes::RAFT_ERROR, "Raft configuration contains hostname '{}' with port '{}' which is equal to '{}' in server configuration",
hostname, port, client_ports[port]);
@ -133,7 +133,7 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
result.servers_start_as_followers.insert(new_server_id);
auto endpoint = hostname + ":" + std::to_string(port);
if (check_duplicated_hostnames.count(endpoint))
if (check_duplicated_hostnames.contains(endpoint))
{
throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contains duplicate endpoints: "
"endpoint {} has been already added with id {}, but going to add it one more time with id {}",
@ -278,14 +278,14 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A
/// First of all add new servers
for (auto [new_id, server_config] : new_ids)
{
if (!old_ids.count(new_id))
if (!old_ids.contains(new_id))
result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::AddServer, server_config});
}
/// After that remove old ones
for (auto [old_id, server_config] : old_ids)
{
if (!new_ids.count(old_id))
if (!new_ids.contains(old_id))
result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::RemoveServer, server_config});
}

View File

@ -163,7 +163,7 @@ NamesAndTypesList NamesAndTypesList::filter(const NameSet & names) const
NamesAndTypesList res;
for (const NameAndTypePair & column : *this)
{
if (names.count(column.name))
if (names.contains(column.name))
res.push_back(column);
}
return res;

View File

@ -158,7 +158,7 @@ NamesAndTypesList collect(const NamesAndTypesList & names_and_types)
auto nested_types = getSubcolumnsOfNested(names_and_types);
for (const auto & name_type : names_and_types)
if (!isArray(name_type.type) || !nested_types.count(splitName(name_type.name).first))
if (!isArray(name_type.type) || !nested_types.contains(splitName(name_type.name).first))
res.push_back(name_type);
for (const auto & name_type : nested_types)

View File

@ -682,7 +682,7 @@ void replaceMissedSubcolumnsByConstants(
/// Replace missed subcolumns to default literals of theirs type.
for (const auto & [name, type] : missed_names_types)
if (identifiers.count(name))
if (identifiers.contains(name))
addConstantToWithClause(query, name, type);
}

View File

@ -592,7 +592,7 @@ DataTypePtr getLeastSupertype(const TypeIndexSet & types, bool allow_conversion_
if (types_set.empty())
return std::make_shared<DataTypeNothing>();
if (types.count(TypeIndex::String))
if (types.contains(TypeIndex::String))
{
if (types.size() != 1)
return throw_or_return(getExceptionMessagePrefix(types) + " because some of them are String and some of them are not", ErrorCodes::NO_COMMON_TYPE);

View File

@ -353,7 +353,7 @@ void DatabaseAtomic::assertDetachedTableNotInUse(const UUID & uuid)
/// 3. ATTACH TABLE table; (new instance of Storage with the same UUID is created, instances share data on disk)
/// 4. INSERT INTO table ...; (both Storage instances writes data without any synchronization)
/// To avoid it, we remember UUIDs of detached tables and does not allow ATTACH table with such UUID until detached instance still in use.
if (detached_tables.count(uuid))
if (detached_tables.contains(uuid))
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Cannot attach table with UUID {}, "
"because it was detached but still used by some query. Retry later.", toString(uuid));
}
@ -571,7 +571,7 @@ void DatabaseAtomic::waitDetachedTableNotInUse(const UUID & uuid)
{
std::lock_guard lock{mutex};
not_in_use = cleanupDetachedTables();
if (detached_tables.count(uuid) == 0)
if (!detached_tables.contains(uuid))
return;
}
std::this_thread::sleep_for(std::chrono::milliseconds(100));

View File

@ -82,7 +82,7 @@ bool DatabasePostgreSQL::empty() const
auto tables_list = fetchPostgreSQLTablesList(connection_holder->get(), configuration.schema);
for (const auto & table_name : tables_list)
if (!detached_or_dropped.count(table_name))
if (!detached_or_dropped.contains(table_name))
return false;
return true;
@ -102,7 +102,7 @@ DatabaseTablesIteratorPtr DatabasePostgreSQL::getTablesIterator(ContextPtr local
auto table_names = fetchPostgreSQLTablesList(connection_holder->get(), configuration.schema);
for (const auto & table_name : table_names)
if (!detached_or_dropped.count(table_name))
if (!detached_or_dropped.contains(table_name))
tables[table_name] = fetchTable(table_name, local_context, true);
}
catch (...)
@ -156,7 +156,7 @@ bool DatabasePostgreSQL::isTableExist(const String & table_name, ContextPtr /* c
{
std::lock_guard<std::mutex> lock(mutex);
if (detached_or_dropped.count(table_name))
if (detached_or_dropped.contains(table_name))
return false;
return checkPostgresTable(table_name);
@ -167,7 +167,7 @@ StoragePtr DatabasePostgreSQL::tryGetTable(const String & table_name, ContextPtr
{
std::lock_guard<std::mutex> lock(mutex);
if (!detached_or_dropped.count(table_name))
if (!detached_or_dropped.contains(table_name))
return fetchTable(table_name, local_context, false);
return StoragePtr{};
@ -176,7 +176,7 @@ StoragePtr DatabasePostgreSQL::tryGetTable(const String & table_name, ContextPtr
StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr, bool table_checked) const
{
if (!cache_tables || !cached_tables.count(table_name))
if (!cache_tables || !cached_tables.contains(table_name))
{
if (!table_checked && !checkPostgresTable(table_name))
return StoragePtr{};
@ -217,7 +217,7 @@ void DatabasePostgreSQL::attachTable(ContextPtr /* context_ */, const String & t
"Cannot attach PostgreSQL table {} because it does not exist in PostgreSQL",
getTableNameForLogs(table_name), database_name);
if (!detached_or_dropped.count(table_name))
if (!detached_or_dropped.contains(table_name))
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS,
"Cannot attach PostgreSQL table {} because it already exists",
getTableNameForLogs(table_name), database_name);
@ -237,7 +237,7 @@ StoragePtr DatabasePostgreSQL::detachTable(ContextPtr /* context_ */, const Stri
{
std::lock_guard<std::mutex> lock{mutex};
if (detached_or_dropped.count(table_name))
if (detached_or_dropped.contains(table_name))
throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Cannot detach table {}. It is already dropped/detached", getTableNameForLogs(table_name));
if (!checkPostgresTable(table_name))
@ -271,7 +271,7 @@ void DatabasePostgreSQL::dropTable(ContextPtr, const String & table_name, bool /
if (!checkPostgresTable(table_name))
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Cannot drop table {} because it does not exist", getTableNameForLogs(table_name));
if (detached_or_dropped.count(table_name))
if (detached_or_dropped.contains(table_name))
throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} is already dropped/detached", getTableNameForLogs(table_name));
fs::path mark_table_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
@ -323,7 +323,7 @@ void DatabasePostgreSQL::removeOutdatedTables()
/// (Tables are cached only after being accessed at least once)
for (auto iter = cached_tables.begin(); iter != cached_tables.end();)
{
if (!actual_tables.count(iter->first))
if (!actual_tables.contains(iter->first))
iter = cached_tables.erase(iter);
else
++iter;
@ -332,7 +332,7 @@ void DatabasePostgreSQL::removeOutdatedTables()
for (auto iter = detached_or_dropped.begin(); iter != detached_or_dropped.end();)
{
if (!actual_tables.count(*iter))
if (!actual_tables.contains(*iter))
{
auto table_name = *iter;
iter = detached_or_dropped.erase(iter);

View File

@ -68,7 +68,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig(
throw Exception("Disk name can contain only alphanumeric and '_' (" + disk_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
auto disk_config_prefix = config_prefix + "." + disk_name;
if (result->getDisksMap().count(disk_name) == 0)
if (!result->getDisksMap().contains(disk_name))
{
result->addToDiskMap(disk_name, factory.create(disk_name, config, disk_config_prefix, context, result->getDisksMap()));
}

View File

@ -282,7 +282,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & volume : getVolumes())
{
if (new_volume_names.count(volume->getName()) == 0)
if (!new_volume_names.contains(volume->getName()))
throw Exception("New storage policy " + backQuote(name) + " shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);
std::unordered_set<String> new_disk_names;
@ -290,7 +290,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
new_disk_names.insert(disk->getName());
for (const auto & disk : volume->getDisks())
if (new_disk_names.count(disk->getName()) == 0)
if (!new_disk_names.contains(disk->getName()))
throw Exception("New storage policy " + backQuote(name) + " shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
}
}
@ -387,7 +387,7 @@ StoragePolicySelectorPtr StoragePolicySelector::updateFromConfig(const Poco::Uti
/// First pass, check.
for (const auto & [name, policy] : policies)
{
if (result->policies.count(name) == 0)
if (!result->policies.contains(name))
throw Exception("Storage policy " + backQuote(name) + " is missing in new configuration", ErrorCodes::BAD_ARGUMENTS);
policy->checkCompatibleWith(result->policies[name]);

View File

@ -896,7 +896,7 @@ struct JSONExtractTree
if (element.isInt64())
{
Type value;
if (!accurate::convertNumeric(element.getInt64(), value) || !only_values.count(value))
if (!accurate::convertNumeric(element.getInt64(), value) || !only_values.contains(value))
return false;
col_vec.insertValue(value);
return true;
@ -905,7 +905,7 @@ struct JSONExtractTree
if (element.isUInt64())
{
Type value;
if (!accurate::convertNumeric(element.getUInt64(), value) || !only_values.count(value))
if (!accurate::convertNumeric(element.getUInt64(), value) || !only_values.contains(value))
return false;
col_vec.insertValue(value);
return true;

View File

@ -333,7 +333,7 @@ struct MinHashImpl
{
void update(UInt64 hash, BytesRef ref, size_t limit)
{
if (values.count(hash))
if (values.contains(hash))
return;
values[hash] = ref;

View File

@ -38,7 +38,7 @@ public:
FunctionFormatRow(const String & format_name_, ContextPtr context_) : format_name(format_name_), context(context_)
{
if (!FormatFactory::instance().getAllFormats().count(format_name))
if (!FormatFactory::instance().getAllFormats().contains(format_name))
throw Exception("Unknown format " + format_name, ErrorCodes::UNKNOWN_FORMAT);
}

View File

@ -86,7 +86,7 @@ public:
void callLogImpl(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * message)
{
const auto & [level, prio] = convertLogLevel(log_level);
if (tag_loggers.count(tag) > 0)
if (tag_loggers.contains(tag))
{
LOG_IMPL(tag_loggers[tag], level, prio, "{}", message);
}

View File

@ -72,7 +72,7 @@ void ActionLocksManager::remove(const StoragePtr & table, StorageActionBlockType
{
std::lock_guard lock(mutex);
if (storage_locks.count(table.get()))
if (storage_locks.contains(table.get()))
storage_locks[table.get()].erase(action_type);
}

View File

@ -334,7 +334,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_
NameSet added;
for (const auto & node : index)
{
if (required_names.count(node->result_name) && added.count(node->result_name) == 0)
if (required_names.contains(node->result_name) && !added.contains(node->result_name))
{
required_nodes.push_back(node);
added.insert(node->result_name);
@ -344,7 +344,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_
if (added.size() < required_names.size())
{
for (const auto & name : required_names)
if (added.count(name) == 0)
if (!added.contains(name))
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER,
"Unknown column: {}, there are only columns {}", name, dumpNames());
}
@ -392,7 +392,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta
/// We cannot remove arrayJoin because it changes the number of rows.
bool is_array_join = node.type == ActionType::ARRAY_JOIN;
if (is_array_join && visited_nodes.count(&node) == 0)
if (is_array_join && !visited_nodes.contains(&node))
{
visited_nodes.insert(&node);
stack.push(&node);
@ -426,7 +426,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta
for (const auto * child : node->children)
{
if (visited_nodes.count(child) == 0)
if (!visited_nodes.contains(child))
{
stack.push(const_cast<Node *>(child));
visited_nodes.insert(child);
@ -434,8 +434,8 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta
}
}
nodes.remove_if([&](const Node & node) { return visited_nodes.count(&node) == 0; });
auto it = std::remove_if(inputs.begin(), inputs.end(), [&](const Node * node) { return visited_nodes.count(node) == 0; });
nodes.remove_if([&](const Node & node) { return !visited_nodes.contains(&node); });
auto it = std::remove_if(inputs.begin(), inputs.end(), [&](const Node * node) { return !visited_nodes.contains(node); });
inputs.erase(it, inputs.end());
}
@ -535,7 +535,7 @@ Block ActionsDAG::updateHeader(Block header) const
{
for (const auto * output : index)
{
if (node_to_column.count(output) == 0)
if (!node_to_column.contains(output))
{
std::stack<Frame> stack;
stack.push({.node = output});
@ -548,7 +548,7 @@ Block ActionsDAG::updateHeader(Block header) const
while (frame.next_child < node->children.size())
{
const auto * child = node->children[frame.next_child];
if (node_to_column.count(child) == 0)
if (!node_to_column.contains(child))
{
stack.push({.node = child});
break;
@ -660,7 +660,7 @@ NameSet ActionsDAG::foldActionsByProjection(
for (const auto * child : node->children)
{
if (visited_nodes.count(child) == 0)
if (!visited_nodes.contains(child))
{
stack.push(const_cast<Node *>(child));
visited_nodes.insert(child);
@ -669,9 +669,9 @@ NameSet ActionsDAG::foldActionsByProjection(
}
/// Clean up unused nodes after folding.
std::erase_if(inputs, [&](const Node * node) { return visited_nodes.count(node) == 0; });
std::erase_if(index, [&](const Node * node) { return visited_index_names.count(node->result_name) == 0; });
nodes.remove_if([&](const Node & node) { return visited_nodes.count(&node) == 0; });
std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); });
std::erase_if(index, [&](const Node * node) { return !visited_index_names.contains(node->result_name); });
nodes.remove_if([&](const Node & node) { return !visited_nodes.contains(&node); });
/// Calculate the required columns after folding.
NameSet next_required_columns;
@ -1299,7 +1299,7 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split
/// DFS. Decide if node is needed by split.
for (const auto & node : nodes)
{
if (split_nodes.count(&node) == 0)
if (!split_nodes.contains(&node))
continue;
auto & cur_data = data[&node];
@ -1476,7 +1476,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
/// DFS. Decide if node depends on ARRAY JOIN.
for (const auto & node : nodes)
{
if (visited_nodes.count(&node))
if (visited_nodes.contains(&node))
continue;
visited_nodes.insert(&node);
@ -1491,7 +1491,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
{
const auto * child = cur.node->children[cur.next_child_to_visit];
if (visited_nodes.count(child) == 0)
if (!visited_nodes.contains(child))
{
visited_nodes.insert(child);
stack.push({.node = child});
@ -1504,12 +1504,12 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
if (cur.next_child_to_visit == cur.node->children.size())
{
bool depend_on_array_join = false;
if (cur.node->type == ActionType::INPUT && array_joined_columns.count(cur.node->result_name))
if (cur.node->type == ActionType::INPUT && array_joined_columns.contains(cur.node->result_name))
depend_on_array_join = true;
for (const auto * child : cur.node->children)
{
if (split_nodes.count(child) == 0)
if (!split_nodes.contains(child))
depend_on_array_join = true;
}
@ -1591,7 +1591,7 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere
{
for (const auto & child : node->children)
{
if (visited_nodes.count(child) == 0)
if (!visited_nodes.contains(child))
{
visited_nodes.insert(child);
stack.push(child);
@ -1624,7 +1624,7 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere
{
const auto * child = cur.node->children[cur.next_child_to_visit];
if (visited_nodes.count(child) == 0)
if (!visited_nodes.contains(child))
{
visited_nodes.insert(child);
stack.push({.node = child});
@ -1644,9 +1644,9 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere
allowed_nodes.emplace(cur.node);
}
if (predicates.count(cur.node))
if (predicates.contains(cur.node))
{
if (allowed_nodes.count(cur.node))
if (allowed_nodes.contains(cur.node))
{
if (allowed.insert(cur.node).second)
conjunction.allowed.push_back(cur.node);
@ -1721,7 +1721,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(NodeRawConstPtrs conjunctio
/// DFS. Clone actions.
for (const auto * predicate : conjunction)
{
if (nodes_mapping.count(predicate))
if (nodes_mapping.contains(predicate))
continue;
stack.push({.node = predicate});
@ -1733,7 +1733,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(NodeRawConstPtrs conjunctio
{
const auto * child = cur.node->children[cur.next_child_to_visit];
if (nodes_mapping.count(child) == 0)
if (!nodes_mapping.contains(child))
{
stack.push({.node = child});
break;

View File

@ -455,7 +455,7 @@ public:
return *node;
}
bool contains(const std::string & name) const { return map.count(name) > 0; }
bool contains(const std::string & name) const { return map.contains(name); }
};
ActionsMatcher::Data::Data(
@ -525,7 +525,7 @@ size_t ScopeStack::getColumnLevel(const std::string & name)
{
--i;
if (stack[i].inputs.count(name))
if (stack[i].inputs.contains(name))
return i;
const auto * node = stack[i].index->tryGetNode(name);

View File

@ -2736,7 +2736,7 @@ void Aggregator::mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVari
result.keys_size = params.keys_size;
result.key_sizes = key_sizes;
bool has_blocks_with_unknown_bucket = bucket_to_blocks.count(-1);
bool has_blocks_with_unknown_bucket = bucket_to_blocks.contains(-1);
/// First, parallel the merge for the individual buckets. Then we continue merge the data not allocated to the buckets.
if (has_two_level)

View File

@ -26,7 +26,7 @@ void ApplyWithAliasVisitor::visit(ASTPtr & ast, const Data & data)
}
for (const auto & with_alias : data.exprs)
{
if (!current_names.count(with_alias.first))
if (!current_names.contains(with_alias.first))
with->children.push_back(with_alias.second->clone());
}
}

View File

@ -20,7 +20,7 @@ void ApplyWithGlobalVisitor::visit(ASTSelectQuery & select, const std::map<Strin
}
for (const auto & with_alias : exprs)
{
if (!current_names.count(with_alias.first))
if (!current_names.contains(with_alias.first))
with->children.push_back(with_alias.second->clone());
}
}

View File

@ -85,7 +85,7 @@ const String & changeNameIfNeeded(const String & func_name, const String & child
{ "max", "min" }
};
if (literal.value < zeroField(literal.value) && matches.count(func_name) && matches.find(func_name)->second.count(child_name))
if (literal.value < zeroField(literal.value) && matches.contains(func_name) && matches.find(func_name)->second.contains(child_name))
return swap_to.find(func_name)->second;
return func_name;
@ -103,8 +103,8 @@ ASTPtr tryExchangeFunctions(const ASTFunction & func)
auto lower_name = Poco::toLower(func.name);
const ASTFunction * child_func = getInternalFunction(func);
if (!child_func || !child_func->arguments || child_func->arguments->children.size() != 2 || !supported.count(lower_name)
|| !supported.find(lower_name)->second.count(child_func->name))
if (!child_func || !child_func->arguments || child_func->arguments->children.size() != 2 || !supported.contains(lower_name)
|| !supported.find(lower_name)->second.contains(child_func->name))
return {};
auto original_alias = func.tryGetAlias();

View File

@ -39,7 +39,7 @@ void ArrayJoinAction::prepare(ColumnsWithTypeAndName & sample) const
{
for (auto & current : sample)
{
if (columns.count(current.name) == 0)
if (!columns.contains(current.name))
continue;
const DataTypeArray * array_type = typeid_cast<const DataTypeArray *>(&*current.type);
@ -116,7 +116,7 @@ void ArrayJoinAction::execute(Block & block)
{
ColumnWithTypeAndName & current = block.safeGetByPosition(i);
if (columns.count(current.name))
if (columns.contains(current.name))
{
if (!typeid_cast<const DataTypeArray *>(&*current.type))
throw Exception("ARRAY JOIN of not array: " + current.name, ErrorCodes::TYPE_MISMATCH);

View File

@ -63,8 +63,8 @@ void ColumnAliasesMatcher::visit(ASTIdentifier & node, ASTPtr & ast, Data & data
{
if (auto column_name = IdentifierSemantic::getColumnName(node))
{
if (data.array_join_result_columns.count(*column_name) || data.array_join_source_columns.count(*column_name)
|| data.private_aliases.count(*column_name) || !data.columns.has(*column_name))
if (data.array_join_result_columns.contains(*column_name) || data.array_join_source_columns.contains(*column_name)
|| data.private_aliases.contains(*column_name) || !data.columns.has(*column_name))
return;
const auto & col = data.columns.get(*column_name);
@ -74,7 +74,7 @@ void ColumnAliasesMatcher::visit(ASTIdentifier & node, ASTPtr & ast, Data & data
auto alias_expr = col.default_desc.expression->clone();
auto original_column = alias_expr->getColumnName();
// If expanded alias is used in array join, avoid expansion, otherwise the column will be mis-array joined
if (data.array_join_result_columns.count(original_column) || data.array_join_source_columns.count(original_column))
if (data.array_join_result_columns.contains(original_column) || data.array_join_source_columns.contains(original_column))
return;
ast = addTypeConversionToAST(std::move(alias_expr), col.type->getName(), data.columns.getAll(), data.context);
// We need to set back the original column name, or else the process of naming resolution will complain.

View File

@ -1027,7 +1027,7 @@ bool Context::hasScalar(const String & name) const
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars");
return scalars.count(name);
return scalars.contains(name);
}

View File

@ -100,7 +100,7 @@ void FindUsedFunctionsMatcher::visit(const ASTPtr & ast, Data & data)
void FindUsedFunctionsMatcher::visit(const ASTFunction & func, Data & data)
{
if (data.names.count(func.name) && !data.call_stack.empty())
if (data.names.contains(func.name) && !data.call_stack.empty())
{
String alias = func.tryGetAlias();
if (!alias.empty())
@ -136,7 +136,7 @@ void ConvertStringsToEnumMatcher::visit(ASTFunction & function_node, Data & data
/// We are not sure we could change the type of function result
/// cause it is present in other function as argument
if (data.used_functions.count(function_node.tryGetAlias()))
if (data.used_functions.contains(function_node.tryGetAlias()))
return;
if (function_node.name == "if")

View File

@ -89,7 +89,7 @@ bool isAllowedToRewriteCrossJoin(const ASTPtr & node, const Aliases & aliases)
auto idents = IdentifiersCollector::collect(node);
for (const auto * ident : idents)
{
if (ident->isShort() && aliases.count(ident->shortName()))
if (ident->isShort() && aliases.contains(ident->shortName()))
return false;
}
return true;

View File

@ -945,7 +945,7 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid)
std::unique_lock lock{tables_marked_dropped_mutex};
wait_table_finally_dropped.wait(lock, [&]()
{
return tables_marked_dropped_ids.count(uuid) == 0;
return !tables_marked_dropped_ids.contains(uuid);
});
}

View File

@ -76,7 +76,7 @@ static bool worthConvertingToLiteral(const Block & scalar)
{
const auto * scalar_type_name = scalar.safeGetByPosition(0).type->getFamilyName();
static const std::set<std::string_view> useless_literal_types = {"Array", "Tuple", "AggregateFunction", "Function", "Set", "LowCardinality"};
return !useless_literal_types.count(scalar_type_name);
return !useless_literal_types.contains(scalar_type_name);
}
static auto getQueryInterpreter(const ASTSubquery & subquery, ExecuteScalarSubqueriesMatcher::Data & data)

View File

@ -929,7 +929,7 @@ void ExpressionActionsChain::addStep(NameSet non_constant_inputs)
ColumnsWithTypeAndName columns = steps.back()->getResultColumns();
for (auto & column : columns)
if (column.column && isColumnConst(*column.column) && non_constant_inputs.count(column.name))
if (column.column && isColumnConst(*column.column) && non_constant_inputs.contains(column.name))
column.column = nullptr;
steps.push_back(std::make_unique<ExpressionActionsStep>(std::make_shared<ActionsDAG>(columns)));
@ -950,7 +950,7 @@ void ExpressionActionsChain::finalize()
const NameSet & additional_input = steps[i + 1]->additional_input;
for (const auto & it : steps[i + 1]->getRequiredColumns())
{
if (additional_input.count(it.name) == 0)
if (!additional_input.contains(it.name))
{
auto iter = required_output.find(it.name);
if (iter == required_output.end())
@ -1001,7 +1001,7 @@ ExpressionActionsChain::ArrayJoinStep::ArrayJoinStep(ArrayJoinActionPtr array_jo
{
required_columns.emplace_back(NameAndTypePair(column.name, column.type));
if (array_join->columns.count(column.name) > 0)
if (array_join->columns.contains(column.name))
{
const auto * array = typeid_cast<const DataTypeArray *>(column.type.get());
column.type = array->getNestedType();
@ -1018,12 +1018,12 @@ void ExpressionActionsChain::ArrayJoinStep::finalize(const NameSet & required_ou
for (const auto & column : result_columns)
{
if (array_join->columns.count(column.name) != 0 || required_output_.count(column.name) != 0)
if (!array_join->columns.contains(column.name) || required_output_.contains(column.name))
new_result_columns.emplace_back(column);
}
for (const auto & column : required_columns)
{
if (array_join->columns.count(column.name) != 0 || required_output_.count(column.name) != 0)
if (array_join->columns.contains(column.name) || required_output_.contains(column.name))
new_required_columns.emplace_back(column);
}
@ -1066,7 +1066,7 @@ void ExpressionActionsChain::JoinStep::finalize(const NameSet & required_output_
for (const auto & column : required_columns)
{
if (required_names.count(column.name) != 0)
if (required_names.contains(column.name))
new_required_columns.emplace_back(column);
}
@ -1076,7 +1076,7 @@ void ExpressionActionsChain::JoinStep::finalize(const NameSet & required_output_
for (const auto & column : result_columns)
{
if (required_names.count(column.name) != 0)
if (required_names.contains(column.name))
new_result_columns.emplace_back(column);
}

View File

@ -261,7 +261,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAGPtr & a
for (auto & column : actions->getResultColumns())
{
if (syntax->array_join_result_to_source.count(column.name))
if (syntax->array_join_result_to_source.contains(column.name))
{
new_columns_after_array_join.emplace_back(column.name, column.type);
added_columns.emplace(column.name);
@ -269,7 +269,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAGPtr & a
}
for (const auto & column : src_columns)
if (added_columns.count(column.name) == 0)
if (!added_columns.contains(column.name))
new_columns_after_array_join.emplace_back(column.name, column.type);
return new_columns_after_array_join;
@ -362,7 +362,7 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions)
NameAndTypePair key{column_name, node->result_type};
/// Aggregation keys are uniqued.
if (!unique_keys.count(key.name))
if (!unique_keys.contains(key.name))
{
unique_keys.insert(key.name);
aggregation_keys.push_back(key);
@ -1075,7 +1075,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere(
/// TODO: add sampling and final execution to common chain.
for (const auto & column : additional_required_columns)
{
if (required_source_columns.count(column))
if (required_source_columns.contains(column))
step.addRequiredOutput(column);
}
@ -1083,7 +1083,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere(
NameSet name_set(names.begin(), names.end());
for (const auto & column : sourceColumns())
if (required_source_columns.count(column.name) == 0)
if (!required_source_columns.contains(column.name))
name_set.erase(column.name);
Names required_output(name_set.begin(), name_set.end());
@ -1109,7 +1109,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere(
for (const auto & column : sourceColumns())
{
if (prewhere_input_names.count(column.name) == 0)
if (!prewhere_input_names.contains(column.name))
{
columns.emplace_back(column.type, column.name);
unused_source_columns.emplace(column.name);
@ -1350,7 +1350,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai
if (auto * ident = fn->as<ASTIdentifier>())
{
/// exclude columns from select expression - they are already available
if (select.count(ident->getColumnName()) == 0)
if (!select.contains(ident->getColumnName()))
step.addRequiredOutput(ident->getColumnName());
return;
}
@ -1381,7 +1381,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai
if (with_fill)
{
for (const auto & column : step.getResultColumns())
if (!order_by_keys.count(column.name))
if (!order_by_keys.contains(column.name))
non_constant_inputs.insert(column.name);
}
@ -1415,7 +1415,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
replaceForPositionalArguments(child, select_query, ASTSelectQuery::Expression::LIMIT_BY);
auto child_name = child->getColumnName();
if (!aggregated_names.count(child_name))
if (!aggregated_names.contains(child_name))
step.addRequiredOutput(child_name);
}
@ -1434,7 +1434,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendProjectResult(ExpressionActio
for (const auto & ast : asts)
{
String result_name = ast->getAliasOrColumnName();
if (required_result_columns.empty() || required_result_columns.count(result_name))
if (required_result_columns.empty() || required_result_columns.contains(result_name))
{
std::string source_name = ast->getColumnName();
@ -1522,7 +1522,7 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r
/// We will not delete the original columns.
for (const auto & column_name_type : sourceColumns())
{
if (name_set.count(column_name_type.name) == 0)
if (!name_set.contains(column_name_type.name))
{
result_names.push_back(column_name_type.name);
name_set.insert(column_name_type.name);

View File

@ -245,7 +245,7 @@ IdentifierSemantic::getIdentsMembership(ASTPtr ast, const std::vector<TableWithC
for (const auto * ident : idents)
{
/// short name clashes with alias, ambiguous
if (ident->isShort() && aliases.count(ident->shortName()))
if (ident->isShort() && aliases.contains(ident->shortName()))
return {};
const auto pos = getIdentMembership(*ident, tables);
if (!pos)

View File

@ -904,7 +904,7 @@ static InterpolateDescriptionPtr getInterpolateDescription(
col_set.insert(column.name);
}
for (const auto & column : result_block)
if (col_set.count(column.name) == 0)
if (!col_set.contains(column.name))
source_columns.emplace_back(column.name, column.type);
}
@ -1774,7 +1774,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
else
column_expr = std::make_shared<ASTIdentifier>(column);
if (required_columns_from_prewhere.count(column))
if (required_columns_from_prewhere.contains(column))
{
required_columns_from_prewhere_expr->children.emplace_back(std::move(column_expr));
@ -1802,7 +1802,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
if (prewhere_info->remove_prewhere_column && column.name == prewhere_info->prewhere_column_name)
continue;
if (columns_to_remove.count(column.name))
if (columns_to_remove.contains(column.name))
continue;
required_columns_all_expr->children.emplace_back(std::make_shared<ASTIdentifier>(column.name));
@ -1826,7 +1826,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
prewhere_info->remove_prewhere_column = false;
/// Remove columns which will be added by prewhere.
std::erase_if(required_columns, [&](const String & name) { return required_columns_after_prewhere_set.count(name) != 0; });
std::erase_if(required_columns, [&](const String & name) { return required_columns_after_prewhere_set.contains(name); });
if (prewhere_info)
{
@ -1849,7 +1849,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
/// Add physical columns required by prewhere actions.
for (const auto & column : required_columns_from_prewhere)
if (required_aliases_from_prewhere.count(column) == 0)
if (!required_aliases_from_prewhere.contains(column))
if (required_columns.end() == std::find(required_columns.begin(), required_columns.end(), column))
required_columns.push_back(column);
}

View File

@ -259,7 +259,7 @@ struct CollectColumnIdentifiersMatcher
void addIdentifier(const ASTIdentifier & ident)
{
for (const auto & aliases : ignored)
if (aliases.count(ident.name()))
if (aliases.contains(ident.name()))
return;
identifiers.push_back(const_cast<ASTIdentifier *>(&ident));
}
@ -324,7 +324,7 @@ struct CheckAliasDependencyVisitorData
void visit(ASTIdentifier & ident, ASTPtr &)
{
if (!dependency && aliases.count(ident.name()))
if (!dependency && aliases.contains(ident.name()))
dependency = &ident;
}
};
@ -470,7 +470,7 @@ void restoreName(ASTIdentifier & ident, const String & original_name, NameSet &
if (original_name.empty())
return;
if (!restored_names.count(original_name))
if (!restored_names.contains(original_name))
{
ident.setAlias(original_name);
restored_names.emplace(original_name);
@ -502,7 +502,7 @@ std::vector<TableNeededColumns> normalizeColumnNamesExtractNeeded(
for (ASTIdentifier * ident : identifiers)
{
bool got_alias = aliases.count(ident->name());
bool got_alias = aliases.contains(ident->name());
bool allow_ambiguous = got_alias; /// allow ambiguous column overridden by an alias
if (auto table_pos = IdentifierSemantic::chooseTableColumnMatch(*ident, tables, allow_ambiguous))
@ -520,13 +520,13 @@ std::vector<TableNeededColumns> normalizeColumnNamesExtractNeeded(
}
String short_name = ident->shortName();
String original_long_name;
if (public_identifiers.count(ident))
if (public_identifiers.contains(ident))
original_long_name = ident->name();
size_t count = countTablesWithColumn(tables, short_name);
/// isValidIdentifierBegin retuired to be consistent with TableJoin::deduplicateAndQualifyColumnNames
if (count > 1 || aliases.count(short_name) || !isValidIdentifierBegin(short_name.at(0)))
if (count > 1 || aliases.contains(short_name) || !isValidIdentifierBegin(short_name.at(0)))
{
const auto & table = tables[*table_pos];
IdentifierSemantic::setColumnLongName(*ident, table.table); /// table.column -> table_alias.column
@ -654,7 +654,7 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast
for (auto * ident : on_identifiers)
{
auto it = data.aliases.find(ident->name());
if (!on_aliases.count(ident->name()) && it != data.aliases.end())
if (!on_aliases.contains(ident->name()) && it != data.aliases.end())
{
auto alias_expression = it->second;
alias_pushdown[table_pos].push_back(alias_expression);
@ -684,7 +684,7 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast
/// Check same name in aliases, USING and ON sections. Cannot push down alias to ON through USING cause of name masquerading.
for (auto * ident : using_identifiers)
if (on_aliases.count(ident->name()))
if (on_aliases.contains(ident->name()))
throw Exception("Cannot rewrite JOINs. Alias '" + ident->name() + "' appears both in ON and USING", ErrorCodes::NOT_IMPLEMENTED);
using_identifiers.clear();

View File

@ -41,7 +41,7 @@ void LogicalExpressionsOptimizer::perform()
{
if (select_query == nullptr)
return;
if (visited_nodes.count(select_query))
if (visited_nodes.contains(select_query))
return;
size_t position = 0;
@ -96,7 +96,7 @@ void LogicalExpressionsOptimizer::reorderColumns()
void LogicalExpressionsOptimizer::collectDisjunctiveEqualityChains()
{
if (visited_nodes.count(select_query))
if (visited_nodes.contains(select_query))
return;
using Edge = std::pair<IAST *, IAST *>;
@ -161,7 +161,7 @@ void LogicalExpressionsOptimizer::collectDisjunctiveEqualityChains()
{
if (!child->as<ASTSelectQuery>())
{
if (!visited_nodes.count(child.get()))
if (!visited_nodes.contains(child.get()))
to_visit.push_back(Edge(to_node, &*child));
else
{

View File

@ -538,7 +538,7 @@ MergeJoin::MergeJoin(std::shared_ptr<TableJoin> table_join_, const Block & right
const NameSet required_right_keys = table_join->requiredRightKeys();
for (const auto & column : right_table_keys)
if (required_right_keys.count(column.name))
if (required_right_keys.contains(column.name))
right_columns_to_add.insert(ColumnWithTypeAndName{nullptr, column.type, column.name});
JoinCommon::createMissedColumns(right_columns_to_add);

View File

@ -171,7 +171,7 @@ ColumnDependencies getAllColumnDependencies(const StorageMetadataPtr & metadata_
new_updated_columns.clear();
for (const auto & dependency : new_dependencies)
{
if (!dependencies.count(dependency))
if (!dependencies.contains(dependency))
{
dependencies.insert(dependency);
if (!dependency.isReadOnly())
@ -360,7 +360,7 @@ static void validateUpdateColumns(
throw Exception("There is no column " + backQuote(column_name) + " in table", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
if (key_columns.count(column_name))
if (key_columns.contains(column_name))
throw Exception("Cannot UPDATE key column " + backQuote(column_name), ErrorCodes::CANNOT_UPDATE_COLUMN);
auto materialized_it = column_to_affected_materialized.find(column_name);
@ -368,7 +368,7 @@ static void validateUpdateColumns(
{
for (const String & materialized : materialized_it->second)
{
if (key_columns.count(materialized))
if (key_columns.contains(materialized))
throw Exception("Updated column " + backQuote(column_name) + " affects MATERIALIZED column "
+ backQuote(materialized) + ", which is a key column. Cannot UPDATE it.",
ErrorCodes::CANNOT_UPDATE_COLUMN);
@ -448,7 +448,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
auto syntax_result = TreeRewriter(context).analyze(query, all_columns);
for (const String & dependency : syntax_result->requiredSourceColumns())
{
if (updated_columns.count(dependency))
if (updated_columns.contains(dependency))
column_to_affected_materialized[dependency].push_back(column.name);
}
}
@ -1042,7 +1042,7 @@ bool MutationsInterpreter::Stage::isAffectingAllColumns(const Names & storage_co
{
/// is subset
for (const auto & storage_column : storage_columns)
if (!output_columns.count(storage_column))
if (!output_columns.contains(storage_column))
return false;
return true;

View File

@ -85,10 +85,10 @@ static NamesAndTypesList getColumnsList(const ASTExpressionList * columns_defini
{
if (const auto * options = declare_column->column_options->as<MySQLParser::ASTDeclareOptions>())
{
if (options->changes.count("is_null"))
if (options->changes.contains("is_null"))
is_nullable = options->changes.at("is_null")->as<ASTLiteral>()->value.safeGet<UInt64>();
if (options->changes.count("is_unsigned"))
if (options->changes.contains("is_unsigned"))
is_unsigned = options->changes.at("is_unsigned")->as<ASTLiteral>()->value.safeGet<UInt64>();
}
}
@ -161,7 +161,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col
String comment;
if (declare_column->column_options)
if (const auto * options = declare_column->column_options->as<MySQLParser::ASTDeclareOptions>())
if (options->changes.count("comment"))
if (options->changes.contains("comment"))
comment = options->changes.at("comment")->as<ASTLiteral>()->value.safeGet<String>();
ColumnDescription column_description(column_name_and_type->name, column_name_and_type->type);
@ -238,7 +238,7 @@ static std::tuple<NamesAndTypesList, NamesAndTypesList, NamesAndTypesList, NameS
if (const auto & function = index_expression->as<ASTFunction>())
{
/// column_name(int64 literal)
if (columns_name_set.count(function->name) && function->arguments->children.size() == 1)
if (columns_name_set.contains(function->name) && function->arguments->children.size() == 1)
{
const auto & prefix_limit = function->arguments->children[0]->as<ASTLiteral>();
@ -276,13 +276,13 @@ static std::tuple<NamesAndTypesList, NamesAndTypesList, NamesAndTypesList, NameS
{
if (const auto * options = declare_column->column_options->as<MySQLParser::ASTDeclareOptions>())
{
if (options->changes.count("unique_key"))
if (options->changes.contains("unique_key"))
unique_keys->arguments->children.emplace_back(std::make_shared<ASTIdentifier>(declare_column->name));
if (options->changes.count("primary_key"))
if (options->changes.contains("primary_key"))
primary_keys->arguments->children.emplace_back(std::make_shared<ASTIdentifier>(declare_column->name));
if (options->changes.count("auto_increment"))
if (options->changes.contains("auto_increment"))
increment_columns.emplace(declare_column->name);
}
}
@ -385,10 +385,10 @@ static ASTPtr getOrderByPolicy(
for (const auto & [name, type] : names_and_types)
{
if (order_by_columns_set.count(name))
if (order_by_columns_set.contains(name))
continue;
if (increment_columns.count(name))
if (increment_columns.contains(name))
{
order_by_columns_set.emplace(name);
increment_keys.emplace_back(NameAndTypePair(name, type));

View File

@ -108,7 +108,7 @@ void QueryAliasesMatcher<T>::visit(const ASTSubquery & const_subquery, const AST
{
alias = dummy_subquery_name_prefix + std::to_string(++subquery_index);
}
while (aliases.count(alias));
while (aliases.contains(alias));
subquery.setAlias(alias);
aliases[alias] = ast;
@ -126,7 +126,7 @@ void QueryAliasesMatcher<T>::visitOther(const ASTPtr & ast, Data & data)
String alias = ast->tryGetAlias();
if (!alias.empty())
{
if (aliases.count(alias) && ast->getTreeHash() != aliases[alias]->getTreeHash())
if (aliases.contains(alias) && ast->getTreeHash() != aliases[alias]->getTreeHash())
throw Exception(wrongAliasMessage(ast, aliases[alias], alias), ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS);
aliases[alias] = ast;

View File

@ -99,7 +99,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data)
String node_alias = ast->tryGetAlias();
if (current_asts.count(alias_node.get()) /// We have loop of multiple aliases
if (current_asts.contains(alias_node.get()) /// We have loop of multiple aliases
|| (node.name() == our_alias_or_name && our_name && node_alias == *our_name)) /// Our alias points to node.name, direct loop
throw Exception("Cyclic aliases", ErrorCodes::CYCLIC_ALIASES);
@ -235,7 +235,7 @@ void QueryNormalizer::visit(ASTPtr & ast, Data & data)
auto & finished_asts = data.finished_asts;
auto & current_asts = data.current_asts;
if (finished_asts.count(ast))
if (finished_asts.contains(ast))
{
ast = finished_asts[ast];
return;

View File

@ -13,7 +13,7 @@ bool RequiredSourceColumnsData::addColumnAliasIfAny(const IAST & ast)
if (alias.empty())
return false;
if (required_names.count(alias))
if (required_names.contains(alias))
masked_columns.insert(alias);
complex_aliases.insert(alias);
@ -62,10 +62,10 @@ NameSet RequiredSourceColumnsData::requiredColumns() const
String table_name = Nested::extractTableName(name);
/// Tech debt. There's its own logic for ARRAY JOIN columns.
if (array_join_columns.count(name) || array_join_columns.count(table_name))
if (array_join_columns.contains(name) || array_join_columns.contains(table_name))
continue;
if (!complex_aliases.count(name) || masked_columns.count(name))
if (!complex_aliases.contains(name) || masked_columns.contains(name))
required.insert(name);
}
return required;

View File

@ -135,7 +135,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const AS
{
if (auto * ident = fn->as<ASTIdentifier>())
{
if (select_columns.count(ident->getColumnName()) == 0)
if (!select_columns.contains(ident->getColumnName()))
data.addColumnIdentifier(*ident);
return;
}
@ -181,7 +181,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTIdentifier & node, const ASTPt
if (node.name().empty())
throw Exception("Expected not empty name", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
if (!data.private_aliases.count(node.name()))
if (!data.private_aliases.contains(node.name()))
data.addColumnIdentifier(node);
}

View File

@ -89,7 +89,7 @@ void RewriteAnyFunctionMatcher::visit(const ASTFunction & func, ASTPtr & ast, Da
return;
/// We have rewritten this function. Just unwrap its argument.
if (data.rewritten.count(ast.get()))
if (data.rewritten.contains(ast.get()))
{
func_arguments[0]->setAlias(func.alias);
ast = func_arguments[0];

View File

@ -168,7 +168,7 @@ void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_colu
for (auto & column : columns_from_joined_table)
{
if (joined_columns.count(column.name))
if (joined_columns.contains(column.name))
continue;
joined_columns.insert(column.name);
@ -178,7 +178,7 @@ void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_colu
/// Also qualify unusual column names - that does not look like identifiers.
if (left_table_columns.count(column.name) || !isValidIdentifierBegin(column.name.at(0)))
if (left_table_columns.contains(column.name) || !isValidIdentifierBegin(column.name.at(0)))
inserted.name = right_table_prefix + column.name;
original_names[inserted.name] = column.name;
@ -280,7 +280,7 @@ Block TableJoin::getRequiredRightKeys(const Block & right_table_keys, std::vecto
forAllKeys(clauses, [&](const auto & left_key_name, const auto & right_key_name)
{
if (required_keys.count(right_key_name) && !required_right_keys.has(right_key_name))
if (required_keys.contains(right_key_name) && !required_right_keys.has(right_key_name))
{
const auto & right_key = right_table_keys.getByName(right_key_name);
required_right_keys.insert(right_key);

View File

@ -97,7 +97,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &,
if (IdentifierSemantic::getColumnName(identifier))
{
String short_name = identifier.shortName();
bool allow_ambiguous = data.join_using_columns.count(short_name);
bool allow_ambiguous = data.join_using_columns.contains(short_name);
if (auto best_pos = IdentifierSemantic::chooseTable(identifier, data.tables, allow_ambiguous))
{
size_t table_pos = *best_pos;
@ -228,7 +228,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTExpressionList & node, const ASTPt
{
for (const auto & column : *cols)
{
if (first_table || !data.join_using_columns.count(column.name))
if (first_table || !data.join_using_columns.contains(column.name))
{
addIdentifier(columns, table.table, column.name);
}
@ -256,7 +256,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTExpressionList & node, const ASTPt
{
for (const auto & column : table.columns)
{
if (asterisk_pattern->isColumnMatching(column.name) && (first_table || !data.join_using_columns.count(column.name)))
if (asterisk_pattern->isColumnMatching(column.name) && (first_table || !data.join_using_columns.contains(column.name)))
{
addIdentifier(columns, table.table, column.name);
}

View File

@ -111,7 +111,7 @@ void optimizeGroupBy(ASTSelectQuery * select_query, ContextPtr context)
if (const auto * function = group_exprs[i]->as<ASTFunction>())
{
/// assert function is injective
if (possibly_injective_function_names.count(function->name))
if (possibly_injective_function_names.contains(function->name))
{
/// do not handle semantic errors here
if (function->arguments->children.size() < 2)
@ -233,7 +233,7 @@ void optimizeGroupByFunctionKeys(ASTSelectQuery * select_query)
/// filling the result
for (const auto & group_key : group_by_keys)
if (group_by_keys_data.key_names.count(group_key->getColumnName()))
if (group_by_keys_data.key_names.contains(group_key->getColumnName()))
modified.push_back(group_key);
/// modifying the input
@ -349,7 +349,7 @@ std::unordered_set<String> getDistinctNames(const ASTSelectQuery & select)
{
const String & name = identifier->shortName();
if (select.distinct || implicit_distinct.count(name))
if (select.distinct || implicit_distinct.contains(name))
{
if (alias.empty())
names.insert(name);
@ -392,7 +392,7 @@ void optimizeDuplicateDistinct(ASTSelectQuery & select)
return;
String name = identifier->shortName();
if (!distinct_names.count(name))
if (!distinct_names.contains(name))
return; /// Not a distinct column, keep DISTINCT for it.
selected_names.insert(name);

View File

@ -491,7 +491,7 @@ void removeUnneededColumnsFromSelectClause(ASTSelectQuery * select_query, const
{
for (auto it = children.begin(); it != children.end();)
{
if (remove_columns.count((*it)->as<ASTInterpolateElement>()->column))
if (remove_columns.contains((*it)->as<ASTInterpolateElement>()->column))
it = select_query->interpolate()->children.erase(it);
else
++it;
@ -536,7 +536,7 @@ void getArrayJoinedColumns(ASTPtr & query, TreeRewriterResult & result, const AS
String result_name = expr->getAliasOrColumnName();
/// This is an array.
if (!expr->as<ASTIdentifier>() || source_columns_set.count(source_name))
if (!expr->as<ASTIdentifier>() || source_columns_set.contains(source_name))
{
result.array_join_result_to_source[result_name] = source_name;
}
@ -917,10 +917,10 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
for (const auto & joined_column : analyzed_join->columnsFromJoinedTable())
{
const auto & name = joined_column.name;
if (available_columns.count(name))
if (available_columns.contains(name))
continue;
if (required.count(name))
if (required.contains(name))
{
/// Optimisation: do not add columns needed only in JOIN ON section.
if (columns_context.nameInclusion(name) > analyzed_join->rightKeyInclusion(name))
@ -939,7 +939,7 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
array_join_sources.insert(result_source.second);
for (const auto & column_name_type : source_columns)
if (array_join_sources.count(column_name_type.name))
if (array_join_sources.contains(column_name_type.name))
required.insert(column_name_type.name);
}
@ -1016,7 +1016,7 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
const String & column_name = it->name;
unknown_required_source_columns.erase(column_name);
if (!required.count(column_name))
if (!required.contains(column_name))
it = source_columns.erase(it);
else
++it;

View File

@ -59,7 +59,7 @@ ActionsDAGPtr addMissingDefaults(
String offsets_name = Nested::extractTableName(column.name);
const auto * array_type = typeid_cast<const DataTypeArray *>(column.type.get());
if (array_type && nested_groups.count(offsets_name))
if (array_type && nested_groups.contains(offsets_name))
{
const auto & nested_type = array_type->getNestedType();
ColumnPtr nested_column = nested_type->createColumnConstWithDefaultValue(0);

View File

@ -376,7 +376,7 @@ std::optional<Blocks> evaluateExpressionOverConstantCondition(const ASTPtr & nod
Field prev_value = assert_cast<const ColumnConst &>(*prev.column).getField();
Field curr_value = assert_cast<const ColumnConst &>(*elem.column).getField();
if (!always_false_map.count(elem.name))
if (!always_false_map.contains(elem.name))
{
always_false_map[elem.name] = prev_value != curr_value;
}

View File

@ -47,7 +47,7 @@ bool isSupportedAlterType(int type)
ASTAlterCommand::NO_TYPE,
};
return unsupported_alter_types.count(type) == 0;
return !unsupported_alter_types.contains(type);
}
@ -449,9 +449,9 @@ Strings DDLQueryStatusSource::getNewAndUpdate(const Strings & current_list_of_fi
Strings diff;
for (const String & host : current_list_of_finished_hosts)
{
if (!waiting_hosts.count(host))
if (!waiting_hosts.contains(host))
{
if (!ignoring_hosts.count(host))
if (!ignoring_hosts.contains(host))
{
ignoring_hosts.emplace(host);
LOG_INFO(log, "Unexpected host {} appeared in task {}", host, node_path);
@ -459,7 +459,7 @@ Strings DDLQueryStatusSource::getNewAndUpdate(const Strings & current_list_of_fi
continue;
}
if (!finished_hosts.count(host))
if (!finished_hosts.contains(host))
{
diff.emplace_back(host);
finished_hosts.emplace(host);

View File

@ -45,7 +45,7 @@ void addDefaultRequiredExpressionsRecursively(
if (is_column_in_query)
convert_null_to_default = null_as_default && block.findByName(required_column_name)->type->isNullable() && !required_column_type->isNullable();
if ((is_column_in_query && !convert_null_to_default) || added_columns.count(required_column_name))
if ((is_column_in_query && !convert_null_to_default) || added_columns.contains(required_column_name))
return;
auto column_default = columns.getDefault(required_column_name);

View File

@ -160,7 +160,7 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam
/// clickhouse-local creates DatabaseMemory as default database by itself
/// For clickhouse-server we need create default database
bool create_default_db_if_not_exists = !default_database_name.empty();
bool metadata_dir_for_default_db_already_exists = databases.count(default_database_name);
bool metadata_dir_for_default_db_already_exists = databases.contains(default_database_name);
if (create_default_db_if_not_exists && !metadata_dir_for_default_db_already_exists)
databases.emplace(default_database_name, std::filesystem::path(path) / escapeForFileName(default_database_name));

View File

@ -100,7 +100,7 @@ void ASTProjectionSelectQuery::setExpression(Expression expr, ASTPtr && ast)
else
children[it->second] = ast;
}
else if (positions.count(expr))
else if (positions.contains(expr))
{
size_t pos = positions[expr];
children.erase(children.begin() + pos);
@ -113,7 +113,7 @@ void ASTProjectionSelectQuery::setExpression(Expression expr, ASTPtr && ast)
ASTPtr & ASTProjectionSelectQuery::getExpression(Expression expr)
{
if (!positions.count(expr))
if (!positions.contains(expr))
throw Exception("Get expression before set", ErrorCodes::LOGICAL_ERROR);
return children[positions[expr]];
}

View File

@ -405,7 +405,7 @@ void ASTSelectQuery::setExpression(Expression expr, ASTPtr && ast)
else
children[it->second] = ast;
}
else if (positions.count(expr))
else if (positions.contains(expr))
{
size_t pos = positions[expr];
children.erase(children.begin() + pos);
@ -418,7 +418,7 @@ void ASTSelectQuery::setExpression(Expression expr, ASTPtr && ast)
ASTPtr & ASTSelectQuery::getExpression(Expression expr)
{
if (!positions.count(expr))
if (!positions.contains(expr))
throw Exception("Get expression before set", ErrorCodes::LOGICAL_ERROR);
return children[positions[expr]];
}

View File

@ -122,7 +122,7 @@ void ASTTableOverrideList::removeTableOverride(const String & name)
bool ASTTableOverrideList::hasOverride(const String & name) const
{
return positions.count(name);
return positions.contains(name);
}
void ASTTableOverrideList::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const

View File

@ -637,7 +637,7 @@ void obfuscateIdentifier(std::string_view src, WriteBuffer & result, WordMap & o
{
std::string_view word(word_begin, src_pos - word_begin);
if (keep_words.count(word))
if (keep_words.contains(word))
{
result.write(word.data(), word.size());
}
@ -889,7 +889,7 @@ void obfuscateQueries(
std::string whole_token_uppercase(whole_token);
Poco::toUpperInPlace(whole_token_uppercase);
if (keywords.count(whole_token_uppercase)
if (keywords.contains(whole_token_uppercase)
|| known_identifier_func(whole_token))
{
/// Keep keywords as is.

View File

@ -127,7 +127,7 @@ bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
while (nodes.size() < num_processors)
{
auto * processor = processors[nodes.size()].get();
if (processors_map.count(processor))
if (processors_map.contains(processor))
throw Exception("Processor " + processor->getName() + " was already added to pipeline.",
ErrorCodes::LOGICAL_ERROR);

View File

@ -15,7 +15,7 @@ static bool checkColumnsAlreadyDistinct(const Names & columns, const NameSet & d
/// Now we need to check that distinct_names is a subset of columns.
std::unordered_set<std::string_view> columns_set(columns.begin(), columns.end());
for (const auto & name : distinct_names)
if (columns_set.count(name) == 0)
if (!columns_set.contains(name))
return false;
return true;

View File

@ -171,7 +171,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
Names allowed_inputs;
for (const auto & column : array_join_header)
if (keys.count(column.name) == 0)
if (!keys.contains(column.name))
allowed_inputs.push_back(column.name);
// for (const auto & name : allowed_inputs)

View File

@ -187,7 +187,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk)
{
const String & column_name = column_def.name;
if (column_defaults.count(column_name) == 0 || !res.has(column_name))
if (!column_defaults.contains(column_name) || !res.has(column_name))
continue;
size_t block_column_position = res.getPositionByName(column_name);

View File

@ -26,7 +26,7 @@ Block FillingTransform::transformHeader(Block header, const SortDescription & so
/// Columns which are not from sorting key may not be constant anymore.
for (auto & column : header)
if (column.column && isColumnConst(*column.column) && !sort_keys.count(column.name))
if (column.column && isColumnConst(*column.column) && !sort_keys.contains(column.name))
column.column = column.type->createColumn();
return header;
@ -155,7 +155,7 @@ FillingTransform::FillingTransform(
std::vector<bool> is_fill_column(header_.columns());
for (size_t i = 0, size = sort_description.size(); i < size; ++i)
{
if (interpolate_description && interpolate_description->result_columns_set.count(sort_description[i].column_name))
if (interpolate_description && interpolate_description->result_columns_set.contains(sort_description[i].column_name))
throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
"Column '{}' is participating in ORDER BY ... WITH FILL expression and can't be INTERPOLATE output",
sort_description[i].column_name);
@ -193,7 +193,7 @@ FillingTransform::FillingTransform(
p != interpolate_description->required_columns_map.end())
input_positions.emplace_back(idx, p->second);
if (!is_fill_column[idx] && !(interpolate_description && interpolate_description->result_columns_set.count(column.name)))
if (!is_fill_column[idx] && !(interpolate_description && interpolate_description->result_columns_set.contains(column.name)))
other_column_positions.push_back(idx);
++idx;

View File

@ -192,7 +192,7 @@ Pipe::Pipe(Processors processors_) : processors(std::move(processors_))
" has not connected input port", ErrorCodes::LOGICAL_ERROR);
const auto * connected_processor = &port.getOutputPort().getProcessor();
if (set.count(connected_processor) == 0)
if (!set.contains(connected_processor))
throw Exception("Cannot create Pipe because processor " + processor->getName() +
" has input port which is connected with unknown processor " +
connected_processor->getName(), ErrorCodes::LOGICAL_ERROR);
@ -207,7 +207,7 @@ Pipe::Pipe(Processors processors_) : processors(std::move(processors_))
}
const auto * connected_processor = &port.getInputPort().getProcessor();
if (set.count(connected_processor) == 0)
if (!set.contains(connected_processor))
throw Exception("Cannot create Pipe because processor " + processor->getName() +
" has output port which is connected with unknown processor " +
connected_processor->getName(), ErrorCodes::LOGICAL_ERROR);
@ -791,7 +791,7 @@ void Pipe::transform(const Transformer & transformer)
" has not connected input port", ErrorCodes::LOGICAL_ERROR);
const auto * connected_processor = &port.getOutputPort().getProcessor();
if (set.count(connected_processor) == 0)
if (!set.contains(connected_processor))
throw Exception("Transformation of Pipe is not valid because processor " + processor->getName() +
" has input port which is connected with unknown processor " +
connected_processor->getName(), ErrorCodes::LOGICAL_ERROR);
@ -806,7 +806,7 @@ void Pipe::transform(const Transformer & transformer)
}
const auto * connected_processor = &port.getInputPort().getProcessor();
if (set.count(connected_processor) == 0)
if (!set.contains(connected_processor))
throw Exception("Transformation of Pipe is not valid because processor " + processor->getName() +
" has output port which is connected with unknown processor " +
connected_processor->getName(), ErrorCodes::LOGICAL_ERROR);

View File

@ -669,7 +669,7 @@ void HTTPHandler::processQuery(
if (name.empty())
return true;
if (reserved_param_names.count(name))
if (reserved_param_names.contains(name))
return true;
for (const String & suffix : reserved_param_suffixes)
@ -1063,7 +1063,7 @@ PredefinedQueryHandler::PredefinedQueryHandler(
bool PredefinedQueryHandler::customizeQueryParam(ContextMutablePtr context, const std::string & key, const std::string & value)
{
if (receive_params.count(key))
if (receive_params.contains(key))
{
context->setQueryParameter(key, value);
return true;

View File

@ -1070,7 +1070,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, ContextPt
continue;
}
if (renamed_columns.count(column_name))
if (renamed_columns.contains(column_name))
throw Exception{"Cannot rename and modify the same column " + backQuote(column_name) + " in a single ALTER query",
ErrorCodes::NOT_IMPLEMENTED};
@ -1232,7 +1232,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, ContextPt
throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists",
ErrorCodes::DUPLICATE_COLUMN};
if (modified_columns.count(column_name))
if (modified_columns.contains(column_name))
throw Exception{"Cannot rename and modify the same column " + backQuote(column_name) + " in a single ALTER query",
ErrorCodes::NOT_IMPLEMENTED};

View File

@ -114,7 +114,7 @@ std::unique_ptr<ComparisonGraph> ConstraintsDescription::buildGraph() const
CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()};
pushNotIn(atom);
auto * func = atom.ast->as<ASTFunction>();
if (func && relations.count(func->name))
if (func && relations.contains(func->name))
{
assert(!atom.negative);
constraints_for_graph.push_back(atom.ast);

View File

@ -1021,7 +1021,7 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map
UInt64 file_idx = file.first;
const String & file_path = file.second;
if (file_indices_to_skip.count(file_idx))
if (file_indices_to_skip.contains(file_idx))
continue;
size_t total_rows = 0;

View File

@ -304,7 +304,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
writePODBinary(hashing_out.getHash(), out);
if (!file_names_without_checksums.count(file_name))
if (!file_names_without_checksums.contains(file_name))
data_checksums.addFile(file_name, hashing_out.count(), hashing_out.getHash());
}

View File

@ -1831,22 +1831,22 @@ void IMergeTreeDataPart::checkConsistencyBase() const
const auto & partition_key = metadata_snapshot->getPartitionKey();
if (!checksums.empty())
{
if (!pk.column_names.empty() && !checksums.files.count("primary.idx"))
if (!pk.column_names.empty() && !checksums.files.contains("primary.idx"))
throw Exception("No checksum for primary.idx", ErrorCodes::NO_FILE_IN_DATA_PART);
if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING)
{
if (!checksums.files.count("count.txt"))
if (!checksums.files.contains("count.txt"))
throw Exception("No checksum for count.txt", ErrorCodes::NO_FILE_IN_DATA_PART);
if (metadata_snapshot->hasPartitionKey() && !checksums.files.count("partition.dat"))
if (metadata_snapshot->hasPartitionKey() && !checksums.files.contains("partition.dat"))
throw Exception("No checksum for partition.dat", ErrorCodes::NO_FILE_IN_DATA_PART);
if (!isEmpty() && !parent_part)
{
for (const String & col_name : storage.getMinMaxColumnsNames(partition_key))
{
if (!checksums.files.count("minmax_" + escapeForFileName(col_name) + ".idx"))
if (!checksums.files.contains("minmax_" + escapeForFileName(col_name) + ".idx"))
throw Exception("No minmax idx file checksum for column " + col_name, ErrorCodes::NO_FILE_IN_DATA_PART);
}
}
@ -1980,26 +1980,26 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada
for (const auto & [column, desc] : metadata_snapshot->getColumnTTLs())
{
/// Part has this column, but we haven't calculated TTL for it
if (!ttl_infos.columns_ttl.count(column) && getColumns().contains(column))
if (!ttl_infos.columns_ttl.contains(column) && getColumns().contains(column))
return false;
}
for (const auto & move_desc : metadata_snapshot->getMoveTTLs())
{
/// Move TTL is not calculated
if (!ttl_infos.moves_ttl.count(move_desc.result_column))
if (!ttl_infos.moves_ttl.contains(move_desc.result_column))
return false;
}
for (const auto & group_by_desc : metadata_snapshot->getGroupByTTLs())
{
if (!ttl_infos.group_by_ttl.count(group_by_desc.result_column))
if (!ttl_infos.group_by_ttl.contains(group_by_desc.result_column))
return false;
}
for (const auto & rows_where_desc : metadata_snapshot->getRowsWhereTTLs())
{
if (!ttl_infos.rows_where_ttl.count(rows_where_desc.result_column))
if (!ttl_infos.rows_where_ttl.contains(rows_where_desc.result_column))
return false;
}
@ -2047,7 +2047,7 @@ IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const St
String file_name = std::filesystem::path(file_path).filename();
const auto filenames_without_checksums = getFileNamesWithoutChecksums();
auto it = checksums.files.find(file_name);
if (filenames_without_checksums.count(file_name) == 0 && it != checksums.files.end())
if (!filenames_without_checksums.contains(file_name) && it != checksums.files.end())
{
return it->second.file_hash;
}

View File

@ -78,7 +78,7 @@ NameSet IMergedBlockOutputStream::removeEmptyColumnsFromPart(
/// Remove files on disk and checksums
for (const String & removed_file : remove_files)
{
if (checksums.files.count(removed_file))
if (checksums.files.contains(removed_file))
checksums.files.erase(removed_file);
}

View File

@ -449,7 +449,7 @@ KeyCondition::KeyCondition(
for (size_t i = 0, size = key_column_names.size(); i < size; ++i)
{
const auto & name = key_column_names[i];
if (!key_columns.count(name))
if (!key_columns.contains(name))
key_columns[name] = i;
}
@ -487,7 +487,7 @@ KeyCondition::KeyCondition(
bool KeyCondition::addCondition(const String & column, const Range & range)
{
if (!key_columns.count(column))
if (!key_columns.contains(column))
return false;
rpn.emplace_back(RPNElement::FUNCTION_IN_RANGE, key_columns[column], range);
rpn.emplace_back(RPNElement::FUNCTION_AND);
@ -776,10 +776,10 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions(
{
String expr_name = node->getColumnNameWithoutAlias();
if (array_joined_columns.count(expr_name))
if (array_joined_columns.contains(expr_name))
return false;
if (key_subexpr_names.count(expr_name) == 0)
if (!key_subexpr_names.contains(expr_name))
return false;
if (out_value.isNull())
@ -807,10 +807,10 @@ bool KeyCondition::canConstantBeWrappedByFunctions(
{
String expr_name = ast->getColumnNameWithoutAlias();
if (array_joined_columns.count(expr_name))
if (array_joined_columns.contains(expr_name))
return false;
if (key_subexpr_names.count(expr_name) == 0)
if (!key_subexpr_names.contains(expr_name))
{
/// Let's check another one case.
/// If our storage was created with moduloLegacy in partition key,
@ -825,7 +825,7 @@ bool KeyCondition::canConstantBeWrappedByFunctions(
KeyDescription::moduloToModuloLegacyRecursive(adjusted_ast);
expr_name = adjusted_ast->getColumnName();
if (key_subexpr_names.count(expr_name) == 0)
if (!key_subexpr_names.contains(expr_name))
return false;
}
@ -1080,7 +1080,7 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl(
// Key columns should use canonical names for index analysis
String name = node->getColumnNameWithoutAlias();
if (array_joined_columns.count(name))
if (array_joined_columns.contains(name))
return false;
auto it = key_columns.find(name);

View File

@ -75,7 +75,7 @@ static void extractMergingAndGatheringColumns(
for (const auto & column : storage_columns)
{
if (key_columns.count(column.name))
if (key_columns.contains(column.name))
{
merging_columns.emplace_back(column);
merging_column_names.emplace_back(column.name);

View File

@ -52,7 +52,7 @@ bool injectRequiredColumnsRecursively(
if (part->hasColumnFiles(column_in_part))
{
/// ensure each column is added only once
if (required_columns.count(column_name) == 0)
if (!required_columns.contains(column_name))
{
columns.emplace_back(column_name);
required_columns.emplace(column_name);
@ -162,7 +162,7 @@ void MergeTreeBlockSizePredictor::initialize(const Block & sample_block, const C
const ColumnPtr & column_data = from_update ? columns[pos]
: column_with_type_and_name.column;
if (!from_update && !names_set.count(column_name))
if (!from_update && !names_set.contains(column_name))
continue;
/// At least PREWHERE filter column might be const.
@ -286,7 +286,7 @@ MergeTreeReadTaskColumns getReadTaskColumns(
for (auto & name : prewhere_info->row_level_filter->getRequiredColumnsNames())
{
if (names.count(name) == 0)
if (!names.contains(name))
pre_column_names.push_back(name);
}
}
@ -303,7 +303,7 @@ MergeTreeReadTaskColumns getReadTaskColumns(
Names post_column_names;
for (const auto & name : column_names)
if (!pre_name_set.count(name))
if (!pre_name_set.contains(name))
post_column_names.push_back(name);
column_names = post_column_names;

View File

@ -476,7 +476,7 @@ void MergeTreeData::checkProperties(
"added to the sorting key. You can add expressions that use only the newly added columns",
ErrorCodes::BAD_ARGUMENTS);
if (new_metadata.columns.getDefaults().count(col))
if (new_metadata.columns.getDefaults().contains(col))
throw Exception("Newly added column " + backQuoteIfNeed(col) + " has a default expression, so adding "
"expressions that use it to the sorting key is forbidden",
ErrorCodes::BAD_ARGUMENTS);
@ -655,7 +655,7 @@ void MergeTreeData::checkTTLExpressions(const StorageInMemoryMetadata & new_meta
for (const auto & [name, ttl_description] : new_column_ttls)
{
if (columns_ttl_forbidden.count(name))
if (columns_ttl_forbidden.contains(name))
throw Exception("Trying to set TTL for key column " + name, ErrorCodes::ILLEGAL_COLUMN);
}
}
@ -1219,7 +1219,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
if (disk->isBroken())
continue;
if (defined_disk_names.count(disk_name) == 0 && disk->exists(relative_data_path))
if (!defined_disk_names.contains(disk_name) && disk->exists(relative_data_path))
{
for (const auto it = disk->iterateDirectory(relative_data_path); it->isValid(); it->next())
{
@ -2187,7 +2187,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
}
if (command.type == AlterCommand::RENAME_COLUMN)
{
if (columns_in_keys.count(command.column_name))
if (columns_in_keys.contains(command.column_name))
{
throw Exception(
"Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression",
@ -2196,7 +2196,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
}
else if (command.type == AlterCommand::DROP_COLUMN)
{
if (columns_in_keys.count(command.column_name))
if (columns_in_keys.contains(command.column_name))
{
throw Exception(
"Trying to ALTER DROP key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression",
@ -2229,13 +2229,13 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
{
/// This alter will override data on disk. Let's check that it doesn't
/// modify immutable column.
if (columns_alter_type_forbidden.count(command.column_name))
if (columns_alter_type_forbidden.contains(command.column_name))
throw Exception("ALTER of key column " + backQuoteIfNeed(command.column_name) + " is forbidden",
ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
if (command.type == AlterCommand::MODIFY_COLUMN)
{
if (columns_alter_type_check_safe_for_partition.count(command.column_name))
if (columns_alter_type_check_safe_for_partition.contains(command.column_name))
{
auto it = old_types.find(command.column_name);
@ -2247,7 +2247,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
}
if (columns_alter_type_metadata_only.count(command.column_name))
if (columns_alter_type_metadata_only.contains(command.column_name))
{
auto it = old_types.find(command.column_name);
assert(it != old_types.end());
@ -2337,7 +2337,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
bool at_least_one_column_rest = false;
for (const auto & column : part->getColumns())
{
if (!dropped_columns.count(column.name))
if (!dropped_columns.contains(column.name))
{
at_least_one_column_rest = true;
break;
@ -6103,7 +6103,7 @@ MergeTreeData::CurrentlyMovingPartsTagger::~CurrentlyMovingPartsTagger()
for (const auto & moving_part : parts_to_move)
{
/// Something went completely wrong
if (!data.currently_moving_parts.count(moving_part.part))
if (!data.currently_moving_parts.contains(moving_part.part))
std::terminate();
data.currently_moving_parts.erase(moving_part.part);
}
@ -6159,7 +6159,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::selectPartsForMove()
*reason = "part already assigned to background operation.";
return false;
}
if (currently_moving_parts.count(part))
if (currently_moving_parts.contains(part))
{
*reason = "part is already moving.";
return false;
@ -6192,7 +6192,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::checkPartsForMove(co
"Move is not possible: " + fullPath(reserved_disk, relative_data_path + part->name) + " already exists",
ErrorCodes::DIRECTORY_ALREADY_EXISTS);
if (currently_moving_parts.count(part) || partIsAssignedToBackgroundOperation(part))
if (currently_moving_parts.contains(part) || partIsAssignedToBackgroundOperation(part))
throw Exception(
"Cannot move part '" + part->name + "' because it's participating in background process",
ErrorCodes::PART_IS_TEMPORARILY_LOCKED);
@ -6561,7 +6561,7 @@ ReservationPtr MergeTreeData::balancedReservation(
for (const auto & part : covered_parts)
{
if (currently_submerging_big_parts.count(part))
if (currently_submerging_big_parts.contains(part))
LOG_WARNING(log, "currently_submerging_big_parts contains duplicates. JBOD might lose balance");
else
currently_submerging_big_parts.insert(part);
@ -6632,7 +6632,7 @@ CurrentlySubmergingEmergingTagger::~CurrentlySubmergingEmergingTagger()
for (const auto & part : submerging_parts)
{
if (!storage.currently_submerging_big_parts.count(part))
if (!storage.currently_submerging_big_parts.contains(part))
{
LOG_ERROR(log, "currently_submerging_big_parts doesn't contain part {} to erase. This is a bug", part->name);
assert(false);

View File

@ -693,7 +693,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
/// from disk we just don't read dropped columns
for (const auto & column : part->getColumns())
{
if (!mutated_columns.count(column.name))
if (!mutated_columns.contains(column.name))
for_interpreter.emplace_back(
MutationCommand{.type = MutationCommand::Type::READ_COLUMN, .column_name = column.name, .data_type = column.type});
}
@ -771,7 +771,7 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart(
SerializationInfoByName new_serialization_infos;
for (const auto & [name, info] : serialization_infos)
{
if (removed_columns.count(name))
if (removed_columns.contains(name))
continue;
auto it = renamed_columns_from_to.find(name);
@ -799,13 +799,13 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart(
}
else
{
if (!source_columns_name_set.count(it->name))
if (!source_columns_name_set.contains(it->name))
{
/// Source part doesn't have this column, but some other column
/// was renamed to its name.
auto renamed_it = renamed_columns_to_from.find(it->name);
if (renamed_it != renamed_columns_to_from.end()
&& source_columns_name_set.count(renamed_it->second))
&& source_columns_name_set.contains(renamed_it->second))
++it;
else
it = storage_columns.erase(it);
@ -813,19 +813,19 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart(
else
{
/// Check that this column was renamed to some other name
bool was_renamed = renamed_columns_from_to.count(it->name);
bool was_removed = removed_columns.count(it->name);
bool was_renamed = renamed_columns_from_to.contains(it->name);
bool was_removed = removed_columns.contains(it->name);
/// If we want to rename this column to some other name, then its
/// previous version should be dropped or removed
if (renamed_columns_to_from.count(it->name) && !was_renamed && !was_removed)
if (renamed_columns_to_from.contains(it->name) && !was_renamed && !was_removed)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect mutation commands, trying to rename column {} to {}, but part {} already has column {}", renamed_columns_to_from[it->name], it->name, source_part->name, it->name);
/// Column was renamed and no other column was renamed to its name,
/// or the column is dropped.
if (!renamed_columns_to_from.count(it->name) && (was_renamed || was_removed))
if (!renamed_columns_to_from.contains(it->name) && (was_renamed || was_removed))
it = storage_columns.erase(it);
else
++it;

View File

@ -63,7 +63,7 @@ void MergeTreeDataPartChecksums::checkEqual(const MergeTreeDataPartChecksums & r
{
const String & name = it.first;
if (!files.count(name))
if (!files.contains(name))
throw Exception("Unexpected file " + name + " in data part", ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART);
}

View File

@ -143,14 +143,14 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons
if (!checksums.empty())
{
/// count.txt should be present even in non custom-partitioned parts
if (!checksums.files.count("count.txt"))
if (!checksums.files.contains("count.txt"))
throw Exception("No checksum for count.txt", ErrorCodes::NO_FILE_IN_DATA_PART);
if (require_part_metadata)
{
if (!checksums.files.count(mrk_file_name))
if (!checksums.files.contains(mrk_file_name))
throw Exception("No marks file checksum for column in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART);
if (!checksums.files.count(DATA_FILE_NAME_WITH_EXTENSION))
if (!checksums.files.contains(DATA_FILE_NAME_WITH_EXTENSION))
throw Exception("No data file checksum for in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART);
}
}

View File

@ -167,10 +167,10 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const
String file_name = ISerialization::getFileNameForStream(name_type, substream_path);
String mrk_file_name = file_name + index_granularity_info.marks_file_extension;
String bin_file_name = file_name + DATA_FILE_EXTENSION;
if (!checksums.files.count(mrk_file_name))
if (!checksums.files.contains(mrk_file_name))
throw Exception("No " + mrk_file_name + " file checksum for column " + name_type.name + " in part " + fullPath(volume->getDisk(), path),
ErrorCodes::NO_FILE_IN_DATA_PART);
if (!checksums.files.count(bin_file_name))
if (!checksums.files.contains(bin_file_name))
throw Exception("No " + bin_file_name + " file checksum for column " + name_type.name + " in part " + fullPath(volume->getDisk(), path),
ErrorCodes::NO_FILE_IN_DATA_PART);
});

View File

@ -47,7 +47,7 @@ void MergeTreeDataPartWriterCompact::addStreams(const NameAndTypePair & column,
String stream_name = ISerialization::getFileNameForStream(column, substream_path);
/// Shared offsets for Nested type.
if (compressed_streams.count(stream_name))
if (compressed_streams.contains(stream_name))
return;
const auto & subtype = substream_path.back().data.type;

View File

@ -97,7 +97,7 @@ void MergeTreeDataPartWriterWide::addStreams(
String stream_name = ISerialization::getFileNameForStream(column, substream_path);
/// Shared offsets for Nested type.
if (column_streams.count(stream_name))
if (column_streams.contains(stream_name))
return;
const auto & subtype = substream_path.back().data.type;
@ -134,7 +134,7 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett
String stream_name = ISerialization::getFileNameForStream(column, substream_path);
/// Don't write offsets more than one time for Nested type.
if (is_offsets && offset_columns.count(stream_name))
if (is_offsets && offset_columns.contains(stream_name))
return nullptr;
return &column_streams.at(stream_name)->compressed;
@ -284,7 +284,7 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn(
String stream_name = ISerialization::getFileNameForStream(column, substream_path);
/// Don't write offsets more than one time for Nested type.
if (is_offsets && offset_columns.count(stream_name))
if (is_offsets && offset_columns.contains(stream_name))
return;
Stream & stream = *column_streams[stream_name];
@ -323,7 +323,7 @@ void MergeTreeDataPartWriterWide::writeSingleGranule(
String stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path);
/// Don't write offsets more than one time for Nested type.
if (is_offsets && offset_columns.count(stream_name))
if (is_offsets && offset_columns.contains(stream_name))
return;
column_streams[stream_name]->compressed.nextIfAtEnd();
@ -363,7 +363,7 @@ void MergeTreeDataPartWriterWide::writeColumn(
if (granule.mark_on_start)
{
if (last_non_written_marks.count(name))
if (last_non_written_marks.contains(name))
throw Exception(ErrorCodes::LOGICAL_ERROR, "We have to add new mark for column, but already have non written mark. Current mark {}, total marks {}, offset {}", getCurrentMark(), index_granularity.getMarksCount(), rows_written_in_last_mark);
last_non_written_marks[name] = getCurrentMarksForColumn(name_and_type, offset_columns, serialize_settings.path);
}

View File

@ -862,7 +862,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd
for (const auto & index_name : forced_indices)
{
if (!useful_indices_names.count(index_name))
if (!useful_indices_names.contains(index_name))
{
throw Exception(
ErrorCodes::INDEX_NOT_USED,

Some files were not shown because too many files have changed in this diff Show More