diff --git a/.clang-tidy b/.clang-tidy index 6fd67876923..fad1486d885 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -37,6 +37,7 @@ Checks: '-*, readability-avoid-const-params-in-decls, readability-const-return-type, + readability-container-contains, readability-container-size-empty, readability-convert-member-functions-to-static, readability-delete-null-pointer, diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 4d491a06795..9adb3b737f3 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -145,7 +145,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts, for (const String & partition_name : existing_partitions_names) { - if (!task_table.enabled_partitions_set.count(partition_name)) + if (!task_table.enabled_partitions_set.contains(partition_name)) { LOG_INFO(log, "Partition {} will not be processed, since it is not in enabled_partitions of {}", partition_name, task_table.table_id); } @@ -933,7 +933,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab /// Process each partition that is present in cluster for (const String & partition_name : task_table.ordered_partition_names) { - if (!task_table.cluster_partitions.count(partition_name)) + if (!task_table.cluster_partitions.contains(partition_name)) throw Exception("There are no expected partition " + partition_name + ". It is a bug", ErrorCodes::LOGICAL_ERROR); ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name]; @@ -953,10 +953,10 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab for (const TaskShardPtr & shard : task_table.all_shards) { /// Does shard have a node with current partition? - if (shard->partition_tasks.count(partition_name) == 0) + if (!shard->partition_tasks.contains(partition_name)) { /// If not, did we check existence of that partition previously? 
- if (shard->checked_partitions.count(partition_name) == 0) + if (!shard->checked_partitions.contains(partition_name)) { auto check_shard_has_partition = [&] () { return checkShardHasPartition(timeouts, *shard, partition_name); }; bool has_partition = retry(check_shard_has_partition); diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index d5206da00f5..5eed58b15d0 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -145,7 +145,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) || TableFunctionFactory::instance().isTableFunctionName(what) || FormatFactory::instance().isOutputFormat(what) || FormatFactory::instance().isInputFormat(what) - || additional_names.count(what); + || additional_names.contains(what); }; WriteBufferFromFileDescriptor out(STDOUT_FILENO); diff --git a/programs/git-import/git-import.cpp b/programs/git-import/git-import.cpp index 18a9bb2627c..54f5d7f75ea 100644 --- a/programs/git-import/git-import.cpp +++ b/programs/git-import/git-import.cpp @@ -715,7 +715,7 @@ void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & fil uint32_t this_line_in_prev_commit = line_change.hunk_start_line_number_old + (line_change.line_number_new - line_change.hunk_start_line_number_new); - if (deleted_lines.count(this_line_in_prev_commit)) + if (deleted_lines.contains(this_line_in_prev_commit)) { const auto & prev_commit = deleted_lines[this_line_in_prev_commit]; if (prev_commit.time <= commit.time) @@ -1150,7 +1150,7 @@ void processLog(const Options & options) readString(hash, in); assertChar('\n', in); - if (!options.skip_commits.count(hash)) + if (!options.skip_commits.contains(hash)) hashes.emplace_back(std::move(hash)); } diff --git a/programs/library-bridge/SharedLibraryHandlerFactory.cpp b/programs/library-bridge/SharedLibraryHandlerFactory.cpp index a9358ca552a..2abc208e502 100644 --- a/programs/library-bridge/SharedLibraryHandlerFactory.cpp +++ 
b/programs/library-bridge/SharedLibraryHandlerFactory.cpp @@ -24,7 +24,7 @@ void SharedLibraryHandlerFactory::create( const std::vector & attributes_names) { std::lock_guard lock(mutex); - if (!library_handlers.count(dictionary_id)) + if (!library_handlers.contains(dictionary_id)) library_handlers.emplace(std::make_pair(dictionary_id, std::make_shared(library_path, library_settings, sample_block, attributes_names))); else LOG_WARNING(&Poco::Logger::get("SharedLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id); diff --git a/src/Access/Common/AccessFlags.cpp b/src/Access/Common/AccessFlags.cpp index e7dddbdcba2..82e1cbfb26b 100644 --- a/src/Access/Common/AccessFlags.cpp +++ b/src/Access/Common/AccessFlags.cpp @@ -178,7 +178,7 @@ namespace } else { - if (nodes.count(keyword)) + if (nodes.contains(keyword)) throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR); node = std::make_unique(keyword, node_type); nodes[node->keyword] = node.get(); @@ -204,7 +204,7 @@ namespace { auto parent_node = std::make_unique(parent_keyword); it_parent = nodes.emplace(parent_node->keyword, parent_node.get()).first; - assert(!owned_nodes.count(parent_node->keyword)); + assert(!owned_nodes.contains(parent_node->keyword)); std::string_view parent_keyword_as_string_view = parent_node->keyword; owned_nodes[parent_keyword_as_string_view] = std::move(parent_node); } @@ -224,9 +224,9 @@ namespace # undef MAKE_ACCESS_FLAGS_NODE - if (!owned_nodes.count("NONE")) + if (!owned_nodes.contains("NONE")) throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR); - if (!owned_nodes.count("ALL")) + if (!owned_nodes.contains("ALL")) throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR); all_node = std::move(owned_nodes["ALL"]); diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index a9eb27c291c..3a971ec9b40 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ 
-430,7 +430,7 @@ std::vector DiskAccessStorage::findAllImpl(AccessEntityType type) const bool DiskAccessStorage::exists(const UUID & id) const { std::lock_guard lock{mutex}; - return entries_by_id.count(id); + return entries_by_id.contains(id); } @@ -611,7 +611,7 @@ bool DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_ bool name_changed = (new_name != old_name); if (name_changed) { - if (entries_by_name.count(new_name)) + if (entries_by_name.contains(new_name)) throwNameCollisionCannotRename(type, old_name, new_name); scheduleWriteLists(type); } diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index 3e1c289b207..9431fc543d4 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -279,7 +279,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur if (bracket_pos != std::string::npos) ldap_server_name.resize(bracket_pos); - if (ldap_client_params_blueprint.count(ldap_server_name) > 0) + if (ldap_client_params_blueprint.contains(ldap_server_name)) throw Exception("Multiple LDAP servers with the same name are not allowed", ErrorCodes::BAD_ARGUMENTS); LDAPClient::Params ldap_client_params_tmp; diff --git a/src/Access/LDAPAccessStorage.cpp b/src/Access/LDAPAccessStorage.cpp index 4cf42a5017c..1ae6d9c6908 100644 --- a/src/Access/LDAPAccessStorage.cpp +++ b/src/Access/LDAPAccessStorage.cpp @@ -140,7 +140,7 @@ void LDAPAccessStorage::applyRoleChangeNoLock(bool grant, const UUID & role_id, std::vector user_ids; // Build a list of ids of the relevant users. - if (common_role_names.count(role_name)) + if (common_role_names.contains(role_name)) { user_ids = memory_storage.findAll(); } @@ -254,7 +254,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR // Cleanup users_per_roles and granted_role_* mappings. 
for (const auto & old_role_name : old_role_names) { - if (local_role_names.count(old_role_name)) + if (local_role_names.contains(old_role_name)) continue; const auto rit = users_per_roles.find(old_role_name); @@ -269,7 +269,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR users_per_roles.erase(rit); - if (common_role_names.count(old_role_name)) + if (common_role_names.contains(old_role_name)) continue; const auto iit = granted_role_ids.find(old_role_name); diff --git a/src/Access/MemoryAccessStorage.cpp b/src/Access/MemoryAccessStorage.cpp index f7989693d87..6aa0688ee3e 100644 --- a/src/Access/MemoryAccessStorage.cpp +++ b/src/Access/MemoryAccessStorage.cpp @@ -41,7 +41,7 @@ std::vector MemoryAccessStorage::findAllImpl(AccessEntityType type) const bool MemoryAccessStorage::exists(const UUID & id) const { std::lock_guard lock{mutex}; - return entries_by_id.count(id); + return entries_by_id.contains(id); } diff --git a/src/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp index 359214eac9f..d71e46c8523 100644 --- a/src/Access/MultipleAccessStorage.cpp +++ b/src/Access/MultipleAccessStorage.cpp @@ -398,7 +398,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock for (const auto & storage : *nested_storages) { - if (!subscriptions.count(storage)) + if (!subscriptions.contains(storage)) added_subscriptions[static_cast(type)].push_back({storage, nullptr}); } } @@ -436,7 +436,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock auto & subscriptions = subscriptions_to_nested_storages[static_cast(type)]; for (auto & [storage, subscription] : added_subscriptions[static_cast(type)]) { - if (!subscriptions.count(storage) && (boost::range::find(*nested_storages, storage) != nested_storages->end()) + if (!subscriptions.contains(storage) && (boost::range::find(*nested_storages, storage) != nested_storages->end()) && 
!handlers_by_type[static_cast(type)].empty()) { subscriptions.emplace(std::move(storage), std::move(subscription)); diff --git a/src/Access/ReplicatedAccessStorage.cpp b/src/Access/ReplicatedAccessStorage.cpp index a564fa65223..e56fad720be 100644 --- a/src/Access/ReplicatedAccessStorage.cpp +++ b/src/Access/ReplicatedAccessStorage.cpp @@ -575,7 +575,7 @@ std::vector ReplicatedAccessStorage::findAllImpl(AccessEntityType type) co bool ReplicatedAccessStorage::exists(const UUID & id) const { std::lock_guard lock{mutex}; - return entries_by_id.count(id); + return entries_by_id.contains(id); } diff --git a/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp index 347f4607dbf..766d8af0d8c 100644 --- a/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -266,11 +266,11 @@ std::optional AggregateFunctionFactory::tryGetPrope bool AggregateFunctionFactory::isAggregateFunctionName(const String & name) const { - if (aggregate_functions.count(name) || isAlias(name)) + if (aggregate_functions.contains(name) || isAlias(name)) return true; String name_lowercase = Poco::toLower(name); - if (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase)) + if (case_insensitive_aggregate_functions.contains(name_lowercase) || isAlias(name_lowercase)) return true; if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name)) diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 21300f2dbae..071ed35bc83 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -300,7 +300,7 @@ Strings BackupImpl::listFiles(const String & prefix, const String & terminator) bool BackupImpl::fileExists(const String & file_name) const { std::lock_guard lock{mutex}; - return file_infos.count(file_name) != 0; + return file_infos.contains(file_name); } size_t 
BackupImpl::getFileSize(const String & file_name) const diff --git a/src/Columns/Collator.cpp b/src/Columns/Collator.cpp index 0c2cdcf1baf..953e35f40c5 100644 --- a/src/Columns/Collator.cpp +++ b/src/Columns/Collator.cpp @@ -86,7 +86,7 @@ AvailableCollationLocales::LocalesVector AvailableCollationLocales::getAvailable bool AvailableCollationLocales::isCollationSupported(const std::string & locale_name) const { /// We support locale names in any case, so we have to convert all to lower case - return locales_map.count(Poco::toLower(locale_name)); + return locales_map.contains(Poco::toLower(locale_name)); } Collator::Collator(const std::string & locale_) diff --git a/src/Columns/tests/gtest_column_unique.cpp b/src/Columns/tests/gtest_column_unique.cpp index 23a356afff2..15208da70fb 100644 --- a/src/Columns/tests/gtest_column_unique.cpp +++ b/src/Columns/tests/gtest_column_unique.cpp @@ -29,7 +29,7 @@ TEST(ColumnUnique, InsertRange) String str = toString(i % mod_to); column_string->insertData(str.data(), str.size()); - if (ref_map.count(str) == 0) + if (!ref_map.contains(str)) ref_map[str] = ref_map.size(); indexes[i]= ref_map[str]; @@ -68,7 +68,7 @@ TEST(ColumnUnique, InsertRangeWithOverflow) String str = toString(i % mod_to); column_string->insertData(str.data(), str.size()); - if (ref_map.count(str) == 0) + if (!ref_map.contains(str)) ref_map[str] = ref_map.size(); indexes[i]= ref_map[str]; diff --git a/src/Common/Config/AbstractConfigurationComparison.cpp b/src/Common/Config/AbstractConfigurationComparison.cpp index f304460f914..ea0b3be4b98 100644 --- a/src/Common/Config/AbstractConfigurationComparison.cpp +++ b/src/Common/Config/AbstractConfigurationComparison.cpp @@ -77,7 +77,7 @@ bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const S if ((left_subkeys.size() != right_subkeys.size()) || (left_subkeys.size() != subkeys.size())) return false; for (const auto & right_subkey : right_subkeys) - if (!left_subkeys.count(right_subkey)) + if 
(!left_subkeys.contains(right_subkey)) return false; } diff --git a/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp index 065b1cf65ba..340b2a87d0e 100644 --- a/src/Common/ZooKeeper/TestKeeper.cpp +++ b/src/Common/ZooKeeper/TestKeeper.cpp @@ -179,7 +179,7 @@ std::pair TestKeeperCreateRequest::process(TestKeeper::Contai CreateResponse response; Undo undo; - if (container.count(path)) + if (container.contains(path)) { response.error = Error::ZNODEEXISTS; } diff --git a/src/Common/ZooKeeper/ZooKeeperConstants.cpp b/src/Common/ZooKeeper/ZooKeeperConstants.cpp index 3f480fb6b2b..b0a05fe6c8d 100644 --- a/src/Common/ZooKeeper/ZooKeeperConstants.cpp +++ b/src/Common/ZooKeeper/ZooKeeperConstants.cpp @@ -71,7 +71,7 @@ std::string toString(OpNum op_num) OpNum getOpNum(int32_t raw_op_num) { - if (!VALID_OPERATIONS.count(raw_op_num)) + if (!VALID_OPERATIONS.contains(raw_op_num)) throw Exception("Operation " + std::to_string(raw_op_num) + " is unknown", Error::ZUNIMPLEMENTED); return static_cast(raw_op_num); } diff --git a/src/Coordination/ACLMap.cpp b/src/Coordination/ACLMap.cpp index 41b759531cf..5c4a8f397ca 100644 --- a/src/Coordination/ACLMap.cpp +++ b/src/Coordination/ACLMap.cpp @@ -45,7 +45,7 @@ uint64_t ACLMap::convertACLs(const Coordination::ACLs & acls) if (acls.empty()) return 0; - if (acl_to_num.count(acls)) + if (acl_to_num.contains(acls)) return acl_to_num[acls]; /// Start from one @@ -62,7 +62,7 @@ Coordination::ACLs ACLMap::convertNumber(uint64_t acls_id) const if (acls_id == 0) return Coordination::ACLs{}; - if (!num_to_acl.count(acls_id)) + if (!num_to_acl.contains(acls_id)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown ACL id {}. 
It's a bug", acls_id); return num_to_acl.at(acls_id); @@ -82,7 +82,7 @@ void ACLMap::addUsage(uint64_t acl_id) void ACLMap::removeUsage(uint64_t acl_id) { - if (usage_counter.count(acl_id) == 0) + if (!usage_counter.contains(acl_id)) return; usage_counter[acl_id]--; diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 4d71c11221e..bb712a155f7 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -201,7 +201,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe const Coordination::ZooKeeperSessionIDResponse & session_id_resp = dynamic_cast(*response); /// Nobody waits for this session id - if (session_id_resp.server_id != server->getServerID() || !new_session_id_response_callback.count(session_id_resp.internal_id)) + if (session_id_resp.server_id != server->getServerID() || !new_session_id_response_callback.contains(session_id_resp.internal_id)) return; auto callback = new_session_id_response_callback[session_id_resp.internal_id]; @@ -234,7 +234,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ { /// If session was already disconnected than we will ignore requests std::lock_guard lock(session_to_response_callback_mutex); - if (session_to_response_callback.count(session_id) == 0) + if (!session_to_response_callback.contains(session_id)) return false; } diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index 43fc8b1ec0d..a661435a2eb 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -196,7 +196,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr writeBinary(timeout, out); KeeperStorage::AuthIDs ids; - if (snapshot.session_and_auth.count(session_id)) + if (snapshot.session_and_auth.contains(session_id)) ids = snapshot.session_and_auth.at(session_id); writeBinary(ids.size(), 
out); diff --git a/src/Coordination/KeeperStateManager.cpp b/src/Coordination/KeeperStateManager.cpp index f9bfea5e69a..7304f7e0e6b 100644 --- a/src/Coordination/KeeperStateManager.cpp +++ b/src/Coordination/KeeperStateManager.cpp @@ -109,7 +109,7 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC int32_t priority = config.getInt(full_prefix + ".priority", 1); bool start_as_follower = config.getBool(full_prefix + ".start_as_follower", false); - if (client_ports.count(port) != 0) + if (client_ports.contains(port)) { throw Exception(ErrorCodes::RAFT_ERROR, "Raft configuration contains hostname '{}' with port '{}' which is equal to '{}' in server configuration", hostname, port, client_ports[port]); @@ -133,7 +133,7 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC result.servers_start_as_followers.insert(new_server_id); auto endpoint = hostname + ":" + std::to_string(port); - if (check_duplicated_hostnames.count(endpoint)) + if (check_duplicated_hostnames.contains(endpoint)) { throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contains duplicate endpoints: " "endpoint {} has been already added with id {}, but going to add it one more time with id {}", @@ -278,14 +278,14 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A /// First of all add new servers for (auto [new_id, server_config] : new_ids) { - if (!old_ids.count(new_id)) + if (!old_ids.contains(new_id)) result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::AddServer, server_config}); } /// After that remove old ones for (auto [old_id, server_config] : old_ids) { - if (!new_ids.count(old_id)) + if (!new_ids.contains(old_id)) result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::RemoveServer, server_config}); } diff --git a/src/Core/NamesAndTypes.cpp b/src/Core/NamesAndTypes.cpp index be947623a96..bd24a9e82bd 100644 --- a/src/Core/NamesAndTypes.cpp +++ b/src/Core/NamesAndTypes.cpp @@ -163,7 
+163,7 @@ NamesAndTypesList NamesAndTypesList::filter(const NameSet & names) const NamesAndTypesList res; for (const NameAndTypePair & column : *this) { - if (names.count(column.name)) + if (names.contains(column.name)) res.push_back(column); } return res; diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp index 8f5e40de5b8..ca0f0408abc 100644 --- a/src/DataTypes/NestedUtils.cpp +++ b/src/DataTypes/NestedUtils.cpp @@ -158,7 +158,7 @@ NamesAndTypesList collect(const NamesAndTypesList & names_and_types) auto nested_types = getSubcolumnsOfNested(names_and_types); for (const auto & name_type : names_and_types) - if (!isArray(name_type.type) || !nested_types.count(splitName(name_type.name).first)) + if (!isArray(name_type.type) || !nested_types.contains(splitName(name_type.name).first)) res.push_back(name_type); for (const auto & name_type : nested_types) diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 629cd9c28b2..044e03afd10 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -682,7 +682,7 @@ void replaceMissedSubcolumnsByConstants( /// Replace missed subcolumns to default literals of theirs type. 
for (const auto & [name, type] : missed_names_types) - if (identifiers.count(name)) + if (identifiers.contains(name)) addConstantToWithClause(query, name, type); } diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index 3fcb3fef25b..14cfafb1ef3 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -592,7 +592,7 @@ DataTypePtr getLeastSupertype(const TypeIndexSet & types, bool allow_conversion_ if (types_set.empty()) return std::make_shared(); - if (types.count(TypeIndex::String)) + if (types.contains(TypeIndex::String)) { if (types.size() != 1) return throw_or_return(getExceptionMessagePrefix(types) + " because some of them are String and some of them are not", ErrorCodes::NO_COMMON_TYPE); diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 622d38e01bd..b352d035c57 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -353,7 +353,7 @@ void DatabaseAtomic::assertDetachedTableNotInUse(const UUID & uuid) /// 3. ATTACH TABLE table; (new instance of Storage with the same UUID is created, instances share data on disk) /// 4. INSERT INTO table ...; (both Storage instances writes data without any synchronization) /// To avoid it, we remember UUIDs of detached tables and does not allow ATTACH table with such UUID until detached instance still in use. - if (detached_tables.count(uuid)) + if (detached_tables.contains(uuid)) throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Cannot attach table with UUID {}, " "because it was detached but still used by some query. 
Retry later.", toString(uuid)); } @@ -571,7 +571,7 @@ void DatabaseAtomic::waitDetachedTableNotInUse(const UUID & uuid) { std::lock_guard lock{mutex}; not_in_use = cleanupDetachedTables(); - if (detached_tables.count(uuid) == 0) + if (!detached_tables.contains(uuid)) return; } std::this_thread::sleep_for(std::chrono::milliseconds(100)); diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index a6b4a978c7b..7e2be7e324a 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -82,7 +82,7 @@ bool DatabasePostgreSQL::empty() const auto tables_list = fetchPostgreSQLTablesList(connection_holder->get(), configuration.schema); for (const auto & table_name : tables_list) - if (!detached_or_dropped.count(table_name)) + if (!detached_or_dropped.contains(table_name)) return false; return true; @@ -102,7 +102,7 @@ DatabaseTablesIteratorPtr DatabasePostgreSQL::getTablesIterator(ContextPtr local auto table_names = fetchPostgreSQLTablesList(connection_holder->get(), configuration.schema); for (const auto & table_name : table_names) - if (!detached_or_dropped.count(table_name)) + if (!detached_or_dropped.contains(table_name)) tables[table_name] = fetchTable(table_name, local_context, true); } catch (...) 
@@ -156,7 +156,7 @@ bool DatabasePostgreSQL::isTableExist(const String & table_name, ContextPtr /* c { std::lock_guard lock(mutex); - if (detached_or_dropped.count(table_name)) + if (detached_or_dropped.contains(table_name)) return false; return checkPostgresTable(table_name); @@ -167,7 +167,7 @@ StoragePtr DatabasePostgreSQL::tryGetTable(const String & table_name, ContextPtr { std::lock_guard lock(mutex); - if (!detached_or_dropped.count(table_name)) + if (!detached_or_dropped.contains(table_name)) return fetchTable(table_name, local_context, false); return StoragePtr{}; @@ -176,7 +176,7 @@ StoragePtr DatabasePostgreSQL::tryGetTable(const String & table_name, ContextPtr StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr, bool table_checked) const { - if (!cache_tables || !cached_tables.count(table_name)) + if (!cache_tables || !cached_tables.contains(table_name)) { if (!table_checked && !checkPostgresTable(table_name)) return StoragePtr{}; @@ -217,7 +217,7 @@ void DatabasePostgreSQL::attachTable(ContextPtr /* context_ */, const String & t "Cannot attach PostgreSQL table {} because it does not exist in PostgreSQL", getTableNameForLogs(table_name), database_name); - if (!detached_or_dropped.count(table_name)) + if (!detached_or_dropped.contains(table_name)) throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Cannot attach PostgreSQL table {} because it already exists", getTableNameForLogs(table_name), database_name); @@ -237,7 +237,7 @@ StoragePtr DatabasePostgreSQL::detachTable(ContextPtr /* context_ */, const Stri { std::lock_guard lock{mutex}; - if (detached_or_dropped.count(table_name)) + if (detached_or_dropped.contains(table_name)) throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Cannot detach table {}. 
It is already dropped/detached", getTableNameForLogs(table_name)); if (!checkPostgresTable(table_name)) @@ -271,7 +271,7 @@ void DatabasePostgreSQL::dropTable(ContextPtr, const String & table_name, bool / if (!checkPostgresTable(table_name)) throw Exception(ErrorCodes::UNKNOWN_TABLE, "Cannot drop table {} because it does not exist", getTableNameForLogs(table_name)); - if (detached_or_dropped.count(table_name)) + if (detached_or_dropped.contains(table_name)) throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} is already dropped/detached", getTableNameForLogs(table_name)); fs::path mark_table_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix); @@ -323,7 +323,7 @@ void DatabasePostgreSQL::removeOutdatedTables() /// (Tables are cached only after being accessed at least once) for (auto iter = cached_tables.begin(); iter != cached_tables.end();) { - if (!actual_tables.count(iter->first)) + if (!actual_tables.contains(iter->first)) iter = cached_tables.erase(iter); else ++iter; @@ -332,7 +332,7 @@ void DatabasePostgreSQL::removeOutdatedTables() for (auto iter = detached_or_dropped.begin(); iter != detached_or_dropped.end();) { - if (!actual_tables.count(*iter)) + if (!actual_tables.contains(*iter)) { auto table_name = *iter; iter = detached_or_dropped.erase(iter); diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 4c80b128b4b..d6f6c190124 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -68,7 +68,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig( throw Exception("Disk name can contain only alphanumeric and '_' (" + disk_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG); auto disk_config_prefix = config_prefix + "." 
+ disk_name; - if (result->getDisksMap().count(disk_name) == 0) + if (!result->getDisksMap().contains(disk_name)) { result->addToDiskMap(disk_name, factory.create(disk_name, config, disk_config_prefix, context, result->getDisksMap())); } diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 20192c3a29f..5993447f9f3 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -282,7 +282,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol for (const auto & volume : getVolumes()) { - if (new_volume_names.count(volume->getName()) == 0) + if (!new_volume_names.contains(volume->getName())) throw Exception("New storage policy " + backQuote(name) + " shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS); std::unordered_set new_disk_names; @@ -290,7 +290,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol new_disk_names.insert(disk->getName()); for (const auto & disk : volume->getDisks()) - if (new_disk_names.count(disk->getName()) == 0) + if (!new_disk_names.contains(disk->getName())) throw Exception("New storage policy " + backQuote(name) + " shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS); } } @@ -387,7 +387,7 @@ StoragePolicySelectorPtr StoragePolicySelector::updateFromConfig(const Poco::Uti /// First pass, check. 
for (const auto & [name, policy] : policies) { - if (result->policies.count(name) == 0) + if (!result->policies.contains(name)) throw Exception("Storage policy " + backQuote(name) + " is missing in new configuration", ErrorCodes::BAD_ARGUMENTS); policy->checkCompatibleWith(result->policies[name]); diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index e8f9f73b805..d2bcb646ecf 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -896,7 +896,7 @@ struct JSONExtractTree if (element.isInt64()) { Type value; - if (!accurate::convertNumeric(element.getInt64(), value) || !only_values.count(value)) + if (!accurate::convertNumeric(element.getInt64(), value) || !only_values.contains(value)) return false; col_vec.insertValue(value); return true; @@ -905,7 +905,7 @@ struct JSONExtractTree if (element.isUInt64()) { Type value; - if (!accurate::convertNumeric(element.getUInt64(), value) || !only_values.count(value)) + if (!accurate::convertNumeric(element.getUInt64(), value) || !only_values.contains(value)) return false; col_vec.insertValue(value); return true; diff --git a/src/Functions/FunctionsStringHash.cpp b/src/Functions/FunctionsStringHash.cpp index c9ad8285485..9ea9fbd8b45 100644 --- a/src/Functions/FunctionsStringHash.cpp +++ b/src/Functions/FunctionsStringHash.cpp @@ -333,7 +333,7 @@ struct MinHashImpl { void update(UInt64 hash, BytesRef ref, size_t limit) { - if (values.count(hash)) + if (values.contains(hash)) return; values[hash] = ref; diff --git a/src/Functions/formatRow.cpp b/src/Functions/formatRow.cpp index ccd013123d7..77fc2118791 100644 --- a/src/Functions/formatRow.cpp +++ b/src/Functions/formatRow.cpp @@ -38,7 +38,7 @@ public: FunctionFormatRow(const String & format_name_, ContextPtr context_) : format_name(format_name_), context(context_) { - if (!FormatFactory::instance().getAllFormats().count(format_name)) + if (!FormatFactory::instance().getAllFormats().contains(format_name)) throw 
Exception("Unknown format " + format_name, ErrorCodes::UNKNOWN_FORMAT); } diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index e706f0a75c1..fefc296a200 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -86,7 +86,7 @@ public: void callLogImpl(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * message) { const auto & [level, prio] = convertLogLevel(log_level); - if (tag_loggers.count(tag) > 0) + if (tag_loggers.contains(tag)) { LOG_IMPL(tag_loggers[tag], level, prio, "{}", message); } diff --git a/src/Interpreters/ActionLocksManager.cpp b/src/Interpreters/ActionLocksManager.cpp index 0f9b4d01bdf..8f081f3d470 100644 --- a/src/Interpreters/ActionLocksManager.cpp +++ b/src/Interpreters/ActionLocksManager.cpp @@ -72,7 +72,7 @@ void ActionLocksManager::remove(const StoragePtr & table, StorageActionBlockType { std::lock_guard lock(mutex); - if (storage_locks.count(table.get())) + if (storage_locks.contains(table.get())) storage_locks[table.get()].erase(action_type); } diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index f06ac229e94..b367af87adf 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -334,7 +334,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_ NameSet added; for (const auto & node : index) { - if (required_names.count(node->result_name) && added.count(node->result_name) == 0) + if (required_names.contains(node->result_name) && !added.contains(node->result_name)) { required_nodes.push_back(node); added.insert(node->result_name); @@ -344,7 +344,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_ if (added.size() < required_names.size()) { for (const auto & name : required_names) - if (added.count(name) == 0) + if (!added.contains(name)) throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown column: {}, there are only columns {}", name, dumpNames()); } @@ -392,7 +392,7 @@ void 
ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta /// We cannot remove arrayJoin because it changes the number of rows. bool is_array_join = node.type == ActionType::ARRAY_JOIN; - if (is_array_join && visited_nodes.count(&node) == 0) + if (is_array_join && !visited_nodes.contains(&node)) { visited_nodes.insert(&node); stack.push(&node); @@ -426,7 +426,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta for (const auto * child : node->children) { - if (visited_nodes.count(child) == 0) + if (!visited_nodes.contains(child)) { stack.push(const_cast(child)); visited_nodes.insert(child); @@ -434,8 +434,8 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta } } - nodes.remove_if([&](const Node & node) { return visited_nodes.count(&node) == 0; }); - auto it = std::remove_if(inputs.begin(), inputs.end(), [&](const Node * node) { return visited_nodes.count(node) == 0; }); + nodes.remove_if([&](const Node & node) { return !visited_nodes.contains(&node); }); + auto it = std::remove_if(inputs.begin(), inputs.end(), [&](const Node * node) { return !visited_nodes.contains(node); }); inputs.erase(it, inputs.end()); } @@ -535,7 +535,7 @@ Block ActionsDAG::updateHeader(Block header) const { for (const auto * output : index) { - if (node_to_column.count(output) == 0) + if (!node_to_column.contains(output)) { std::stack stack; stack.push({.node = output}); @@ -548,7 +548,7 @@ Block ActionsDAG::updateHeader(Block header) const while (frame.next_child < node->children.size()) { const auto * child = node->children[frame.next_child]; - if (node_to_column.count(child) == 0) + if (!node_to_column.contains(child)) { stack.push({.node = child}); break; @@ -660,7 +660,7 @@ NameSet ActionsDAG::foldActionsByProjection( for (const auto * child : node->children) { - if (visited_nodes.count(child) == 0) + if (!visited_nodes.contains(child)) { stack.push(const_cast(child)); visited_nodes.insert(child); @@ 
-669,9 +669,9 @@ NameSet ActionsDAG::foldActionsByProjection( } /// Clean up unused nodes after folding. - std::erase_if(inputs, [&](const Node * node) { return visited_nodes.count(node) == 0; }); - std::erase_if(index, [&](const Node * node) { return visited_index_names.count(node->result_name) == 0; }); - nodes.remove_if([&](const Node & node) { return visited_nodes.count(&node) == 0; }); + std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); }); + std::erase_if(index, [&](const Node * node) { return !visited_index_names.contains(node->result_name); }); + nodes.remove_if([&](const Node & node) { return !visited_nodes.contains(&node); }); /// Calculate the required columns after folding. NameSet next_required_columns; @@ -1299,7 +1299,7 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set split /// DFS. Decide if node is needed by split. for (const auto & node : nodes) { - if (split_nodes.count(&node) == 0) + if (!split_nodes.contains(&node)) continue; auto & cur_data = data[&node]; @@ -1476,7 +1476,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet & /// DFS. Decide if node depends on ARRAY JOIN. 
for (const auto & node : nodes) { - if (visited_nodes.count(&node)) + if (visited_nodes.contains(&node)) continue; visited_nodes.insert(&node); @@ -1491,7 +1491,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet & { const auto * child = cur.node->children[cur.next_child_to_visit]; - if (visited_nodes.count(child) == 0) + if (!visited_nodes.contains(child)) { visited_nodes.insert(child); stack.push({.node = child}); @@ -1504,12 +1504,12 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet & if (cur.next_child_to_visit == cur.node->children.size()) { bool depend_on_array_join = false; - if (cur.node->type == ActionType::INPUT && array_joined_columns.count(cur.node->result_name)) + if (cur.node->type == ActionType::INPUT && array_joined_columns.contains(cur.node->result_name)) depend_on_array_join = true; for (const auto * child : cur.node->children) { - if (split_nodes.count(child) == 0) + if (!split_nodes.contains(child)) depend_on_array_join = true; } @@ -1591,7 +1591,7 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere { for (const auto & child : node->children) { - if (visited_nodes.count(child) == 0) + if (!visited_nodes.contains(child)) { visited_nodes.insert(child); stack.push(child); @@ -1624,7 +1624,7 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere { const auto * child = cur.node->children[cur.next_child_to_visit]; - if (visited_nodes.count(child) == 0) + if (!visited_nodes.contains(child)) { visited_nodes.insert(child); stack.push({.node = child}); @@ -1644,9 +1644,9 @@ ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordere allowed_nodes.emplace(cur.node); } - if (predicates.count(cur.node)) + if (predicates.contains(cur.node)) { - if (allowed_nodes.count(cur.node)) + if (allowed_nodes.contains(cur.node)) { if (allowed.insert(cur.node).second) conjunction.allowed.push_back(cur.node); @@ -1721,7 +1721,7 @@ 
ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(NodeRawConstPtrs conjunctio /// DFS. Clone actions. for (const auto * predicate : conjunction) { - if (nodes_mapping.count(predicate)) + if (nodes_mapping.contains(predicate)) continue; stack.push({.node = predicate}); @@ -1733,7 +1733,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(NodeRawConstPtrs conjunctio { const auto * child = cur.node->children[cur.next_child_to_visit]; - if (nodes_mapping.count(child) == 0) + if (!nodes_mapping.contains(child)) { stack.push({.node = child}); break; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 3bad8fba270..c57b85951bc 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -455,7 +455,7 @@ public: return *node; } - bool contains(const std::string & name) const { return map.count(name) > 0; } + bool contains(const std::string & name) const { return map.contains(name); } }; ActionsMatcher::Data::Data( @@ -525,7 +525,7 @@ size_t ScopeStack::getColumnLevel(const std::string & name) { --i; - if (stack[i].inputs.count(name)) + if (stack[i].inputs.contains(name)) return i; const auto * node = stack[i].index->tryGetNode(name); diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 55f9fe602f4..a0406e0ff0c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2736,7 +2736,7 @@ void Aggregator::mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVari result.keys_size = params.keys_size; result.key_sizes = key_sizes; - bool has_blocks_with_unknown_bucket = bucket_to_blocks.count(-1); + bool has_blocks_with_unknown_bucket = bucket_to_blocks.contains(-1); /// First, parallel the merge for the individual buckets. Then we continue merge the data not allocated to the buckets. 
if (has_two_level) diff --git a/src/Interpreters/ApplyWithAliasVisitor.cpp b/src/Interpreters/ApplyWithAliasVisitor.cpp index 93fe87d5db0..89407f3521e 100644 --- a/src/Interpreters/ApplyWithAliasVisitor.cpp +++ b/src/Interpreters/ApplyWithAliasVisitor.cpp @@ -26,7 +26,7 @@ void ApplyWithAliasVisitor::visit(ASTPtr & ast, const Data & data) } for (const auto & with_alias : data.exprs) { - if (!current_names.count(with_alias.first)) + if (!current_names.contains(with_alias.first)) with->children.push_back(with_alias.second->clone()); } } diff --git a/src/Interpreters/ApplyWithGlobalVisitor.cpp b/src/Interpreters/ApplyWithGlobalVisitor.cpp index 1ac2b19a04e..6f00ed0bab5 100644 --- a/src/Interpreters/ApplyWithGlobalVisitor.cpp +++ b/src/Interpreters/ApplyWithGlobalVisitor.cpp @@ -20,7 +20,7 @@ void ApplyWithGlobalVisitor::visit(ASTSelectQuery & select, const std::mapchildren.push_back(with_alias.second->clone()); } } diff --git a/src/Interpreters/ArithmeticOperationsInAgrFuncOptimize.cpp b/src/Interpreters/ArithmeticOperationsInAgrFuncOptimize.cpp index 383ca3db6f4..d48ad467c2e 100644 --- a/src/Interpreters/ArithmeticOperationsInAgrFuncOptimize.cpp +++ b/src/Interpreters/ArithmeticOperationsInAgrFuncOptimize.cpp @@ -85,7 +85,7 @@ const String & changeNameIfNeeded(const String & func_name, const String & child { "max", "min" } }; - if (literal.value < zeroField(literal.value) && matches.count(func_name) && matches.find(func_name)->second.count(child_name)) + if (literal.value < zeroField(literal.value) && matches.contains(func_name) && matches.find(func_name)->second.contains(child_name)) return swap_to.find(func_name)->second; return func_name; @@ -103,8 +103,8 @@ ASTPtr tryExchangeFunctions(const ASTFunction & func) auto lower_name = Poco::toLower(func.name); const ASTFunction * child_func = getInternalFunction(func); - if (!child_func || !child_func->arguments || child_func->arguments->children.size() != 2 || !supported.count(lower_name) - || 
!supported.find(lower_name)->second.count(child_func->name)) + if (!child_func || !child_func->arguments || child_func->arguments->children.size() != 2 || !supported.contains(lower_name) + || !supported.find(lower_name)->second.contains(child_func->name)) return {}; auto original_alias = func.tryGetAlias(); diff --git a/src/Interpreters/ArrayJoinAction.cpp b/src/Interpreters/ArrayJoinAction.cpp index 3d176857601..51aaa5fb169 100644 --- a/src/Interpreters/ArrayJoinAction.cpp +++ b/src/Interpreters/ArrayJoinAction.cpp @@ -39,7 +39,7 @@ void ArrayJoinAction::prepare(ColumnsWithTypeAndName & sample) const { for (auto & current : sample) { - if (columns.count(current.name) == 0) + if (!columns.contains(current.name)) continue; const DataTypeArray * array_type = typeid_cast(&*current.type); @@ -116,7 +116,7 @@ void ArrayJoinAction::execute(Block & block) { ColumnWithTypeAndName & current = block.safeGetByPosition(i); - if (columns.count(current.name)) + if (columns.contains(current.name)) { if (!typeid_cast(&*current.type)) throw Exception("ARRAY JOIN of not array: " + current.name, ErrorCodes::TYPE_MISMATCH); diff --git a/src/Interpreters/ColumnAliasesVisitor.cpp b/src/Interpreters/ColumnAliasesVisitor.cpp index 43a6c4d4cf1..f1b92aba79f 100644 --- a/src/Interpreters/ColumnAliasesVisitor.cpp +++ b/src/Interpreters/ColumnAliasesVisitor.cpp @@ -63,8 +63,8 @@ void ColumnAliasesMatcher::visit(ASTIdentifier & node, ASTPtr & ast, Data & data { if (auto column_name = IdentifierSemantic::getColumnName(node)) { - if (data.array_join_result_columns.count(*column_name) || data.array_join_source_columns.count(*column_name) - || data.private_aliases.count(*column_name) || !data.columns.has(*column_name)) + if (data.array_join_result_columns.contains(*column_name) || data.array_join_source_columns.contains(*column_name) + || data.private_aliases.contains(*column_name) || !data.columns.has(*column_name)) return; const auto & col = data.columns.get(*column_name); @@ -74,7 +74,7 @@ void 
ColumnAliasesMatcher::visit(ASTIdentifier & node, ASTPtr & ast, Data & data auto alias_expr = col.default_desc.expression->clone(); auto original_column = alias_expr->getColumnName(); // If expanded alias is used in array join, avoid expansion, otherwise the column will be mis-array joined - if (data.array_join_result_columns.count(original_column) || data.array_join_source_columns.count(original_column)) + if (data.array_join_result_columns.contains(original_column) || data.array_join_source_columns.contains(original_column)) return; ast = addTypeConversionToAST(std::move(alias_expr), col.type->getName(), data.columns.getAll(), data.context); // We need to set back the original column name, or else the process of naming resolution will complain. diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index f41d4662e99..ca26b4e26ac 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1027,7 +1027,7 @@ bool Context::hasScalar(const String & name) const if (isGlobalContext()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars"); - return scalars.count(name); + return scalars.contains(name); } diff --git a/src/Interpreters/ConvertStringsToEnumVisitor.cpp b/src/Interpreters/ConvertStringsToEnumVisitor.cpp index 4ca0909cd23..745e484022c 100644 --- a/src/Interpreters/ConvertStringsToEnumVisitor.cpp +++ b/src/Interpreters/ConvertStringsToEnumVisitor.cpp @@ -100,7 +100,7 @@ void FindUsedFunctionsMatcher::visit(const ASTPtr & ast, Data & data) void FindUsedFunctionsMatcher::visit(const ASTFunction & func, Data & data) { - if (data.names.count(func.name) && !data.call_stack.empty()) + if (data.names.contains(func.name) && !data.call_stack.empty()) { String alias = func.tryGetAlias(); if (!alias.empty()) @@ -136,7 +136,7 @@ void ConvertStringsToEnumMatcher::visit(ASTFunction & function_node, Data & data /// We are not sure we could change the type of function result /// cause it is present in other 
function as argument - if (data.used_functions.count(function_node.tryGetAlias())) + if (data.used_functions.contains(function_node.tryGetAlias())) return; if (function_node.name == "if") diff --git a/src/Interpreters/CrossToInnerJoinVisitor.cpp b/src/Interpreters/CrossToInnerJoinVisitor.cpp index 511d0a6b8a6..c986aa9ec5a 100644 --- a/src/Interpreters/CrossToInnerJoinVisitor.cpp +++ b/src/Interpreters/CrossToInnerJoinVisitor.cpp @@ -89,7 +89,7 @@ bool isAllowedToRewriteCrossJoin(const ASTPtr & node, const Aliases & aliases) auto idents = IdentifiersCollector::collect(node); for (const auto * ident : idents) { - if (ident->isShort() && aliases.count(ident->shortName())) + if (ident->isShort() && aliases.contains(ident->shortName())) return false; } return true; diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 7513c3bf849..7ce774b11eb 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -945,7 +945,7 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) std::unique_lock lock{tables_marked_dropped_mutex}; wait_table_finally_dropped.wait(lock, [&]() { - return tables_marked_dropped_ids.count(uuid) == 0; + return !tables_marked_dropped_ids.contains(uuid); }); } diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index 55fdaffdfe9..2d9d18c2076 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -76,7 +76,7 @@ static bool worthConvertingToLiteral(const Block & scalar) { const auto * scalar_type_name = scalar.safeGetByPosition(0).type->getFamilyName(); static const std::set useless_literal_types = {"Array", "Tuple", "AggregateFunction", "Function", "Set", "LowCardinality"}; - return !useless_literal_types.count(scalar_type_name); + return !useless_literal_types.contains(scalar_type_name); } static auto getQueryInterpreter(const 
ASTSubquery & subquery, ExecuteScalarSubqueriesMatcher::Data & data) diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 83f8de78fa6..1979759fe7f 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -929,7 +929,7 @@ void ExpressionActionsChain::addStep(NameSet non_constant_inputs) ColumnsWithTypeAndName columns = steps.back()->getResultColumns(); for (auto & column : columns) - if (column.column && isColumnConst(*column.column) && non_constant_inputs.count(column.name)) + if (column.column && isColumnConst(*column.column) && non_constant_inputs.contains(column.name)) column.column = nullptr; steps.push_back(std::make_unique(std::make_shared(columns))); @@ -950,7 +950,7 @@ void ExpressionActionsChain::finalize() const NameSet & additional_input = steps[i + 1]->additional_input; for (const auto & it : steps[i + 1]->getRequiredColumns()) { - if (additional_input.count(it.name) == 0) + if (!additional_input.contains(it.name)) { auto iter = required_output.find(it.name); if (iter == required_output.end()) @@ -1001,7 +1001,7 @@ ExpressionActionsChain::ArrayJoinStep::ArrayJoinStep(ArrayJoinActionPtr array_jo { required_columns.emplace_back(NameAndTypePair(column.name, column.type)); - if (array_join->columns.count(column.name) > 0) + if (array_join->columns.contains(column.name)) { const auto * array = typeid_cast(column.type.get()); column.type = array->getNestedType(); @@ -1018,12 +1018,12 @@ void ExpressionActionsChain::ArrayJoinStep::finalize(const NameSet & required_ou for (const auto & column : result_columns) { - if (array_join->columns.count(column.name) != 0 || required_output_.count(column.name) != 0) + if (array_join->columns.contains(column.name) || required_output_.contains(column.name)) new_result_columns.emplace_back(column); } for (const auto & column : required_columns) { - if (array_join->columns.count(column.name) != 0 || required_output_.count(column.name) !=
0) + if (array_join->columns.contains(column.name) || required_output_.contains(column.name)) new_required_columns.emplace_back(column); } @@ -1066,7 +1066,7 @@ void ExpressionActionsChain::JoinStep::finalize(const NameSet & required_output_ for (const auto & column : required_columns) { - if (required_names.count(column.name) != 0) + if (required_names.contains(column.name)) new_required_columns.emplace_back(column); } @@ -1076,7 +1076,7 @@ void ExpressionActionsChain::JoinStep::finalize(const NameSet & required_output_ for (const auto & column : result_columns) { - if (required_names.count(column.name) != 0) + if (required_names.contains(column.name)) new_result_columns.emplace_back(column); } diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 63fca80516b..9cb1825d3a3 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -261,7 +261,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAGPtr & a for (auto & column : actions->getResultColumns()) { - if (syntax->array_join_result_to_source.count(column.name)) + if (syntax->array_join_result_to_source.contains(column.name)) { new_columns_after_array_join.emplace_back(column.name, column.type); added_columns.emplace(column.name); @@ -269,7 +269,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAGPtr & a } for (const auto & column : src_columns) - if (added_columns.count(column.name) == 0) + if (!added_columns.contains(column.name)) new_columns_after_array_join.emplace_back(column.name, column.type); return new_columns_after_array_join; @@ -362,7 +362,7 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) NameAndTypePair key{column_name, node->result_type}; /// Aggregation keys are uniqued. 
- if (!unique_keys.count(key.name)) + if (!unique_keys.contains(key.name)) { unique_keys.insert(key.name); aggregation_keys.push_back(key); @@ -1075,7 +1075,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( /// TODO: add sampling and final execution to common chain. for (const auto & column : additional_required_columns) { - if (required_source_columns.count(column)) + if (required_source_columns.contains(column)) step.addRequiredOutput(column); } @@ -1083,7 +1083,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( NameSet name_set(names.begin(), names.end()); for (const auto & column : sourceColumns()) - if (required_source_columns.count(column.name) == 0) + if (!required_source_columns.contains(column.name)) name_set.erase(column.name); Names required_output(name_set.begin(), name_set.end()); @@ -1109,7 +1109,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( for (const auto & column : sourceColumns()) { - if (prewhere_input_names.count(column.name) == 0) + if (!prewhere_input_names.contains(column.name)) { columns.emplace_back(column.type, column.name); unused_source_columns.emplace(column.name); @@ -1350,7 +1350,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai if (auto * ident = fn->as()) { /// exclude columns from select expression - they are already available - if (select.count(ident->getColumnName()) == 0) + if (!select.contains(ident->getColumnName())) step.addRequiredOutput(ident->getColumnName()); return; } @@ -1381,7 +1381,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai if (with_fill) { for (const auto & column : step.getResultColumns()) - if (!order_by_keys.count(column.name)) + if (!order_by_keys.contains(column.name)) non_constant_inputs.insert(column.name); } @@ -1415,7 +1415,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain replaceForPositionalArguments(child, select_query, 
ASTSelectQuery::Expression::LIMIT_BY); auto child_name = child->getColumnName(); - if (!aggregated_names.count(child_name)) + if (!aggregated_names.contains(child_name)) step.addRequiredOutput(child_name); } @@ -1434,7 +1434,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendProjectResult(ExpressionActio for (const auto & ast : asts) { String result_name = ast->getAliasOrColumnName(); - if (required_result_columns.empty() || required_result_columns.count(result_name)) + if (required_result_columns.empty() || required_result_columns.contains(result_name)) { std::string source_name = ast->getColumnName(); @@ -1522,7 +1522,7 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r /// We will not delete the original columns. for (const auto & column_name_type : sourceColumns()) { - if (name_set.count(column_name_type.name) == 0) + if (!name_set.contains(column_name_type.name)) { result_names.push_back(column_name_type.name); name_set.insert(column_name_type.name); diff --git a/src/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp index edc087871f8..dbfdba77619 100644 --- a/src/Interpreters/IdentifierSemantic.cpp +++ b/src/Interpreters/IdentifierSemantic.cpp @@ -245,7 +245,7 @@ IdentifierSemantic::getIdentsMembership(ASTPtr ast, const std::vectorisShort() && aliases.count(ident->shortName())) + if (ident->isShort() && aliases.contains(ident->shortName())) return {}; const auto pos = getIdentMembership(*ident, tables); if (!pos) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index e981e6f0648..948dbf8a0b9 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -904,7 +904,7 @@ static InterpolateDescriptionPtr getInterpolateDescription( col_set.insert(column.name); } for (const auto & column : result_block) - if (col_set.count(column.name) == 0) + if (!col_set.contains(column.name)) 
source_columns.emplace_back(column.name, column.type); } @@ -1774,7 +1774,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() else column_expr = std::make_shared(column); - if (required_columns_from_prewhere.count(column)) + if (required_columns_from_prewhere.contains(column)) { required_columns_from_prewhere_expr->children.emplace_back(std::move(column_expr)); @@ -1802,7 +1802,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (prewhere_info->remove_prewhere_column && column.name == prewhere_info->prewhere_column_name) continue; - if (columns_to_remove.count(column.name)) + if (columns_to_remove.contains(column.name)) continue; required_columns_all_expr->children.emplace_back(std::make_shared(column.name)); @@ -1826,7 +1826,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() prewhere_info->remove_prewhere_column = false; /// Remove columns which will be added by prewhere. - std::erase_if(required_columns, [&](const String & name) { return required_columns_after_prewhere_set.count(name) != 0; }); + std::erase_if(required_columns, [&](const String & name) { return required_columns_after_prewhere_set.contains(name); }); if (prewhere_info) { @@ -1849,7 +1849,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() /// Add physical columns required by prewhere actions. 
for (const auto & column : required_columns_from_prewhere) - if (required_aliases_from_prewhere.count(column) == 0) + if (!required_aliases_from_prewhere.contains(column)) if (required_columns.end() == std::find(required_columns.begin(), required_columns.end(), column)) required_columns.push_back(column); } diff --git a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp index c43302e0de9..ca523e6806c 100644 --- a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp +++ b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp @@ -259,7 +259,7 @@ struct CollectColumnIdentifiersMatcher void addIdentifier(const ASTIdentifier & ident) { for (const auto & aliases : ignored) - if (aliases.count(ident.name())) + if (aliases.contains(ident.name())) return; identifiers.push_back(const_cast(&ident)); } @@ -324,7 +324,7 @@ struct CheckAliasDependencyVisitorData void visit(ASTIdentifier & ident, ASTPtr &) { - if (!dependency && aliases.count(ident.name())) + if (!dependency && aliases.contains(ident.name())) dependency = &ident; } }; @@ -470,7 +470,7 @@ void restoreName(ASTIdentifier & ident, const String & original_name, NameSet & if (original_name.empty()) return; - if (!restored_names.count(original_name)) + if (!restored_names.contains(original_name)) { ident.setAlias(original_name); restored_names.emplace(original_name); @@ -502,7 +502,7 @@ std::vector normalizeColumnNamesExtractNeeded( for (ASTIdentifier * ident : identifiers) { - bool got_alias = aliases.count(ident->name()); + bool got_alias = aliases.contains(ident->name()); bool allow_ambiguous = got_alias; /// allow ambiguous column overridden by an alias if (auto table_pos = IdentifierSemantic::chooseTableColumnMatch(*ident, tables, allow_ambiguous)) @@ -520,13 +520,13 @@ std::vector normalizeColumnNamesExtractNeeded( } String short_name = ident->shortName(); String original_long_name; - if (public_identifiers.count(ident)) + if (public_identifiers.contains(ident)) 
original_long_name = ident->name(); size_t count = countTablesWithColumn(tables, short_name); /// isValidIdentifierBegin retuired to be consistent with TableJoin::deduplicateAndQualifyColumnNames - if (count > 1 || aliases.count(short_name) || !isValidIdentifierBegin(short_name.at(0))) + if (count > 1 || aliases.contains(short_name) || !isValidIdentifierBegin(short_name.at(0))) { const auto & table = tables[*table_pos]; IdentifierSemantic::setColumnLongName(*ident, table.table); /// table.column -> table_alias.column @@ -654,7 +654,7 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast for (auto * ident : on_identifiers) { auto it = data.aliases.find(ident->name()); - if (!on_aliases.count(ident->name()) && it != data.aliases.end()) + if (!on_aliases.contains(ident->name()) && it != data.aliases.end()) { auto alias_expression = it->second; alias_pushdown[table_pos].push_back(alias_expression); @@ -684,7 +684,7 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast /// Check same name in aliases, USING and ON sections. Cannot push down alias to ON through USING cause of name masquerading. for (auto * ident : using_identifiers) - if (on_aliases.count(ident->name())) + if (on_aliases.contains(ident->name())) throw Exception("Cannot rewrite JOINs. 
Alias '" + ident->name() + "' appears both in ON and USING", ErrorCodes::NOT_IMPLEMENTED); using_identifiers.clear(); diff --git a/src/Interpreters/LogicalExpressionsOptimizer.cpp b/src/Interpreters/LogicalExpressionsOptimizer.cpp index d1cec92ceb7..9e30cac2e19 100644 --- a/src/Interpreters/LogicalExpressionsOptimizer.cpp +++ b/src/Interpreters/LogicalExpressionsOptimizer.cpp @@ -41,7 +41,7 @@ void LogicalExpressionsOptimizer::perform() { if (select_query == nullptr) return; - if (visited_nodes.count(select_query)) + if (visited_nodes.contains(select_query)) return; size_t position = 0; @@ -96,7 +96,7 @@ void LogicalExpressionsOptimizer::reorderColumns() void LogicalExpressionsOptimizer::collectDisjunctiveEqualityChains() { - if (visited_nodes.count(select_query)) + if (visited_nodes.contains(select_query)) return; using Edge = std::pair; @@ -161,7 +161,7 @@ void LogicalExpressionsOptimizer::collectDisjunctiveEqualityChains() { if (!child->as()) { - if (!visited_nodes.count(child.get())) + if (!visited_nodes.contains(child.get())) to_visit.push_back(Edge(to_node, &*child)); else { diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index e1e03e53014..81603d6626d 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -538,7 +538,7 @@ MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right const NameSet required_right_keys = table_join->requiredRightKeys(); for (const auto & column : right_table_keys) - if (required_right_keys.count(column.name)) + if (required_right_keys.contains(column.name)) right_columns_to_add.insert(ColumnWithTypeAndName{nullptr, column.type, column.name}); JoinCommon::createMissedColumns(right_columns_to_add); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 2c03f109fe4..99032dd9f10 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -171,7 +171,7 @@ ColumnDependencies 
getAllColumnDependencies(const StorageMetadataPtr & metadata_ new_updated_columns.clear(); for (const auto & dependency : new_dependencies) { - if (!dependencies.count(dependency)) + if (!dependencies.contains(dependency)) { dependencies.insert(dependency); if (!dependency.isReadOnly()) @@ -360,7 +360,7 @@ static void validateUpdateColumns( throw Exception("There is no column " + backQuote(column_name) + " in table", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE); } - if (key_columns.count(column_name)) + if (key_columns.contains(column_name)) throw Exception("Cannot UPDATE key column " + backQuote(column_name), ErrorCodes::CANNOT_UPDATE_COLUMN); auto materialized_it = column_to_affected_materialized.find(column_name); @@ -368,7 +368,7 @@ static void validateUpdateColumns( { for (const String & materialized : materialized_it->second) { - if (key_columns.count(materialized)) + if (key_columns.contains(materialized)) throw Exception("Updated column " + backQuote(column_name) + " affects MATERIALIZED column " + backQuote(materialized) + ", which is a key column. 
Cannot UPDATE it.", ErrorCodes::CANNOT_UPDATE_COLUMN); @@ -448,7 +448,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) auto syntax_result = TreeRewriter(context).analyze(query, all_columns); for (const String & dependency : syntax_result->requiredSourceColumns()) { - if (updated_columns.count(dependency)) + if (updated_columns.contains(dependency)) column_to_affected_materialized[dependency].push_back(column.name); } } @@ -1042,7 +1042,7 @@ bool MutationsInterpreter::Stage::isAffectingAllColumns(const Names & storage_co { /// is subset for (const auto & storage_column : storage_columns) - if (!output_columns.count(storage_column)) + if (!output_columns.contains(storage_column)) return false; return true; diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 4c0aa20d795..daa0a144337 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -85,10 +85,10 @@ static NamesAndTypesList getColumnsList(const ASTExpressionList * columns_defini { if (const auto * options = declare_column->column_options->as()) { - if (options->changes.count("is_null")) + if (options->changes.contains("is_null")) is_nullable = options->changes.at("is_null")->as()->value.safeGet(); - if (options->changes.count("is_unsigned")) + if (options->changes.contains("is_unsigned")) is_unsigned = options->changes.at("is_unsigned")->as()->value.safeGet(); } } @@ -161,7 +161,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col String comment; if (declare_column->column_options) if (const auto * options = declare_column->column_options->as()) - if (options->changes.count("comment")) + if (options->changes.contains("comment")) comment = options->changes.at("comment")->as()->value.safeGet(); ColumnDescription column_description(column_name_and_type->name, column_name_and_type->type); @@ -238,7 +238,7 @@ static std::tupleas()) { 
/// column_name(int64 literal) - if (columns_name_set.count(function->name) && function->arguments->children.size() == 1) + if (columns_name_set.contains(function->name) && function->arguments->children.size() == 1) { const auto & prefix_limit = function->arguments->children[0]->as(); @@ -276,13 +276,13 @@ static std::tuplecolumn_options->as()) { - if (options->changes.count("unique_key")) + if (options->changes.contains("unique_key")) unique_keys->arguments->children.emplace_back(std::make_shared(declare_column->name)); - if (options->changes.count("primary_key")) + if (options->changes.contains("primary_key")) primary_keys->arguments->children.emplace_back(std::make_shared(declare_column->name)); - if (options->changes.count("auto_increment")) + if (options->changes.contains("auto_increment")) increment_columns.emplace(declare_column->name); } } @@ -385,10 +385,10 @@ static ASTPtr getOrderByPolicy( for (const auto & [name, type] : names_and_types) { - if (order_by_columns_set.count(name)) + if (order_by_columns_set.contains(name)) continue; - if (increment_columns.count(name)) + if (increment_columns.contains(name)) { order_by_columns_set.emplace(name); increment_keys.emplace_back(NameAndTypePair(name, type)); diff --git a/src/Interpreters/QueryAliasesVisitor.cpp b/src/Interpreters/QueryAliasesVisitor.cpp index bd0b2e88d2f..1b6f37ac67a 100644 --- a/src/Interpreters/QueryAliasesVisitor.cpp +++ b/src/Interpreters/QueryAliasesVisitor.cpp @@ -108,7 +108,7 @@ void QueryAliasesMatcher::visit(const ASTSubquery & const_subquery, const AST { alias = dummy_subquery_name_prefix + std::to_string(++subquery_index); } - while (aliases.count(alias)); + while (aliases.contains(alias)); subquery.setAlias(alias); aliases[alias] = ast; @@ -126,7 +126,7 @@ void QueryAliasesMatcher::visitOther(const ASTPtr & ast, Data & data) String alias = ast->tryGetAlias(); if (!alias.empty()) { - if (aliases.count(alias) && ast->getTreeHash() != aliases[alias]->getTreeHash()) + if 
(aliases.contains(alias) && ast->getTreeHash() != aliases[alias]->getTreeHash()) throw Exception(wrongAliasMessage(ast, aliases[alias], alias), ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS); aliases[alias] = ast; diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 28b2195c9c3..2a8b256c3d1 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -99,7 +99,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) String node_alias = ast->tryGetAlias(); - if (current_asts.count(alias_node.get()) /// We have loop of multiple aliases + if (current_asts.contains(alias_node.get()) /// We have loop of multiple aliases || (node.name() == our_alias_or_name && our_name && node_alias == *our_name)) /// Our alias points to node.name, direct loop throw Exception("Cyclic aliases", ErrorCodes::CYCLIC_ALIASES); @@ -235,7 +235,7 @@ void QueryNormalizer::visit(ASTPtr & ast, Data & data) auto & finished_asts = data.finished_asts; auto & current_asts = data.current_asts; - if (finished_asts.count(ast)) + if (finished_asts.contains(ast)) { ast = finished_asts[ast]; return; diff --git a/src/Interpreters/RequiredSourceColumnsData.cpp b/src/Interpreters/RequiredSourceColumnsData.cpp index 9118acc38c9..f5004a63dda 100644 --- a/src/Interpreters/RequiredSourceColumnsData.cpp +++ b/src/Interpreters/RequiredSourceColumnsData.cpp @@ -13,7 +13,7 @@ bool RequiredSourceColumnsData::addColumnAliasIfAny(const IAST & ast) if (alias.empty()) return false; - if (required_names.count(alias)) + if (required_names.contains(alias)) masked_columns.insert(alias); complex_aliases.insert(alias); @@ -62,10 +62,10 @@ NameSet RequiredSourceColumnsData::requiredColumns() const String table_name = Nested::extractTableName(name); /// Tech debt. There's its own logic for ARRAY JOIN columns. 
- if (array_join_columns.count(name) || array_join_columns.count(table_name)) + if (array_join_columns.contains(name) || array_join_columns.contains(table_name)) continue; - if (!complex_aliases.count(name) || masked_columns.count(name)) + if (!complex_aliases.contains(name) || masked_columns.contains(name)) required.insert(name); } return required; diff --git a/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp index cc9a2d86d28..f4305692eb6 100644 --- a/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -135,7 +135,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const AS { if (auto * ident = fn->as()) { - if (select_columns.count(ident->getColumnName()) == 0) + if (!select_columns.contains(ident->getColumnName())) data.addColumnIdentifier(*ident); return; } @@ -181,7 +181,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTIdentifier & node, const ASTPt if (node.name().empty()) throw Exception("Expected not empty name", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - if (!data.private_aliases.count(node.name())) + if (!data.private_aliases.contains(node.name())) data.addColumnIdentifier(node); } diff --git a/src/Interpreters/RewriteAnyFunctionVisitor.cpp b/src/Interpreters/RewriteAnyFunctionVisitor.cpp index 5eb14aa4252..33362648ae1 100644 --- a/src/Interpreters/RewriteAnyFunctionVisitor.cpp +++ b/src/Interpreters/RewriteAnyFunctionVisitor.cpp @@ -89,7 +89,7 @@ void RewriteAnyFunctionMatcher::visit(const ASTFunction & func, ASTPtr & ast, Da return; /// We have rewritten this function. Just unwrap its argument. 
- if (data.rewritten.count(ast.get())) + if (data.rewritten.contains(ast.get())) { func_arguments[0]->setAlias(func.alias); ast = func_arguments[0]; diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index ec5358cf6bc..072e8e13f15 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -168,7 +168,7 @@ void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_colu for (auto & column : columns_from_joined_table) { - if (joined_columns.count(column.name)) + if (joined_columns.contains(column.name)) continue; joined_columns.insert(column.name); @@ -178,7 +178,7 @@ void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_colu /// Also qualify unusual column names - that does not look like identifiers. - if (left_table_columns.count(column.name) || !isValidIdentifierBegin(column.name.at(0))) + if (left_table_columns.contains(column.name) || !isValidIdentifierBegin(column.name.at(0))) inserted.name = right_table_prefix + column.name; original_names[inserted.name] = column.name; @@ -280,7 +280,7 @@ Block TableJoin::getRequiredRightKeys(const Block & right_table_keys, std::vecto forAllKeys(clauses, [&](const auto & left_key_name, const auto & right_key_name) { - if (required_keys.count(right_key_name) && !required_right_keys.has(right_key_name)) + if (required_keys.contains(right_key_name) && !required_right_keys.has(right_key_name)) { const auto & right_key = right_table_keys.getByName(right_key_name); required_right_keys.insert(right_key); diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index 6016d54c7dc..6f1c48135eb 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -97,7 +97,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, if (IdentifierSemantic::getColumnName(identifier)) { String short_name = 
identifier.shortName(); - bool allow_ambiguous = data.join_using_columns.count(short_name); + bool allow_ambiguous = data.join_using_columns.contains(short_name); if (auto best_pos = IdentifierSemantic::chooseTable(identifier, data.tables, allow_ambiguous)) { size_t table_pos = *best_pos; @@ -228,7 +228,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTExpressionList & node, const ASTPt { for (const auto & column : *cols) { - if (first_table || !data.join_using_columns.count(column.name)) + if (first_table || !data.join_using_columns.contains(column.name)) { addIdentifier(columns, table.table, column.name); } @@ -256,7 +256,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTExpressionList & node, const ASTPt { for (const auto & column : table.columns) { - if (asterisk_pattern->isColumnMatching(column.name) && (first_table || !data.join_using_columns.count(column.name))) + if (asterisk_pattern->isColumnMatching(column.name) && (first_table || !data.join_using_columns.contains(column.name))) { addIdentifier(columns, table.table, column.name); } diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index 8885db1ad78..badeb3e4ee2 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -111,7 +111,7 @@ void optimizeGroupBy(ASTSelectQuery * select_query, ContextPtr context) if (const auto * function = group_exprs[i]->as()) { /// assert function is injective - if (possibly_injective_function_names.count(function->name)) + if (possibly_injective_function_names.contains(function->name)) { /// do not handle semantic errors here if (function->arguments->children.size() < 2) @@ -233,7 +233,7 @@ void optimizeGroupByFunctionKeys(ASTSelectQuery * select_query) /// filling the result for (const auto & group_key : group_by_keys) - if (group_by_keys_data.key_names.count(group_key->getColumnName())) + if (group_by_keys_data.key_names.contains(group_key->getColumnName())) modified.push_back(group_key); /// modifying 
the input @@ -349,7 +349,7 @@ std::unordered_set getDistinctNames(const ASTSelectQuery & select) { const String & name = identifier->shortName(); - if (select.distinct || implicit_distinct.count(name)) + if (select.distinct || implicit_distinct.contains(name)) { if (alias.empty()) names.insert(name); @@ -392,7 +392,7 @@ void optimizeDuplicateDistinct(ASTSelectQuery & select) return; String name = identifier->shortName(); - if (!distinct_names.count(name)) + if (!distinct_names.contains(name)) return; /// Not a distinct column, keep DISTINCT for it. selected_names.insert(name); diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index d0e54f9f598..ddf6e32177b 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -491,7 +491,7 @@ void removeUnneededColumnsFromSelectClause(ASTSelectQuery * select_query, const { for (auto it = children.begin(); it != children.end();) { - if (remove_columns.count((*it)->as()->column)) + if (remove_columns.contains((*it)->as()->column)) it = select_query->interpolate()->children.erase(it); else ++it; @@ -536,7 +536,7 @@ void getArrayJoinedColumns(ASTPtr & query, TreeRewriterResult & result, const AS String result_name = expr->getAliasOrColumnName(); /// This is an array. - if (!expr->as() || source_columns_set.count(source_name)) + if (!expr->as() || source_columns_set.contains(source_name)) { result.array_join_result_to_source[result_name] = source_name; } @@ -917,10 +917,10 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select for (const auto & joined_column : analyzed_join->columnsFromJoinedTable()) { const auto & name = joined_column.name; - if (available_columns.count(name)) + if (available_columns.contains(name)) continue; - if (required.count(name)) + if (required.contains(name)) { /// Optimisation: do not add columns needed only in JOIN ON section. 
if (columns_context.nameInclusion(name) > analyzed_join->rightKeyInclusion(name)) @@ -939,7 +939,7 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select array_join_sources.insert(result_source.second); for (const auto & column_name_type : source_columns) - if (array_join_sources.count(column_name_type.name)) + if (array_join_sources.contains(column_name_type.name)) required.insert(column_name_type.name); } @@ -1016,7 +1016,7 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select const String & column_name = it->name; unknown_required_source_columns.erase(column_name); - if (!required.count(column_name)) + if (!required.contains(column_name)) it = source_columns.erase(it); else ++it; diff --git a/src/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp index d043fd16bb5..62cfa7f6f4e 100644 --- a/src/Interpreters/addMissingDefaults.cpp +++ b/src/Interpreters/addMissingDefaults.cpp @@ -59,7 +59,7 @@ ActionsDAGPtr addMissingDefaults( String offsets_name = Nested::extractTableName(column.name); const auto * array_type = typeid_cast(column.type.get()); - if (array_type && nested_groups.count(offsets_name)) + if (array_type && nested_groups.contains(offsets_name)) { const auto & nested_type = array_type->getNestedType(); ColumnPtr nested_column = nested_type->createColumnConstWithDefaultValue(0); diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index e7c5095b7fb..6b47db0f0d9 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -376,7 +376,7 @@ std::optional evaluateExpressionOverConstantCondition(const ASTPtr & nod Field prev_value = assert_cast(*prev.column).getField(); Field curr_value = assert_cast(*elem.column).getField(); - if (!always_false_map.count(elem.name)) + if (!always_false_map.contains(elem.name)) { always_false_map[elem.name] = prev_value != 
curr_value; } diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 28bcbcf06dc..271602c29f9 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -47,7 +47,7 @@ bool isSupportedAlterType(int type) ASTAlterCommand::NO_TYPE, }; - return unsupported_alter_types.count(type) == 0; + return !unsupported_alter_types.contains(type); } @@ -449,9 +449,9 @@ Strings DDLQueryStatusSource::getNewAndUpdate(const Strings & current_list_of_fi Strings diff; for (const String & host : current_list_of_finished_hosts) { - if (!waiting_hosts.count(host)) + if (!waiting_hosts.contains(host)) { - if (!ignoring_hosts.count(host)) + if (!ignoring_hosts.contains(host)) { ignoring_hosts.emplace(host); LOG_INFO(log, "Unexpected host {} appeared in task {}", host, node_path); @@ -459,7 +459,7 @@ Strings DDLQueryStatusSource::getNewAndUpdate(const Strings & current_list_of_fi continue; } - if (!finished_hosts.count(host)) + if (!finished_hosts.contains(host)) { diff.emplace_back(host); finished_hosts.emplace(host); diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index 15dd9229194..1bde6fe5a8c 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -45,7 +45,7 @@ void addDefaultRequiredExpressionsRecursively( if (is_column_in_query) convert_null_to_default = null_as_default && block.findByName(required_column_name)->type->isNullable() && !required_column_type->isNullable(); - if ((is_column_in_query && !convert_null_to_default) || added_columns.count(required_column_name)) + if ((is_column_in_query && !convert_null_to_default) || added_columns.contains(required_column_name)) return; auto column_default = columns.getDefault(required_column_name); diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index c5bef6f7205..e4dcabcb470 100644 --- 
a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -160,7 +160,7 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam /// clickhouse-local creates DatabaseMemory as default database by itself /// For clickhouse-server we need create default database bool create_default_db_if_not_exists = !default_database_name.empty(); - bool metadata_dir_for_default_db_already_exists = databases.count(default_database_name); + bool metadata_dir_for_default_db_already_exists = databases.contains(default_database_name); if (create_default_db_if_not_exists && !metadata_dir_for_default_db_already_exists) databases.emplace(default_database_name, std::filesystem::path(path) / escapeForFileName(default_database_name)); diff --git a/src/Parsers/ASTProjectionSelectQuery.cpp b/src/Parsers/ASTProjectionSelectQuery.cpp index 8526c7aef26..7df0b53da6f 100644 --- a/src/Parsers/ASTProjectionSelectQuery.cpp +++ b/src/Parsers/ASTProjectionSelectQuery.cpp @@ -100,7 +100,7 @@ void ASTProjectionSelectQuery::setExpression(Expression expr, ASTPtr && ast) else children[it->second] = ast; } - else if (positions.count(expr)) + else if (positions.contains(expr)) { size_t pos = positions[expr]; children.erase(children.begin() + pos); @@ -113,7 +113,7 @@ void ASTProjectionSelectQuery::setExpression(Expression expr, ASTPtr && ast) ASTPtr & ASTProjectionSelectQuery::getExpression(Expression expr) { - if (!positions.count(expr)) + if (!positions.contains(expr)) throw Exception("Get expression before set", ErrorCodes::LOGICAL_ERROR); return children[positions[expr]]; } diff --git a/src/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp index 048b9dfc38f..43186056077 100644 --- a/src/Parsers/ASTSelectQuery.cpp +++ b/src/Parsers/ASTSelectQuery.cpp @@ -405,7 +405,7 @@ void ASTSelectQuery::setExpression(Expression expr, ASTPtr && ast) else children[it->second] = ast; } - else if (positions.count(expr)) + else if (positions.contains(expr)) { size_t 
pos = positions[expr]; children.erase(children.begin() + pos); @@ -418,7 +418,7 @@ void ASTSelectQuery::setExpression(Expression expr, ASTPtr && ast) ASTPtr & ASTSelectQuery::getExpression(Expression expr) { - if (!positions.count(expr)) + if (!positions.contains(expr)) throw Exception("Get expression before set", ErrorCodes::LOGICAL_ERROR); return children[positions[expr]]; } diff --git a/src/Parsers/ASTTableOverrides.cpp b/src/Parsers/ASTTableOverrides.cpp index 0f34a9fb247..af846a5dd43 100644 --- a/src/Parsers/ASTTableOverrides.cpp +++ b/src/Parsers/ASTTableOverrides.cpp @@ -122,7 +122,7 @@ void ASTTableOverrideList::removeTableOverride(const String & name) bool ASTTableOverrideList::hasOverride(const String & name) const { - return positions.count(name); + return positions.contains(name); } void ASTTableOverrideList::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const diff --git a/src/Parsers/obfuscateQueries.cpp b/src/Parsers/obfuscateQueries.cpp index a4eb5404552..f8697b862c7 100644 --- a/src/Parsers/obfuscateQueries.cpp +++ b/src/Parsers/obfuscateQueries.cpp @@ -637,7 +637,7 @@ void obfuscateIdentifier(std::string_view src, WriteBuffer & result, WordMap & o { std::string_view word(word_begin, src_pos - word_begin); - if (keep_words.count(word)) + if (keep_words.contains(word)) { result.write(word.data(), word.size()); } @@ -889,7 +889,7 @@ void obfuscateQueries( std::string whole_token_uppercase(whole_token); Poco::toUpperInPlace(whole_token_uppercase); - if (keywords.count(whole_token_uppercase) + if (keywords.contains(whole_token_uppercase) || known_identifier_func(whole_token)) { /// Keep keywords as is. 
diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp index 6b3c65dc9d0..8af0fa3d57a 100644 --- a/src/Processors/Executors/ExecutingGraph.cpp +++ b/src/Processors/Executors/ExecutingGraph.cpp @@ -127,7 +127,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) while (nodes.size() < num_processors) { auto * processor = processors[nodes.size()].get(); - if (processors_map.count(processor)) + if (processors_map.contains(processor)) throw Exception("Processor " + processor->getName() + " was already added to pipeline.", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Processors/QueryPlan/DistinctStep.cpp b/src/Processors/QueryPlan/DistinctStep.cpp index 5aeb33fdc7b..16670c53fd5 100644 --- a/src/Processors/QueryPlan/DistinctStep.cpp +++ b/src/Processors/QueryPlan/DistinctStep.cpp @@ -15,7 +15,7 @@ static bool checkColumnsAlreadyDistinct(const Names & columns, const NameSet & d /// Now we need to check that distinct_names is a subset of columns. 
std::unordered_set columns_set(columns.begin(), columns.end()); for (const auto & name : distinct_names) - if (columns_set.count(name) == 0) + if (!columns_set.contains(name)) return false; return true; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index e81cec723a1..9b7eed9f5ee 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -171,7 +171,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes Names allowed_inputs; for (const auto & column : array_join_header) - if (keys.count(column.name) == 0) + if (!keys.contains(column.name)) allowed_inputs.push_back(column.name); // for (const auto & name : allowed_inputs) diff --git a/src/Processors/Transforms/AddingDefaultsTransform.cpp b/src/Processors/Transforms/AddingDefaultsTransform.cpp index 82c235d9034..f55d4d88ae8 100644 --- a/src/Processors/Transforms/AddingDefaultsTransform.cpp +++ b/src/Processors/Transforms/AddingDefaultsTransform.cpp @@ -187,7 +187,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk) { const String & column_name = column_def.name; - if (column_defaults.count(column_name) == 0 || !res.has(column_name)) + if (!column_defaults.contains(column_name) || !res.has(column_name)) continue; size_t block_column_position = res.getPositionByName(column_name); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index c7bcd03ec08..9e5d57a2b43 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -26,7 +26,7 @@ Block FillingTransform::transformHeader(Block header, const SortDescription & so /// Columns which are not from sorting key may not be constant anymore. 
for (auto & column : header) - if (column.column && isColumnConst(*column.column) && !sort_keys.count(column.name)) + if (column.column && isColumnConst(*column.column) && !sort_keys.contains(column.name)) column.column = column.type->createColumn(); return header; @@ -155,7 +155,7 @@ FillingTransform::FillingTransform( std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = sort_description.size(); i < size; ++i) { - if (interpolate_description && interpolate_description->result_columns_set.count(sort_description[i].column_name)) + if (interpolate_description && interpolate_description->result_columns_set.contains(sort_description[i].column_name)) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "Column '{}' is participating in ORDER BY ... WITH FILL expression and can't be INTERPOLATE output", sort_description[i].column_name); @@ -193,7 +193,7 @@ FillingTransform::FillingTransform( p != interpolate_description->required_columns_map.end()) input_positions.emplace_back(idx, p->second); - if (!is_fill_column[idx] && !(interpolate_description && interpolate_description->result_columns_set.count(column.name))) + if (!is_fill_column[idx] && !(interpolate_description && interpolate_description->result_columns_set.contains(column.name))) other_column_positions.push_back(idx); ++idx; diff --git a/src/QueryPipeline/Pipe.cpp b/src/QueryPipeline/Pipe.cpp index 551841524b3..47047da8de1 100644 --- a/src/QueryPipeline/Pipe.cpp +++ b/src/QueryPipeline/Pipe.cpp @@ -192,7 +192,7 @@ Pipe::Pipe(Processors processors_) : processors(std::move(processors_)) " has not connected input port", ErrorCodes::LOGICAL_ERROR); const auto * connected_processor = &port.getOutputPort().getProcessor(); - if (set.count(connected_processor) == 0) + if (!set.contains(connected_processor)) throw Exception("Cannot create Pipe because processor " + processor->getName() + " has input port which is connected with unknown processor " + connected_processor->getName(), 
ErrorCodes::LOGICAL_ERROR); @@ -207,7 +207,7 @@ Pipe::Pipe(Processors processors_) : processors(std::move(processors_)) } const auto * connected_processor = &port.getInputPort().getProcessor(); - if (set.count(connected_processor) == 0) + if (!set.contains(connected_processor)) throw Exception("Cannot create Pipe because processor " + processor->getName() + " has output port which is connected with unknown processor " + connected_processor->getName(), ErrorCodes::LOGICAL_ERROR); @@ -791,7 +791,7 @@ void Pipe::transform(const Transformer & transformer) " has not connected input port", ErrorCodes::LOGICAL_ERROR); const auto * connected_processor = &port.getOutputPort().getProcessor(); - if (set.count(connected_processor) == 0) + if (!set.contains(connected_processor)) throw Exception("Transformation of Pipe is not valid because processor " + processor->getName() + " has input port which is connected with unknown processor " + connected_processor->getName(), ErrorCodes::LOGICAL_ERROR); @@ -806,7 +806,7 @@ void Pipe::transform(const Transformer & transformer) } const auto * connected_processor = &port.getInputPort().getProcessor(); - if (set.count(connected_processor) == 0) + if (!set.contains(connected_processor)) throw Exception("Transformation of Pipe is not valid because processor " + processor->getName() + " has output port which is connected with unknown processor " + connected_processor->getName(), ErrorCodes::LOGICAL_ERROR); diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 8e9ce395f83..4deac854b09 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -669,7 +669,7 @@ void HTTPHandler::processQuery( if (name.empty()) return true; - if (reserved_param_names.count(name)) + if (reserved_param_names.contains(name)) return true; for (const String & suffix : reserved_param_suffixes) @@ -1063,7 +1063,7 @@ PredefinedQueryHandler::PredefinedQueryHandler( bool PredefinedQueryHandler::customizeQueryParam(ContextMutablePtr 
context, const std::string & key, const std::string & value) { - if (receive_params.count(key)) + if (receive_params.contains(key)) { context->setQueryParameter(key, value); return true; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 286f58739f0..28ef92e4bf3 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1070,7 +1070,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, ContextPt continue; } - if (renamed_columns.count(column_name)) + if (renamed_columns.contains(column_name)) throw Exception{"Cannot rename and modify the same column " + backQuote(column_name) + " in a single ALTER query", ErrorCodes::NOT_IMPLEMENTED}; @@ -1232,7 +1232,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, ContextPt throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists", ErrorCodes::DUPLICATE_COLUMN}; - if (modified_columns.count(column_name)) + if (modified_columns.contains(column_name)) throw Exception{"Cannot rename and modify the same column " + backQuote(column_name) + " in a single ALTER query", ErrorCodes::NOT_IMPLEMENTED}; diff --git a/src/Storages/ConstraintsDescription.cpp b/src/Storages/ConstraintsDescription.cpp index 7085c6e14c8..f73a148ad07 100644 --- a/src/Storages/ConstraintsDescription.cpp +++ b/src/Storages/ConstraintsDescription.cpp @@ -114,7 +114,7 @@ std::unique_ptr ConstraintsDescription::buildGraph() const CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; pushNotIn(atom); auto * func = atom.ast->as(); - if (func && relations.count(func->name)) + if (func && relations.contains(func->name)) { assert(!atom.negative); constraints_for_graph.push_back(atom.ast); diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index d833371a742..7c84cb101ab 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ 
b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -1021,7 +1021,7 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map UInt64 file_idx = file.first; const String & file_path = file.second; - if (file_indices_to_skip.count(file_idx)) + if (file_indices_to_skip.contains(file_idx)) continue; size_t total_rows = 0; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 0dcccc33266..bb2855b7da6 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -304,7 +304,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( writePODBinary(hashing_out.getHash(), out); - if (!file_names_without_checksums.count(file_name)) + if (!file_names_without_checksums.contains(file_name)) data_checksums.addFile(file_name, hashing_out.count(), hashing_out.getHash()); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index d704d8fc435..b687823f6e2 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1831,22 +1831,22 @@ void IMergeTreeDataPart::checkConsistencyBase() const const auto & partition_key = metadata_snapshot->getPartitionKey(); if (!checksums.empty()) { - if (!pk.column_names.empty() && !checksums.files.count("primary.idx")) + if (!pk.column_names.empty() && !checksums.files.contains("primary.idx")) throw Exception("No checksum for primary.idx", ErrorCodes::NO_FILE_IN_DATA_PART); if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - if (!checksums.files.count("count.txt")) + if (!checksums.files.contains("count.txt")) throw Exception("No checksum for count.txt", ErrorCodes::NO_FILE_IN_DATA_PART); - if (metadata_snapshot->hasPartitionKey() && !checksums.files.count("partition.dat")) + if (metadata_snapshot->hasPartitionKey() && !checksums.files.contains("partition.dat")) 
throw Exception("No checksum for partition.dat", ErrorCodes::NO_FILE_IN_DATA_PART); if (!isEmpty() && !parent_part) { for (const String & col_name : storage.getMinMaxColumnsNames(partition_key)) { - if (!checksums.files.count("minmax_" + escapeForFileName(col_name) + ".idx")) + if (!checksums.files.contains("minmax_" + escapeForFileName(col_name) + ".idx")) throw Exception("No minmax idx file checksum for column " + col_name, ErrorCodes::NO_FILE_IN_DATA_PART); } } @@ -1980,26 +1980,26 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada for (const auto & [column, desc] : metadata_snapshot->getColumnTTLs()) { /// Part has this column, but we don't calculated TTL for it - if (!ttl_infos.columns_ttl.count(column) && getColumns().contains(column)) + if (!ttl_infos.columns_ttl.contains(column) && getColumns().contains(column)) return false; } for (const auto & move_desc : metadata_snapshot->getMoveTTLs()) { /// Move TTL is not calculated - if (!ttl_infos.moves_ttl.count(move_desc.result_column)) + if (!ttl_infos.moves_ttl.contains(move_desc.result_column)) return false; } for (const auto & group_by_desc : metadata_snapshot->getGroupByTTLs()) { - if (!ttl_infos.group_by_ttl.count(group_by_desc.result_column)) + if (!ttl_infos.group_by_ttl.contains(group_by_desc.result_column)) return false; } for (const auto & rows_where_desc : metadata_snapshot->getRowsWhereTTLs()) { - if (!ttl_infos.rows_where_ttl.count(rows_where_desc.result_column)) + if (!ttl_infos.rows_where_ttl.contains(rows_where_desc.result_column)) return false; } @@ -2047,7 +2047,7 @@ IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const St String file_name = std::filesystem::path(file_path).filename(); const auto filenames_without_checksums = getFileNamesWithoutChecksums(); auto it = checksums.files.find(file_name); - if (filenames_without_checksums.count(file_name) == 0 && it != checksums.files.end()) + if 
(!filenames_without_checksums.contains(file_name) && it != checksums.files.end()) { return it->second.file_hash; } diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index b4a902499db..2f40c5cb017 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -78,7 +78,7 @@ NameSet IMergedBlockOutputStream::removeEmptyColumnsFromPart( /// Remove files on disk and checksums for (const String & removed_file : remove_files) { - if (checksums.files.count(removed_file)) + if (checksums.files.contains(removed_file)) checksums.files.erase(removed_file); } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 29a7f783e8c..f6baae723c9 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -449,7 +449,7 @@ KeyCondition::KeyCondition( for (size_t i = 0, size = key_column_names.size(); i < size; ++i) { const auto & name = key_column_names[i]; - if (!key_columns.count(name)) + if (!key_columns.contains(name)) key_columns[name] = i; } @@ -487,7 +487,7 @@ KeyCondition::KeyCondition( bool KeyCondition::addCondition(const String & column, const Range & range) { - if (!key_columns.count(column)) + if (!key_columns.contains(column)) return false; rpn.emplace_back(RPNElement::FUNCTION_IN_RANGE, key_columns[column], range); rpn.emplace_back(RPNElement::FUNCTION_AND); @@ -776,10 +776,10 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( { String expr_name = node->getColumnNameWithoutAlias(); - if (array_joined_columns.count(expr_name)) + if (array_joined_columns.contains(expr_name)) return false; - if (key_subexpr_names.count(expr_name) == 0) + if (!key_subexpr_names.contains(expr_name)) return false; if (out_value.isNull()) @@ -807,10 +807,10 @@ bool KeyCondition::canConstantBeWrappedByFunctions( { String expr_name = ast->getColumnNameWithoutAlias(); - 
if (array_joined_columns.count(expr_name)) + if (array_joined_columns.contains(expr_name)) return false; - if (key_subexpr_names.count(expr_name) == 0) + if (!key_subexpr_names.contains(expr_name)) { /// Let's check another one case. /// If our storage was created with moduloLegacy in partition key, @@ -825,7 +825,7 @@ bool KeyCondition::canConstantBeWrappedByFunctions( KeyDescription::moduloToModuloLegacyRecursive(adjusted_ast); expr_name = adjusted_ast->getColumnName(); - if (key_subexpr_names.count(expr_name) == 0) + if (!key_subexpr_names.contains(expr_name)) return false; } @@ -1080,7 +1080,7 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl( // Key columns should use canonical names for index analysis String name = node->getColumnNameWithoutAlias(); - if (array_joined_columns.count(name)) + if (array_joined_columns.contains(name)) return false; auto it = key_columns.find(name); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index e3146f169a7..08573759524 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -75,7 +75,7 @@ static void extractMergingAndGatheringColumns( for (const auto & column : storage_columns) { - if (key_columns.count(column.name)) + if (key_columns.contains(column.name)) { merging_columns.emplace_back(column); merging_column_names.emplace_back(column.name); diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 97516ea06c6..e300ceeb4fc 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -52,7 +52,7 @@ bool injectRequiredColumnsRecursively( if (part->hasColumnFiles(column_in_part)) { /// ensure each column is added only once - if (required_columns.count(column_name) == 0) + if (!required_columns.contains(column_name)) { columns.emplace_back(column_name); required_columns.emplace(column_name); @@ -162,7 
+162,7 @@ void MergeTreeBlockSizePredictor::initialize(const Block & sample_block, const C const ColumnPtr & column_data = from_update ? columns[pos] : column_with_type_and_name.column; - if (!from_update && !names_set.count(column_name)) + if (!from_update && !names_set.contains(column_name)) continue; /// At least PREWHERE filter column might be const. @@ -286,7 +286,7 @@ MergeTreeReadTaskColumns getReadTaskColumns( for (auto & name : prewhere_info->row_level_filter->getRequiredColumnsNames()) { - if (names.count(name) == 0) + if (!names.contains(name)) pre_column_names.push_back(name); } } @@ -303,7 +303,7 @@ MergeTreeReadTaskColumns getReadTaskColumns( Names post_column_names; for (const auto & name : column_names) - if (!pre_name_set.count(name)) + if (!pre_name_set.contains(name)) post_column_names.push_back(name); column_names = post_column_names; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 29b3083c38f..33064339e03 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -476,7 +476,7 @@ void MergeTreeData::checkProperties( "added to the sorting key. 
You can add expressions that use only the newly added columns", ErrorCodes::BAD_ARGUMENTS); - if (new_metadata.columns.getDefaults().count(col)) + if (new_metadata.columns.getDefaults().contains(col)) throw Exception("Newly added column " + backQuoteIfNeed(col) + " has a default expression, so adding " "expressions that use it to the sorting key is forbidden", ErrorCodes::BAD_ARGUMENTS); @@ -655,7 +655,7 @@ void MergeTreeData::checkTTLExpressions(const StorageInMemoryMetadata & new_meta for (const auto & [name, ttl_description] : new_column_ttls) { - if (columns_ttl_forbidden.count(name)) + if (columns_ttl_forbidden.contains(name)) throw Exception("Trying to set TTL for key column " + name, ErrorCodes::ILLEGAL_COLUMN); } } @@ -1219,7 +1219,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) if (disk->isBroken()) continue; - if (defined_disk_names.count(disk_name) == 0 && disk->exists(relative_data_path)) + if (!defined_disk_names.contains(disk_name) && disk->exists(relative_data_path)) { for (const auto it = disk->iterateDirectory(relative_data_path); it->isValid(); it->next()) { @@ -2187,7 +2187,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context } if (command.type == AlterCommand::RENAME_COLUMN) { - if (columns_in_keys.count(command.column_name)) + if (columns_in_keys.contains(command.column_name)) { throw Exception( "Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression", @@ -2196,7 +2196,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context } else if (command.type == AlterCommand::DROP_COLUMN) { - if (columns_in_keys.count(command.column_name)) + if (columns_in_keys.contains(command.column_name)) { throw Exception( "Trying to ALTER DROP key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression", @@ -2229,13 +2229,13 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, 
Context { /// This alter will override data on disk. Let's check that it doesn't /// modify immutable column. - if (columns_alter_type_forbidden.count(command.column_name)) + if (columns_alter_type_forbidden.contains(command.column_name)) throw Exception("ALTER of key column " + backQuoteIfNeed(command.column_name) + " is forbidden", ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN); if (command.type == AlterCommand::MODIFY_COLUMN) { - if (columns_alter_type_check_safe_for_partition.count(command.column_name)) + if (columns_alter_type_check_safe_for_partition.contains(command.column_name)) { auto it = old_types.find(command.column_name); @@ -2247,7 +2247,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN); } - if (columns_alter_type_metadata_only.count(command.column_name)) + if (columns_alter_type_metadata_only.contains(command.column_name)) { auto it = old_types.find(command.column_name); assert(it != old_types.end()); @@ -2337,7 +2337,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context bool at_least_one_column_rest = false; for (const auto & column : part->getColumns()) { - if (!dropped_columns.count(column.name)) + if (!dropped_columns.contains(column.name)) { at_least_one_column_rest = true; break; @@ -6103,7 +6103,7 @@ MergeTreeData::CurrentlyMovingPartsTagger::~CurrentlyMovingPartsTagger() for (const auto & moving_part : parts_to_move) { /// Something went completely wrong - if (!data.currently_moving_parts.count(moving_part.part)) + if (!data.currently_moving_parts.contains(moving_part.part)) std::terminate(); data.currently_moving_parts.erase(moving_part.part); } @@ -6159,7 +6159,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::selectPartsForMove() *reason = "part already assigned to background operation."; return false; } - if (currently_moving_parts.count(part)) + if (currently_moving_parts.contains(part)) { *reason = "part is already 
moving."; return false; @@ -6192,7 +6192,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::checkPartsForMove(co "Move is not possible: " + fullPath(reserved_disk, relative_data_path + part->name) + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); - if (currently_moving_parts.count(part) || partIsAssignedToBackgroundOperation(part)) + if (currently_moving_parts.contains(part) || partIsAssignedToBackgroundOperation(part)) throw Exception( "Cannot move part '" + part->name + "' because it's participating in background process", ErrorCodes::PART_IS_TEMPORARILY_LOCKED); @@ -6561,7 +6561,7 @@ ReservationPtr MergeTreeData::balancedReservation( for (const auto & part : covered_parts) { - if (currently_submerging_big_parts.count(part)) + if (currently_submerging_big_parts.contains(part)) LOG_WARNING(log, "currently_submerging_big_parts contains duplicates. JBOD might lose balance"); else currently_submerging_big_parts.insert(part); @@ -6632,7 +6632,7 @@ CurrentlySubmergingEmergingTagger::~CurrentlySubmergingEmergingTagger() for (const auto & part : submerging_parts) { - if (!storage.currently_submerging_big_parts.count(part)) + if (!storage.currently_submerging_big_parts.contains(part)) { LOG_ERROR(log, "currently_submerging_big_parts doesn't contain part {} to erase. 
This is a bug", part->name); assert(false); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 31d52cfa8ff..23e4e91a74d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -693,7 +693,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands( /// from disk we just don't read dropped columns for (const auto & column : part->getColumns()) { - if (!mutated_columns.count(column.name)) + if (!mutated_columns.contains(column.name)) for_interpreter.emplace_back( MutationCommand{.type = MutationCommand::Type::READ_COLUMN, .column_name = column.name, .data_type = column.type}); } @@ -771,7 +771,7 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart( SerializationInfoByName new_serialization_infos; for (const auto & [name, info] : serialization_infos) { - if (removed_columns.count(name)) + if (removed_columns.contains(name)) continue; auto it = renamed_columns_from_to.find(name); @@ -799,13 +799,13 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart( } else { - if (!source_columns_name_set.count(it->name)) + if (!source_columns_name_set.contains(it->name)) { /// Source part doesn't have column but some other column /// was renamed to it's name. 
auto renamed_it = renamed_columns_to_from.find(it->name); if (renamed_it != renamed_columns_to_from.end() - && source_columns_name_set.count(renamed_it->second)) + && source_columns_name_set.contains(renamed_it->second)) ++it; else it = storage_columns.erase(it); @@ -813,19 +813,19 @@ MergeTreeDataMergerMutator::getColumnsForNewDataPart( else { /// Check that this column was renamed to some other name - bool was_renamed = renamed_columns_from_to.count(it->name); - bool was_removed = removed_columns.count(it->name); + bool was_renamed = renamed_columns_from_to.contains(it->name); + bool was_removed = removed_columns.contains(it->name); /// If we want to rename this column to some other name, than it /// should it's previous version should be dropped or removed - if (renamed_columns_to_from.count(it->name) && !was_renamed && !was_removed) + if (renamed_columns_to_from.contains(it->name) && !was_renamed && !was_removed) throw Exception( ErrorCodes::LOGICAL_ERROR, "Incorrect mutation commands, trying to rename column {} to {}, but part {} already has column {}", renamed_columns_to_from[it->name], it->name, source_part->name, it->name); /// Column was renamed and no other column renamed to it's name /// or column is dropped. 
- if (!renamed_columns_to_from.count(it->name) && (was_renamed || was_removed)) + if (!renamed_columns_to_from.contains(it->name) && (was_renamed || was_removed)) it = storage_columns.erase(it); else ++it; diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp index 737e89979a6..81f884ef45a 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp @@ -63,7 +63,7 @@ void MergeTreeDataPartChecksums::checkEqual(const MergeTreeDataPartChecksums & r { const String & name = it.first; - if (!files.count(name)) + if (!files.contains(name)) throw Exception("Unexpected file " + name + " in data part", ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index f4da730b1f0..14713541d3f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -143,14 +143,14 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons if (!checksums.empty()) { /// count.txt should be present even in non custom-partitioned parts - if (!checksums.files.count("count.txt")) + if (!checksums.files.contains("count.txt")) throw Exception("No checksum for count.txt", ErrorCodes::NO_FILE_IN_DATA_PART); if (require_part_metadata) { - if (!checksums.files.count(mrk_file_name)) + if (!checksums.files.contains(mrk_file_name)) throw Exception("No marks file checksum for column in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART); - if (!checksums.files.count(DATA_FILE_NAME_WITH_EXTENSION)) + if (!checksums.files.contains(DATA_FILE_NAME_WITH_EXTENSION)) throw Exception("No data file checksum for in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART); } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp 
b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index b279c1aba6a..c8ddf3c8e2f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -167,10 +167,10 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const String file_name = ISerialization::getFileNameForStream(name_type, substream_path); String mrk_file_name = file_name + index_granularity_info.marks_file_extension; String bin_file_name = file_name + DATA_FILE_EXTENSION; - if (!checksums.files.count(mrk_file_name)) + if (!checksums.files.contains(mrk_file_name)) throw Exception("No " + mrk_file_name + " file checksum for column " + name_type.name + " in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART); - if (!checksums.files.count(bin_file_name)) + if (!checksums.files.contains(bin_file_name)) throw Exception("No " + bin_file_name + " file checksum for column " + name_type.name + " in part " + fullPath(volume->getDisk(), path), ErrorCodes::NO_FILE_IN_DATA_PART); }); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 6caff7c683f..24b6ed4d50f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -47,7 +47,7 @@ void MergeTreeDataPartWriterCompact::addStreams(const NameAndTypePair & column, String stream_name = ISerialization::getFileNameForStream(column, substream_path); /// Shared offsets for Nested type. 
- if (compressed_streams.count(stream_name)) + if (compressed_streams.contains(stream_name)) return; const auto & subtype = substream_path.back().data.type; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 933814d27ba..faffbec9174 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -97,7 +97,7 @@ void MergeTreeDataPartWriterWide::addStreams( String stream_name = ISerialization::getFileNameForStream(column, substream_path); /// Shared offsets for Nested type. - if (column_streams.count(stream_name)) + if (column_streams.contains(stream_name)) return; const auto & subtype = substream_path.back().data.type; @@ -134,7 +134,7 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett String stream_name = ISerialization::getFileNameForStream(column, substream_path); /// Don't write offsets more than one time for Nested type. - if (is_offsets && offset_columns.count(stream_name)) + if (is_offsets && offset_columns.contains(stream_name)) return nullptr; return &column_streams.at(stream_name)->compressed; @@ -284,7 +284,7 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( String stream_name = ISerialization::getFileNameForStream(column, substream_path); /// Don't write offsets more than one time for Nested type. - if (is_offsets && offset_columns.count(stream_name)) + if (is_offsets && offset_columns.contains(stream_name)) return; Stream & stream = *column_streams[stream_name]; @@ -323,7 +323,7 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( String stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); /// Don't write offsets more than one time for Nested type. 
- if (is_offsets && offset_columns.count(stream_name)) + if (is_offsets && offset_columns.contains(stream_name)) return; column_streams[stream_name]->compressed.nextIfAtEnd(); @@ -363,7 +363,7 @@ void MergeTreeDataPartWriterWide::writeColumn( if (granule.mark_on_start) { - if (last_non_written_marks.count(name)) + if (last_non_written_marks.contains(name)) throw Exception(ErrorCodes::LOGICAL_ERROR, "We have to add new mark for column, but already have non written mark. Current mark {}, total marks {}, offset {}", getCurrentMark(), index_granularity.getMarksCount(), rows_written_in_last_mark); last_non_written_marks[name] = getCurrentMarksForColumn(name_and_type, offset_columns, serialize_settings.path); } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index a6c78d3107d..b939f7f269f 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -862,7 +862,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd for (const auto & index_name : forced_indices) { - if (!useful_indices_names.count(index_name)) + if (!useful_indices_names.contains(index_name)) { throw Exception( ErrorCodes::INDEX_NOT_USED, diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp index 84195eb71b2..3a145c0f505 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp @@ -65,7 +65,7 @@ void MergeTreeIndexhypothesisMergedCondition::addIndex(const MergeTreeIndexPtr & assert(!atom.negative); const auto * func = atom.ast->as(); - if (func && relations.count(func->name)) + if (func && relations.contains(func->name)) compare_hypotheses_data.push_back(atom.ast); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp 
b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 6a5151d0d2b..541e6257565 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -250,7 +250,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( , index_sample_block(index_sample_block_) { for (const auto & name : index_sample_block.getNames()) - if (!key_columns.count(name)) + if (!key_columns.contains(name)) key_columns.insert(name); const auto & select = query.query->as(); @@ -356,11 +356,11 @@ bool MergeTreeIndexConditionSet::atomFromAST(ASTPtr & node) const return true; if (const auto * identifier = node->as()) - return key_columns.count(identifier->getColumnName()) != 0; + return key_columns.contains(identifier->getColumnName()); if (auto * func = node->as()) { - if (key_columns.count(func->getColumnName())) + if (key_columns.contains(func->getColumnName())) { /// Function is already calculated. node = std::make_shared(func->getColumnName()); @@ -446,7 +446,7 @@ bool MergeTreeIndexConditionSet::checkASTUseless(const ASTPtr & node, bool atomi if (const auto * func = node->as()) { - if (key_columns.count(func->getColumnName())) + if (key_columns.contains(func->getColumnName())) return false; const ASTs & args = func->arguments->children; diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 7d7975e0bc0..5ad1ebea455 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -169,10 +169,10 @@ void MergeTreeReaderWide::addStreams(const NameAndTypePair & name_and_type, { String stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); - if (streams.count(stream_name)) + if (streams.contains(stream_name)) return; - bool data_file_exists = data_part->checksums.files.count(stream_name + DATA_FILE_EXTENSION); + bool data_file_exists = data_part->checksums.files.contains(stream_name + DATA_FILE_EXTENSION); /** If 
data file is missing then we will not try to open it. * It is necessary since it allows to add new column to structure of the table without creating new files for old parts. @@ -202,7 +202,7 @@ static ReadBuffer * getStream( ISerialization::SubstreamsCache & cache) { /// If substream have already been read. - if (cache.count(ISerialization::getSubcolumnNameForStream(substream_path))) + if (cache.contains(ISerialization::getSubcolumnNameForStream(substream_path))) return nullptr; String stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); @@ -229,7 +229,7 @@ void MergeTreeReaderWide::deserializePrefix( ISerialization::SubstreamsCache & cache) { const auto & name = name_and_type.name; - if (deserialize_binary_bulk_state_map.count(name) == 0) + if (!deserialize_binary_bulk_state_map.contains(name)) { ISerialization::DeserializeBinaryBulkSettings deserialize_settings; deserialize_settings.getter = [&](const ISerialization::SubstreamPath & substream_path) @@ -255,7 +255,7 @@ void MergeTreeReaderWide::prefetch( { String stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); - if (!prefetched_streams.count(stream_name)) + if (!prefetched_streams.contains(stream_name)) { bool seek_to_mark = !continue_reading; if (ReadBuffer * buf = getStream(false, substream_path, streams, name_and_type, from_mark, seek_to_mark, current_task_last_mark, cache)) diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 737e0c9d4b7..de99193e4d3 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -314,7 +314,7 @@ UInt64 MergeTreeWhereOptimizer::getIdentifiersColumnSize(const NameSet & identif UInt64 size = 0; for (const auto & identifier : identifiers) - if (column_sizes.count(identifier)) + if (column_sizes.contains(identifier)) size += column_sizes.at(identifier); return size; @@ -345,7 +345,7 @@ 
bool MergeTreeWhereOptimizer::isPrimaryKeyAtom(const ASTPtr & ast) const { if (const auto * func = ast->as()) { - if (!KeyCondition::atom_map.count(func->name)) + if (!KeyCondition::atom_map.contains(func->name)) return false; const auto & args = func->arguments->children; @@ -367,7 +367,7 @@ bool MergeTreeWhereOptimizer::isPrimaryKeyAtom(const ASTPtr & ast) const bool MergeTreeWhereOptimizer::isSortingKey(const String & column_name) const { - return sorting_key_names.count(column_name); + return sorting_key_names.contains(column_name); } @@ -383,7 +383,7 @@ bool MergeTreeWhereOptimizer::isConstant(const ASTPtr & expr) const bool MergeTreeWhereOptimizer::isSubsetOfTableColumns(const NameSet & identifiers) const { for (const auto & identifier : identifiers) - if (table_columns.count(identifier) == 0) + if (!table_columns.contains(identifier)) return false; return true; @@ -411,8 +411,8 @@ bool MergeTreeWhereOptimizer::cannotBeMoved(const ASTPtr & ptr, bool is_final) c else if (auto opt_name = IdentifierSemantic::getColumnName(ptr)) { /// disallow moving result of ARRAY JOIN to PREWHERE - if (array_joined_names.count(*opt_name) || - array_joined_names.count(Nested::extractTableName(*opt_name)) || + if (array_joined_names.contains(*opt_name) || + array_joined_names.contains(Nested::extractTableName(*opt_name)) || (is_final && !isSortingKey(*opt_name))) return true; } diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 4fb993bfcc7..f0d241baa2f 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -85,7 +85,7 @@ MergedColumnOnlyOutputStream::fillChecksums( if (disk->exists(file_path)) disk->removeFile(file_path); - if (all_checksums.files.count(removed_file)) + if (all_checksums.files.contains(removed_file)) all_checksums.files.erase(removed_file); } diff --git a/src/Storages/MergeTree/MutateTask.cpp 
b/src/Storages/MergeTree/MutateTask.cpp index b9bebc665b2..7fb77ac12f5 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -102,7 +102,7 @@ static void splitMutationCommands( /// from disk we just don't read dropped columns for (const auto & column : part->getColumns()) { - if (!mutated_columns.count(column.name)) + if (!mutated_columns.contains(column.name)) for_interpreter.emplace_back( MutationCommand{.type = MutationCommand::Type::READ_COLUMN, .column_name = column.name, .data_type = column.type}); } @@ -159,7 +159,7 @@ static MergeTreeIndices getIndicesForNewDataPart( MergeTreeIndices new_indices; for (const auto & index : all_indices) - if (!removed_indices.count(index.name)) + if (!removed_indices.contains(index.name)) new_indices.push_back(MergeTreeIndexFactory::instance().get(index)); return new_indices; @@ -176,7 +176,7 @@ static std::vector getProjectionsForNewDataPart( std::vector new_projections; for (const auto & projection : all_projections) - if (!removed_projections.count(projection.name)) + if (!removed_projections.contains(projection.name)) new_projections.push_back(&projection); return new_projections; @@ -207,7 +207,7 @@ static std::set getIndicesToRecalculate( source_part->checksums.has(INDEX_FILE_PREFIX + index.name + ".idx") || source_part->checksums.has(INDEX_FILE_PREFIX + index.name + ".idx2"); // If we ask to materialize and it already exists - if (!has_index && materialized_indices.count(index.name)) + if (!has_index && materialized_indices.contains(index.name)) { if (indices_to_recalc.insert(index_factory.get(index)).second) { @@ -223,7 +223,7 @@ static std::set getIndicesToRecalculate( const auto & index_cols = index.expression->getRequiredColumns(); for (const auto & col : index_cols) { - if (updated_columns.count(col)) + if (updated_columns.contains(col)) { mutate = true; break; @@ -270,7 +270,7 @@ std::set getProjectionsToRecalculate( for (const auto & projection : 
metadata_snapshot->getProjections()) { // If we ask to materialize and it doesn't exist - if (!source_part->checksums.has(projection.name + ".proj") && materialized_projections.count(projection.name)) + if (!source_part->checksums.has(projection.name + ".proj") && materialized_projections.contains(projection.name)) { projections_to_recalc.insert(&projection); } @@ -281,7 +281,7 @@ std::set getProjectionsToRecalculate( const auto & projection_cols = projection.required_columns; for (const auto & col : projection_cols) { - if (updated_columns.count(col)) + if (updated_columns.contains(col)) { mutate = true; break; @@ -1074,7 +1074,7 @@ private: /// Create hardlinks for unchanged files for (auto it = ctx->disk->iterateDirectory(ctx->source_part->getFullRelativePath()); it->isValid(); it->next()) { - if (ctx->files_to_skip.count(it->name())) + if (ctx->files_to_skip.contains(it->name())) continue; String destination = ctx->new_part_tmp_path; @@ -1164,11 +1164,11 @@ private: for (const auto & [rename_from, rename_to] : ctx->files_to_rename) { - if (rename_to.empty() && ctx->new_data_part->checksums.files.count(rename_from)) + if (rename_to.empty() && ctx->new_data_part->checksums.files.contains(rename_from)) { ctx->new_data_part->checksums.files.erase(rename_from); } - else if (ctx->new_data_part->checksums.files.count(rename_from)) + else if (ctx->new_data_part->checksums.files.contains(rename_from)) { ctx->new_data_part->checksums.files[rename_to] = ctx->new_data_part->checksums.files[rename_from]; ctx->new_data_part->checksums.files.erase(rename_from); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp index db4fe34e702..6e57fe55878 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp @@ -16,7 +16,7 @@ void ReplicatedMergeTreeAltersSequence::addMutationForAlter(int alter_version, s { /// Metadata 
alter can be added before, or /// maybe already finished if we startup after metadata alter was finished. - if (!queue_state.count(alter_version)) + if (!queue_state.contains(alter_version)) queue_state.emplace(alter_version, AlterState{.metadata_finished=true, .data_finished=false}); else queue_state[alter_version].data_finished = false; @@ -27,7 +27,7 @@ void ReplicatedMergeTreeAltersSequence::addMetadataAlter( { /// Data alter (mutation) always added before. See ReplicatedMergeTreeQueue::pullLogsToQueue. /// So mutation already added to this sequence or doesn't exist. - if (!queue_state.count(alter_version)) + if (!queue_state.contains(alter_version)) queue_state.emplace(alter_version, AlterState{.metadata_finished=false, .data_finished=true}); else queue_state[alter_version].metadata_finished = false; @@ -57,7 +57,7 @@ void ReplicatedMergeTreeAltersSequence::finishDataAlter(int alter_version, std:: if (alter_version >= queue_state.begin()->first) { /// All alter versions bigger than head must present in queue. 
- assert(queue_state.count(alter_version)); + assert(queue_state.contains(alter_version)); if (queue_state[alter_version].metadata_finished) queue_state.erase(alter_version); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 3b6c727cd02..709e6f77bd7 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -404,7 +404,7 @@ void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(zkutil::ZooKeeper & NameSet blocks_set(blocks.begin(), blocks.end()); for (auto it = cached_block_stats.begin(); it != cached_block_stats.end();) { - if (!blocks_set.count(it->first)) + if (!blocks_set.contains(it->first)) it = cached_block_stats.erase(it); else ++it; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 4d6bb9c7def..7733562ca7d 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -59,7 +59,7 @@ void ReplicatedMergeTreePartCheckThread::enqueuePart(const String & name, time_t { std::lock_guard lock(parts_mutex); - if (parts_set.count(name)) + if (parts_set.contains(name)) return; parts_queue.emplace_back(name, time(nullptr) + delay_to_check_seconds); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 35168fc3f49..b4c037f8a99 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -775,7 +775,7 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C for (auto it = mutations_by_znode.begin(); it != mutations_by_znode.end();) { const ReplicatedMergeTreeMutationEntry & entry = *it->second.entry; - if (!entries_in_zk_set.count(entry.znode_name)) + if 
(!entries_in_zk_set.contains(entry.znode_name)) { if (!it->second.is_done) { @@ -806,7 +806,7 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C for (const String & znode : entries_in_zk_set) { - if (!mutations_by_znode.count(znode)) + if (!mutations_by_znode.contains(znode)) entries_to_load.push_back(znode); } } @@ -971,7 +971,7 @@ ReplicatedMergeTreeQueue::StringSet ReplicatedMergeTreeQueue::moveSiblingPartsFo t == LogEntry::GET_PART || t == LogEntry::ATTACH_PART || t == LogEntry::MUTATE_PART) - && parts_for_merge.count((*it0)->new_part_name)) + && parts_for_merge.contains((*it0)->new_part_name)) { queue.splice(queue.end(), queue, it0, it); } @@ -1186,7 +1186,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( size_t sum_parts_size_in_bytes = 0; for (const auto & name : entry.source_parts) { - if (future_parts.count(name)) + if (future_parts.contains(name)) { out_postpone_reason = fmt::format( "Not executing log entry {} of type {} for part {} " @@ -1959,7 +1959,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( for (auto & block : block_infos) { Coordination::GetResponse resp = block.contents_future.get(); - if (resp.error == Coordination::Error::ZOK && lock_holder_paths.count(resp.data)) + if (resp.error == Coordination::Error::ZOK && lock_holder_paths.contains(resp.data)) committing_blocks[block.partition].insert(block.number); } } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index dc52660f1f6..8663b7b6999 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -266,7 +266,7 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart() { ReplicatedMergeTreeQuorumEntry quorum_entry(quorum_str); - if (!quorum_entry.replicas.count(storage.replica_name) + if 
(!quorum_entry.replicas.contains(storage.replica_name) && storage.getActiveContainingPart(quorum_entry.part_name)) { LOG_WARNING(log, "We have part {} but we is not in quorum. Updating quorum. This shouldn't happen often.", quorum_entry.part_name); @@ -283,7 +283,7 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart() if (zookeeper->tryGet(fs::path(parallel_quorum_parts_path) / part_name, quorum_str)) { ReplicatedMergeTreeQuorumEntry quorum_entry(quorum_str); - if (!quorum_entry.replicas.count(storage.replica_name) + if (!quorum_entry.replicas.contains(storage.replica_name) && storage.getActiveContainingPart(part_name)) { LOG_WARNING(log, "We have part {} but we is not in quorum. Updating quorum. This shouldn't happen often.", part_name); diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 522dffe502d..48e578ea9db 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -175,7 +175,7 @@ IMergeTreeDataPart::Checksums checkDataPart( auto projection_checksum_it = projection_checksums_data.files.find(projection_file_name); /// Skip files that we already calculated. Also skip metadata files that are not checksummed. - if (projection_checksum_it == projection_checksums_data.files.end() && !files_without_checksums.count(projection_file_name)) + if (projection_checksum_it == projection_checksums_data.files.end() && !files_without_checksums.contains(projection_file_name)) { auto projection_txt_checksum_it = projection_checksum_files_txt.find(file_name); if (projection_txt_checksum_it == projection_checksum_files_txt.end() @@ -251,7 +251,7 @@ IMergeTreeDataPart::Checksums checkDataPart( auto checksum_it = checksums_data.files.find(file_name); /// Skip files that we already calculated. Also skip metadata files that are not checksummed. 
- if (checksum_it == checksums_data.files.end() && !files_without_checksums.count(file_name)) + if (checksum_it == checksums_data.files.end() && !files_without_checksums.contains(file_name)) { auto txt_checksum_it = checksum_files_txt.find(file_name); if (txt_checksum_it == checksum_files_txt.end() || txt_checksum_it->second.uncompressed_size == 0) diff --git a/src/Storages/ProjectionsDescription.cpp b/src/Storages/ProjectionsDescription.cpp index 69d7c5f8ed6..6d5efc91bf4 100644 --- a/src/Storages/ProjectionsDescription.cpp +++ b/src/Storages/ProjectionsDescription.cpp @@ -328,7 +328,7 @@ ProjectionsDescription ProjectionsDescription::parse(const String & str, const C bool ProjectionsDescription::has(const String & projection_name) const { - return map.count(projection_name) > 0; + return map.contains(projection_name); } const ProjectionDescription & ProjectionsDescription::get(const String & projection_name) const diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index a03ccb5cf43..97e0a8b9bdf 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -43,7 +43,7 @@ Pipe readFinalFromNestedStorage( String filter_column_name; Names require_columns_name = column_names; ASTPtr expressions = std::make_shared(); - if (column_names_set.empty() || !column_names_set.count(sign_column.name)) + if (column_names_set.empty() || !column_names_set.contains(sign_column.name)) { require_columns_name.emplace_back(sign_column.name); diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index 9e46595e0fc..3ff4baa0b11 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -193,7 +193,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrderImpl( for (size_t i = 0; i < descr_prefix_size; ++i) { - if (forbidden_columns.count(description[i].column_name)) + if 
(forbidden_columns.contains(description[i].column_name)) break; int current_direction = matchSortDescriptionAndKey( diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 15a761a5b84..66dcc938aef 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -239,7 +239,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet auto required_columns = expression->getRequiredColumns(); for (const auto & dependency : required_columns) { - if (updated_columns.count(dependency)) + if (updated_columns.contains(dependency)) { to_set.insert(required_columns.begin(), required_columns.end()); return true; @@ -605,7 +605,7 @@ void StorageInMemoryMetadata::check(const Block & block, bool need_all) const for (const auto & column : block) { - if (names_in_block.count(column.name)) + if (names_in_block.contains(column.name)) throw Exception("Duplicate column " + column.name + " in block", ErrorCodes::DUPLICATE_COLUMN); names_in_block.insert(column.name); @@ -634,7 +634,7 @@ void StorageInMemoryMetadata::check(const Block & block, bool need_all) const { for (const auto & available_column : available_columns) { - if (!names_in_block.count(available_column.name)) + if (!names_in_block.contains(available_column.name)) throw Exception("Expected column " + available_column.name, ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); } } diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index 11116780734..bb14ee4cdff 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -205,7 +205,7 @@ void LogSource::readData(const NameAndTypePair & name_and_type, ColumnPtr & colu { return [&, stream_for_prefix] (const ISerialization::SubstreamPath & path) -> ReadBuffer * //-V1047 { - if (cache.count(ISerialization::getSubcolumnNameForStream(path))) + if (cache.contains(ISerialization::getSubcolumnNameForStream(path))) return nullptr; String data_file_name = 
ISerialization::getFileNameForStream(name_and_type, path); @@ -223,7 +223,7 @@ void LogSource::readData(const NameAndTypePair & name_and_type, ColumnPtr & colu }; }; - if (deserialize_states.count(name) == 0) + if (!deserialize_states.contains(name)) { settings.getter = create_stream_getter(true); serialization->deserializeBinaryBulkStatePrefix(settings, deserialize_states[name]); @@ -459,7 +459,7 @@ void LogSink::writeData(const NameAndTypePair & name_and_type, const IColumn & c settings.getter = createStreamGetter(name_and_type); - if (serialize_states.count(name) == 0) + if (!serialize_states.contains(name)) serialization->serializeBinaryBulkStatePrefix(settings, serialize_states[name]); if (storage.use_marks_file) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index a2f25a9db7e..f17f66492dd 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -636,7 +636,7 @@ DatabaseTablesIteratorPtr StorageMerge::getDatabaseIterator(const String & datab if (source_databases_and_tables) { if (auto it = source_databases_and_tables->find(database_name); it != source_databases_and_tables->end()) - return it->second.count(table_name_); + return it->second.contains(table_name_); else return false; } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index e322d8785fa..31f3f0b4036 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -413,7 +413,7 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( for (const auto & part : future_part->parts) { - if (storage.currently_merging_mutating_parts.count(part)) + if (storage.currently_merging_mutating_parts.contains(part)) throw Exception("Tagging already tagged part " + part->name + ". 
This is a bug.", ErrorCodes::LOGICAL_ERROR); } storage.currently_merging_mutating_parts.insert(future_part->parts.begin(), future_part->parts.end()); @@ -425,7 +425,7 @@ CurrentlyMergingPartsTagger::~CurrentlyMergingPartsTagger() for (const auto & part : future_part->parts) { - if (!storage.currently_merging_mutating_parts.count(part)) + if (!storage.currently_merging_mutating_parts.contains(part)) std::terminate(); storage.currently_merging_mutating_parts.erase(part); } @@ -806,8 +806,8 @@ std::shared_ptr StorageMergeTree::selectPartsToMerge( /// This predicate is checked for the first part of each range. /// (left = nullptr, right = "first part of partition") if (!left) - return !currently_merging_mutating_parts.count(right); - return !currently_merging_mutating_parts.count(left) && !currently_merging_mutating_parts.count(right) + return !currently_merging_mutating_parts.contains(right); + return !currently_merging_mutating_parts.contains(left) && !currently_merging_mutating_parts.contains(right) && getCurrentMutationVersion(left, lock) == getCurrentMutationVersion(right, lock) && partsContainSameProjections(left, right); }; @@ -949,7 +949,7 @@ bool StorageMergeTree::merge( bool StorageMergeTree::partIsAssignedToBackgroundOperation(const DataPartPtr & part) const { std::lock_guard background_processing_lock(currently_processing_in_background_mutex); - return currently_merging_mutating_parts.count(part); + return currently_merging_mutating_parts.contains(part); } std::shared_ptr StorageMergeTree::selectPartsToMutate( @@ -981,7 +981,7 @@ std::shared_ptr StorageMergeTree::selectPartsToMutate( auto mutations_end_it = current_mutations_by_version.end(); for (const auto & part : getDataPartsVectorForInternalUsage()) { - if (currently_merging_mutating_parts.count(part)) + if (currently_merging_mutating_parts.contains(part)) continue; auto mutations_begin_it = current_mutations_by_version.upper_bound(getUpdatedDataVersion(part, 
currently_processing_in_background_mutex_lock)); @@ -1443,7 +1443,7 @@ MergeTreeDataPartPtr StorageMergeTree::outdatePart(MergeTreeTransaction * txn, c /// Part will be "removed" by merge or mutation, it's OK in case of some /// background cleanup processes like removing of empty parts. - if (currently_merging_mutating_parts.count(part)) + if (currently_merging_mutating_parts.contains(part)) return nullptr; removePartsFromWorkingSet(txn, {part}, true); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index db1de14f6a6..5e006cab12f 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -532,7 +532,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( } /// This replica inactive, don't check anything - if (!inactive_replicas.empty() && inactive_replicas.count(replica)) + if (!inactive_replicas.empty() && inactive_replicas.contains(replica)) break; /// It maybe already removed from zk, but local in-memory mutations @@ -547,7 +547,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( ErrorCodes::UNFINISHED); /// Replica inactive, don't check mutation status - if (!inactive_replicas.empty() && inactive_replicas.count(replica)) + if (!inactive_replicas.empty() && inactive_replicas.contains(replica)) continue; /// At least we have our current mutation @@ -1233,7 +1233,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks) /// Collect unexpected parts for (const auto & part : parts) - if (!expected_parts.count(part->name)) + if (!expected_parts.contains(part->name)) unexpected_parts.insert(part); /// this parts we will place to detached with ignored_ prefix /// Which parts should be taken from other replicas. @@ -2571,7 +2571,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo /// but before we copied its active parts set. 
In this case we will GET_PART entry in our queue /// and later will pull the original GET_PART from replication log. /// It should not cause any issues, but it does not allow to get rid of duplicated entries and add an assertion. - if (created_gets.count(part_name)) + if (created_gets.contains(part_name)) { /// NOTE It would be better to copy log entry instead of creating GET_PART /// if there are GET_PART and log entry of other type with the same new_part_name. @@ -3756,7 +3756,7 @@ void StorageReplicatedMergeTree::cleanLastPartNode(const String & partition_id) parts_with_quorum.fromString(old_added_parts); /// Delete information about particular partition. - if (!parts_with_quorum.added_parts.count(partition_id)) + if (!parts_with_quorum.added_parts.contains(partition_id)) { /// There is no information about interested part. break; @@ -4639,7 +4639,7 @@ PartitionBlockNumbersHolder StorageReplicatedMergeTree::allocateBlockNumbersInAf PartitionBlockNumbersHolder::BlockNumbersType block_numbers; for (const auto & lock : lock_holder.getLocks()) { - if (mutation_affected_partition_ids.empty() || mutation_affected_partition_ids.count(lock.partition_id)) + if (mutation_affected_partition_ids.empty() || mutation_affected_partition_ids.contains(lock.partition_id)) block_numbers[lock.partition_id] = lock.number; } @@ -5168,7 +5168,7 @@ bool StorageReplicatedMergeTree::existsNodeCached(const std::string & path) cons { { std::lock_guard lock(existing_nodes_cache_mutex); - if (existing_nodes_cache.count(path)) + if (existing_nodes_cache.contains(path)) return true; } @@ -6094,7 +6094,7 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() /// Delete normal parts on two sets for (auto & part : parts_to_delete_completely) { - if (part_names_to_retry_deletion.count(part->name) == 0) + if (!part_names_to_retry_deletion.contains(part->name)) parts_to_remove_from_filesystem.emplace_back(part); else parts_to_retry_deletion.emplace_back(part); diff --git 
a/src/Storages/StorageSnapshot.cpp b/src/Storages/StorageSnapshot.cpp index 8a82c5387c5..a4b64c798f3 100644 --- a/src/Storages/StorageSnapshot.cpp +++ b/src/Storages/StorageSnapshot.cpp @@ -40,7 +40,7 @@ NamesAndTypesList StorageSnapshot::getColumns(const GetColumnsOptions & options) column_names.insert(column.name); for (const auto & [name, type] : virtual_columns) - if (!column_names.count(name)) + if (!column_names.contains(name)) all_columns.emplace_back(name, type); } } @@ -148,7 +148,7 @@ void StorageSnapshot::check(const Names & column_names) const { bool has_column = columns.hasColumnOrSubcolumn(GetColumnsOptions::AllPhysical, name) || object_columns.hasColumnOrSubcolumn(GetColumnsOptions::AllPhysical, name) - || virtual_columns.count(name); + || virtual_columns.contains(name); if (!has_column) { diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 082b46f5a7e..ca821221611 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -323,7 +323,7 @@ Pipe StorageSystemColumns::read( std::vector columns_mask(sample_block.columns()); for (size_t i = 0, size = columns_mask.size(); i < size; ++i) { - if (names_set.count(sample_block.getByPosition(i).name)) + if (names_set.contains(sample_block.getByPosition(i).name)) { columns_mask[i] = 1; header.insert(sample_block.getByPosition(i)); diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index 42b214bf101..828e77bd83a 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -182,7 +182,7 @@ Pipe StorageSystemDataSkippingIndices::read( std::vector columns_mask(sample_block.columns()); for (size_t i = 0, size = columns_mask.size(); i < size; ++i) { - if (names_set.count(sample_block.getByPosition(i).name)) + if 
(names_set.contains(sample_block.getByPosition(i).name)) { columns_mask[i] = 1; header.insert(sample_block.getByPosition(i)); diff --git a/src/Storages/System/StorageSystemGraphite.cpp b/src/Storages/System/StorageSystemGraphite.cpp index 8711162385f..f5208d67a26 100644 --- a/src/Storages/System/StorageSystemGraphite.cpp +++ b/src/Storages/System/StorageSystemGraphite.cpp @@ -52,7 +52,7 @@ static StorageSystemGraphite::Configs getConfigs(ContextPtr context) const String & config_name = table_data->merging_params.graphite_params.config_name; auto table_id = table_data->getStorageID(); - if (!graphite_configs.count(config_name)) + if (!graphite_configs.contains(config_name)) { StorageSystemGraphite::Config new_config = { diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 1462cc58a42..84b14b2c8be 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -256,7 +256,7 @@ Pipe StorageSystemPartsBase::read( std::vector columns_mask(sample.columns()); for (size_t i = 0; i < sample.columns(); ++i) { - if (names_set.count(sample.getByPosition(i).name)) + if (names_set.contains(sample.getByPosition(i).name)) { columns_mask[i] = 1; header.insert(sample.getByPosition(i)); diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 98a07d0f4c3..8a7b1f2143d 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -575,7 +575,7 @@ Pipe StorageSystemTables::read( std::vector columns_mask(sample_block.columns()); for (size_t i = 0, size = columns_mask.size(); i < size; ++i) { - if (names_set.count(sample_block.getByPosition(i).name)) + if (names_set.contains(sample_block.getByPosition(i).name)) { columns_mask[i] = 1; res_block.insert(sample_block.getByPosition(i)); diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index 
ccf924f2827..b745da13484 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -253,7 +253,7 @@ TTLDescription TTLDescription::getTTLFromAST( /// The separate step, because not all primary key columns are ordinary columns. for (size_t i = ttl_element->group_by_key.size(); i < primary_key_expressions.size(); ++i) { - if (!aggregation_columns_set.count(pk_columns[i])) + if (!aggregation_columns_set.contains(pk_columns[i])) { ASTPtr expr = makeASTFunction("any", primary_key_expressions[i]->clone()); aggregations.emplace_back(pk_columns[i], std::move(expr)); @@ -264,7 +264,7 @@ TTLDescription TTLDescription::getTTLFromAST( /// Wrap with 'any' aggregate function other columns, which was not set explicitly. for (const auto & column : columns.getOrdinary()) { - if (!aggregation_columns_set.count(column.name) && !used_primary_key_columns_set.count(column.name)) + if (!aggregation_columns_set.contains(column.name) && !used_primary_key_columns_set.contains(column.name)) { ASTPtr expr = makeASTFunction("any", std::make_shared(column.name)); aggregations.emplace_back(column.name, std::move(expr)); diff --git a/src/TableFunctions/TableFunctionFactory.cpp b/src/TableFunctions/TableFunctionFactory.cpp index 15e61354f6d..8c2245d4abe 100644 --- a/src/TableFunctions/TableFunctionFactory.cpp +++ b/src/TableFunctions/TableFunctionFactory.cpp @@ -80,7 +80,7 @@ TableFunctionPtr TableFunctionFactory::tryGet( bool TableFunctionFactory::isTableFunctionName(const std::string & name) const { - return table_functions.count(name); + return table_functions.contains(name); } TableFunctionFactory & TableFunctionFactory::instance() diff --git a/utils/db-generator/query_db_generator.cpp b/utils/db-generator/query_db_generator.cpp index 6455bc045d6..cdc2c9e2f04 100644 --- a/utils/db-generator/query_db_generator.cpp +++ b/utils/db-generator/query_db_generator.cpp @@ -419,7 +419,7 @@ public: bool columnExists(const std::string & column_name) const { - return 
columns.count(column_name); // || columns_maybe.count(column_name); + return columns.contains(column_name); // || columns_maybe.contains(column_name); } void addColumn(const std::string & column_name) @@ -440,7 +440,7 @@ public: for (const auto & column : columns) { std::cout << column << "\n"; - if (column_description.count(column)) + if (column_description.contains(column)) column_description[column].print(); std::cout << "\n"; } @@ -519,7 +519,7 @@ public: bool tableExists(const std::string & table_name) const { - return tables.count(table_name); + return tables.contains(table_name); } void addColumn(std::string full_column) @@ -528,12 +528,12 @@ public: std::tie(table, column) = get_table_a_column(full_column); if (!table.empty()) { - if (tables.count(table)) + if (tables.contains(table)) { tables[table].addColumn(column); return; } - if (aliases.count(table)) + if (aliases.contains(table)) { tables[aliases[table]].addColumn(column); return; @@ -545,7 +545,7 @@ public: void addTable(std::string table_name) { - if (tables.count(table_name)) + if (tables.contains(table_name)) return; tables[table_name] = Table(table_name); @@ -556,7 +556,7 @@ public: void addDescription(const Column & description) { std::string table = description.name.first; - if (tables.count(table)) + if (tables.contains(table)) tables[table].setDescription(description); } @@ -566,10 +566,10 @@ public: std::tie(table, column) = get_table_a_column(full_column); if (!table.empty()) { - if (tables.count(table)) + if (tables.contains(table)) return std::make_pair(table, column); - if (aliases.count(table)) + if (aliases.contains(table)) { table = aliases.find(table)->second; return std::make_pair(table, column); @@ -632,7 +632,7 @@ FuncRet arrayJoinFunc(DB::ASTPtr ch, std::map & columns) auto c = Column(indent); c.type = Type::all; c.is_array = true; - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; @@ -693,7 +693,7 @@ FuncRet 
inFunc(DB::ASTPtr ch, std::map & columns) { FuncHandler f; auto arg_func_name = std::dynamic_pointer_cast(arg)->name; - if (handlers.count(arg_func_name)) + if (handlers.contains(arg_func_name)) f = handlers[arg_func_name]; else f = handlers[""]; @@ -711,7 +711,7 @@ FuncRet inFunc(DB::ASTPtr ch, std::map & columns) c.type = type_value; c.values.insert(values.begin(), values.end()); c.generateValues(1); - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; @@ -756,7 +756,7 @@ FuncRet arrayFunc(DB::ASTPtr ch, std::map & columns) { auto c = Column(indent); c.type = type_value; - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; @@ -797,7 +797,7 @@ FuncRet arithmeticFunc(DB::ASTPtr ch, std::map & columns) { FuncHandler f; auto arg_func_name = std::dynamic_pointer_cast(arg)->name; - if (handlers.count(arg_func_name)) + if (handlers.contains(arg_func_name)) f = handlers[arg_func_name]; else f = handlers[""]; @@ -814,7 +814,7 @@ FuncRet arithmeticFunc(DB::ASTPtr ch, std::map & columns) { auto c = Column(indent); c.type = type_value; - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; @@ -874,7 +874,7 @@ FuncRet likeFunc(DB::ASTPtr ch, std::map & columns) auto c = Column(indent); c.type = type_value; c.values.insert(values.begin(), values.end()); - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; @@ -895,7 +895,7 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) ColumnType type_value = Type::all; bool is_array = false; bool no_indent = true; - if (func_to_param_type.count(boost::algorithm::to_lower_copy(x->name))) + if (func_to_param_type.contains(boost::algorithm::to_lower_copy(x->name))) { type_value &= func_to_param_type[boost::algorithm::to_lower_copy(x->name)]; is_array = 
func_to_param_type[boost::algorithm::to_lower_copy(x->name)] & Type::a; @@ -922,7 +922,7 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) { FuncHandler f; auto arg_func_name = std::dynamic_pointer_cast(arg)->name; - if (handlers.count(arg_func_name)) + if (handlers.contains(arg_func_name)) f = handlers[arg_func_name]; else f = handlers[""]; @@ -969,7 +969,7 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) } } } - if (func_args_same_types.count(boost::algorithm::to_lower_copy(x->name))) + if (func_args_same_types.contains(boost::algorithm::to_lower_copy(x->name))) type_value &= type; } for (const auto & indent : indents) @@ -977,19 +977,19 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) auto c = Column(indent); c.type = type_value; c.is_array = is_array; - if (func_args_same_types.count( + if (func_args_same_types.contains( boost::algorithm::to_lower_copy(x->name))) c.values = values; for (const auto & ind : indents) if (ind != indent) c.equals.insert(std::make_pair("", ind)); - if (columns.count(indent)) + if (columns.contains(indent)) columns[indent].merge(c); else columns[indent] = c; } - if (func_to_return_type.count(boost::algorithm::to_lower_copy(x->name))) + if (func_to_return_type.contains(boost::algorithm::to_lower_copy(x->name))) { if (no_indent) { @@ -1001,7 +1001,7 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) } return func_to_return_type[boost::algorithm::to_lower_copy(x->name)]; } - else if (func_to_param_type.count( + else if (func_to_param_type.contains( boost::algorithm::to_lower_copy(x->name))) { if (no_indent) @@ -1027,7 +1027,7 @@ void processFunc(DB::ASTPtr ch, std::map & columns) { FuncHandler f; auto arg_func_name = x->name; - if (handlers.count(arg_func_name)) + if (handlers.contains(arg_func_name)) f = handlers[arg_func_name]; else f = handlers[""]; @@ -1084,7 +1084,7 @@ connectedEqualityFind( { std::set result; for (const auto & column : now.equals) - if (!visited.count(column)) + if 
(!visited.contains(column)) { visited.insert(column); auto sub_r = connectedEqualityFind( @@ -1124,7 +1124,7 @@ unificateColumns( } std::set visited; for (auto & column : result) - if (!visited.count(column.second.name)) + if (!visited.contains(column.second.name)) { auto equal = connectedEqualityFind( result[column.second.name.first + "." + column.second.name.second], @@ -1242,9 +1242,9 @@ void parseSelectQuery(DB::ASTPtr ast, TableList & all_tables) processFunc(ast, columns_descriptions); for (const auto & column : columns) - if (!column_aliases.count(column)) + if (!column_aliases.contains(column)) { - if (!columns_descriptions.count(column)) + if (!columns_descriptions.contains(column)) columns_descriptions[column] = Column(column); all_tables.addColumn(column); } diff --git a/utils/keeper-bench/Generator.cpp b/utils/keeper-bench/Generator.cpp index 77185813a2a..d3a8323b81f 100644 --- a/utils/keeper-bench/Generator.cpp +++ b/utils/keeper-bench/Generator.cpp @@ -123,7 +123,7 @@ ZooKeeperRequestPtr CreateRequestGenerator::generate() plength = *path_length; auto path_candidate = generateRandomPath(path_prefix, plength); - while (paths_created.count(path_candidate)) + while (paths_created.contains(path_candidate)) path_candidate = generateRandomPath(path_prefix, plength); paths_created.insert(path_candidate);