Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit c198bbcafb ("Useless changes"), parent 224e1940ae
@@ -93,8 +93,6 @@ namespace
                 break;
             }

-            size_t id_endpos = line.find('\t');
-            String id_as_string = line.substr(0, id_endpos);
             UUID id = parse<UUID>(line);
             line.clear();

@@ -241,11 +241,11 @@ namespace

     enum Level
     {
-        GLOBAL_LEVEL,
-        DATABASE_LEVEL,
+        GLOBAL_LEVEL = 0,
+        DATABASE_LEVEL = 1,
         GLOBAL_WITH_PARAMETER = DATABASE_LEVEL,
-        TABLE_LEVEL,
-        COLUMN_LEVEL,
+        TABLE_LEVEL = 2,
+        COLUMN_LEVEL = 3,
     };

     AccessFlags getAllGrantableFlags(Level level)
@@ -118,13 +118,16 @@ void AuthenticationData::setPassword(const String & password_)
     switch (type)
     {
         case AuthenticationType::PLAINTEXT_PASSWORD:
-            return setPasswordHashBinary(Util::stringToDigest(password_));
+            setPasswordHashBinary(Util::stringToDigest(password_));
+            return;

         case AuthenticationType::SHA256_PASSWORD:
-            return setPasswordHashBinary(Util::encodeSHA256(password_));
+            setPasswordHashBinary(Util::encodeSHA256(password_));
+            return;

         case AuthenticationType::DOUBLE_SHA1_PASSWORD:
-            return setPasswordHashBinary(Util::encodeDoubleSHA1(password_));
+            setPasswordHashBinary(Util::encodeDoubleSHA1(password_));
+            return;

         case AuthenticationType::BCRYPT_PASSWORD:
         case AuthenticationType::NO_PASSWORD:
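The cases above replace `return f(...);`, where `f` returns `void`, with a plain call followed by a bare `return;`. Both forms are legal C++; the usual motivation for the rewrite is clang-tidy's readability-avoid-return-with-void-value check. A minimal standalone sketch of the pattern (the function names are illustrative, not taken from the ClickHouse sources):

    #include <iostream>

    void report(const char * msg) { std::cout << msg << '\n'; }

    /// Before: returning a void expression from a void function.
    /// Legal, but flagged by readability-avoid-return-with-void-value.
    void handleBefore(bool verbose)
    {
        if (verbose)
            return report("verbose");
        report("quiet");
    }

    /// After: make the call a plain statement and return separately.
    void handleAfter(bool verbose)
    {
        if (verbose)
        {
            report("verbose");
            return;
        }
        report("quiet");
    }

    int main()
    {
        handleBefore(true);
        handleAfter(false);
        return 0;
    }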
@@ -146,7 +149,7 @@ void AuthenticationData::setPasswordBcrypt(const String & password_, int workfac
     if (type != AuthenticationType::BCRYPT_PASSWORD)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify bcrypt password for authentication type {}", toString(type));

-    return setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_));
+    setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_));
 }

 String AuthenticationData::getPassword() const
@@ -71,7 +71,7 @@ namespace
     SCOPE_EXIT(
     {
         if (!succeeded)
-            std::filesystem::remove(tmp_file_path);
+            (void)std::filesystem::remove(tmp_file_path);
     });

     /// Write the file.
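`std::filesystem::remove` returns a `bool` telling whether anything was actually deleted, and `remove_all` returns the number of removed entries. Prefixing the call with `(void)`, as in the many hunks below, documents that the result is discarded on purpose and keeps static analysis that warns about ignored return values quiet. A minimal sketch (the paths are illustrative, not from the diff):

    #include <filesystem>

    namespace fs = std::filesystem;

    int main()
    {
        const fs::path tmp_file = "example.tmp";   /// illustrative path

        /// remove() returns true only if a file was actually deleted;
        /// the (void) cast states explicitly that we do not care either way.
        (void)fs::remove(tmp_file);

        /// remove_all() returns the number of entries it deleted.
        (void)fs::remove_all("example_dir");
        return 0;
    }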
@@ -302,7 +302,7 @@ void DiskAccessStorage::writeLists()
     }

     /// The list files was successfully written, we don't need the 'need_rebuild_lists.mark' file any longer.
-    std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
+    (void)std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
     types_of_lists_to_write.clear();
 }

@@ -419,7 +419,7 @@ void DiskAccessStorage::removeAllExceptInMemory(const boost::container::flat_set
         const auto & id = it->first;
         ++it; /// We must go to the next element in the map `entries_by_id` here because otherwise removeNoLock() can invalidate our iterator.
         if (!ids_to_keep.contains(id))
-            removeNoLock(id, /* throw_if_not_exists */ true, /* write_on_disk= */ false);
+            (void)removeNoLock(id, /* throw_if_not_exists */ true, /* write_on_disk= */ false);
     }
 }

@@ -549,7 +549,7 @@ bool DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & ne
     if (name_collision && (id_by_name != id))
     {
         assert(replace_if_exists);
-        removeNoLock(id_by_name, /* throw_if_not_exists= */ false, write_on_disk);
+        removeNoLock(id_by_name, /* throw_if_not_exists= */ false, write_on_disk); // NOLINT
     }

     if (id_collision)
@@ -161,9 +161,9 @@ void GrantedRoles::makeUnion(const GrantedRoles & other)

 void GrantedRoles::makeIntersection(const GrantedRoles & other)
 {
-    boost::range::remove_erase_if(roles, [&other](const UUID & id) { return other.roles.find(id) == other.roles.end(); });
+    boost::range::remove_erase_if(roles, [&other](const UUID & id) { return other.roles.find(id) == other.roles.end(); }); // NOLINT

-    boost::range::remove_erase_if(roles_with_admin_option, [&other](const UUID & id)
+    boost::range::remove_erase_if(roles_with_admin_option, [&other](const UUID & id) // NOLINT
     {
         return other.roles_with_admin_option.find(id) == other.roles_with_admin_option.end();
     });
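The trailing `// NOLINT` comments added in these hunks suppress clang-tidy diagnostics for that single line; a check name can be appended to narrow the suppression. A standalone sketch of both forms (the function is illustrative):

    #include <cstdio>

    static int mightWarn() { return std::puts("side effect"); }

    int main()
    {
        /// Blanket form: clang-tidy skips every check on this line.
        mightWarn(); // NOLINT

        /// Narrow form: suppress one named check only, e.g.
        ///     mightWarn(); // NOLINT(bugprone-unused-return-value)
        return 0;
    }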
@@ -583,7 +583,7 @@ void IAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, c
         throwBackupNotAllowed();

     auto entities = readAllWithIDs(type);
-    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
+    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); }); // NOLINT

     if (entities.empty())
         return;
@@ -76,7 +76,7 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
     config.keys(prefix, all_keys);
     for (const auto & key : all_keys)
     {
-        if (key == "role_mapping" || key.find("role_mapping[") == 0)
+        if (key == "role_mapping" || key.starts_with("role_mapping["))
             parseLDAPRoleSearchParams(role_search_params_cfg.emplace_back(), config, prefix_str + key);
     }
 }
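`key.find("role_mapping[") == 0` is the pre-C++20 spelling of a prefix test; `starts_with` (C++20) states the intent directly and cannot be misread as "found anywhere in the string". A small sketch of the equivalence (the key value is illustrative):

    #include <cassert>
    #include <string>

    int main()
    {
        const std::string key = "role_mapping[1]";   /// illustrative key

        /// Pre-C++20 idiom: prefix test spelled as find() == 0.
        const bool old_style = key.find("role_mapping[") == 0;

        /// C++20: says "prefix" explicitly.
        const bool new_style = key.starts_with("role_mapping[");

        assert(old_style == new_style);
        return 0;
    }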
@@ -94,7 +94,7 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
     role_change_subscription = access_control.subscribeForChanges<Role>(
         [this] (const UUID & id, const AccessEntityPtr & entity)
         {
-            return this->processRoleChange(id, entity);
+            this->processRoleChange(id, entity);
         }
     );
 }
@@ -200,7 +200,7 @@ void LDAPAccessStorage::applyRoleChangeNoLock(bool grant, const UUID & role_id,
 void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchResultsList & external_roles) const
 {
     const auto external_roles_hash = boost::hash<LDAPClient::SearchResultsList>{}(external_roles);
-    return assignRolesNoLock(user, external_roles, external_roles_hash);
+    assignRolesNoLock(user, external_roles, external_roles_hash);
 }

@@ -196,7 +196,7 @@ void LDAPClient::handleError(int result_code, String text)
             }
         });

-        ldap_get_option(handle, LDAP_OPT_DIAGNOSTIC_MESSAGE, &raw_message);
+        ldap_get_option(handle, LDAP_OPT_DIAGNOSTIC_MESSAGE, static_cast<void*>(&raw_message));

         if (raw_message && *raw_message != '\0')
         {
@@ -106,7 +106,7 @@ bool MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr &
     if (name_collision && (id_by_name != id))
     {
         assert(replace_if_exists);
-        removeNoLock(id_by_name, /* throw_if_not_exists= */ true);
+        removeNoLock(id_by_name, /* throw_if_not_exists= */ true); // NOLINT
     }

     if (id_collision)
@@ -128,7 +128,7 @@ bool MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr &
             }
             return true;
         }
-        removeNoLock(id, /* throw_if_not_exists= */ true);
+        removeNoLock(id, /* throw_if_not_exists= */ true); // NOLINT
     }

     /// Do insertion.
@@ -238,7 +238,7 @@ void MemoryAccessStorage::removeAllExceptNoLock(const boost::container::flat_set
         const auto & id = it->first;
         ++it; /// We must go to the next element in the map `entries_by_id` here because otherwise removeNoLock() can invalidate our iterator.
         if (!ids_to_keep.contains(id))
-            removeNoLock(id, /* throw_if_not_exists */ true);
+            removeNoLock(id, /* throw_if_not_exists */ true); // NOLINT
     }
 }

@@ -238,7 +238,7 @@ void MultipleAccessStorage::moveAccessEntities(const std::vector<UUID> & ids, co

     try
     {
-        source_storage->remove(ids);
+        source_storage->remove(ids); // NOLINT
         need_rollback = true;
         destination_storage->insert(to_move, ids);
     }
@@ -616,7 +616,7 @@ void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntit
 void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id)
 {
     LOG_DEBUG(getLogger(), "Removing entity with id {}", toString(id));
-    memory_storage.remove(id, /* throw_if_not_exists= */ false);
+    memory_storage.remove(id, /* throw_if_not_exists= */ false); // NOLINT
 }

@@ -654,7 +654,7 @@ void ReplicatedAccessStorage::backup(BackupEntriesCollector & backup_entries_col
         throwBackupNotAllowed();

     auto entities = readAllWithIDs(type);
-    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
+    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); }); // NOLINT

     if (entities.empty())
         return;
@@ -190,11 +190,11 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting

 void SettingsConstraints::check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
 {
-    boost::range::remove_erase_if(
+    boost::range::remove_erase_if( // NOLINT
         changes,
         [&](SettingChange & change) -> bool
         {
-            return !checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
+            return !checkImpl(current_settings, change, THROW_ON_VIOLATION, source);
         });
 }
@@ -211,7 +211,7 @@ void SettingsConstraints::check(const MergeTreeSettings & current_settings, cons

 void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
 {
-    boost::range::remove_erase_if(
+    boost::range::remove_erase_if( // NOLINT
         changes,
         [&](SettingChange & change) -> bool
         {
@@ -105,7 +105,7 @@ std::unique_ptr<WriteBuffer> BackupWriterFile::writeFile(const String & file_nam

 void BackupWriterFile::removeFile(const String & file_name)
 {
-    fs::remove(root_path / file_name);
+    (void)fs::remove(root_path / file_name);
     if (fs::is_directory(root_path) && fs::is_empty(root_path))
         fs::remove(root_path);
 }
@@ -113,9 +113,9 @@ void BackupWriterFile::removeFile(const String & file_name)
 void BackupWriterFile::removeFiles(const Strings & file_names)
 {
     for (const auto & file_name : file_names)
-        fs::remove(root_path / file_name);
+        (void)fs::remove(root_path / file_name);
     if (fs::is_directory(root_path) && fs::is_empty(root_path))
-        fs::remove(root_path);
+        (void)fs::remove(root_path);
 }

 void BackupWriterFile::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
@@ -259,23 +259,23 @@ namespace
 enum class BackupsWorker::ThreadPoolId : uint8_t
 {
     /// "BACKUP ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup.
-    BACKUP_ASYNC_ON_CLUSTER,
+    BACKUP_ASYNC_ON_CLUSTER = 0,

     /// "BACKUP ASYNC" waits in background while all file infos are built and then it copies the backup's files.
-    BACKUP_ASYNC,
+    BACKUP_ASYNC = 1,

     /// Making a list of files to copy and copying of those files is always sequential, so those operations can share one thread pool.
-    BACKUP_MAKE_FILES_LIST,
+    BACKUP_MAKE_FILES_LIST = 2,
     BACKUP_COPY_FILES = BACKUP_MAKE_FILES_LIST,

     /// "RESTORE ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup.
-    RESTORE_ASYNC_ON_CLUSTER,
+    RESTORE_ASYNC_ON_CLUSTER = 3,

     /// "RESTORE ASYNC" waits in background while the data of all tables are restored.
-    RESTORE_ASYNC,
+    RESTORE_ASYNC = 4,

     /// Restores from backups.
-    RESTORE,
+    RESTORE = 5,
 };

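Without initializers, enumerators are numbered consecutively from 0, so the values above do not change; spelling them out only makes the numbering visible at the declaration and guards it against accidental reordering or insertion of new entries. An illustrative sketch (the enum here is not the ClickHouse one):

    #include <cstdint>

    /// Implicit numbering: values follow declaration order (0, 1, 2, ...).
    enum class StageImplicit : uint8_t { Prepare, Copy, Finalize };

    /// Explicit numbering: the same values, stated at the declaration,
    /// so adding or reordering enumerators later cannot silently renumber them.
    enum class StageExplicit : uint8_t { Prepare = 0, Copy = 1, Finalize = 2 };

    static_assert(static_cast<uint8_t>(StageImplicit::Finalize)
                  == static_cast<uint8_t>(StageExplicit::Finalize));

    int main() { return 0; }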
@@ -81,8 +81,6 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer

     auto new_uuids = create_query.generateRandomUUID(/* always_generate_new_uuid= */ true);
-
     String new_query_str = serializeAST(create_query);
-
     {
         std::lock_guard lock{mutex};
         if (find_in_map())
@@ -798,7 +798,7 @@ void RestorerFromBackup::applyCustomStoragePolicy(ASTPtr query_ptr)
     {
         if (restore_settings.storage_policy.value().empty())
             /// it has been set to "" deliberately, so the source storage policy is erased
-            storage->settings->changes.removeSetting(setting_name);
+            storage->settings->changes.removeSetting(setting_name); // NOLINT
         else
             /// it has been set to a custom value, so it either overwrites the existing value or is added as a new one
             storage->settings->changes.setSetting(setting_name, restore_settings.storage_policy.value());
@@ -838,7 +838,7 @@ void RestorerFromBackup::removeUnresolvedDependencies()
         return true; /// Exclude this dependency.
     };

-    tables_dependencies.removeTablesIf(need_exclude_dependency);
+    tables_dependencies.removeTablesIf(need_exclude_dependency); // NOLINT

     if (tables_dependencies.getNumberOfTables() != table_infos.size())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of tables to be restored is not as expected. It's a bug");
@@ -182,7 +182,9 @@ void listKeys(
     if (enumerate_paths.empty())
         enumerate_paths.push("");

-    const bool do_finish = depth >= 0 && --depth < 0;
+    const bool do_finish = depth == 0;
+    if (depth >= 0)
+        --depth;

     auto initial_paths = std::move(enumerate_paths);
     enumerate_paths = {};
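The old line both tested and decremented `depth` inside one short-circuiting expression; the new code computes `do_finish` first and performs the decrement as a separate statement. The behaviour is unchanged: finishing happens exactly when `depth` was 0, and a negative `depth` (unlimited) is never decremented. A self-contained check of that equivalence (standalone code, not from the repository):

    #include <cassert>

    struct Result { bool do_finish; int depth; };

    static Result oldWay(int depth)
    {
        const bool do_finish = depth >= 0 && --depth < 0;
        return {do_finish, depth};
    }

    static Result newWay(int depth)
    {
        const bool do_finish = depth == 0;
        if (depth >= 0)
            --depth;
        return {do_finish, depth};
    }

    int main()
    {
        for (int d = -3; d <= 3; ++d)
        {
            const auto a = oldWay(d);
            const auto b = newWay(d);
            assert(a.do_finish == b.do_finish && a.depth == b.depth);
        }
        return 0;
    }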
@@ -253,7 +253,7 @@ public:
                 "Cannot remove collection `{}`, because it doesn't exist",
                 collection_name);
         }
-        fs::remove(collection_path);
+        (void)fs::remove(collection_path);
     }

 private:
@@ -291,7 +291,7 @@ private:
             files_to_remove.push_back(current_path);
         }
         for (const auto & file : files_to_remove)
-            fs::remove(file);
+            (void)fs::remove(file);
     }

     static ASTCreateNamedCollectionQuery readCreateQueryFromMetadata(
@@ -402,7 +402,7 @@ void loadIfNot()
     if (is_loaded_from_sql && is_loaded_from_config)
         return;
     auto lock = lockNamedCollectionsTransaction();
-    return loadIfNotUnlocked(lock);
+    loadIfNotUnlocked(lock);
 }

 void removeFromSQL(const ASTDropNamedCollectionQuery & query, ContextPtr context)
@@ -100,7 +100,7 @@ void NamedCollectionFactory::add(
     MutableNamedCollectionPtr collection)
 {
     std::lock_guard lock(mutex);
-    return addUnlocked(collection_name, collection, lock);
+    addUnlocked(collection_name, collection, lock);
 }

 void NamedCollectionFactory::add(NamedCollectionsMap collections)
@@ -141,7 +141,7 @@ void NamedCollectionFactory::remove(const std::string & collection_name)
 void NamedCollectionFactory::removeIfExists(const std::string & collection_name)
 {
     std::lock_guard lock(mutex);
-    removeIfExistsUnlocked(collection_name, lock);
+    removeIfExistsUnlocked(collection_name, lock); // NOLINT
 }

 bool NamedCollectionFactory::removeIfExistsUnlocked(
@@ -513,49 +513,49 @@ namespace MySQLReplication
                 {
                     UInt8 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 1);
-                    row.push_back(Field{UInt8{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_SHORT:
                 {
                     UInt16 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 2);
-                    row.push_back(Field{UInt16{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_INT24:
                 {
                     Int32 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 3);
-                    row.push_back(Field{Int32{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_LONG:
                 {
                     UInt32 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 4);
-                    row.push_back(Field{UInt32{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_LONGLONG:
                 {
                     UInt64 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 8);
-                    row.push_back(Field{UInt64{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_FLOAT:
                 {
                     Float32 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 4);
-                    row.push_back(Field{Float32{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_DOUBLE:
                 {
                     Float64 val = 0;
                     payload.readStrict(reinterpret_cast<char *>(&val), 8);
-                    row.push_back(Field{Float64{val}});
+                    row.push_back(Field{val});
                     break;
                 }
                 case MYSQL_TYPE_TIMESTAMP:
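`Field{UInt8{val}}` first builds a temporary of the same type as `val` and then constructs the `Field` from it; since `val` already has exactly that type, `Field{val}` yields the same `Field` with one less wrapper to read. A sketch of the idea using `std::variant` as a stand-in for `DB::Field` (which is a ClickHouse class and is not reproduced here):

    #include <cassert>
    #include <cstdint>
    #include <variant>

    /// std::variant used only as a stand-in for DB::Field.
    using FieldLike = std::variant<uint8_t, uint16_t, uint32_t, uint64_t>;

    int main()
    {
        uint32_t val = 42;

        FieldLike wrapped{uint32_t{val}};   /// redundant same-type temporary
        FieldLike direct{val};              /// identical result, simpler to read

        assert(wrapped == direct);
        return 0;
    }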
@@ -119,7 +119,7 @@ namespace MySQLReplication
     {
         BINLOG_CHECKSUM_ALG_OFF = 0,
         BINLOG_CHECKSUM_ALG_CRC32 = 1,
-        BINLOG_CHECKSUM_ALG_ENUM_END,
+        BINLOG_CHECKSUM_ALG_ENUM_END = 3,
         BINLOG_CHECKSUM_ALG_UNDEF = 255
     };

@@ -99,8 +99,7 @@ void insertPostgreSQLValue(
             ReadBufferFromString in(value);
             time_t time = 0;
             readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(data_type.get())->getTimeZone());
-            if (time < 0)
-                time = 0;
+            time = std::max<time_t>(time, 0);
             assert_cast<ColumnUInt32 &>(column).insertValue(static_cast<UInt32>(time));
             break;
         }
@@ -217,8 +216,7 @@ void preparePostgreSQLArrayInfo(
             ReadBufferFromString in(field);
             time_t time = 0;
             readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(nested.get())->getTimeZone());
-            if (time < 0)
-                time = 0;
+            time = std::max<time_t>(time, 0);
             return time;
         };
     else if (which.isDateTime64())
@@ -227,8 +225,7 @@ void preparePostgreSQLArrayInfo(
             ReadBufferFromString in(field);
             DateTime64 time = 0;
             readDateTime64Text(time, 6, in, assert_cast<const DataTypeDateTime64 *>(nested.get())->getTimeZone());
-            if (time < 0)
-                time = 0;
+            time = std::max<time_t>(time, 0);
             return time;
         };
     else if (which.isDecimal32())
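The three hunks above fold `if (time < 0) time = 0;` into a single `std::max` call, clamping pre-epoch timestamps to zero without a branch to read. A minimal sketch:

    #include <algorithm>
    #include <cassert>
    #include <ctime>

    int main()
    {
        time_t before_epoch = -123;   /// e.g. a timestamp earlier than 1970-01-01
        time_t normal = 456;

        before_epoch = std::max<time_t>(before_epoch, 0);
        normal = std::max<time_t>(normal, 0);

        assert(before_epoch == 0);
        assert(normal == 456);
        return 0;
    }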
@@ -241,7 +241,7 @@ void DatabasePostgreSQL::attachTable(ContextPtr /* context_ */, const String & t

     fs::path table_marked_as_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
     if (fs::exists(table_marked_as_removed))
-        fs::remove(table_marked_as_removed);
+        (void)fs::remove(table_marked_as_removed);
 }

@@ -298,7 +298,7 @@ void DatabasePostgreSQL::dropTable(ContextPtr, const String & table_name, bool /

 void DatabasePostgreSQL::drop(ContextPtr /*context*/)
 {
-    fs::remove_all(getMetadataPath());
+    (void)fs::remove_all(getMetadataPath());
 }

@@ -368,7 +368,7 @@ void DatabasePostgreSQL::removeOutdatedTables()
             iter = detached_or_dropped.erase(iter);
             fs::path table_marked_as_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
             if (fs::exists(table_marked_as_removed))
-                fs::remove(table_marked_as_removed);
+                (void)fs::remove(table_marked_as_removed);
         }
         else
             ++iter;
@@ -4,9 +4,9 @@
 #include <Disks/ObjectStorages/IObjectStorage.h>
-#include <Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h>
 #include <Disks/ObjectStorages/IMetadataStorage.h>
 #include <Disks/ObjectStorages/DiskObjectStorageTransaction.h>
+#include <Common/re2.h>


 namespace CurrentMetrics
 {
     extern const Metric DiskSpaceReservedForMerge;
@@ -158,7 +158,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std

 void MetadataStorageFromPlainObjectStorageTransaction::createDirectoryRecursive(const std::string & path)
 {
-    return createDirectory(path);
+    createDirectory(path);
 }

 void MetadataStorageFromPlainObjectStorageTransaction::moveDirectory(const std::string & path_from, const std::string & path_to)
@@ -557,7 +557,6 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
 {
     auto new_s3_settings = getSettings(config, config_prefix, context);
     auto new_client = getClient(config, config_prefix, context, *new_s3_settings);
-    String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));

     auto new_uri{uri};
     new_uri.bucket = new_namespace;
@@ -206,7 +206,7 @@ void RemoteCacheController::close()
     // delete directory
     LOG_TRACE(log, "Removing the local cache. local path: {}", local_path.string());
     if (fs::exists(local_path))
-        fs::remove_all(local_path);
+        (void)fs::remove_all(local_path);
 }

 std::unique_ptr<ReadBufferFromFileBase> RemoteCacheController::allocFile()
@@ -416,7 +416,7 @@ void StorageFileLog::drop()
 {
     try
     {
-        std::filesystem::remove_all(metadata_base_path);
+        (void)std::filesystem::remove_all(metadata_base_path);
     }
     catch (...)
     {
@@ -1009,7 +1009,7 @@ bool StorageFileLog::updateFileInfos()
                 file_infos.meta_by_inode.erase(meta);

                 if (std::filesystem::exists(getFullMetaPath(file_name)))
-                    std::filesystem::remove(getFullMetaPath(file_name));
+                    (void)std::filesystem::remove(getFullMetaPath(file_name));
                 file_infos.context_by_name.erase(it);
             }
             else
@@ -113,7 +113,6 @@ HDFSBuilderWrapper createHDFSBuilder(const String & uri_str, const Poco::Util::A
     const Poco::URI uri(uri_str);
     const auto & host = uri.getHost();
     auto port = uri.getPort();
-    const String path = "//";
     if (host.empty())
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal HDFS URI: {}", uri.toString());

@@ -921,9 +921,7 @@ void ReadFromHive::initializePipeline(QueryPipelineBuilder & pipeline, const Bui
     }

     sources_info->hive_files = std::move(*hive_files);
-
-    if (num_streams > sources_info->hive_files.size())
-        num_streams = sources_info->hive_files.size();
+    num_streams = std::min(num_streams, sources_info->hive_files.size());

     Pipes pipes;
     for (size_t i = 0; i < num_streams; ++i)
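Capping the stream count with `std::min` replaces the two-line `if`; the result is identical. A minimal sketch (the variable names mirror the hunk, the values are made up):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main()
    {
        size_t num_streams = 16;
        const size_t num_files = 5;   /// illustrative stand-in for hive_files.size()

        /// Before: if (num_streams > num_files) num_streams = num_files;
        num_streams = std::min(num_streams, num_files);

        assert(num_streams == 5);
        return 0;
    }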
@@ -70,10 +70,10 @@ private:

     enum class PruneLevel : uint8_t
     {
-        None, /// Do not prune
-        Partition,
-        File,
-        Split,
+        None = 0, /// Do not prune
+        Partition = 1,
+        File = 2,
+        Split = 3,
         Max = Split,
     };

@@ -60,7 +60,7 @@ void KafkaProducer::produce(const String & message, size_t rows_in_message, cons
     {
         const auto & timestamp_column = assert_cast<const ColumnUInt32 &>(*columns[timestamp_column_index.value()]);
         const auto timestamp = std::chrono::seconds{timestamp_column.getElement(last_row)};
-        builder.timestamp(timestamp);
+        (void)builder.timestamp(timestamp);
     }

     while (!shutdown_called)
@@ -100,7 +100,7 @@ EmbeddedRocksDBBulkSink::~EmbeddedRocksDBBulkSink()
     try
     {
         if (fs::exists(insert_directory_queue))
-            fs::remove_all(insert_directory_queue);
+            (void)fs::remove_all(insert_directory_queue);
     }
     catch (...)
     {
@@ -210,7 +210,7 @@ void EmbeddedRocksDBBulkSink::consume(Chunk chunk_)
         throw Exception(ErrorCodes::ROCKSDB_ERROR, "RocksDB write error: {}", status.ToString());

     if (fs::exists(sst_file_path))
-        fs::remove(sst_file_path);
+        (void)fs::remove(sst_file_path);
 }

 void EmbeddedRocksDBBulkSink::onFinish()
@@ -211,7 +211,7 @@ void StorageEmbeddedRocksDB::truncate(const ASTPtr &, const StorageMetadataPtr &
     rocksdb_ptr->Close();
     rocksdb_ptr = nullptr;

-    fs::remove_all(rocksdb_dir);
+    (void)fs::remove_all(rocksdb_dir);
     fs::create_directories(rocksdb_dir);
     initDB();
 }