diff --git a/src/Disks/HDFS/DiskHDFS.cpp b/src/Disks/HDFS/DiskHDFS.cpp
index a738264e3fb..0648fd9f08c 100644
--- a/src/Disks/HDFS/DiskHDFS.cpp
+++ b/src/Disks/HDFS/DiskHDFS.cpp
@@ -24,7 +24,7 @@ public:
     using Chunk = std::vector<std::string>;
     using Chunks = std::list<Chunk>;
 
-    HDFSPathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
+    explicit HDFSPathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
 
     void addPath(const String & path) override
     {
@@ -137,19 +137,20 @@ RemoteFSPathKeeperPtr DiskHDFS::createFSPathKeeper() const
 
 void DiskHDFS::removeFromRemoteFS(RemoteFSPathKeeperPtr fs_paths_keeper)
 {
     auto * hdfs_paths_keeper = dynamic_cast<HDFSPathKeeper *>(fs_paths_keeper.get());
-    hdfs_paths_keeper->removePaths([&](std::vector<std::string> && chunk)
-    {
-        for (const auto & hdfs_object_path : chunk)
+    if (hdfs_paths_keeper)
+        hdfs_paths_keeper->removePaths([&](std::vector<std::string> && chunk)
         {
-            const String & hdfs_path = hdfs_object_path;
-            const size_t begin_of_path = hdfs_path.find('/', hdfs_path.find("//") + 2);
+            for (const auto & hdfs_object_path : chunk)
+            {
+                const String & hdfs_path = hdfs_object_path;
+                const size_t begin_of_path = hdfs_path.find('/', hdfs_path.find("//") + 2);
 
-            /// Add path from root to file name
-            int res = hdfsDelete(hdfs_fs.get(), hdfs_path.substr(begin_of_path).c_str(), 0);
-            if (res == -1)
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "HDFSDelete failed with path: " + hdfs_path);
-        }
-    });
+                /// Add path from root to file name
+                int res = hdfsDelete(hdfs_fs.get(), hdfs_path.substr(begin_of_path).c_str(), 0);
+                if (res == -1)
+                    throw Exception(ErrorCodes::LOGICAL_ERROR, "HDFSDelete failed with path: " + hdfs_path);
+            }
+        });
 }
 
diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp
index 58f7e0554a2..a3f5fe89870 100644
--- a/src/Disks/S3/DiskS3.cpp
+++ b/src/Disks/S3/DiskS3.cpp
@@ -47,7 +47,7 @@ public:
     using Chunk = Aws::Vector<Aws::S3::Model::ObjectIdentifier>;
     using Chunks = std::list<Chunk>;
 
-    S3PathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
+    explicit S3PathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
 
     void addPath(const String & path) override
     {
@@ -178,18 +178,19 @@ void DiskS3::removeFromRemoteFS(RemoteFSPathKeeperPtr fs_paths_keeper)
     auto settings = current_settings.get();
     auto * s3_paths_keeper = dynamic_cast<S3PathKeeper *>(fs_paths_keeper.get());
 
-    s3_paths_keeper->removePaths([&](S3PathKeeper::Chunk && chunk)
-    {
-        LOG_DEBUG(log, "Remove AWS keys {}", S3PathKeeper::getChunkKeys(chunk));
-        Aws::S3::Model::Delete delkeys;
-        delkeys.SetObjects(chunk);
-        /// TODO: Make operation idempotent. Do not throw exception if key is already deleted.
-        Aws::S3::Model::DeleteObjectsRequest request;
-        request.SetBucket(bucket);
-        request.SetDelete(delkeys);
-        auto outcome = settings->client->DeleteObjects(request);
-        throwIfError(outcome);
-    });
+    if (s3_paths_keeper)
+        s3_paths_keeper->removePaths([&](S3PathKeeper::Chunk && chunk)
+        {
+            LOG_DEBUG(log, "Remove AWS keys {}", S3PathKeeper::getChunkKeys(chunk));
+            Aws::S3::Model::Delete delkeys;
+            delkeys.SetObjects(chunk);
+            /// TODO: Make operation idempotent. Do not throw exception if key is already deleted.
+            Aws::S3::Model::DeleteObjectsRequest request;
+            request.SetBucket(bucket);
+            request.SetDelete(delkeys);
+            auto outcome = settings->client->DeleteObjects(request);
+            throwIfError(outcome);
+        });
 }
 
 void DiskS3::moveFile(const String & from_path, const String & to_path)
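
Note: both hunks apply the same two fixes. `dynamic_cast` on a pointer returns `nullptr` when the runtime type does not match, so dereferencing the cast result unguarded would crash if a keeper of an unexpected type were ever passed in; the patch therefore wraps each `removePaths` call in a null check. The single-argument constructors are additionally marked `explicit` so a bare `size_t` cannot silently convert into a path keeper. Below is a minimal standalone sketch of this pattern; the stub types are simplified stand-ins for illustration, not the real RemoteFSPathKeeper hierarchy:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

/// Simplified stand-in for RemoteFSPathKeeper (illustration only).
struct PathKeeperBase
{
    virtual ~PathKeeperBase() = default;
};

/// Simplified stand-in for HDFSPathKeeper / S3PathKeeper.
struct VectorPathKeeper : PathKeeperBase
{
    /// `explicit` forbids implicit conversions such as `VectorPathKeeper k = 1000;`.
    explicit VectorPathKeeper(size_t chunk_limit_) : chunk_limit(chunk_limit_) {}

    size_t chunk_limit;
    std::vector<std::string> paths;
};

void removeFromRemoteFS(const std::shared_ptr<PathKeeperBase> & keeper)
{
    /// dynamic_cast on a pointer yields nullptr on type mismatch,
    /// so the result must be checked before it is dereferenced.
    auto * vector_keeper = dynamic_cast<VectorPathKeeper *>(keeper.get());
    if (vector_keeper)
        std::cout << "would remove " << vector_keeper->paths.size() << " paths\n";
    else
        std::cout << "unexpected keeper type, nothing removed\n";
}

int main()
{
    removeFromRemoteFS(std::make_shared<VectorPathKeeper>(1000)); // cast succeeds
    removeFromRemoteFS(std::make_shared<PathKeeperBase>());       // cast yields nullptr, safely skipped
}

The guard trades a hard crash for a silent no-op when the cast fails; an alternative would be to throw on `nullptr`, but the patch as written simply skips the removal.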