From af4a5a5f3ab0777e93e45dab07dcd3e480392abc Mon Sep 17 00:00:00 2001
From: Sergei Trifonov
Date: Wed, 13 Jul 2022 19:44:29 +0200
Subject: [PATCH] fix conflicts

---
 src/Disks/IO/ReadBufferFromRemoteFSGather.cpp   | 1 -
 src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 9 ---------
 src/IO/ReadBufferFromAzureBlobStorage.cpp       | 1 -
 src/IO/ReadBufferFromAzureBlobStorage.h         | 1 -
 src/Storages/Hive/HiveFile.cpp                  | 8 --------
 5 files changed, 20 deletions(-)

diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
index 8f70ab876ff..14614871185 100644
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
+++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
@@ -90,7 +90,6 @@ SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementation
         settings,
         max_single_read_retries,
         max_single_download_retries,
-        settings,
         /* use_external_buffer */true,
         read_until_position);
 }
diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
index da5051f8f47..55c466d45f6 100644
--- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
@@ -193,14 +193,8 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 doesn't support append to files");
 
     bool cache_on_write = cache
-<<<<<<< HEAD
-        && fs::path(path).extension() != ".tmp"
-        && disk_write_settings.enable_filesystem_cache_on_write_operations
-        && FileCacheFactory::instance().getSettings(getCacheBasePath()).cache_on_write_operations;
-=======
         && write_settings.enable_filesystem_cache_on_write_operations
         && FileCacheFactory::instance().getSettings(getCacheBasePath()).cache_on_write_operations;
->>>>>>> master
 
     auto settings_ptr = s3_settings.get();
     auto s3_buffer = std::make_unique<WriteBufferFromS3>(
@@ -211,10 +205,7 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
         attributes,
         buf_size,
         threadPoolCallbackRunner(getThreadPoolWriter()),
-<<<<<<< HEAD
         disk_write_settings,
-=======
->>>>>>> master
         cache_on_write ? cache : nullptr);
diff --git a/src/IO/ReadBufferFromAzureBlobStorage.cpp b/src/IO/ReadBufferFromAzureBlobStorage.cpp
index f624de56dc2..32e70305bea 100644
--- a/src/IO/ReadBufferFromAzureBlobStorage.cpp
+++ b/src/IO/ReadBufferFromAzureBlobStorage.cpp
@@ -27,7 +27,6 @@ ReadBufferFromAzureBlobStorage::ReadBufferFromAzureBlobStorage(
     const ReadSettings & read_settings_,
     size_t max_single_read_retries_,
     size_t max_single_download_retries_,
-    const ReadSettings & read_settings_,
     bool use_external_buffer_,
     size_t read_until_position_)
     : ReadBufferFromFileBase(read_settings_.remote_fs_buffer_size, nullptr, 0)
diff --git a/src/IO/ReadBufferFromAzureBlobStorage.h b/src/IO/ReadBufferFromAzureBlobStorage.h
index 136f1573fed..5396fcf9719 100644
--- a/src/IO/ReadBufferFromAzureBlobStorage.h
+++ b/src/IO/ReadBufferFromAzureBlobStorage.h
@@ -23,7 +23,6 @@ public:
         const ReadSettings & read_settings_,
         size_t max_single_read_retries_,
         size_t max_single_download_retries_,
-        const ReadSettings & read_settings_,
         bool use_external_buffer_ = false,
         size_t read_until_position_ = 0);
 
diff --git a/src/Storages/Hive/HiveFile.cpp b/src/Storages/Hive/HiveFile.cpp
index 23abc074da4..09c3aff4455 100644
--- a/src/Storages/Hive/HiveFile.cpp
+++ b/src/Storages/Hive/HiveFile.cpp
@@ -267,15 +267,7 @@ bool HiveParquetFile::useSplitMinMaxIndex() const
 
 void HiveParquetFile::prepareReader()
 {
-<<<<<<< HEAD
-    in = std::make_unique<ReadBufferFromHDFS>(
-        namenode_url,
-        path,
-        getContext()->getGlobalContext()->getConfigRef(),
-        ReadSettings{.throttler = getContext()->getRemoteReadThrottler()});
-=======
     in = std::make_unique<ReadBufferFromHDFS>(namenode_url, path, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings());
->>>>>>> master
     auto format_settings = getFormatSettings(getContext());
     std::atomic<int> is_stopped{0};
     THROW_ARROW_NOT_OK(parquet::arrow::OpenFile(asArrowFile(*in, format_settings, is_stopped, "Parquet", PARQUET_MAGIC_BYTES), arrow::default_memory_pool(), &reader));