fix conflicts

Sergei Trifonov, 2022-07-13 19:44:29 +02:00
parent f854507729
commit af4a5a5f3a
5 changed files with 0 additions and 20 deletions


@@ -90,7 +90,6 @@ SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementation
settings,
max_single_read_retries,
max_single_download_retries,
-settings,
/* use_external_buffer */true,
read_until_position);
}
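
The line marked removed above is a duplicate the merge left at the call site: settings is already passed earlier in the argument list, so the second copy leaves the call with one argument too many once the constructor (deduplicated in the hunks further down) takes the read settings only once. A minimal, self-contained sketch with toy names, not the actual ClickHouse types, of why such a call stops compiling:

struct ReadSettings {};

struct ReadBufferFromBlob   // toy stand-in, not the real class
{
    ReadBufferFromBlob(const ReadSettings &, size_t max_read_retries, size_t max_download_retries,
                       bool use_external_buffer, size_t read_until_position) {}
};

void example()
{
    ReadSettings settings;
    // Merge leftover: 'settings' passed twice -> no matching constructor, so this does not compile.
    // ReadBufferFromBlob buf(settings, 3, 3, settings, /* use_external_buffer */ true, 0);
    ReadBufferFromBlob buf(settings, 3, 3, /* use_external_buffer */ true, 0);
    (void)buf;
}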


@@ -193,14 +193,8 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 doesn't support append to files");
bool cache_on_write = cache
-<<<<<<< HEAD
-&& fs::path(path).extension() != ".tmp"
-&& disk_write_settings.enable_filesystem_cache_on_write_operations
-&& FileCacheFactory::instance().getSettings(getCacheBasePath()).cache_on_write_operations;
-=======
&& write_settings.enable_filesystem_cache_on_write_operations
&& FileCacheFactory::instance().getSettings(getCacheBasePath()).cache_on_write_operations;
->>>>>>> master
auto settings_ptr = s3_settings.get();
auto s3_buffer = std::make_unique<WriteBufferFromS3>(
@@ -211,10 +205,7 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
attributes,
buf_size,
threadPoolCallbackRunner(getThreadPoolWriter()),
-<<<<<<< HEAD
disk_write_settings,
-=======
->>>>>>> master
cache_on_write ? cache : nullptr);
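
Going by the hunk arithmetic (14 lines down to 8 above, 10 down to 7 here, with no additions anywhere in the commit), the resolution keeps the master-side predicate and retains HEAD's extra disk_write_settings argument to WriteBufferFromS3, while the markers and the stale HEAD-side condition go. Purely for orientation, a self-contained sketch with stand-in types (not the ClickHouse classes) of the shape the surviving check takes:

// Stand-ins for the write settings and the cache configuration referenced above.
struct WriteSettings { bool enable_filesystem_cache_on_write_operations = false; };
struct CacheConfig   { bool cache_on_write_operations = false; };

bool shouldCacheOnWrite(bool has_cache, const WriteSettings & write_settings, const CacheConfig & cache_config)
{
    // A cache must be attached, the caller's write settings must opt in,
    // and the cache itself must be configured for write-through caching.
    return has_cache
        && write_settings.enable_filesystem_cache_on_write_operations
        && cache_config.cache_on_write_operations;
}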


@@ -27,7 +27,6 @@ ReadBufferFromAzureBlobStorage::ReadBufferFromAzureBlobStorage(
const ReadSettings & read_settings_,
size_t max_single_read_retries_,
size_t max_single_download_retries_,
-const ReadSettings & read_settings_,
bool use_external_buffer_,
size_t read_until_position_)
: ReadBufferFromFileBase(read_settings_.remote_fs_buffer_size, nullptr, 0)


@@ -23,7 +23,6 @@ public:
const ReadSettings & read_settings_,
size_t max_single_read_retries_,
size_t max_single_download_retries_,
-const ReadSettings & read_settings_,
bool use_external_buffer_ = false,
size_t read_until_position_ = 0);
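
This hunk and the previous one are the definition and declaration sides of the same fix: the merge left read_settings_ listed twice in one parameter list, which the compiler rejects as a parameter redefinition, so nothing builds until the duplicate is dropped. A minimal, self-contained illustration with toy types rather than the real constructor:

struct ReadSettings { size_t remote_fs_buffer_size = 0; };

class ReadBuffer   // toy stand-in
{
public:
    // Merge leftover: the same parameter name twice in one parameter list.
    // Rejected at compile time ("redefinition of parameter 'read_settings_'").
    // ReadBuffer(const ReadSettings & read_settings_, size_t max_single_read_retries_,
    //            const ReadSettings & read_settings_, bool use_external_buffer_ = false);

    // Deduplicated form, matching the resolution in these two hunks.
    ReadBuffer(const ReadSettings & read_settings_, size_t max_single_read_retries_,
               bool use_external_buffer_ = false);
};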


@@ -267,15 +267,7 @@ bool HiveParquetFile::useSplitMinMaxIndex() const
void HiveParquetFile::prepareReader()
{
-<<<<<<< HEAD
-in = std::make_unique<ReadBufferFromHDFS>(
-namenode_url,
-path,
-getContext()->getGlobalContext()->getConfigRef(),
-ReadSettings{.throttler = getContext()->getRemoteReadThrottler()});
-=======
in = std::make_unique<ReadBufferFromHDFS>(namenode_url, path, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings());
->>>>>>> master
auto format_settings = getFormatSettings(getContext());
std::atomic<int> is_stopped{0};
THROW_ARROW_NOT_OK(parquet::arrow::OpenFile(asArrowFile(*in, format_settings, is_stopped, "Parquet", PARQUET_MAGIC_BYTES), arrow::default_memory_pool(), &reader));
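
For this last file the counts (15 lines down to 7) indicate the whole HEAD-side block, with its inline ReadSettings{.throttler = ...} initializer, was dropped and master's single call through getContext()->getReadSettings() kept, so any throttling has to arrive via the context's read settings rather than being wired at this call site. As a toy contrast of the two styles (stand-in types, not the ClickHouse API):

#include <memory>

struct Throttler {};
struct ReadSettings { std::shared_ptr<Throttler> throttler; };   // stand-in for the real struct

ReadSettings settingsFromContext() { return {}; }   // stand-in for getContext()->getReadSettings()

void openFile()
{
    // HEAD side: build the settings inline, wiring the remote-read throttler explicitly.
    ReadSettings inline_settings{.throttler = std::make_shared<Throttler>()};

    // Kept master side: take whatever read settings the context already carries.
    ReadSettings from_context = settingsFromContext();

    (void)inline_settings;
    (void)from_context;
}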