Merge pull request #18481 from ClickHouse/disable_write_with_aio

Disable write with AIO even for big merges
alexey-milovidov 2020-12-25 04:24:43 +03:00 committed by GitHub
commit 491f481713
4 changed files with 3 additions and 16 deletions
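For context, this change removes the path where large merges were written through AIO: the writer settings carried an estimated output size, and when it reached min_merge_bytes_to_use_direct_io the AIO write buffer could be used. Below is a minimal sketch of that decision with invented names (WriterKind, WriterSettings, chooseWriter) standing in for the actual ClickHouse classes:

    #include <cstddef>

    // Illustrative only: these names are not ClickHouse's API. The threshold
    // corresponds to the min_merge_bytes_to_use_direct_io setting.
    enum class WriterKind { Buffered, AIO };

    struct WriterSettings
    {
        size_t aio_threshold = 0;   // min_merge_bytes_to_use_direct_io
        size_t estimated_size = 0;  // sum of merged column sizes; left at 0 after this commit
    };

    WriterKind chooseWriter(const WriterSettings & settings)
    {
        // Before this commit, big merges could take the AIO branch; with
        // estimated_size never filled in, the buffered writer is always chosen.
        if (settings.aio_threshold > 0 && settings.estimated_size >= settings.aio_threshold)
            return WriterKind::AIO;
        return WriterKind::Buffered;
    }

Since estimated_size now stays at its default of 0, the AIO branch can never be taken and WriteBufferAIO (flagged as buggy by the FIXME in MergeTreeWriterSettings below) is no longer used for merge output.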


@@ -685,7 +685,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
if (disk->exists(new_part_tmp_path))
throw Exception("Directory " + fullPath(disk, new_part_tmp_path) + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS);
MergeTreeData::DataPart::ColumnToSize merged_column_to_size;
Names all_column_names = metadata_snapshot->getColumns().getNamesOfPhysical();
NamesAndTypesList storage_columns = metadata_snapshot->getColumns().getAllPhysical();
@@ -767,6 +766,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
rows_sources_uncompressed_write_buf = tmp_disk->writeFile(rows_sources_file_path);
rows_sources_write_buf = std::make_unique<CompressedWriteBuffer>(*rows_sources_uncompressed_write_buf);
MergeTreeData::DataPart::ColumnToSize merged_column_to_size;
for (const MergeTreeData::DataPartPtr & part : parts)
part->accumulateColumnSizes(merged_column_to_size);
@@ -921,7 +921,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
merging_columns,
index_factory.getMany(metadata_snapshot->getSecondaryIndices()),
compression_codec,
merged_column_to_size,
data_settings->min_merge_bytes_to_use_direct_io,
blocks_are_granules_size};


@@ -46,7 +46,8 @@ struct MergeTreeWriterSettings
bool rewrite_primary_key;
bool blocks_are_granules_size;
/// true if we write temporary files during alter.
/// Used for AIO threshold comparison
/// FIXME currently doesn't work because WriteBufferAIO contains obscure bug(s)
size_t estimated_size = 0;
};
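The estimated_size field stays in the settings struct, but callers no longer fill it: the accumulation removed from MergedBlockOutputStream below was the only place that did. Roughly what that removed code computed, sketched with standard-library stand-ins for ClickHouse's own ColumnToSize and name-list types:

    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    // Simplified stand-in: ClickHouse uses MergeTreeData::DataPart::ColumnToSize here.
    using ColumnToSize = std::map<std::string, size_t>;

    size_t estimateMergedSize(const std::vector<std::string> & column_names,
                              const ColumnToSize & merged_column_to_size)
    {
        size_t estimated_size = 0;
        for (const auto & name : column_names)
        {
            auto it = merged_column_to_size.find(name);
            if (it != merged_column_to_size.end())
                estimated_size += it->second;  // on-disk bytes of this column across the merged parts
        }
        return estimated_size;  // previously compared against min_merge_bytes_to_use_direct_io
    }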


@@ -27,7 +27,6 @@ MergedBlockOutputStream::MergedBlockOutputStream(
columns_list_,
skip_indices,
default_codec_,
{},
data_part->storage.global_context.getSettings().min_bytes_to_use_direct_io,
blocks_are_granules_size)
{
@@ -39,7 +38,6 @@ MergedBlockOutputStream::MergedBlockOutputStream(
const NamesAndTypesList & columns_list_,
const MergeTreeIndices & skip_indices,
CompressionCodecPtr default_codec_,
const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size,
size_t aio_threshold,
bool blocks_are_granules_size)
: IMergedBlockOutputStream(data_part, metadata_snapshot_)
@@ -54,16 +52,6 @@ MergedBlockOutputStream::MergedBlockOutputStream(
/* rewrite_primary_key = */ true,
blocks_are_granules_size);
if (aio_threshold > 0 && !merged_column_to_size.empty())
{
for (const auto & column : columns_list)
{
auto size_it = merged_column_to_size.find(column.name);
if (size_it != merged_column_to_size.end())
writer_settings.estimated_size += size_it->second;
}
}
if (!part_path.empty())
volume->getDisk()->createDirectories(part_path);


@@ -27,7 +27,6 @@ public:
const NamesAndTypesList & columns_list_,
const MergeTreeIndices & skip_indices,
CompressionCodecPtr default_codec_,
const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size,
size_t aio_threshold,
bool blocks_are_granules_size = false);