#include <Storages/MergeTree/MergeTreeDataPartWriterWide.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

namespace
{
    constexpr auto DATA_FILE_EXTENSION = ".bin";
}

MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide(
    DiskPtr disk_,
    const String & part_path_,
    const MergeTreeData & storage_,
    const NamesAndTypesList & columns_list_,
    const std::vector<MergeTreeIndexPtr> & indices_to_recalc_,
    const String & marks_file_extension_,
    const CompressionCodecPtr & default_codec_,
    const MergeTreeWriterSettings & settings_,
    const MergeTreeIndexGranularity & index_granularity_)
    : IMergeTreeDataPartWriter(disk_, part_path_,
        storage_, columns_list_, indices_to_recalc_,
        marks_file_extension_,
        default_codec_, settings_, index_granularity_, false)
{
    const auto & columns = storage.getColumns();
    for (const auto & it : columns_list)
        addStreams(it.name, *it.type, columns.getCodecOrDefault(it.name, default_codec), settings.estimated_size);
}

void MergeTreeDataPartWriterWide::addStreams(
    const String & name,
    const IDataType & type,
    const CompressionCodecPtr & effective_codec,
    size_t estimated_size)
{
    IDataType::StreamCallback callback = [&] (const IDataType::SubstreamPath & substream_path)
    {
        if (settings.skip_offsets && !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes)
            return;

        String stream_name = IDataType::getFileNameForStream(name, substream_path);

        /// Shared offsets for Nested type.
        if (column_streams.count(stream_name))
            return;

        column_streams[stream_name] = std::make_unique<Stream>(
            stream_name,
            disk,
            part_path + stream_name, DATA_FILE_EXTENSION,
            part_path + stream_name, marks_file_extension,
            effective_codec,
            settings.max_compress_block_size,
            estimated_size,
            settings.aio_threshold);
    };

    IDataType::SubstreamPath stream_path;
    type.enumerateStreams(callback, stream_path);
}

IDataType::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGetter(
    const String & name, WrittenOffsetColumns & offset_columns)
{
    return [&, this] (const IDataType::SubstreamPath & substream_path) -> WriteBuffer *
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets && settings.skip_offsets)
            return nullptr;

        String stream_name = IDataType::getFileNameForStream(name, substream_path);

        /// Don't write offsets more than one time for Nested type.
        if (is_offsets && offset_columns.count(stream_name))
            return nullptr;

        return &column_streams[stream_name]->compressed;
    };
}
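
/// Writes a block to the wide part: every column goes to its own pair of
/// .bin/.mrk streams. When a permutation is given (the data must be written
/// in primary key order), already-sorted primary key and skip index columns
/// are taken from the corresponding blocks, and the remaining columns are
/// permuted on the fly; the permuted copy is released right after writing
/// to save RAM.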
void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Permutation * permutation,
    const Block & primary_key_block, const Block & skip_indexes_block)
{
    /// Fill index granularity for this block
    /// if it's unknown (in case of insert data or horizontal merge,
    /// but not in case of vertical merge)
    if (compute_granularity)
        fillIndexGranularity(block);

    auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{};

    auto it = columns_list.begin();
    for (size_t i = 0; i < columns_list.size(); ++i, ++it)
    {
        const ColumnWithTypeAndName & column = block.getByName(it->name);

        if (permutation)
        {
            if (primary_key_block.has(it->name))
            {
                const auto & primary_column = *primary_key_block.getByName(it->name).column;
                writeColumn(column.name, *column.type, primary_column, offset_columns);
            }
            else if (skip_indexes_block.has(it->name))
            {
                const auto & index_column = *skip_indexes_block.getByName(it->name).column;
                writeColumn(column.name, *column.type, index_column, offset_columns);
            }
            else
            {
                /// We rearrange the columns that are not included in the primary key here; then the result is released - to save RAM.
                ColumnPtr permuted_column = column.column->permute(*permutation, 0);
                writeColumn(column.name, *column.type, *permuted_column, offset_columns);
            }
        }
        else
        {
            writeColumn(column.name, *column.type, *column.column, offset_columns);
        }
    }
}

void MergeTreeDataPartWriterWide::writeSingleMark(
    const String & name,
    const IDataType & type,
    WrittenOffsetColumns & offset_columns,
    size_t number_of_rows,
    DB::IDataType::SubstreamPath & path)
{
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets && settings.skip_offsets)
            return;

        String stream_name = IDataType::getFileNameForStream(name, substream_path);

        /// Don't write offsets more than one time for Nested type.
        if (is_offsets && offset_columns.count(stream_name))
            return;

        Stream & stream = *column_streams[stream_name];

        /// There could already be enough data to compress into the new block.
        if (stream.compressed.offset() >= settings.min_compress_block_size)
            stream.compressed.next();

        writeIntBinary(stream.plain_hashing.count(), stream.marks);
        writeIntBinary(stream.compressed.offset(), stream.marks);
        if (settings.can_use_adaptive_granularity)
            writeIntBinary(number_of_rows, stream.marks);
    }, path);
}

size_t MergeTreeDataPartWriterWide::writeSingleGranule(
    const String & name,
    const IDataType & type,
    const IColumn & column,
    WrittenOffsetColumns & offset_columns,
    IDataType::SerializeBinaryBulkStatePtr & serialization_state,
    IDataType::SerializeBinaryBulkSettings & serialize_settings,
    size_t from_row,
    size_t number_of_rows,
    bool write_marks)
{
    if (write_marks)
        writeSingleMark(name, type, offset_columns, number_of_rows, serialize_settings.path);

    type.serializeBinaryBulkWithMultipleStreams(column, from_row, number_of_rows, serialize_settings, serialization_state);

    /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one.
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets && settings.skip_offsets)
            return;

        String stream_name = IDataType::getFileNameForStream(name, substream_path);

        /// Don't write offsets more than one time for Nested type.
        if (is_offsets && offset_columns.count(stream_name))
            return;

        column_streams[stream_name]->compressed.nextIfAtEnd();
    }, serialize_settings.path);

    return from_row + number_of_rows;
}
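
/// Writes one column granule by granule. The number of rows in each granule
/// comes from index_granularity; a nonzero index_offset means the tail of the
/// previous block's last granule has to be filled before the first new mark
/// is emitted. Each mark written by writeSingleMark above stores the offset
/// in the compressed file, the offset inside the decompressed block and, with
/// adaptive granularity, the number of rows in the granule.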
/// Column must not be empty. (column.size() != 0)
void MergeTreeDataPartWriterWide::writeColumn(
    const String & name,
    const IDataType & type,
    const IColumn & column,
    WrittenOffsetColumns & offset_columns)
{
    auto [it, inserted] = serialization_states.emplace(name, nullptr);
    if (inserted)
    {
        IDataType::SerializeBinaryBulkSettings serialize_settings;
        serialize_settings.getter = createStreamGetter(name, offset_columns);
        type.serializeBinaryBulkStatePrefix(serialize_settings, it->second);
    }

    const auto & global_settings = storage.global_context.getSettingsRef();
    IDataType::SerializeBinaryBulkSettings serialize_settings;
    serialize_settings.getter = createStreamGetter(name, offset_columns);
    serialize_settings.low_cardinality_max_dictionary_size = global_settings.low_cardinality_max_dictionary_size;
    serialize_settings.low_cardinality_use_single_dictionary_for_part = global_settings.low_cardinality_use_single_dictionary_for_part != 0;

    size_t total_rows = column.size();
    size_t current_row = 0;
    size_t current_column_mark = current_mark;
    while (current_row < total_rows)
    {
        size_t rows_to_write;
        bool write_marks = true;

        /// If there is `index_offset`, then the first mark goes not immediately, but after this number of rows.
        if (current_row == 0 && index_offset != 0)
        {
            write_marks = false;
            rows_to_write = index_offset;
        }
        else
        {
            if (index_granularity.getMarksCount() <= current_column_mark)
                throw Exception(
                    "Incorrect size of index granularity: expected mark " + toString(current_column_mark) +
                    ", but only have " + toString(index_granularity.getMarksCount()) + " marks",
                    ErrorCodes::LOGICAL_ERROR);

            rows_to_write = index_granularity.getMarkRows(current_column_mark);
        }

        if (rows_to_write != 0)
            data_written = true;

        current_row = writeSingleGranule(
            name,
            type,
            column,
            offset_columns,
            it->second,
            serialize_settings,
            current_row,
            rows_to_write,
            write_marks
        );

        if (write_marks)
            current_column_mark++;
    }

    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets)
        {
            String stream_name = IDataType::getFileNameForStream(name, substream_path);
            offset_columns.insert(stream_name);
        }
    }, serialize_settings.path);

    next_mark = current_column_mark;
    next_index_offset = current_row - total_rows;
}
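
/// Finishes data serialization: writes the suffix of the bulk serialization
/// state for every column (for LowCardinality columns this is presumably
/// where any pending shared dictionary data is flushed), optionally writes a
/// final mark pointing past the last row, then finalizes, fsyncs (if
/// requested) and checksums every stream.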
void MergeTreeDataPartWriterWide::finishDataSerialization(IMergeTreeDataPart::Checksums & checksums, bool sync)
{
    const auto & global_settings = storage.global_context.getSettingsRef();
    IDataType::SerializeBinaryBulkSettings serialize_settings;
    serialize_settings.low_cardinality_max_dictionary_size = global_settings.low_cardinality_max_dictionary_size;
    serialize_settings.low_cardinality_use_single_dictionary_for_part = global_settings.low_cardinality_use_single_dictionary_for_part != 0;
    WrittenOffsetColumns offset_columns;

    bool write_final_mark = (with_final_mark && data_written);

    {
        auto it = columns_list.begin();
        for (size_t i = 0; i < columns_list.size(); ++i, ++it)
        {
            if (!serialization_states.empty())
            {
                serialize_settings.getter = createStreamGetter(it->name, written_offset_columns ? *written_offset_columns : offset_columns);
                it->type->serializeBinaryBulkStateSuffix(serialize_settings, serialization_states[it->name]);
            }

            if (write_final_mark)
            {
                writeFinalMark(it->name, it->type, offset_columns, serialize_settings.path);
            }
        }
    }

    for (auto & stream : column_streams)
    {
        stream.second->finalize();
        if (sync)
            stream.second->sync();
        stream.second->addToChecksums(checksums);
    }

    column_streams.clear();
    serialization_states.clear();
}

void MergeTreeDataPartWriterWide::writeFinalMark(
    const std::string & column_name,
    const DataTypePtr column_type,
    WrittenOffsetColumns & offset_columns,
    DB::IDataType::SubstreamPath & path)
{
    writeSingleMark(column_name, *column_type, offset_columns, 0, path);
    /// Memoize information about offsets
    column_type->enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets)
        {
            String stream_name = IDataType::getFileNameForStream(column_name, substream_path);
            offset_columns.insert(stream_name);
        }
    }, path);
}

}