#include <Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h>
namespace DB
{
/// Writes data part in compact format.
class MergeTreeDataPartWriterCompact : public MergeTreeDataPartWriterOnDisk
|
2019-10-16 18:27:53 +00:00
|
|
|
{
|
|
|
|
public:
|
2019-10-22 10:50:17 +00:00
|
|
|
MergeTreeDataPartWriterCompact(
|
2020-05-10 13:33:27 +00:00
|
|
|
const MergeTreeData::DataPartPtr & data_part,
|
2019-10-22 10:50:17 +00:00
|
|
|
const NamesAndTypesList & columns_list,
|
2020-06-17 12:39:20 +00:00
|
|
|
const StorageMetadataPtr & metadata_snapshot_,
|
2019-12-18 16:41:11 +00:00
|
|
|
const std::vector<MergeTreeIndexPtr> & indices_to_recalc,
|
2019-10-22 10:50:17 +00:00
|
|
|
const String & marks_file_extension,
|
|
|
|
const CompressionCodecPtr & default_codec,
|
2019-12-18 15:54:45 +00:00
|
|
|
const MergeTreeWriterSettings & settings,
|
2019-11-07 11:11:38 +00:00
|
|
|
const MergeTreeIndexGranularity & index_granularity);
|
2019-10-22 10:50:17 +00:00
|
|
|
|
2020-03-18 03:27:32 +00:00
|
|
|
void write(const Block & block, const IColumn::Permutation * permutation,
|
|
|
|
const Block & primary_key_block, const Block & skip_indexes_block) override;
|
2019-10-16 18:27:53 +00:00
|
|
|
|
2020-04-17 11:59:10 +00:00
|
|
|
void finishDataSerialization(IMergeTreeDataPart::Checksums & checksums) override;
|
2019-10-21 17:23:06 +00:00
|
|
|
|
2020-04-26 21:19:25 +00:00
|
|
|
protected:
|
2020-04-27 18:12:17 +00:00
|
|
|
void fillIndexGranularity(size_t index_granularity_for_block, size_t rows_in_block) override;
|
2020-04-26 21:19:25 +00:00
|
|
|
|
2019-10-21 17:23:06 +00:00
|
|
|
private:
|
2019-10-16 18:27:53 +00:00
|
|
|
/// Write single granule of one column (rows between 2 marks)
|
2019-12-27 21:17:53 +00:00
|
|
|
void writeColumnSingleGranule(
|
2019-10-16 18:27:53 +00:00
|
|
|
const ColumnWithTypeAndName & column,
|
|
|
|
size_t from_row,
|
2019-12-27 21:17:53 +00:00
|
|
|
size_t number_of_rows) const;
|
2019-10-21 17:23:06 +00:00
|
|
|
|
2019-11-27 11:35:27 +00:00
|
|
|
void writeBlock(const Block & block);
|
|
|
|
|
2020-07-07 00:15:02 +00:00
|
|
|
void addToChecksums(MergeTreeDataPartChecksums & checksumns);
|
2019-11-27 11:35:27 +00:00
|
|
|
|
|
|
|
Block header;
|
2019-12-27 21:17:53 +00:00
|
|
|
|
2019-12-27 21:32:55 +00:00
|
|
|
/** Simplified SquashingTransform. The original one isn't suitable in this case
|
2019-12-27 21:17:53 +00:00
|
|
|
* as it can return smaller block from buffer without merging it with larger block if last is enough size.
|
|
|
|
* But in compact parts we should guarantee, that written block is larger or equals than index_granularity.
|
|
|
|
*/
|
|
|
|
class ColumnsBuffer
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void add(MutableColumns && columns);
|
|
|
|
size_t size() const;
|
|
|
|
Columns releaseColumns();
|
|
|
|
private:
|
|
|
|
MutableColumns accumulated_columns;
|
|
|
|
};
|
|
|
|
|
|
|
|
ColumnsBuffer columns_buffer;
|
2020-07-07 00:15:02 +00:00
|
|
|
|
2020-09-03 14:53:05 +00:00
|
|
|
/// hashing_buf -> compressed_buf -> plain_hashing -> plain_file
|
2020-07-07 00:15:02 +00:00
|
|
|
std::unique_ptr<WriteBufferFromFileBase> plain_file;
|
|
|
|
HashingWriteBuffer plain_hashing;
|
|
|
|
|
|
|
|
struct CompressedStream
|
|
|
|
{
|
|
|
|
CompressedWriteBuffer compressed_buf;
|
|
|
|
HashingWriteBuffer hashing_buf;
|
|
|
|
|
|
|
|
CompressedStream(WriteBuffer & buf, const CompressionCodecPtr & codec)
|
|
|
|
: compressed_buf(buf, codec), hashing_buf(compressed_buf) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
std::unordered_map<String, std::unique_ptr<CompressedStream>> compressed_streams;
|
|
|
|
|
|
|
|
/// marks -> marks_file
|
|
|
|
std::unique_ptr<WriteBufferFromFileBase> marks_file;
|
|
|
|
HashingWriteBuffer marks;
|
2019-10-16 18:27:53 +00:00
|
|
|
};
}