// Source: mirror of https://github.com/ClickHouse/ClickHouse.git
// (synced 2024-12-15 10:52:30 +00:00, commit 09aaa2b5dd)
// Commit message: "It depends on how much parts do you have, but for some
// workload with InMemory only parts w/o merges, I got 5% increase."
// Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
// File: 48 lines, 1.1 KiB, C++
#pragma once

#include <cstdint>
#include <memory>

#include <Processors/Sinks/SinkToStorage.h>
#include <Storages/StorageInMemoryMetadata.h>

namespace DB
{

class Block;
class StorageMergeTree;
struct StorageSnapshot;
using StorageSnapshotPtr = std::shared_ptr<StorageSnapshot>;

/// Sink that writes incoming chunks into a (non-replicated) MergeTree table.
/// One sink instance serves one INSERT: onStart() is called before the first
/// chunk, consume() once per chunk, onFinish() after the last one.
class MergeTreeSink : public SinkToStorage
{
public:
    /// @param storage_ destination table; only a reference is stored, so the
    ///        storage must outlive the sink.
    /// @param metadata_snapshot_ table metadata frozen for the duration of the insert.
    /// @param max_parts_per_block_ limit on parts produced from a single block
    ///        (presumably 0 disables the limit — TODO confirm in the .cpp).
    /// @param context_ query context of the INSERT.
    MergeTreeSink(
        StorageMergeTree & storage_,
        StorageMetadataPtr metadata_snapshot_,
        size_t max_parts_per_block_,
        ContextPtr context_);

    /// Out-of-line: DelayedChunk is incomplete here, so the std::unique_ptr
    /// destructor must be instantiated in the .cpp where it is defined.
    ~MergeTreeSink() override;

    String getName() const override { return "MergeTreeSink"; }
    void consume(Chunk chunk) override;
    void onStart() override;
    void onFinish() override;

private:
    StorageMergeTree & storage;
    StorageMetadataPtr metadata_snapshot;
    size_t max_parts_per_block;
    ContextPtr context;
    StorageSnapshotPtr storage_snapshot;
    uint64_t chunk_dedup_seqnum = 0; /// input chunk ordinal number in case of dedup token

    /// We can delay processing for previous chunk and start writing a new one.
    /// Defined in the .cpp (pimpl-style) to keep part-writing details out of
    /// this header.
    struct DelayedChunk;
    std::unique_ptr<DelayedChunk> delayed_chunk;

    /// Flushes delayed_chunk (if any) before the next chunk / on finish.
    void finishDelayedChunk();
};

}
|