ClickHouse/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h


#pragma once

#include <Core/Names.h>
#include <Core/Types.h>
#include <IO/ReadBuffer.h>

#include <cppkafka/cppkafka.h>

#include <atomic>
#include <memory>
#include <vector>

namespace Poco
{
class Logger;
}
namespace DB
{
using ConsumerPtr = std::shared_ptr<cppkafka::Consumer>;
class ReadBufferFromKafkaConsumer : public ReadBuffer
{
public:
ReadBufferFromKafkaConsumer(
ConsumerPtr consumer_,
Poco::Logger * log_,
size_t max_batch_size,
size_t poll_timeout_,
bool intermediate_commit_,
const std::atomic<bool> & stopped_);
~ReadBufferFromKafkaConsumer() override;
void allowNext() { allowed = true; } // Allow reading the next message.
void commit(); // Commit all processed messages.
void subscribe(const Names & topics); // Subscribe internal consumer to topics.
void unsubscribe(); // Unsubscribe internal consumer in case of failure.
auto pollTimeout() const { return poll_timeout; }
// Return values for the message that's being read.
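// Note: `current` points one past the message being read, so `current[-1]`
// is valid only after a successful poll has advanced the iterator.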
String currentTopic() const { return current[-1].get_topic(); }
String currentKey() const { return current[-1].get_key(); }
auto currentOffset() const { return current[-1].get_offset(); }
auto currentPartition() const { return current[-1].get_partition(); }
auto currentTimestamp() const { return current[-1].get_timestamp(); }
private:
using Messages = std::vector<cppkafka::Message>;

ConsumerPtr consumer;
Poco::Logger * log;

const size_t batch_size = 1; // Maximum number of messages fetched in one poll.
const size_t poll_timeout = 0; // Poll timeout, in milliseconds.

bool stalled = false; // Last poll returned no messages; treat the buffer as exhausted.
bool intermediate_commit = true; // Commit processed messages after each batch, not only on commit().
bool allowed = true; // Gates nextImpl(): the next message is fetched only after allowNext().

const std::atomic<bool> & stopped; // External stop flag; set by the owner on shutdown.

Messages messages; // Current batch fetched from Kafka.
Messages::const_iterator current; // One past the message currently exposed through the buffer.

// Fetches the next batch if needed and exposes one message through the
// working buffer; returns false when stalled, stopped, or not allowed.
bool nextImpl() override;
};
}
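
/* Usage sketch (illustrative only, not part of the original header).
   Assumes `consumer`, `log`, and `stopped` are constructed by the owner
   (e.g. StorageKafka); the numeric arguments are placeholder values.

       auto buffer = std::make_shared<DB::ReadBufferFromKafkaConsumer>(
           consumer, log,
           100,   // max_batch_size
           500,   // poll_timeout_, in milliseconds
           true,  // intermediate_commit_
           stopped);

       buffer->subscribe({"events"}); // Names is a vector of String
       while (!buffer->eof())         // eof() pulls the next message via nextImpl()
       {
           // ... parse one message from the buffer's working area ...
           buffer->allowNext();       // permit fetching the following message
       }
       buffer->commit();              // commit offsets of all processed messages
*/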