// Mirror of https://github.com/ClickHouse/ClickHouse.git
// (synced 2024-11-18 21:51:57 +00:00, commit 5851316742)
// Commit message: print better offsets on commit; stop consumption at the read-buffer
// level; don't use cancellation in the middle of copyData(); add test; disable the
// squashing stream for Kafka materialized views; commit the last read message, since
// rdkafka auto-modifies the offset.
#pragma once

#include <DataStreams/BlockIO.h>
#include <DataStreams/IBlockOutputStream.h>
#include <Interpreters/Context.h>
#include <Interpreters/IInterpreter.h>
#include <Parsers/ASTInsertQuery.h>
namespace DB
|
|
{
|
|
|
|
|
|
/** Interprets the INSERT query.
|
|
*/
|
|
class InterpreterInsertQuery : public IInterpreter
|
|
{
|
|
public:
|
|
InterpreterInsertQuery(const ASTPtr & query_ptr_, const Context & context_, bool allow_materialized_ = false, bool no_squash_ = false);
|
|
|
|
/** Prepare a request for execution. Return block streams
|
|
* - the stream into which you can write data to execute the query, if INSERT;
|
|
* - the stream from which you can read the result of the query, if SELECT and similar;
|
|
* Or nothing if the request INSERT SELECT (self-sufficient query - does not accept the input data, does not return the result).
|
|
*/
|
|
BlockIO execute() override;
|
|
|
|
std::pair<String, String> getDatabaseTable() const;
|
|
|
|
private:
|
|
StoragePtr getTable(const ASTInsertQuery & query);
|
|
Block getSampleBlock(const ASTInsertQuery & query, const StoragePtr & table);
|
|
void checkAccess(const ASTInsertQuery & query);
|
|
|
|
ASTPtr query_ptr;
|
|
const Context & context;
|
|
const bool allow_materialized;
|
|
const bool no_squash;
|
|
};
|
|
|
|
|
|
}
|