mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-09-20 08:40:50 +00:00
InterpreterInsertQuery::buildChain: Make sure to keep the context alive
This commit is contained in:
parent
c9e9bd78b2
commit
b25cfa0b4d
@ -196,6 +196,9 @@ Chain InterpreterInsertQuery::buildChainImpl(
|
||||
/// We create a pipeline of several streams, into which we will write data.
|
||||
Chain out;
|
||||
|
||||
/// Keep a reference to the context to make sure it stays alive until the chain is executed and destroyed
|
||||
out.addInterpreterContext(context_ptr);
|
||||
|
||||
/// NOTE: we explicitly ignore bound materialized views when inserting into Kafka Storage.
|
||||
/// Otherwise we'll get duplicates when MV reads same rows again from Kafka.
|
||||
if (table->noPushingToViews() && !no_destination)
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
|
||||
@ -42,6 +43,7 @@ public:
|
||||
/// Stash a table lock in the resources holder so the lock is released only when the holder is destroyed.
void addTableLock(TableLockHolder lock) { holder.table_locks.emplace_back(std::move(lock)); }
|
||||
/// Keep a StoragePtr alive for the lifetime of the held resources (prevents the storage from being dropped mid-pipeline).
void addStorageHolder(StoragePtr storage) { holder.storage_holders.emplace_back(std::move(storage)); }
|
||||
/// Replace the current resources holder wholesale. NOTE: this overwrites (and thus releases) anything
/// previously added via addTableLock/addStorageHolder/addInterpreterContext — callers must attach first.
void attachResources(PipelineResourcesHolder holder_) { holder = std::move(holder_); }
|
||||
/// Keep a reference to an interpreter Context so it stays alive until the chain's resources are destroyed
/// (see InterpreterInsertQuery::buildChainImpl, which relies on this for the query context).
void addInterpreterContext(ContextPtr context) { holder.interpreter_context.emplace_back(std::move(context)); }
|
||||
/// Transfer ownership of all held resources to the caller; this Chain's holder is left moved-from (empty).
PipelineResourcesHolder detachResources() { return std::move(holder); }
|
||||
|
||||
void reset();
|
||||
|
3
tests/queries/0_stateless/02137_mv_into_join.reference
Normal file
3
tests/queries/0_stateless/02137_mv_into_join.reference
Normal file
@ -0,0 +1,3 @@
|
||||
sku_0001 black women nice shirt
|
||||
sku_0001_black sku_0001 black women nice shirt
|
||||
sku_0001_black sku_0001 black women nice shirt
|
17
tests/queries/0_stateless/02137_mv_into_join.sql
Normal file
17
tests/queries/0_stateless/02137_mv_into_join.sql
Normal file
@ -0,0 +1,17 @@
|
||||
-- Regression test for INSERT through chained materialized views into Join and Set
-- engines (InterpreterInsertQuery::buildChain must keep its interpreter context
-- alive until the chain is executed and destroyed — see the parent commit).

-- Source table receiving the raw insert.
CREATE TABLE main ( `id` String, `color` String, `section` String, `description` String) ENGINE = MergeTree ORDER BY tuple();
|
||||
-- First-level destination: Join engine keyed by concat(id, '_', color).
CREATE TABLE destination_join ( `key` String, `id` String, `color` String, `section` String, `description` String) ENGINE = Join(ANY, LEFT, key);
|
||||
-- Second-level destination fed from destination_join.
CREATE TABLE destination_set (`key` String) ENGINE = Set;
|
||||
|
||||
-- MV chain: main -> destination_join -> destination_set.
CREATE MATERIALIZED VIEW mv_to_join TO `destination_join` AS SELECT concat(id, '_', color) AS key, * FROM main;
|
||||
CREATE MATERIALIZED VIEW mv_to_set TO `destination_set` AS SELECT key FROM destination_join;
|
||||
|
||||
-- One row; the MV chain should propagate it through both destinations.
INSERT INTO main VALUES ('sku_0001','black','women','nice shirt');
|
||||
SELECT * FROM main;
|
||||
SELECT * FROM destination_join;
|
||||
-- Filter the Join table through the Set; per the .reference file this returns the same row again.
SELECT * FROM destination_join WHERE key in destination_set;
|
||||
|
||||
-- Cleanup: drop each MV before its destination table.
DROP TABLE mv_to_set;
|
||||
DROP TABLE destination_set;
|
||||
DROP TABLE mv_to_join;
|
||||
DROP TABLE destination_join;
|
||||
DROP TABLE main;
|
Loading…
Reference in New Issue
Block a user