From b25cfa0b4d4b3fae41f61691b97236754359465a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?=
Date: Mon, 13 Dec 2021 11:50:46 +0100
Subject: [PATCH] InterpreterInsertQuery::buildChain: Make sure to keep the context alive

---
 src/Interpreters/InterpreterInsertQuery.cpp    |  3 +++
 src/QueryPipeline/Chain.h                      |  2 ++
 .../0_stateless/02137_mv_into_join.reference   |  3 +++
 .../queries/0_stateless/02137_mv_into_join.sql | 17 +++++++++++++++++
 4 files changed, 25 insertions(+)
 create mode 100644 tests/queries/0_stateless/02137_mv_into_join.reference
 create mode 100644 tests/queries/0_stateless/02137_mv_into_join.sql

diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp
index b7edf12e23f..d1b8a056053 100644
--- a/src/Interpreters/InterpreterInsertQuery.cpp
+++ b/src/Interpreters/InterpreterInsertQuery.cpp
@@ -196,6 +196,9 @@ Chain InterpreterInsertQuery::buildChainImpl(
     /// We create a pipeline of several streams, into which we will write data.
     Chain out;
 
+    /// Keep a reference to the context to make sure it stays alive until the chain is executed and destroyed
+    out.addInterpreterContext(context_ptr);
+
     /// NOTE: we explicitly ignore bound materialized views when inserting into Kafka Storage.
     ///       Otherwise we'll get duplicates when MV reads same rows again from Kafka.
     if (table->noPushingToViews() && !no_destination)
diff --git a/src/QueryPipeline/Chain.h b/src/QueryPipeline/Chain.h
index c5fdc34cecf..60dbad10131 100644
--- a/src/QueryPipeline/Chain.h
+++ b/src/QueryPipeline/Chain.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <Interpreters/Context_fwd.h>
 #include <Processors/IProcessor.h>
 #include <QueryPipeline/PipelineResourcesHolder.h>
 
@@ -42,6 +43,7 @@ public:
     void addTableLock(TableLockHolder lock) { holder.table_locks.emplace_back(std::move(lock)); }
     void addStorageHolder(StoragePtr storage) { holder.storage_holders.emplace_back(std::move(storage)); }
     void attachResources(PipelineResourcesHolder holder_) { holder = std::move(holder_); }
+    void addInterpreterContext(ContextPtr context) { holder.interpreter_context.emplace_back(std::move(context)); }
     PipelineResourcesHolder detachResources() { return std::move(holder); }
 
     void reset();
diff --git a/tests/queries/0_stateless/02137_mv_into_join.reference b/tests/queries/0_stateless/02137_mv_into_join.reference
new file mode 100644
index 00000000000..1228a2322e6
--- /dev/null
+++ b/tests/queries/0_stateless/02137_mv_into_join.reference
@@ -0,0 +1,3 @@
+sku_0001 black women nice shirt
+sku_0001_black sku_0001 black women nice shirt
+sku_0001_black sku_0001 black women nice shirt
diff --git a/tests/queries/0_stateless/02137_mv_into_join.sql b/tests/queries/0_stateless/02137_mv_into_join.sql
new file mode 100644
index 00000000000..cca896ac622
--- /dev/null
+++ b/tests/queries/0_stateless/02137_mv_into_join.sql
@@ -0,0 +1,17 @@
+CREATE TABLE main ( `id` String, `color` String, `section` String, `description` String) ENGINE = MergeTree ORDER BY tuple();
+CREATE TABLE destination_join ( `key` String, `id` String, `color` String, `section` String, `description` String) ENGINE = Join(ANY, LEFT, key);
+CREATE TABLE destination_set (`key` String) ENGINE = Set;
+
+CREATE MATERIALIZED VIEW mv_to_join TO `destination_join` AS SELECT concat(id, '_', color) AS key, * FROM main;
+CREATE MATERIALIZED VIEW mv_to_set TO `destination_set` AS SELECT key FROM destination_join;
+
+INSERT INTO main VALUES ('sku_0001','black','women','nice shirt');
+SELECT * FROM main;
+SELECT * FROM destination_join;
+SELECT * FROM destination_join WHERE key in destination_set;
+
+DROP TABLE mv_to_set;
+DROP TABLE destination_set;
+DROP TABLE mv_to_join;
+DROP TABLE destination_join;
+DROP TABLE main;
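
Reviewer note (not part of the patch, added for illustration): the fix works because Chain now co-owns the query Context through a shared_ptr stored in its resource holder, so the Context cannot be destroyed while the chain and the processors that reference it are still alive. Below is a minimal, self-contained C++ sketch of that keep-alive pattern; Context, Chain, buildChain and the member names are simplified stand-ins, not the real ClickHouse classes.

    // Sketch of the keep-alive pattern used by the patch (simplified stand-ins).
    #include <cassert>
    #include <memory>
    #include <string>
    #include <vector>

    struct Context { std::string current_database = "default"; };
    using ContextPtr = std::shared_ptr<const Context>;

    struct Chain
    {
        const Context * used_by_processors = nullptr;  // non-owning, like processors referencing the query context
        std::vector<ContextPtr> interpreter_context;   // owning holders, what addInterpreterContext fills

        void addInterpreterContext(ContextPtr context) { interpreter_context.emplace_back(std::move(context)); }
    };

    Chain buildChain()
    {
        auto context_ptr = std::make_shared<const Context>();
        Chain out;
        out.addInterpreterContext(context_ptr);        // the fix: the chain now co-owns the context
        out.used_by_processors = context_ptr.get();
        return out;                                    // context_ptr goes out of scope here
    }

    int main()
    {
        Chain chain = buildChain();
        // Safe only because the chain holds a reference; without addInterpreterContext
        // the pointer would dangle as soon as buildChain returned.
        assert(chain.used_by_processors->current_database == "default");
    }

Dropping the addInterpreterContext call in the sketch leaves used_by_processors dangling after buildChain returns, which mirrors the premature context destruction the test above guards against.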