diff --git a/dbms/include/DB/DataStreams/RemoteBlockInputStream.h b/dbms/include/DB/DataStreams/RemoteBlockInputStream.h
index fe9b693f8db..336e836207d 100644
--- a/dbms/include/DB/DataStreams/RemoteBlockInputStream.h
+++ b/dbms/include/DB/DataStreams/RemoteBlockInputStream.h
@@ -29,24 +29,28 @@ private:
 public:
 	/// Takes an already established connection.
 	RemoteBlockInputStream(Connection & connection_, const String & query_, const Settings * settings_,
-		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete)
-		: connection(&connection_), query(query_), external_tables(external_tables_), stage(stage_)
+		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete,
+		const Context & context = Context{})
+		: connection(&connection_), query(query_), external_tables(external_tables_), stage(stage_), context(context)
 	{
 		init(settings_);
 	}

 	/// Takes an already established connection. Takes ownership of the connection from the pool.
 	RemoteBlockInputStream(ConnectionPool::Entry & pool_entry_, const String & query_, const Settings * settings_,
-		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete)
-		: pool_entry(pool_entry_), connection(&*pool_entry_), query(query_), external_tables(external_tables_), stage(stage_)
+		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete,
+		const Context & context = Context{})
+		: pool_entry(pool_entry_), connection(&*pool_entry_), query(query_),
+		external_tables(external_tables_), stage(stage_), context(context)
 	{
 		init(settings_);
 	}

 	/// Takes a pool from which the connection will have to be obtained.
 	RemoteBlockInputStream(IConnectionPool * pool_, const String & query_, const Settings * settings_,
-		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete)
-		: pool(pool_), query(query_), external_tables(external_tables_), stage(stage_)
+		const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete,
+		const Context & context = Context{})
+		: pool(pool_), query(query_), external_tables(external_tables_), stage(stage_), context(context)
 	{
 		init(settings_);
 	}
@@ -103,7 +107,8 @@ protected:
 		{
 			StoragePtr cur = table.second;
 			QueryProcessingStage::Enum stage = QueryProcessingStage::Complete;
-			DB::BlockInputStreams input = cur->read(cur->getColumnNamesList(), ASTPtr(), settings, stage, DEFAULT_BLOCK_SIZE, 1);
+			DB::BlockInputStreams input = cur->read(cur->getColumnNamesList(), ASTPtr(), context, settings,
+				stage, DEFAULT_BLOCK_SIZE, 1);
 			if (input.size() == 0)
 				res.push_back(std::make_pair(new OneBlockInputStream(cur->getSampleBlock()), table.first));
 			else
@@ -246,6 +251,7 @@ private:
 	/// Temporary tables that need to be sent to the remote servers.
 	Tables external_tables;
 	QueryProcessingStage::Enum stage;
+	Context context;

 	/// Whether the query has been sent (this is done before receiving the first block).
 	bool sent_query = false;
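The stream now keeps its own copy of the `Context` and uses it when reading the external (temporary) tables it is about to forward to the remote server. For callers the only visible change is the extra trailing argument, which defaults to `Context{}`. A minimal usage sketch, assuming a connection, settings, external tables and a context already exist at the call site (everything around the constructor call is illustrative, not part of the patch):

```cpp
/// Illustrative only: constructing the stream after this change.
/// `connection`, `settings`, `external_tables` and `context` are assumed to be set up already.
BlockInputStreamPtr stream = new RemoteBlockInputStream(
	connection,                         /// ready-made connection
	"SELECT count() FROM test.hits",    /// query text
	&settings,
	external_tables,                    /// temporary tables to forward
	QueryProcessingStage::Complete,
	context);                           /// new: used when reading the external tables
```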
diff --git a/dbms/include/DB/Storages/IStorage.h b/dbms/include/DB/Storages/IStorage.h
index 7148e0207bc..a4153f165bf 100644
--- a/dbms/include/DB/Storages/IStorage.h
+++ b/dbms/include/DB/Storages/IStorage.h
@@ -162,6 +162,7 @@ public:
 	virtual BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/dbms/include/DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h
index 89b90025e27..b7f80cd37cd 100644
--- a/dbms/include/DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h
+++ b/dbms/include/DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h
@@ -20,6 +20,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageBuffer.h b/dbms/include/DB/Storages/StorageBuffer.h
index 2eb9a43246c..a15979453c7 100644
--- a/dbms/include/DB/Storages/StorageBuffer.h
+++ b/dbms/include/DB/Storages/StorageBuffer.h
@@ -61,6 +61,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageChunkMerger.h b/dbms/include/DB/Storages/StorageChunkMerger.h
index d3a4fb97a55..c8087c5ebe5 100644
--- a/dbms/include/DB/Storages/StorageChunkMerger.h
+++ b/dbms/include/DB/Storages/StorageChunkMerger.h
@@ -40,6 +40,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageChunkRef.h b/dbms/include/DB/Storages/StorageChunkRef.h
index 3f643d5705a..6f0575e4a2c 100644
--- a/dbms/include/DB/Storages/StorageChunkRef.h
+++ b/dbms/include/DB/Storages/StorageChunkRef.h
@@ -25,6 +25,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageChunks.h b/dbms/include/DB/Storages/StorageChunks.h
index 196b6df98b3..ca07a2611b3 100644
--- a/dbms/include/DB/Storages/StorageChunks.h
+++ b/dbms/include/DB/Storages/StorageChunks.h
@@ -37,6 +37,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
@@ -46,6 +47,7 @@ public:
 		const std::string & chunk_name,
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageDistributed.h b/dbms/include/DB/Storages/StorageDistributed.h
index 1425d99d132..c0c9367f424 100644
--- a/dbms/include/DB/Storages/StorageDistributed.h
+++ b/dbms/include/DB/Storages/StorageDistributed.h
@@ -60,6 +60,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
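Every storage implements the same virtual; the new `const Context &` slots in between the query AST and the settings. A sketch of what an override looks like after this change — only the signature is dictated by the patch; the class name `StorageExample`, the `inner_table` member and the body are hypothetical, and the rest of the `IStorage` interface is omitted:

```cpp
/// Hypothetical storage, shown only to illustrate the new read() signature.
class StorageExample : public IStorage
{
public:
	BlockInputStreams read(
		const Names & column_names,
		ASTPtr query,
		const Context & context,          /// new parameter, supplied by the interpreter
		const Settings & settings,
		QueryProcessingStage::Enum & processed_stage,
		size_t max_block_size = DEFAULT_BLOCK_SIZE,
		unsigned threads = 1) override
	{
		processed_stage = QueryProcessingStage::FetchColumns;
		/// Delegating storages (Buffer, Merge, MaterializedView, ...) simply forward
		/// the context to the underlying table's read(), as the .cpp hunks below show.
		return inner_table->read(column_names, query, context, settings,
			processed_stage, max_block_size, threads);
	}

private:
	StoragePtr inner_table;               /// hypothetical delegate
};
```

The added `const` on the by-value `max_block_size`/`threads` parameters in many of the .cpp definitions below is cosmetic; apart from the new `context` parameter the interface is unchanged.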
diff --git a/dbms/include/DB/Storages/StorageLog.h b/dbms/include/DB/Storages/StorageLog.h
index 9194d7b7667..e445cf883b8 100644
--- a/dbms/include/DB/Storages/StorageLog.h
+++ b/dbms/include/DB/Storages/StorageLog.h
@@ -162,6 +162,7 @@ public:
 	virtual BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
@@ -227,6 +228,7 @@ protected:
 		size_t to_mark,
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageMaterializedView.h b/dbms/include/DB/Storages/StorageMaterializedView.h
index f4dba33a8bb..23104f8e570 100644
--- a/dbms/include/DB/Storages/StorageMaterializedView.h
+++ b/dbms/include/DB/Storages/StorageMaterializedView.h
@@ -37,6 +37,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageMemory.h b/dbms/include/DB/Storages/StorageMemory.h
index 5c2f8da4831..2fb561c6e6d 100644
--- a/dbms/include/DB/Storages/StorageMemory.h
+++ b/dbms/include/DB/Storages/StorageMemory.h
@@ -83,6 +83,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageMerge.h b/dbms/include/DB/Storages/StorageMerge.h
index b4f14a370b4..5700bd29878 100644
--- a/dbms/include/DB/Storages/StorageMerge.h
+++ b/dbms/include/DB/Storages/StorageMerge.h
@@ -49,6 +49,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageMergeTree.h b/dbms/include/DB/Storages/StorageMergeTree.h
index f1bfb184ccb..a2c4f68ee26 100644
--- a/dbms/include/DB/Storages/StorageMergeTree.h
+++ b/dbms/include/DB/Storages/StorageMergeTree.h
@@ -70,6 +70,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageNull.h b/dbms/include/DB/Storages/StorageNull.h
index cd666deac51..0460ff11096 100644
--- a/dbms/include/DB/Storages/StorageNull.h
+++ b/dbms/include/DB/Storages/StorageNull.h
@@ -33,10 +33,11 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
-		size_t max_block_size = DEFAULT_BLOCK_SIZE,
-		unsigned threads = 1) override
+		const size_t max_block_size = DEFAULT_BLOCK_SIZE,
+		const unsigned threads = 1) override
 	{
 		return { new NullBlockInputStream };
 	}
diff --git a/dbms/include/DB/Storages/StorageReplicatedMergeTree.h b/dbms/include/DB/Storages/StorageReplicatedMergeTree.h
index 6d1f52987fa..3de03cd27e4 100644
--- a/dbms/include/DB/Storages/StorageReplicatedMergeTree.h
+++ b/dbms/include/DB/Storages/StorageReplicatedMergeTree.h
@@ -74,6 +74,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemDatabases.h b/dbms/include/DB/Storages/StorageSystemDatabases.h
index 90bcce9db58..f3a3188e321 100644
--- a/dbms/include/DB/Storages/StorageSystemDatabases.h
+++ b/dbms/include/DB/Storages/StorageSystemDatabases.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemEvents.h b/dbms/include/DB/Storages/StorageSystemEvents.h
index 4ea4026c8a8..8ce3febbd43 100644
--- a/dbms/include/DB/Storages/StorageSystemEvents.h
+++ b/dbms/include/DB/Storages/StorageSystemEvents.h
@@ -26,6 +26,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemMerges.h b/dbms/include/DB/Storages/StorageSystemMerges.h
index b2dc8e6f82a..b8e365333d5 100644
--- a/dbms/include/DB/Storages/StorageSystemMerges.h
+++ b/dbms/include/DB/Storages/StorageSystemMerges.h
@@ -17,6 +17,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemNumbers.h b/dbms/include/DB/Storages/StorageSystemNumbers.h
index 8ff60f946ae..5de23e6112f 100644
--- a/dbms/include/DB/Storages/StorageSystemNumbers.h
+++ b/dbms/include/DB/Storages/StorageSystemNumbers.h
@@ -28,6 +28,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemOne.h b/dbms/include/DB/Storages/StorageSystemOne.h
index 5a4ba9993bd..2e34e84bb92 100644
--- a/dbms/include/DB/Storages/StorageSystemOne.h
+++ b/dbms/include/DB/Storages/StorageSystemOne.h
@@ -25,6 +25,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemParts.h b/dbms/include/DB/Storages/StorageSystemParts.h
index 4b7a029a443..b9dab8843e1 100644
--- a/dbms/include/DB/Storages/StorageSystemParts.h
+++ b/dbms/include/DB/Storages/StorageSystemParts.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemProcesses.h b/dbms/include/DB/Storages/StorageSystemProcesses.h
index 8663d2aa781..e7290f2291b 100644
--- a/dbms/include/DB/Storages/StorageSystemProcesses.h
+++ b/dbms/include/DB/Storages/StorageSystemProcesses.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemReplicas.h b/dbms/include/DB/Storages/StorageSystemReplicas.h
index 631ecfa7b98..b44e77d1790 100644
--- a/dbms/include/DB/Storages/StorageSystemReplicas.h
+++ b/dbms/include/DB/Storages/StorageSystemReplicas.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemSettings.h b/dbms/include/DB/Storages/StorageSystemSettings.h
index 11e35010e6d..8ee31429fea 100644
--- a/dbms/include/DB/Storages/StorageSystemSettings.h
+++ b/dbms/include/DB/Storages/StorageSystemSettings.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemTables.h b/dbms/include/DB/Storages/StorageSystemTables.h
index d4b1dee4881..d91788c2a50 100644
--- a/dbms/include/DB/Storages/StorageSystemTables.h
+++ b/dbms/include/DB/Storages/StorageSystemTables.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageSystemZooKeeper.h b/dbms/include/DB/Storages/StorageSystemZooKeeper.h
index e02a5d19dc3..35c263ec268 100644
--- a/dbms/include/DB/Storages/StorageSystemZooKeeper.h
+++ b/dbms/include/DB/Storages/StorageSystemZooKeeper.h
@@ -27,6 +27,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageTinyLog.h b/dbms/include/DB/Storages/StorageTinyLog.h
index 459a7a8e801..c33278a62fa 100644
--- a/dbms/include/DB/Storages/StorageTinyLog.h
+++ b/dbms/include/DB/Storages/StorageTinyLog.h
@@ -129,6 +129,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/Storages/StorageView.h b/dbms/include/DB/Storages/StorageView.h
index 45d9b58c3dd..f6fcfcddb9b 100644
--- a/dbms/include/DB/Storages/StorageView.h
+++ b/dbms/include/DB/Storages/StorageView.h
@@ -33,6 +33,7 @@ public:
 	BlockInputStreams read(
 		const Names & column_names,
 		ASTPtr query,
+		const Context & context,
 		const Settings & settings,
 		QueryProcessingStage::Enum & processed_stage,
 		size_t max_block_size = DEFAULT_BLOCK_SIZE,
diff --git a/dbms/include/DB/TableFunctions/TableFunctionRemote.h b/dbms/include/DB/TableFunctions/TableFunctionRemote.h
index 400a0a24ec5..a48ab1c0be4 100644
--- a/dbms/include/DB/TableFunctions/TableFunctionRemote.h
+++ b/dbms/include/DB/TableFunctions/TableFunctionRemote.h
@@ -90,7 +90,12 @@ private:
 		NamesAndTypesList res;

 		/// Send it to the first shard we come across
-		BlockInputStreamPtr input = new RemoteBlockInputStream(&*cluster.pools.front(), query, &settings, Tables(), QueryProcessingStage::Complete);
+		BlockInputStreamPtr input{
+			new RemoteBlockInputStream{
+				cluster.pools.front().get(), query, &settings,
+				Tables(), QueryProcessingStage::Complete, context
+			}
+		};
 		input->readPrefix();

 		while (true)
diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp
index 0bc06dcbd3c..dd86b5c9682 100644
--- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp
+++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp
@@ -32,6 +32,8 @@
 #include

+#include
+
 namespace DB
 {
@@ -589,6 +591,9 @@ static SharedPtr interpretSubquery(
 		if (!parse_res)
 			throw Exception("Error in parsing SELECT query while creating set or join for table " + table->name + ".", ErrorCodes::LOGICAL_ERROR);
+
+		/// @note it may be more appropriate to manually replace ASTAsterisk with table's columns
+		ExpressionAnalyzer{query, context, subquery_depth};
 	}
 	else
 		query = subquery->children.at(0);
@@ -1737,26 +1742,32 @@ void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns, NamesAnd
 		return;

 	auto & node = typeid_cast(*select_query->join);
-	auto & keys = typeid_cast(*node.using_expr_list);
-	auto & table = node.table->children.at(0);	/// TODO: support for identifiers.
-	size_t num_join_keys = keys.children.size();
-
-	for (size_t i = 0; i < num_join_keys; ++i)
+	Block nested_result_sample;
+	if (const auto identifier = typeid_cast(node.table.get()))
 	{
-		if (!join_key_names_left_set.insert(keys.children[i]->getColumnName()).second)
+		const auto & table = context.getTable("", identifier->name);
+		nested_result_sample = table->getSampleBlockNonMaterialized();
+	}
+	else if (typeid_cast(node.table.get()))
+	{
+		const auto & table = node.table->children.at(0);
+		nested_result_sample = ExpressionAnalyzer(table, context, subquery_depth + 1).getSelectSampleBlock();
+	}
+
+	auto & keys = typeid_cast(*node.using_expr_list);
+	for (const auto & key : keys.children)
+	{
+		if (!join_key_names_left_set.insert(key->getColumnName()).second)
 			throw Exception("Duplicate column in USING list", ErrorCodes::DUPLICATE_COLUMN);

-		if (!join_key_names_right_set.insert(keys.children[i]->getAliasOrColumnName()).second)
+		if (!join_key_names_right_set.insert(key->getAliasOrColumnName()).second)
 			throw Exception("Duplicate column in USING list", ErrorCodes::DUPLICATE_COLUMN);
 	}

-	Block nested_result_sample = ExpressionAnalyzer(table, context, subquery_depth + 1).getSelectSampleBlock();
-
-	size_t nested_result_columns = nested_result_sample.columns();
-	for (size_t i = 0; i < nested_result_columns; ++i)
+	for (const auto i : ext::range(0, nested_result_sample.columns()))
 	{
-		auto col = nested_result_sample.getByPosition(i);
+		const auto & col = nested_result_sample.getByPosition(i);
 		if (!join_key_names_right_set.count(col.name))
 		{
 			joined_columns.insert(col.name);
diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
index 677014fa014..538879e0d0c 100644
--- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
@@ -547,7 +547,9 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(BlockInpu
 		if (storage && storage->isRemote())
 			storage->storeExternalTables(query_analyzer->getExternalTables());

-		streams = storage->read(required_columns, query_ptr, settings_for_storage, from_stage, settings.max_block_size, settings.max_threads);
+		streams = storage->read(required_columns, query_ptr,
+			context, settings_for_storage, from_stage,
+			settings.max_block_size, settings.max_threads);

 		for (auto & stream : streams)
 			stream->addTableLock(table_lock);
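The `collectJoinedColumns` hunk is the behavioural core of this patch: the right-hand side of `JOIN ... USING` may now be a bare table identifier as well as a subquery. The extraction dropped the template arguments of the `typeid_cast`/`#include` lines above, so they are left as-is there; in the distilled sketch below the cast targets (`ASTIdentifier`, `ASTSubquery`) are inferred from the surrounding logic, not copied verbatim from the patch:

```cpp
/// Distilled from ExpressionAnalyzer::collectJoinedColumns above; not a drop-in excerpt.
Block nested_result_sample;
if (const auto identifier = typeid_cast<const ASTIdentifier *>(node.table.get()))
{
	/// JOIN against a named table: take its structure straight from the query's Context.
	const auto & table = context.getTable("", identifier->name);
	nested_result_sample = table->getSampleBlockNonMaterialized();
}
else if (typeid_cast<const ASTSubquery *>(node.table.get()))
{
	/// JOIN against a subquery: analyze it to obtain the header of its result.
	const auto & table = node.table->children.at(0);
	nested_result_sample = ExpressionAnalyzer(table, context, subquery_depth + 1).getSelectSampleBlock();
}
```

Resolving a named table requires a per-query `Context`, which is presumably why the same patch threads `context` through `IStorage::read` and `RemoteBlockInputStream`.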
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
index 16f88703115..e2367f44dcb 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
@@ -38,10 +38,11 @@ static Block getBlockWithVirtualColumns(const MergeTreeData::DataPartsVector & p
 BlockInputStreams MergeTreeDataSelectExecutor::read(
 	const Names & column_names_to_return,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads,
+	const size_t max_block_size,
+	const unsigned threads,
 	size_t * part_index)
 {
 	size_t part_index_var = 0;
@@ -74,15 +75,15 @@ BlockInputStreams MergeTreeDataSelectExecutor::read(
 	/// If at least one virtual column is requested, try to use the index
 	if (!virt_column_names.empty())
-		VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, data.context);
+		VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context);

 	std::multiset values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part");

 	data.check(real_column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;

-	PKCondition key_condition(query, data.context, data.getColumnsList(), data.getSortDescription());
-	PKCondition date_condition(query, data.context, data.getColumnsList(), SortDescription(1, SortColumnDescription(data.date_column_name, 1)));
+	PKCondition key_condition(query, context, data.getColumnsList(), data.getSortDescription());
+	PKCondition date_condition(query, context, data.getColumnsList(), SortDescription(1, SortColumnDescription(data.date_column_name, 1)));

 	/// Select the parts that may contain data satisfying date_condition and that match the condition on _part.
 	{
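This is the executor-side counterpart of the interpreter change above: index analysis and virtual-column filtering now run against the context of the query being executed rather than the `data.context` captured when the table object was created. A condensed paraphrase of the hunk (not a drop-in excerpt):

```cpp
/// Before: conditions were built with the table's stored context.
///     PKCondition key_condition(query, data.context, data.getColumnsList(), data.getSortDescription());
/// After: the per-query context arrives through read() and is used instead.
PKCondition key_condition(query, context, data.getColumnsList(), data.getSortDescription());
VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context);
```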
diff --git a/dbms/src/Storages/StorageBuffer.cpp b/dbms/src/Storages/StorageBuffer.cpp
index 499976b0ca4..9bf472ad63a 100644
--- a/dbms/src/Storages/StorageBuffer.cpp
+++ b/dbms/src/Storages/StorageBuffer.cpp
@@ -90,6 +90,7 @@ private:
 BlockInputStreams StorageBuffer::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
 	size_t max_block_size,
@@ -101,7 +102,7 @@ BlockInputStreams StorageBuffer::read(
 	if (!no_destination)
 		streams_from_dst = context.getTable(destination_database, destination_table)->read(
-			column_names, query, settings, processed_stage, max_block_size, threads);
+			column_names, query, context, settings, processed_stage, max_block_size, threads);

 	BlockInputStreams streams_from_buffers;
 	streams_from_buffers.reserve(num_shards);
diff --git a/dbms/src/Storages/StorageChunkMerger.cpp b/dbms/src/Storages/StorageChunkMerger.cpp
index 72faaa8a4cd..7462efa9f20 100644
--- a/dbms/src/Storages/StorageChunkMerger.cpp
+++ b/dbms/src/Storages/StorageChunkMerger.cpp
@@ -62,10 +62,11 @@ bool StorageChunkMerger::hasColumn(const String & column_name) const
 BlockInputStreams StorageChunkMerger::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	/// We will read from Chunks tables that have at least one ChunkRef matching the regexp, and from other tables matching the regexp.
 	Storages selected_tables;
@@ -76,16 +77,17 @@ BlockInputStreams StorageChunkMerger::read(
 	typedef std::set StringSet;
 	StringSet chunks_table_names;

-	Databases & databases = context.getDatabases();
+	const Databases & databases = context.getDatabases();

-	if (!databases.count(source_database))
+	const auto database_it = databases.find(source_database);
+	if (database_it == std::end(databases))
 		throw Exception("No database " + source_database, ErrorCodes::UNKNOWN_DATABASE);

-	Tables & tables = databases[source_database];
-	for (Tables::iterator it = tables.begin(); it != tables.end(); ++it)
+	const Tables & tables = database_it->second;
+	for (const auto & it : tables)
 	{
-		StoragePtr table = it->second;
-		if (table_name_regexp.match(it->first) &&
+		const StoragePtr & table = it.second;
+		if (table_name_regexp.match(it.first) &&
 			!typeid_cast(&*table) &&
 			!typeid_cast(&*table))
 		{
@@ -93,19 +95,20 @@ BlockInputStreams StorageChunkMerger::read(
 			{
 				if (chunk_ref->source_database_name != source_database)
 				{
-					LOG_WARNING(log, "ChunkRef " + it->first + " points to another database, ignoring");
+					LOG_WARNING(log, "ChunkRef " + it.first + " points to another database, ignoring");
 					continue;
 				}

 				if (!chunks_table_names.count(chunk_ref->source_table_name))
 				{
-					if (tables.count(chunk_ref->source_table_name))
+					const auto table_it = tables.find(chunk_ref->source_table_name);
+					if (table_it != std::end(tables))
 					{
 						chunks_table_names.insert(chunk_ref->source_table_name);
-						selected_tables.push_back(tables[chunk_ref->source_table_name]);
+						selected_tables.push_back(table_it->second);
 					}
 					else
 					{
-						LOG_WARNING(log, "ChunkRef " + it->first + " points to non-existing Chunks table, ignoring");
+						LOG_WARNING(log, "ChunkRef " + it.first + " points to non-existing Chunks table, ignoring");
 					}
 				}
 			}
@@ -174,6 +177,7 @@ BlockInputStreams StorageChunkMerger::read(
 		BlockInputStreams source_streams = table->read(
 			real_column_names,
 			modified_query_ast,
+			context,
 			settings,
 			tmp_processed_stage,
 			max_block_size,
@@ -465,6 +469,7 @@ bool StorageChunkMerger::mergeChunks(const Storages & chunks)
 	BlockInputStreams input_streams = src_storage->read(
 		src_column_names,
 		select_query_ptr,
+		context,
 		settings,
 		processed_stage,
 		DEFAULT_MERGE_BLOCK_SIZE);
diff --git a/dbms/src/Storages/StorageChunkRef.cpp b/dbms/src/Storages/StorageChunkRef.cpp
index f84ff574773..377b633e879 100644
--- a/dbms/src/Storages/StorageChunkRef.cpp
+++ b/dbms/src/Storages/StorageChunkRef.cpp
@@ -12,14 +12,17 @@ StoragePtr StorageChunkRef::create(const std::string & name_, const Context & co
 }

 BlockInputStreams StorageChunkRef::read(
-	const Names & column_names,
-	ASTPtr query,
-	const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
-	return getSource().readFromChunk(name, column_names, query, settings, processed_stage, max_block_size, threads);
+	return getSource().readFromChunk(name, column_names, query,
+		context, settings, processed_stage,
+		max_block_size, threads);
 }

 ASTPtr StorageChunkRef::getCustomCreateQuery(const Context & context) const
diff --git a/dbms/src/Storages/StorageChunks.cpp b/dbms/src/Storages/StorageChunks.cpp
index 9b6fb81cee3..c3aad961a39 100644
--- a/dbms/src/Storages/StorageChunks.cpp
+++ b/dbms/src/Storages/StorageChunks.cpp
@@ -46,10 +46,11 @@ void StorageChunks::removeReference()
 BlockInputStreams StorageChunks::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	bool has_virtual_column = false;
@@ -59,17 +60,21 @@ BlockInputStreams StorageChunks::read(
 	/// If there are no virtual columns, just read the data from the table
 	if (!has_virtual_column)
-		return read(0, std::numeric_limits::max(), column_names, query, settings, processed_stage, max_block_size, threads);
+		return read(0, std::numeric_limits::max(), column_names,
+			query, context, settings,
+			processed_stage, max_block_size, threads);

 	Block virtual_columns_block = getBlockWithVirtualColumns();
 	if (!VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context))
-		return read(0, std::numeric_limits::max(), column_names, query, settings, processed_stage, max_block_size, threads);
+		return read(0, std::numeric_limits::max(), column_names,
+			query, context, settings,
+			processed_stage, max_block_size, threads);

 	std::multiset values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, _table_column_name);

 	BlockInputStreams res;
 	for (const auto & it : values)
 	{
-		BlockInputStreams temp = readFromChunk(it, column_names, query, settings, processed_stage, max_block_size, threads);
+		BlockInputStreams temp = readFromChunk(it, column_names, query, context, settings, processed_stage, max_block_size, threads);
 		res.insert(res.end(), temp.begin(), temp.end());
 	}
 	return res;
@@ -92,10 +97,11 @@ BlockInputStreams StorageChunks::readFromChunk(
 	const std::string & chunk_name,
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	size_t mark1;
 	size_t mark2;
@@ -110,7 +116,7 @@ BlockInputStreams StorageChunks::readFromChunk(
 		mark2 = index + 1 == chunk_num_to_marks.size() ? marksCount() : chunk_num_to_marks[index + 1];
 	}

-	return read(mark1, mark2, column_names, query, settings, processed_stage, max_block_size, threads);
+	return read(mark1, mark2, column_names, query, context, settings, processed_stage, max_block_size, threads);
 }

 BlockOutputStreamPtr StorageChunks::writeToNewChunk(
diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp
index 25f339f34cb..ce38efef02b 100644
--- a/dbms/src/Storages/StorageDistributed.cpp
+++ b/dbms/src/Storages/StorageDistributed.cpp
@@ -140,10 +140,11 @@ StoragePtr StorageDistributed::create(
 BlockInputStreams StorageDistributed::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	Settings new_settings = settings;
 	new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.limits.max_execution_time);
@@ -163,7 +164,8 @@ BlockInputStreams StorageDistributed::read(
 	for (auto & conn_pool : cluster.pools)
 		res.emplace_back(new RemoteBlockInputStream{
 			conn_pool, modified_query, &new_settings,
-			external_tables, processed_stage});
+			external_tables, processed_stage, context
+		});

 	/// Add queries to the local ClickHouse.
 	if (cluster.getLocalNodesNum() > 0)
diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/src/Storages/StorageLog.cpp
index e980e9055a5..28cfce37b55 100644
--- a/dbms/src/Storages/StorageLog.cpp
+++ b/dbms/src/Storages/StorageLog.cpp
@@ -620,6 +620,7 @@ BlockInputStreams StorageLog::read(
 	size_t to_mark,
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
 	size_t max_block_size,
@@ -693,12 +694,15 @@ BlockInputStreams StorageLog::read(
 BlockInputStreams StorageLog::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
-	return read(0, std::numeric_limits::max(), column_names, query, settings, processed_stage, max_block_size, threads);
+	return read(0, std::numeric_limits::max(), column_names,
+		query, context, settings, processed_stage,
+		max_block_size, threads);
 }
diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/dbms/src/Storages/StorageMaterializedView.cpp
index a9560808b1d..e9e84d541b4 100644
--- a/dbms/src/Storages/StorageMaterializedView.cpp
+++ b/dbms/src/Storages/StorageMaterializedView.cpp
@@ -99,12 +99,13 @@ bool StorageMaterializedView::hasColumn(const String & column_name) const
 BlockInputStreams StorageMaterializedView::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
-	return data->read(column_names, query, settings, processed_stage, max_block_size, threads);
+	return data->read(column_names, query, context, settings, processed_stage, max_block_size, threads);
 }

 BlockOutputStreamPtr StorageMaterializedView::write(ASTPtr query)
diff --git a/dbms/src/Storages/StorageMemory.cpp b/dbms/src/Storages/StorageMemory.cpp
index b5ba1dac28d..b22f1080a2c 100644
--- a/dbms/src/Storages/StorageMemory.cpp
+++ b/dbms/src/Storages/StorageMemory.cpp
@@ -99,6 +99,7 @@ StoragePtr StorageMemory::create(
 BlockInputStreams StorageMemory::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
 	size_t max_block_size,
diff --git a/dbms/src/Storages/StorageMerge.cpp b/dbms/src/Storages/StorageMerge.cpp
index aaa169f4688..fcb99636408 100644
--- a/dbms/src/Storages/StorageMerge.cpp
+++ b/dbms/src/Storages/StorageMerge.cpp
@@ -79,10 +79,11 @@ bool StorageMerge::hasColumn(const String & column_name) const
 BlockInputStreams StorageMerge::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	BlockInputStreams res;
@@ -143,6 +144,7 @@ BlockInputStreams StorageMerge::read(
 		BlockInputStreams source_streams = table->read(
 			real_column_names,
 			modified_query_ast,
+			context,
 			settings,
 			tmp_processed_stage,
 			max_block_size,
diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp
index 202580c2ac8..bfad370a64d 100644
--- a/dbms/src/Storages/StorageMergeTree.cpp
+++ b/dbms/src/Storages/StorageMergeTree.cpp
@@ -90,12 +90,13 @@ StorageMergeTree::~StorageMergeTree()
 BlockInputStreams StorageMergeTree::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
-	return reader.read(column_names, query, settings, processed_stage, max_block_size, threads);
+	return reader.read(column_names, query, context, settings, processed_stage, max_block_size, threads);
 }

 BlockOutputStreamPtr StorageMergeTree::write(ASTPtr query)
diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
index bbf364bf6a4..f557f9ae7c5 100644
--- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
@@ -1990,12 +1990,13 @@ StorageReplicatedMergeTree::~StorageReplicatedMergeTree()
 BlockInputStreams StorageReplicatedMergeTree::read(
-	const Names & column_names,
-	ASTPtr query,
-	const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	Names virt_column_names;
 	Names real_column_names;
@@ -2024,8 +2025,9 @@ BlockInputStreams StorageReplicatedMergeTree::read(
 	if (unreplicated_reader && values.count(0))
 	{
-		res = unreplicated_reader->read(
-			real_column_names, query, settings, processed_stage, max_block_size, threads, &part_index);
+		res = unreplicated_reader->read(real_column_names, query,
+			context, settings, processed_stage,
+			max_block_size, threads, &part_index);

 		for (auto & virtual_column : virt_column_names)
 		{
@@ -2039,7 +2041,7 @@ BlockInputStreams StorageReplicatedMergeTree::read(
 	if (values.count(1))
 	{
-		auto res2 = reader.read(real_column_names, query, settings, processed_stage, max_block_size, threads, &part_index);
+		auto res2 = reader.read(real_column_names, query, context, settings, processed_stage, max_block_size, threads, &part_index);
 		for (auto & virtual_column : virt_column_names)
 		{
diff --git a/dbms/src/Storages/StorageSystemDatabases.cpp b/dbms/src/Storages/StorageSystemDatabases.cpp
index 0a27756a1a1..2e45ed4b355 100644
--- a/dbms/src/Storages/StorageSystemDatabases.cpp
+++ b/dbms/src/Storages/StorageSystemDatabases.cpp
@@ -21,8 +21,13 @@ StoragePtr StorageSystemDatabases::create(const std::string & name_, const Conte
 BlockInputStreams StorageSystemDatabases::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemEvents.cpp b/dbms/src/Storages/StorageSystemEvents.cpp
index da6e1180348..4b90e3ae4bc 100644
--- a/dbms/src/Storages/StorageSystemEvents.cpp
+++ b/dbms/src/Storages/StorageSystemEvents.cpp
@@ -24,8 +24,13 @@ StoragePtr StorageSystemEvents::create(const std::string & name_)
 BlockInputStreams StorageSystemEvents::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemMerges.cpp b/dbms/src/Storages/StorageSystemMerges.cpp
index a7de97573b9..8f8fa227173 100644
--- a/dbms/src/Storages/StorageSystemMerges.cpp
+++ b/dbms/src/Storages/StorageSystemMerges.cpp
@@ -33,8 +33,13 @@ StoragePtr StorageSystemMerges::create(const std::string & name, const Context &
 }

 BlockInputStreams StorageSystemMerges::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, const size_t max_block_size, const unsigned)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemNumbers.cpp b/dbms/src/Storages/StorageSystemNumbers.cpp
index 211a57f3d95..ae370669c46 100644
--- a/dbms/src/Storages/StorageSystemNumbers.cpp
+++ b/dbms/src/Storages/StorageSystemNumbers.cpp
@@ -66,8 +66,13 @@ StoragePtr StorageSystemNumbers::create(const std::string & name_, bool multithr
 BlockInputStreams StorageSystemNumbers::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemOne.cpp b/dbms/src/Storages/StorageSystemOne.cpp
index a6d66fed732..c5f0459d71b 100644
--- a/dbms/src/Storages/StorageSystemOne.cpp
+++ b/dbms/src/Storages/StorageSystemOne.cpp
@@ -24,8 +24,13 @@ StoragePtr StorageSystemOne::create(const std::string & name_)
 BlockInputStreams StorageSystemOne::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemParts.cpp b/dbms/src/Storages/StorageSystemParts.cpp
index c9d7b54a99e..4024fd7aea8 100644
--- a/dbms/src/Storages/StorageSystemParts.cpp
+++ b/dbms/src/Storages/StorageSystemParts.cpp
@@ -38,8 +38,13 @@ StoragePtr StorageSystemParts::create(const std::string & name_, const Context &
 BlockInputStreams StorageSystemParts::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemProcesses.cpp b/dbms/src/Storages/StorageSystemProcesses.cpp
index e9a92de1572..63f433afe13 100644
--- a/dbms/src/Storages/StorageSystemProcesses.cpp
+++ b/dbms/src/Storages/StorageSystemProcesses.cpp
@@ -32,8 +32,13 @@ StoragePtr StorageSystemProcesses::create(const std::string & name_, const Conte
 BlockInputStreams StorageSystemProcesses::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemReplicas.cpp b/dbms/src/Storages/StorageSystemReplicas.cpp
index 452fac9eb65..6ef40a9ebfa 100644
--- a/dbms/src/Storages/StorageSystemReplicas.cpp
+++ b/dbms/src/Storages/StorageSystemReplicas.cpp
@@ -46,8 +46,13 @@ StoragePtr StorageSystemReplicas::create(const std::string & name_, const Contex
 BlockInputStreams StorageSystemReplicas::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
@@ -59,7 +64,7 @@ BlockInputStreams StorageSystemReplicas::read(
 		for (const auto & db : context.getDatabases())
 			for (const auto & table : db.second)
-				if (typeid_cast(&*table.second))
+				if (typeid_cast(table.second.get()))
 					replicated_tables[db.first][table.first] = table.second;
 	}
diff --git a/dbms/src/Storages/StorageSystemSettings.cpp b/dbms/src/Storages/StorageSystemSettings.cpp
index 0e79044b292..73544d71354 100644
--- a/dbms/src/Storages/StorageSystemSettings.cpp
+++ b/dbms/src/Storages/StorageSystemSettings.cpp
@@ -26,8 +26,13 @@ StoragePtr StorageSystemSettings::create(const std::string & name_, const Contex
 BlockInputStreams StorageSystemSettings::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemTables.cpp b/dbms/src/Storages/StorageSystemTables.cpp
index e62fa17a256..f35575788ae 100644
--- a/dbms/src/Storages/StorageSystemTables.cpp
+++ b/dbms/src/Storages/StorageSystemTables.cpp
@@ -24,8 +24,13 @@ StoragePtr StorageSystemTables::create(const std::string & name_, const Context
 BlockInputStreams StorageSystemTables::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageSystemZooKeeper.cpp b/dbms/src/Storages/StorageSystemZooKeeper.cpp
index 2715d490406..ee2d7f9298d 100644
--- a/dbms/src/Storages/StorageSystemZooKeeper.cpp
+++ b/dbms/src/Storages/StorageSystemZooKeeper.cpp
@@ -103,8 +103,13 @@ static String extractPath(const ASTPtr & query)
 BlockInputStreams StorageSystemZooKeeper::read(
-	const Names & column_names, ASTPtr query, const Settings & settings,
-	QueryProcessingStage::Enum & processed_stage, size_t max_block_size, unsigned threads)
+	const Names & column_names,
+	ASTPtr query,
+	const Context & context,
+	const Settings & settings,
+	QueryProcessingStage::Enum & processed_stage,
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/src/Storages/StorageTinyLog.cpp
index 3d111c7d8e4..b63f5dd904a 100644
--- a/dbms/src/Storages/StorageTinyLog.cpp
+++ b/dbms/src/Storages/StorageTinyLog.cpp
@@ -406,10 +406,11 @@ void StorageTinyLog::rename(const String & new_path_to_db, const String & new_da
 BlockInputStreams StorageTinyLog::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	check(column_names);
 	processed_stage = QueryProcessingStage::FetchColumns;
diff --git a/dbms/src/Storages/StorageView.cpp b/dbms/src/Storages/StorageView.cpp
index d8a75a3a5bf..50b00df5832 100644
--- a/dbms/src/Storages/StorageView.cpp
+++ b/dbms/src/Storages/StorageView.cpp
@@ -74,10 +74,11 @@ StorageView::StorageView(
 BlockInputStreams StorageView::read(
 	const Names & column_names,
 	ASTPtr query,
+	const Context & context,
 	const Settings & settings,
 	QueryProcessingStage::Enum & processed_stage,
-	size_t max_block_size,
-	unsigned threads)
+	const size_t max_block_size,
+	const unsigned threads)
 {
 	ASTPtr inner_query_clone = getInnerQuery();
 	ASTSelectQuery & inner_select = static_cast(*inner_query_clone);