From 237de4e775ba4080087496303c350cddbebc9043 Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Fri, 14 Jun 2024 12:31:52 +0000
Subject: [PATCH 01/80] add feature

---
 .../system-tables/detached_tables.md          |  36 +
 src/Databases/DatabaseAtomic.cpp              |  20 +-
 src/Databases/DatabaseAtomic.h                |   7 +-
 src/Databases/DatabaseOnDisk.cpp              |   3 +
 src/Databases/DatabaseOrdinary.cpp            |  37 +-
 src/Databases/DatabaseOrdinary.h              |   3 +
 src/Databases/DatabasesCommon.cpp             |  54 +-
 src/Databases/DatabasesCommon.h               |   6 +-
 src/Databases/IDatabase.h                     |  63 +-
 src/Storages/IStorage_fwd.h                   |   4 +-
 src/Storages/System/ReadFromSystemTables.cpp  | 138 ++++
 src/Storages/System/ReadFromSystemTables.h    |  47 ++
 .../System/StorageSystemDetachedTables.cpp    |  56 ++
 .../System/StorageSystemDetachedTables.h      |  32 +
 src/Storages/System/StorageSystemTables.cpp   | 761 ++----------------
 src/Storages/System/TablesBlockSource.cpp     | 569 +++++++++++++
 src/Storages/System/TablesBlockSource.h       |  96 +++
 src/Storages/System/attachSystemTables.cpp    |   2 +
 .../test_system_detached_tables/__init__.py   |   0
 .../test_system_detached_tables/test.py       |  38 +
 .../03172_system_detached_tables.reference    |   5 +
 .../03172_system_detached_tables.sql          |  28 +
 22 files changed, 1289 insertions(+), 716 deletions(-)
 create mode 100644 docs/en/operations/system-tables/detached_tables.md
 create mode 100644 src/Storages/System/ReadFromSystemTables.cpp
 create mode 100644 src/Storages/System/ReadFromSystemTables.h
 create mode 100644 src/Storages/System/StorageSystemDetachedTables.cpp
 create mode 100644 src/Storages/System/StorageSystemDetachedTables.h
 create mode 100644 src/Storages/System/TablesBlockSource.cpp
 create mode 100644 src/Storages/System/TablesBlockSource.h
 create mode 100644 tests/integration/test_system_detached_tables/__init__.py
 create mode 100644 tests/integration/test_system_detached_tables/test.py
 create mode 100644 tests/queries/0_stateless/03172_system_detached_tables.reference
 create mode 100644 tests/queries/0_stateless/03172_system_detached_tables.sql

diff --git a/docs/en/operations/system-tables/detached_tables.md b/docs/en/operations/system-tables/detached_tables.md
new file mode 100644
index 00000000000..d86e58d2899
--- /dev/null
+++ b/docs/en/operations/system-tables/detached_tables.md
@@ -0,0 +1,36 @@
+---
+slug: /en/operations/system-tables/detached_tables
+---
+# detached_tables
+
+Contains information about each detached table.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
+
+- `name` ([String](../../sql-reference/data-types/string.md)) — Table name.
+
+- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table UUID (Atomic database).
+
+- `is_permanently` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag indicating that the table was detached `PERMANENTLY`, i.e. it stays detached across server restarts.
+
+- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — Path to the table metadata in the file system.
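+
+A table appears here after a `DETACH TABLE` statement; with the `PERMANENTLY` modifier the `is_permanently` flag is set. A minimal sketch of how a row gets here (the database and table names are placeholders):
+
+```sql
+DETACH TABLE base.t1 PERMANENTLY;
+
+SELECT database, name, is_permanently
+FROM system.detached_tables
+WHERE database = 'base';
+```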
+
+
+**Example**
+
+```sql
+SELECT * FROM system.detached_tables FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database: base
+name: t1
+uuid: 81b1c20a-b7c6-4116-a2ce-7583fb6b6736
+metadata_path: /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql
+is_permanently: 1
+```

diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp
index ccab72cfbae..ddc30627cde 100644
--- a/src/Databases/DatabaseAtomic.cpp
+++ b/src/Databases/DatabaseAtomic.cpp
@@ -37,8 +37,10 @@ namespace ErrorCodes
 class AtomicDatabaseTablesSnapshotIterator final : public DatabaseTablesSnapshotIterator
 {
 public:
-    explicit AtomicDatabaseTablesSnapshotIterator(DatabaseTablesSnapshotIterator && base)
-        : DatabaseTablesSnapshotIterator(std::move(base)) {}
+    explicit AtomicDatabaseTablesSnapshotIterator(DatabaseTablesSnapshotIterator && base) noexcept
+        : DatabaseTablesSnapshotIterator(std::move(base))
+    {
+    }
 
     UUID uuid() const override { return table()->getStorageID().uuid; }
 };
@@ -101,6 +103,8 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name,
     auto table_id = table->getStorageID();
     assertDetachedTableNotInUse(table_id.uuid);
     DatabaseOrdinary::attachTableUnlocked(name, table);
+    detached_tables.erase(table_id.uuid);
+
     table_name_to_path.emplace(std::make_pair(name, relative_table_path));
 }
 
@@ -108,11 +112,11 @@ StoragePtr DatabaseAtomic::detachTable(ContextPtr /* context */, const String &
 {
     DetachedTables not_in_use;
     std::lock_guard lock(mutex);
-    auto table = DatabaseOrdinary::detachTableUnlocked(name);
+    auto detached_table = DatabaseOrdinary::detachTableUnlocked(name);
     table_name_to_path.erase(name);
-    detached_tables.emplace(table->getStorageID().uuid, table);
+    detached_tables.emplace(detached_table->getStorageID().uuid, detached_table);
     not_in_use = cleanupDetachedTables();
-    return table;
+    return detached_table;
 }
 
 void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_name, bool sync)
@@ -433,6 +437,12 @@ DatabaseAtomic::getTablesIterator(ContextPtr local_context, const IDatabase::Fil
     return std::make_unique<AtomicDatabaseTablesSnapshotIterator>(std::move(typeid_cast<DatabaseTablesSnapshotIterator &>(*base_iter)));
 }
 
+DatabaseDetachedTablesSnapshotIteratorPtr DatabaseAtomic::getDetachedTablesIterator(
+    ContextPtr local_context, const IDatabase::FilterByNameFunction & filter_by_table_name, const bool skip_not_loaded) const
+{
+    return DatabaseOrdinary::getDetachedTablesIterator(local_context, filter_by_table_name, skip_not_loaded);
+}
+
 UUID DatabaseAtomic::tryGetTableUUID(const String & table_name) const
 {
     if (auto table = tryGetTable(table_name, getContext()))
diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h
index b59edd479ba..47afc4dc5ef 100644
--- a/src/Databases/DatabaseAtomic.h
+++ b/src/Databases/DatabaseAtomic.h
@@ -1,7 +1,8 @@
 #pragma once
 
-#include
 #include
+#include
+#include
 
 
 namespace DB
@@ -48,6 +49,9 @@ public:
 
     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
 
+    DatabaseDetachedTablesSnapshotIteratorPtr
+    getDetachedTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
+
     void beforeLoadingMetadata(ContextMutablePtr context, LoadingStrictnessLevel mode) override;
 
     LoadTaskPtr startupDatabaseAsync(AsyncLoader & async_loader, LoadJobSet startup_after, LoadingStrictnessLevel mode) override;
@@ -81,6 +85,7 @@ protected:
     //TODO store path in DatabaseWithOwnTables::tables
     using NameToPathMap = std::unordered_map<String, String>;
+
     NameToPathMap table_name_to_path TSA_GUARDED_BY(mutex);
 
     DetachedTables detached_tables TSA_GUARDED_BY(mutex);
diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
index b8154372116..25e5347e017 100644
--- a/src/Databases/DatabaseOnDisk.cpp
+++ b/src/Databases/DatabaseOnDisk.cpp
@@ -307,6 +307,9 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri
     try
     {
         FS::createFile(detached_permanently_flag);
+
+        std::lock_guard lock(mutex);
+        snapshot_detached_tables.at(table_name).is_permanently = true;
     }
     catch (Exception & e)
     {
diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
index 10a8e06e8f0..046cbfa40be 100644
--- a/src/Databases/DatabaseOrdinary.cpp
+++ b/src/Databases/DatabaseOrdinary.cpp
@@ -187,7 +187,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
     size_t prev_tables_count = metadata.parsed_tables.size();
     size_t prev_total_dictionaries = metadata.total_dictionaries;
 
-    auto process_metadata = [&metadata, is_startup, this](const String & file_name)
+    auto process_metadata = [&metadata, is_startup, local_context, this](const String & file_name) mutable
     {
         fs::path path(getMetadataPath());
         fs::path file_path(file_name);
@@ -195,7 +195,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
 
         try
         {
-            auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false);
+            auto ast = parseQueryFromMetadata(log, local_context, full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false);
             if (ast)
             {
                 FunctionNameNormalizer::visit(ast.get());
@@ -226,6 +226,33 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
                     const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4));
                     permanently_detached_tables.push_back(table_name);
                     LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name));
+
+                    /// TODO: refactor this block.
+                    auto parsed_table_metadata = ParsedTableMetadata{full_path.string(), ast};
+                    const auto & query = parsed_table_metadata.ast->as<ASTCreateQuery &>();
+
+                    std::lock_guard lock(mutex);
+
+                    auto [detached_table_name, table] = createTableFromAST(
+                        query,
+                        database_name,
+                        getTableDataPath(query),
+                        std::const_pointer_cast<Context>(local_context),
+                        LoadingStrictnessLevel::CREATE);
+
+                    const auto storage_id = table->getStorageID();
+
+                    SnapshotDetachedTable snapshot_detached_table;
+                    snapshot_detached_table.database = storage_id.getDatabaseName();
+                    snapshot_detached_table.table = detached_table_name;
+                    snapshot_detached_table.uuid = storage_id.uuid;
+                    snapshot_detached_table.is_permanently = true;
+                    snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table);
+
+                    snapshot_detached_tables.emplace(detached_table_name, std::move(snapshot_detached_table));
+
+                    LOG_TRACE(log, "Added detached table {} to system.detached_tables", detached_table_name);
                     return;
                 }
 
@@ -487,6 +514,12 @@ DatabaseTablesIteratorPtr DatabaseOrdinary::getTablesIterator(ContextPtr local_c
     return DatabaseWithOwnTablesBase::getTablesIterator(local_context, filter_by_table_name, skip_not_loaded);
 }
 
+DatabaseDetachedTablesSnapshotIteratorPtr DatabaseOrdinary::getDetachedTablesIterator(
+    ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const
+{
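+    /// Both Ordinary and Atomic databases serve detached tables from the snapshot kept in DatabaseWithOwnTablesBase.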
+    return DatabaseWithOwnTablesBase::getDetachedTablesIterator(local_context, filter_by_table_name, skip_not_loaded);
+}
+
 Strings DatabaseOrdinary::getAllTableNames(ContextPtr) const
 {
     std::set<String> unique_names;
diff --git a/src/Databases/DatabaseOrdinary.h b/src/Databases/DatabaseOrdinary.h
index ef00ac8fdfa..d2891147922 100644
--- a/src/Databases/DatabaseOrdinary.h
+++ b/src/Databases/DatabaseOrdinary.h
@@ -57,6 +57,9 @@ public:
     LoadTaskPtr startupDatabaseAsync(AsyncLoader & async_loader, LoadJobSet startup_after, LoadingStrictnessLevel mode) override;
 
     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
+    DatabaseDetachedTablesSnapshotIteratorPtr getDetachedTablesIterator(
+        ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
+
     Strings getAllTableNames(ContextPtr context) const override;
 
     void alterTable(
diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp
index fd38a31da5c..312c0a4477f 100644
--- a/src/Databases/DatabasesCommon.cpp
+++ b/src/Databases/DatabasesCommon.cpp
@@ -2,12 +2,9 @@
 #include
 #include
-#include
-#include
-#include
-#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -16,6 +13,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 
 
 namespace DB
@@ -237,6 +238,35 @@ DatabaseTablesIteratorPtr DatabaseWithOwnTablesBase::getTablesIterator(ContextPt
     return std::make_unique<DatabaseTablesSnapshotIterator>(std::move(filtered_tables), database_name);
 }
 
+DatabaseDetachedTablesSnapshotIteratorPtr DatabaseWithOwnTablesBase::getDetachedTablesIterator(
+    ContextPtr, const FilterByNameFunction & filter_by_table_name, bool /* skip_not_loaded */) const
+{
+    std::lock_guard lock(mutex);
+    if (!filter_by_table_name)
+        return std::make_unique<DatabaseDetachedTablesSnapshotIterator>(snapshot_detached_tables);
+
+    /// Filter the snapshot of detached tables, not the map of attached ones.
+    SnapshotDetachedTables filtered_tables;
+    for (const auto & [table_name, snapshot] : snapshot_detached_tables)
+        if (filter_by_table_name(table_name))
+            filtered_tables.emplace(table_name, snapshot);
+
+    return std::make_unique<DatabaseDetachedTablesSnapshotIterator>(std::move(filtered_tables));
+}
+
 bool DatabaseWithOwnTablesBase::empty() const
 {
     std::lock_guard lock(mutex);
@@ -258,6 +288,19 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
         throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(database_name), backQuote(table_name));
     res = it->second;
 
+    SnapshotDetachedTable snapshot_detached_table;
+    snapshot_detached_table.database = it->second->getStorageID().getDatabaseName();
+    snapshot_detached_table.table = it->first;
+    if (it->second->getStorageID().hasUUID())
+    {
+        snapshot_detached_table.uuid = it->second->getStorageID().uuid;
+    }
+    snapshot_detached_table.is_permanently = false;
+    snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table);
+
+    snapshot_detached_tables.emplace(it->first, std::move(snapshot_detached_table));
+
     tables.erase(it);
     res->is_detached = true;
     CurrentMetrics::sub(getAttachedCounterForStorage(res), 1);
@@ -298,6 +341,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
         throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {} already exists.", table_id.getFullTableName());
     }
 
+    snapshot_detached_tables.erase(table_name);
+
     /// It is important to reset is_detached here since in case of RENAME in
     /// non-Atomic database the is_detached is set to true before RENAME.
     table->is_detached = false;
@@ -333,6 +378,7 @@ void DatabaseWithOwnTablesBase::shutdown()
 
     std::lock_guard lock(mutex);
     tables.clear();
+    snapshot_detached_tables.clear();
 }
 
 DatabaseWithOwnTablesBase::~DatabaseWithOwnTablesBase()
diff --git a/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h
index 2eecf8a564f..1ca49e90c23 100644
--- a/src/Databases/DatabasesCommon.h
+++ b/src/Databases/DatabasesCommon.h
@@ -37,6 +37,9 @@ public:
 
     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
 
+    DatabaseDetachedTablesSnapshotIteratorPtr
+    getDetachedTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override;
+
     std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override;
 
     void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr<IRestoreCoordination> restore_coordination, UInt64 timeout_ms) override;
@@ -46,12 +49,13 @@ public:
 
 protected:
     Tables tables TSA_GUARDED_BY(mutex);
+    SnapshotDetachedTables snapshot_detached_tables TSA_GUARDED_BY(mutex);
     LoggerPtr log;
 
     DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context);
 
     void attachTableUnlocked(const String & table_name, const StoragePtr & table) TSA_REQUIRES(mutex);
-    StoragePtr detachTableUnlocked(const String & table_name) TSA_REQUIRES(mutex);
+    StoragePtr detachTableUnlocked(const String & table_name) TSA_REQUIRES(mutex);
     StoragePtr getTableUnlocked(const String & table_name) const TSA_REQUIRES(mutex);
     StoragePtr tryGetTableNoWait(const String & table_name) const;
 };
diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h
index b00f2fe4baf..7c6b3b3004c 100644
--- a/src/Databases/IDatabase.h
+++ b/src/Databases/IDatabase.h
@@ -5,20 +5,22 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
-#include
 
 
 namespace DB
@@ -110,6 +112,55 @@ public:
 
 using DatabaseTablesIteratorPtr = std::unique_ptr<DatabaseTablesIterator>;
 
+struct SnapshotDetachedTable final
+{
+    String database;
+    String table;
+    UUID uuid = UUIDHelpers::Nil;
+    String metadata_path;
+    bool is_permanently{false};
+};
+
+class DatabaseDetachedTablesSnapshotIterator
+{
+private:
+    SnapshotDetachedTables snapshot;
+    SnapshotDetachedTables::iterator it;
+
+protected:
+    DatabaseDetachedTablesSnapshotIterator(DatabaseDetachedTablesSnapshotIterator && other) noexcept
+    {
+        size_t idx = std::distance(other.snapshot.begin(), other.it);
+        std::swap(snapshot, other.snapshot);
+        other.it = other.snapshot.end();
+        it = snapshot.begin();
+        std::advance(it, idx);
+    }
+
+public:
+    explicit DatabaseDetachedTablesSnapshotIterator(const SnapshotDetachedTables & tables_) : snapshot(tables_), it(snapshot.begin()) { }
+
+    explicit DatabaseDetachedTablesSnapshotIterator(SnapshotDetachedTables && tables_) : snapshot(std::move(tables_)), it(snapshot.begin())
+    {
+    }
+
+    void next() { ++it; }
+
+    bool isValid() const { return it != snapshot.end(); }
+
+    String database() const { return it->second.database; }
+
+    String table() const { return it->second.table; }
+
+    UUID uuid() const { return it->second.uuid; }
+
+    String metadataPath() const { return it->second.metadata_path; }
+
+    bool isPermanently() const { return it->second.is_permanently; }
+};
+
+using DatabaseDetachedTablesSnapshotIteratorPtr = std::unique_ptr<DatabaseDetachedTablesSnapshotIterator>;
+
 
 /** Database engine.
   * It is responsible for:
@@ -232,6 +283,12 @@ public:
     /// Wait for all tables to be loaded and started up. If `skip_not_loaded` is true, then not yet loaded or not yet started up (at the moment of iterator creation) tables are excluded.
     virtual DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name = {}, bool skip_not_loaded = false) const = 0; /// NOLINT
 
+    virtual DatabaseDetachedTablesSnapshotIteratorPtr getDetachedTablesIterator(
+        ContextPtr /*context*/, const FilterByNameFunction & /*filter_by_table_name = {}*/, bool /*skip_not_loaded = false*/) const
+    {
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Detached tables are not supported by database engine {}", getEngineName());
+    }
+
     /// Returns list of table names.
     virtual Strings getAllTableNames(ContextPtr context) const
     {
diff --git a/src/Storages/IStorage_fwd.h b/src/Storages/IStorage_fwd.h
index b9243b029b0..4cbc586a745 100644
--- a/src/Storages/IStorage_fwd.h
+++ b/src/Storages/IStorage_fwd.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include
 #include
 #include
 
@@ -9,9 +10,10 @@ namespace DB
 {
 
 class IStorage;
+struct SnapshotDetachedTable;
 
 using ConstStoragePtr = std::shared_ptr<const IStorage>;
 using StoragePtr = std::shared_ptr<IStorage>;
 using Tables = std::map<String, StoragePtr>;
-
+using SnapshotDetachedTables = std::map<String, SnapshotDetachedTable>;
 }
diff --git a/src/Storages/System/ReadFromSystemTables.cpp b/src/Storages/System/ReadFromSystemTables.cpp
new file mode 100644
index 00000000000..6e9c6a869d1
--- /dev/null
+++ b/src/Storages/System/ReadFromSystemTables.cpp
@@ -0,0 +1,138 @@
+#include "ReadFromSystemTables.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace DB
+{
+
+namespace
+{
+
+ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context)
+{
+    MutableColumnPtr column = ColumnString::create();
+
+    const auto databases = DatabaseCatalog::instance().getDatabases();
+    for (const auto & database_name : databases | boost::adaptors::map_keys)
+    {
+        if (database_name == DatabaseCatalog::TEMPORARY_DATABASE)
+            continue; /// We don't want to show the internal database for temporary tables in system.tables
+
+        column->insert(database_name);
+    }
+
+    Block block{ColumnWithTypeAndName(std::move(column), std::make_shared<DataTypeString>(), "database")};
+    VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context);
+    return block.getByPosition(0).column;
+}
+
+ColumnPtr getFilteredTables(
+    const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, const bool need_detached_tables)
+{
+    Block sample{
+        ColumnWithTypeAndName(nullptr, std::make_shared<DataTypeString>(), "name"),
+        ColumnWithTypeAndName(nullptr, std::make_shared<DataTypeString>(), "engine")};
+
+    MutableColumnPtr database_column = ColumnString::create();
+    MutableColumnPtr engine_column;
+
+    auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample);
+    if (dag)
+    {
+        bool filter_by_engine = false;
+        for (const auto * input : dag->getInputs())
+            if (input->result_name == "engine")
filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + if (need_detached_tables) + { + auto table_it = database->getDetachedTablesIterator(context, {}, false); + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->table()); + } + } + else + { + auto table_it = database->getTablesIterator(context); + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->name()); + if (engine_column) + engine_column->insert(table_it->table()->getName()); + } + } + } + + Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +} + +ReadFromSystemTables::ReadFromSystemTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_, + const bool need_detached_tables_) + : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , need_detached_tables(need_detached_tables_) +{ +} + +void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) +{ + SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); + + const ActionsDAG::Node * predicate = nullptr; + if (filter_actions_dag) + predicate = filter_actions_dag->getOutputs().at(0); + + filtered_databases_column = getFilteredDatabases(predicate, context); + filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context, need_detached_tables); +} + +void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + if (need_detached_tables) + { + pipeline.init(createPipe()); + } + else + { + pipeline.init(createPipe()); + } +} +} diff --git a/src/Storages/System/ReadFromSystemTables.h b/src/Storages/System/ReadFromSystemTables.h new file mode 100644 index 00000000000..f8280ffcc8d --- /dev/null +++ b/src/Storages/System/ReadFromSystemTables.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class ReadFromSystemTables : public SourceStepWithFilter +{ +public: + std::string getName() const override { return "ReadFromSystemTables"; } + void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; + + ReadFromSystemTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_, + bool need_detached_tables); + + void applyFilters(ActionDAGNodes added_filter_nodes) override; + +private: + std::vector columns_mask; + size_t max_block_size; + const bool need_detached_tables; + + ColumnPtr filtered_databases_column; + ColumnPtr 
filtered_tables_column; + + template + Pipe createPipe() + { + return Pipe(std::make_shared( + std::move(columns_mask), + getOutputStream().header, + max_block_size, + std::move(filtered_databases_column), + std::move(filtered_tables_column), + context)); + } +}; +} diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp new file mode 100644 index 00000000000..1596ac6a046 --- /dev/null +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -0,0 +1,56 @@ +#include "StorageSystemDetachedTables.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +StorageSystemDetachedTables::StorageSystemDetachedTables(const StorageID & table_id_) : IStorage(table_id_) +{ + StorageInMemoryMetadata storage_metadata; + + auto description = ColumnsDescription{ + ColumnDescription{"database", std::make_shared(), "The name of the database the table is in."}, + ColumnDescription{"name", std::make_shared(), "Table name."}, + ColumnDescription{"uuid", std::make_shared(), "Table uuid (Atomic database)."}, + ColumnDescription{"metadata_path", std::make_shared(), "Path to the table metadata in the file system."}, + ColumnDescription{"is_permanently", std::make_shared(), "Table was detached permanently."}, + }; + + storage_metadata.setColumns(std::move(description)); + + setInMemoryMetadata(storage_metadata); +} + +void StorageSystemDetachedTables::read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum /*processed_stage*/, + const size_t max_block_size, + size_t /*num_streams*/) +{ + storage_snapshot->check(column_names); + auto sample_block = storage_snapshot->metadata->getSampleBlock(); + + auto [columns_mask, res_block] = getQueriedColumnsMaskAndHeader(sample_block, column_names); + + auto reading = std::make_unique( + column_names, query_info, storage_snapshot, context, std::move(res_block), std::move(columns_mask), max_block_size, true); + + query_plan.addStep(std::move(reading)); +} +} diff --git a/src/Storages/System/StorageSystemDetachedTables.h b/src/Storages/System/StorageSystemDetachedTables.h new file mode 100644 index 00000000000..cd042f51eaa --- /dev/null +++ b/src/Storages/System/StorageSystemDetachedTables.h @@ -0,0 +1,32 @@ +#pragma once + +#include + + +namespace DB +{ + +class Context; + +/** Implements the system table `detached_tables`, which allows you to get information about detached tables. 
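+  * Rows are built from each database's snapshot of detached tables (see IDatabase::getDetachedTablesIterator).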
+ */ +class StorageSystemDetachedTables final : public IStorage +{ +public: + explicit StorageSystemDetachedTables(const StorageID & table_id_); + + std::string getName() const override { return "SystemDetachedTables"; } + + void read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & /*query_info*/, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + bool isSystemStorage() const override { return true; } +}; +} diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 783b899c978..5a8d6315d57 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -1,29 +1,31 @@ +#include "StorageSystemTables.h" + +#include #include -#include +#include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include #include #include #include #include #include -#include +#include +#include +#include +#include +#include +#include +#include #include @@ -32,8 +34,7 @@ namespace DB { -StorageSystemTables::StorageSystemTables(const StorageID & table_id_) - : IStorage(table_id_) +StorageSystemTables::StorageSystemTables(const StorageID & table_id_) : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; @@ -46,9 +47,13 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) {"data_paths", std::make_shared(std::make_shared()), "Paths to the table data in the file systems."}, {"metadata_path", std::make_shared(), "Path to the table metadata in the file system."}, {"metadata_modification_time", std::make_shared(), "Time of latest modification of the table metadata."}, - {"metadata_version", std::make_shared(), "Metadata version for ReplicatedMergeTree table, 0 for non ReplicatedMergeTree table."}, + {"metadata_version", + std::make_shared(), + "Metadata version for ReplicatedMergeTree table, 0 for non ReplicatedMergeTree table."}, {"dependencies_database", std::make_shared(std::make_shared()), "Database dependencies."}, - {"dependencies_table", std::make_shared(std::make_shared()), "Table dependencies (materialized views the current table)."}, + {"dependencies_table", + std::make_shared(std::make_shared()), + "Table dependencies (materialized views the current table)."}, {"create_table_query", std::make_shared(), "The query that was used to create the table."}, {"engine_full", std::make_shared(), "Parameters of the table engine."}, {"as_select", std::make_shared(), "SELECT query for view."}, @@ -57,676 +62,54 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) {"primary_key", std::make_shared(), "The primary key expression specified in the table."}, {"sampling_key", std::make_shared(), "The sampling key expression specified in the table."}, {"storage_policy", std::make_shared(), "The storage policy."}, - {"total_rows", std::make_shared(std::make_shared()), - "Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise NULL (including underlying Buffer table)." - }, - {"total_bytes", std::make_shared(std::make_shared()), - "Total number of bytes, if it is possible to quickly determine exact number " - "of bytes for the table on storage, otherwise NULL (does not includes any underlying storage). 
" - "If the table stores data on disk, returns used space on disk (i.e. compressed). " - "If the table stores data in memory, returns approximated number of used bytes in memory." - }, - {"total_bytes_uncompressed", std::make_shared(std::make_shared()), - "Total number of uncompressed bytes, if it's possible to quickly determine the exact number " - "of bytes from the part checksums for the table on storage, otherwise NULL (does not take underlying storage (if any) into account)." - }, + {"total_rows", + std::make_shared(std::make_shared()), + "Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise NULL (including " + "underlying Buffer table)."}, + {"total_bytes", + std::make_shared(std::make_shared()), + "Total number of bytes, if it is possible to quickly determine exact number " + "of bytes for the table on storage, otherwise NULL (does not includes any underlying storage). " + "If the table stores data on disk, returns used space on disk (i.e. compressed). " + "If the table stores data in memory, returns approximated number of used bytes in memory."}, + {"total_bytes_uncompressed", + std::make_shared(std::make_shared()), + "Total number of uncompressed bytes, if it's possible to quickly determine the exact number " + "of bytes from the part checksums for the table on storage, otherwise NULL (does not take underlying storage (if any) into " + "account)."}, {"parts", std::make_shared(std::make_shared()), "The total number of parts in this table."}, - {"active_parts", std::make_shared(std::make_shared()), "The number of active parts in this table."}, - {"total_marks", std::make_shared(std::make_shared()), "The total number of marks in all parts in this table."}, - {"lifetime_rows", std::make_shared(std::make_shared()), - "Total number of rows INSERTed since server start (only for Buffer tables)." - }, - {"lifetime_bytes", std::make_shared(std::make_shared()), - "Total number of bytes INSERTed since server start (only for Buffer tables)." - }, + {"active_parts", + std::make_shared(std::make_shared()), + "The number of active parts in this table."}, + {"total_marks", + std::make_shared(std::make_shared()), + "The total number of marks in all parts in this table."}, + {"lifetime_rows", + std::make_shared(std::make_shared()), + "Total number of rows INSERTed since server start (only for Buffer tables)."}, + {"lifetime_bytes", + std::make_shared(std::make_shared()), + "Total number of bytes INSERTed since server start (only for Buffer tables)."}, {"comment", std::make_shared(), "The comment for the table."}, - {"has_own_data", std::make_shared(), - "Flag that indicates whether the table itself stores some data on disk or only accesses some other source." - }, - {"loading_dependencies_database", std::make_shared(std::make_shared()), - "Database loading dependencies (list of objects which should be loaded before the current object)." - }, - {"loading_dependencies_table", std::make_shared(std::make_shared()), - "Table loading dependencies (list of objects which should be loaded before the current object)." - }, - {"loading_dependent_database", std::make_shared(std::make_shared()), - "Dependent loading database." - }, - {"loading_dependent_table", std::make_shared(std::make_shared()), - "Dependent loading table." 
- }, + {"has_own_data", + std::make_shared(), + "Flag that indicates whether the table itself stores some data on disk or only accesses some other source."}, + {"loading_dependencies_database", + std::make_shared(std::make_shared()), + "Database loading dependencies (list of objects which should be loaded before the current object)."}, + {"loading_dependencies_table", + std::make_shared(std::make_shared()), + "Table loading dependencies (list of objects which should be loaded before the current object)."}, + {"loading_dependent_database", std::make_shared(std::make_shared()), "Dependent loading database."}, + {"loading_dependent_table", std::make_shared(std::make_shared()), "Dependent loading table."}, }; - description.setAliases({ - {"table", std::make_shared(), "name"} - }); + description.setAliases({{"table", std::make_shared(), "name"}}); storage_metadata.setColumns(std::move(description)); setInMemoryMetadata(storage_metadata); } - -namespace -{ - -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) -{ - Block sample { - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine") - }; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } - } - - Block block {ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -/// Avoid heavy operation on tables if we only queried columns that we can get without table object. -/// Otherwise it will require table initialization for Lazy database. 
-bool needTable(const DatabasePtr & database, const Block & header) -{ - if (database->getEngineName() != "Lazy") - return true; - - static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; - for (const auto & column : header.getColumnsWithTypeAndName()) - { - if (columns_without_table.find(column.name) == columns_without_table.end()) - return true; - } - return false; -} - - -class TablesBlockSource : public ISource -{ -public: - TablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = tables_->size(); - tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - tables.insert(tables_->getDataAt(idx).toString()); - } - - String getName() const override { return "Tables"; } - -protected: - Chunk generate() override - { - if (done) - return {}; - - MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - size_t rows_count = 0; - while (rows_count < max_block_size) - { - if (tables_it && !tables_it->isValid()) - ++database_idx; - - while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - { - /// Database was deleted just now or the user has no access. - ++database_idx; - continue; - } - - break; - } - - /// This is for temporary tables. They are output in single block regardless to max_block_size. - if (database_idx >= databases->size()) - { - if (context->hasSessionContext()) - { - Tables external_tables = context->getSessionContext()->getExternalTables(); - - for (auto & table : external_tables) - { - size_t src_index = 0; - size_t res_index = 0; - - // database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // name - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.first); - - // uuid - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getStorageID().uuid); - - // engine - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - // is_temporary - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(1u); - - // data_paths - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_path - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_modification_time - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_version - // Temporary tables does not support replication - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_table - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // create_table_query - if (columns_mask[src_index++]) - { - auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); - ASTPtr ast = temp_db ? 
temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; - res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); - } - - // engine_full - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - const auto & settings = context->getSettingsRef(); - while (src_index < columns_mask.size()) - { - // total_rows - if (src_index == 19 && columns_mask[src_index]) - { - if (auto total_rows = table.second->totalRows(settings)) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - // total_bytes - else if (src_index == 20 && columns_mask[src_index]) - { - if (auto total_bytes = table.second->totalBytes(settings)) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - /// Fill the rest columns with defaults - else if (columns_mask[src_index]) - res_columns[res_index++]->insertDefault(); - src_index++; - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - done = true; - return Chunk(std::move(res_columns), num_rows); - } - - const bool need_to_check_access_for_tables = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!tables_it || !tables_it->isValid()) - tables_it = database->getTablesIterator(context); - - const bool need_table = needTable(database, getPort().getHeader()); - - for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) - { - auto table_name = tables_it->name(); - if (!tables.contains(table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) - continue; - - StoragePtr table = nullptr; - TableLockHolder lock; - if (need_table) - { - table = tables_it->table(); - if (!table) - // Table might have just been removed or detached for Lazy engine (see DatabaseLazy::tryGetTable()) - continue; - - /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) - /// and it's not protected internally by other mutexes - static const size_t DATA_PATHS_INDEX = 5; - if (columns_mask[DATA_PATHS_INDEX]) - { - lock = table->tryLockForShare(context->getCurrentQueryId(), - context->getSettingsRef().lock_acquire_timeout); - if (!lock) - // Table was dropped while acquiring the lock, skipping table - continue; - } - } - ++rows_count; - - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(tables_it->uuid()); - - if (columns_mask[src_index++]) - { - chassert(table != nullptr); - res_columns[res_index++]->insert(table->getName()); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(0u); // is_temporary - - if (columns_mask[src_index++]) - { - chassert(lock != nullptr); - Array table_paths_array; - auto paths = table->getDataPaths(); - table_paths_array.reserve(paths.size()); - for (const String & path : paths) - table_paths_array.push_back(path); - res_columns[res_index++]->insert(table_paths_array); - /// We don't need the lock anymore - lock = nullptr; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); - - if (columns_mask[src_index++]) - 
res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); - - StorageMetadataPtr metadata_snapshot; - if (table) - metadata_snapshot = table->getInMemoryMetadataPtr(); - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && table->supportsReplication()) - res_columns[res_index++]->insert(metadata_snapshot->metadata_version); - else - res_columns[res_index++]->insertDefault(); - } - - { - Array views_table_name_array; - Array views_database_name_array; - if (columns_mask[src_index] || columns_mask[src_index + 1]) - { - const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); - - views_table_name_array.reserve(view_ids.size()); - views_database_name_array.reserve(view_ids.size()); - for (const auto & view_id : view_ids) - { - views_table_name_array.push_back(view_id.table_name); - views_database_name_array.push_back(view_id.database_name); - } - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_database_name_array); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_table_name_array); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) - { - ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); - auto * ast_create = ast ? ast->as() : nullptr; - - if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) - { - ast_create->uuid = UUIDHelpers::Nil; - ast_create->to_inner_uuid = UUIDHelpers::Nil; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); - - if (columns_mask[src_index++]) - { - String engine_full; - - if (ast_create && ast_create->storage) - { - engine_full = format({context, *ast_create->storage}); - - static const char * const extra_head = " ENGINE = "; - if (startsWith(engine_full, extra_head)) - engine_full = engine_full.substr(strlen(extra_head)); - } - - res_columns[res_index++]->insert(engine_full); - } - - if (columns_mask[src_index++]) - { - String as_select; - if (ast_create && ast_create->select) - as_select = format({context, *ast_create->select}); - res_columns[res_index++]->insert(as_select); - } - } - else - src_index += 3; - - ASTPtr expression_ptr; - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto policy = table ? 
table->getStoragePolicy() : nullptr; - if (policy) - res_columns[res_index++]->insert(policy->getName()); - else - res_columns[res_index++]->insertDefault(); - } - - auto settings = context->getSettingsRef(); - settings.select_sequential_consistency = 0; - if (columns_mask[src_index++]) - { - auto total_rows = table ? table->totalRows(settings) : std::nullopt; - if (total_rows) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes = table->totalBytes(settings); - if (total_bytes) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); - if (total_bytes_uncompressed) - res_columns[res_index++]->insert(*total_bytes_uncompressed); - else - res_columns[res_index++]->insertDefault(); - } - - auto table_merge_tree = std::dynamic_pointer_cast(table); - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - { - res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); - } - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_rows = table ? table->lifetimeRows() : std::nullopt; - if (lifetime_rows) - res_columns[res_index++]->insert(*lifetime_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_bytes = table ? 
table->lifetimeBytes() : std::nullopt; - if (lifetime_bytes) - res_columns[res_index++]->insert(*lifetime_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot) - res_columns[res_index++]->insert(metadata_snapshot->comment); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table) - res_columns[res_index++]->insert(table->storesDataOnDisk()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) - { - auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); - auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); - - Array dependencies_databases; - Array dependencies_tables; - dependencies_databases.reserve(dependencies.size()); - dependencies_tables.reserve(dependencies.size()); - for (const auto & dependency : dependencies) - { - dependencies_databases.push_back(dependency.database_name); - dependencies_tables.push_back(dependency.table_name); - } - - Array dependents_databases; - Array dependents_tables; - dependents_databases.reserve(dependents.size()); - dependents_tables.reserve(dependents.size()); - for (const auto & dependent : dependents) - { - dependents_databases.push_back(dependent.database_name); - dependents_tables.push_back(dependent.table_name); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_tables); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_tables); - - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - return Chunk(std::move(res_columns), num_rows); - } -private: - std::vector columns_mask; - UInt64 max_block_size; - ColumnPtr databases; - NameSet tables; - size_t database_idx = 0; - DatabaseTablesIteratorPtr tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; -}; - -} - -class ReadFromSystemTables : public SourceStepWithFilter -{ -public: - std::string getName() const override { return "ReadFromSystemTables"; } - void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_) - : SourceStepWithFilter( - DataStream{.header = std::move(sample_block)}, - column_names_, - query_info_, - storage_snapshot_, - context_) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - { - } - - void applyFilters(ActionDAGNodes added_filter_nodes) override; - -private: - std::vector columns_mask; - size_t max_block_size; - - ColumnPtr filtered_databases_column; - ColumnPtr filtered_tables_column; -}; - void StorageSystemTables::read( QueryPlan & query_plan, const Names & column_names, @@ -743,28 +126,8 @@ void StorageSystemTables::read( auto [columns_mask, res_block] = getQueriedColumnsMaskAndHeader(sample_block, column_names); auto reading = std::make_unique( - column_names, query_info, storage_snapshot, 
context, std::move(res_block), std::move(columns_mask), max_block_size); + column_names, query_info, storage_snapshot, context, std::move(res_block), std::move(columns_mask), max_block_size, false); query_plan.addStep(std::move(reading)); } - -void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - - const ActionsDAG::Node * predicate = nullptr; - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); - - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); -} - -void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - Pipe pipe(std::make_shared( - std::move(columns_mask), getOutputStream().header, max_block_size, std::move(filtered_databases_column), std::move(filtered_tables_column), context)); - pipeline.init(std::move(pipe)); -} - } diff --git a/src/Storages/System/TablesBlockSource.cpp b/src/Storages/System/TablesBlockSource.cpp new file mode 100644 index 00000000000..4ea5bb53deb --- /dev/null +++ b/src/Storages/System/TablesBlockSource.cpp @@ -0,0 +1,569 @@ +#include "TablesBlockSource.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace +{ + +/// Avoid heavy operation on tables if we only queried columns that we can get without table object. +/// Otherwise it will require table initialization for Lazy database. +bool needTable(const DatabasePtr & database, const Block & header) +{ + if (database->getEngineName() != "Lazy") + return true; + + static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; + for (const auto & column : header.getColumnsWithTypeAndName()) + { + if (columns_without_table.find(column.name) == columns_without_table.end()) + return true; + } + return false; +} + +} + +Chunk TablesBlockSource::generate() +{ + if (done) + return {}; + + MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + size_t rows_count = 0; + while (rows_count < max_block_size) + { + if (tables_it && !tables_it->isValid()) + ++database_idx; + + while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + { + /// Database was deleted just now or the user has no access. + ++database_idx; + continue; + } + + break; + } + + /// This is for temporary tables. They are output in single block regardless to max_block_size. 
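+        /// Temporary tables live only in the session context, so they are emitted after all regular databases have been processed.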
+ if (database_idx >= databases->size()) + { + if (context->hasSessionContext()) + { + Tables external_tables = context->getSessionContext()->getExternalTables(); + + for (auto & table : external_tables) + { + size_t src_index = 0; + size_t res_index = 0; + + // database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // name + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.first); + + // uuid + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getStorageID().uuid); + + // engine + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + // is_temporary + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(1u); + + // data_paths + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_path + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_modification_time + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_version + // Temporary tables does not support replication + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_table + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // create_table_query + if (columns_mask[src_index++]) + { + auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); + ASTPtr ast + = temp_db ? temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + } + + // engine_full + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + const auto & settings = context->getSettingsRef(); + while (src_index < columns_mask.size()) + { + // total_rows + if (src_index == 19 && columns_mask[src_index]) + { + if (auto total_rows = table.second->totalRows(settings)) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + // total_bytes + else if (src_index == 20 && columns_mask[src_index]) + { + if (auto total_bytes = table.second->totalBytes(settings)) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + /// Fill the rest columns with defaults + else if (columns_mask[src_index]) + res_columns[res_index++]->insertDefault(); + src_index++; + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + done = true; + return Chunk(std::move(res_columns), num_rows); + } + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!tables_it || !tables_it->isValid()) + tables_it = database->getTablesIterator(context); + + const bool need_table = needTable(database, getPort().getHeader()); + + for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) + { + auto table_name = tables_it->name(); + if (!tables.contains(table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) + continue; + + StoragePtr table = nullptr; + TableLockHolder lock; + if (need_table) + { + table = tables_it->table(); + if (!table) + // Table might have just been removed or detached for Lazy engine (see 
DatabaseLazy::tryGetTable()) + continue; + + /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) + /// and it's not protected internally by other mutexes + static const size_t DATA_PATHS_INDEX = 5; + if (columns_mask[DATA_PATHS_INDEX]) + { + lock = table->tryLockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); + if (!lock) + // Table was dropped while acquiring the lock, skipping table + continue; + } + } + ++rows_count; + + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(tables_it->uuid()); + + if (columns_mask[src_index++]) + { + chassert(table != nullptr); + res_columns[res_index++]->insert(table->getName()); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(0u); // is_temporary + + if (columns_mask[src_index++]) + { + chassert(lock != nullptr); + Array table_paths_array; + auto paths = table->getDataPaths(); + table_paths_array.reserve(paths.size()); + for (const String & path : paths) + table_paths_array.push_back(path); + res_columns[res_index++]->insert(table_paths_array); + /// We don't need the lock anymore + lock = nullptr; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); + + StorageMetadataPtr metadata_snapshot; + if (table) + metadata_snapshot = table->getInMemoryMetadataPtr(); + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && table->supportsReplication()) + res_columns[res_index++]->insert(metadata_snapshot->metadata_version); + else + res_columns[res_index++]->insertDefault(); + } + + { + Array views_table_name_array; + Array views_database_name_array; + if (columns_mask[src_index] || columns_mask[src_index + 1]) + { + const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); + + views_table_name_array.reserve(view_ids.size()); + views_database_name_array.reserve(view_ids.size()); + for (const auto & view_id : view_ids) + { + views_table_name_array.push_back(view_id.table_name); + views_database_name_array.push_back(view_id.database_name); + } + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_database_name_array); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_table_name_array); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) + { + ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); + auto * ast_create = ast ? ast->as() : nullptr; + + if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) + { + ast_create->uuid = UUIDHelpers::Nil; + ast_create->to_inner_uuid = UUIDHelpers::Nil; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(ast ? 
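+                    /// create_table_query may be empty: tryGetCreateTableQuery() can return nullptr,
+                    /// e.g. when no metadata is available for the table.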
format({context, *ast}) : ""); + + if (columns_mask[src_index++]) + { + String engine_full; + + if (ast_create && ast_create->storage) + { + engine_full = format({context, *ast_create->storage}); + + static const char * const extra_head = " ENGINE = "; + if (startsWith(engine_full, extra_head)) + engine_full = engine_full.substr(strlen(extra_head)); + } + + res_columns[res_index++]->insert(engine_full); + } + + if (columns_mask[src_index++]) + { + String as_select; + if (ast_create && ast_create->select) + as_select = format({context, *ast_create->select}); + res_columns[res_index++]->insert(as_select); + } + } + else + src_index += 3; + + ASTPtr expression_ptr; + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto policy = table ? table->getStoragePolicy() : nullptr; + if (policy) + res_columns[res_index++]->insert(policy->getName()); + else + res_columns[res_index++]->insertDefault(); + } + + auto settings = context->getSettingsRef(); + settings.select_sequential_consistency = 0; + if (columns_mask[src_index++]) + { + auto total_rows = table ? table->totalRows(settings) : std::nullopt; + if (total_rows) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes = table->totalBytes(settings); + if (total_bytes) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); + if (total_bytes_uncompressed) + res_columns[res_index++]->insert(*total_bytes_uncompressed); + else + res_columns[res_index++]->insertDefault(); + } + + auto table_merge_tree = std::dynamic_pointer_cast(table); + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + { + res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); + } + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_rows = table ? 
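+                /// lifetime_rows/lifetime_bytes count rows/bytes INSERTed since server start;
+                /// only a few engines (e.g. Buffer) track them, the rest return std::nullopt.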
table->lifetimeRows() : std::nullopt; + if (lifetime_rows) + res_columns[res_index++]->insert(*lifetime_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_bytes = table ? table->lifetimeBytes() : std::nullopt; + if (lifetime_bytes) + res_columns[res_index++]->insert(*lifetime_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot) + res_columns[res_index++]->insert(metadata_snapshot->comment); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table) + res_columns[res_index++]->insert(table->storesDataOnDisk()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) + { + auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); + auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); + + Array dependencies_databases; + Array dependencies_tables; + dependencies_databases.reserve(dependencies.size()); + dependencies_tables.reserve(dependencies.size()); + for (const auto & dependency : dependencies) + { + dependencies_databases.push_back(dependency.database_name); + dependencies_tables.push_back(dependency.table_name); + } + + Array dependents_databases; + Array dependents_tables; + dependents_databases.reserve(dependents.size()); + dependents_tables.reserve(dependents.size()); + for (const auto & dependent : dependents) + { + dependents_databases.push_back(dependent.database_name); + dependents_tables.push_back(dependent.table_name); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_tables); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_tables); + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + return Chunk(std::move(res_columns), num_rows); +} + +Chunk DetachedTablesBlockSource::generate() +{ + if (done) + return {}; + + MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + { + LOG_DEBUG(lg, "Database was deleted just now or the user has no access"); + continue; + } + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!detached_tables_it || !detached_tables_it->isValid()) + detached_tables_it = database->getDetachedTablesIterator(context, {}, false); + + for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) + { + const auto detached_table_name = detached_tables_it->table(); + LOG_DEBUG(lg, "detached_table_name={}", detached_table_name); + + if (!detached_tables.contains(detached_table_name)) + continue; + + if 
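+                /// A user without the database-wide SHOW_TABLES grant may still be allowed to see
+                /// individual tables, so access is re-checked per detached table.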
(need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) + continue; + + fillResultColumnsByDetachedTableIterator(result_columns); + ++rows_count; + } + } + + const UInt64 num_rows = result_columns.at(0)->size(); + done = true; + return Chunk(std::move(result_columns), num_rows); +} + +void DetachedTablesBlockSource::fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const +{ + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->database()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->table()); + + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->uuid()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->metadataPath()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->isPermanently()); +} +} diff --git a/src/Storages/System/TablesBlockSource.h b/src/Storages/System/TablesBlockSource.h new file mode 100644 index 00000000000..9698fdd494c --- /dev/null +++ b/src/Storages/System/TablesBlockSource.h @@ -0,0 +1,96 @@ +#pragma once + +#include +#include +#include +#include + +namespace DB +{ + +class TablesBlockSource : public ISource +{ +public: + TablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = tables_->size(); + tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + tables.insert(tables_->getDataAt(idx).toString()); + } + + String getName() const override { return "Tables"; } + +protected: + Chunk generate() override; + +private: + std::vector columns_mask; + UInt64 max_block_size; + ColumnPtr databases; + NameSet tables; + size_t database_idx = 0; + DatabaseTablesIteratorPtr tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; +}; + +class DetachedTablesBlockSource : public ISource +{ +public: + DetachedTablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr detached_tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = detached_tables_->size(); + detached_tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + { + detached_tables.insert(detached_tables_->getDataAt(idx).toString()); + } + } + + String getName() const override { return "DetachedTables"; } + +protected: + Chunk generate() override; + +private: + const std::vector columns_mask; + const UInt64 max_block_size; + const ColumnPtr databases; + NameSet detached_tables; + DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; + + // temp log for debug + LoggerPtr lg = getLogger("DetachedTablesBlockSource"); + + void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const; +}; +} diff --git 
a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 6ff86b26ca9..97eda1db3fa 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -129,6 +130,7 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, b attachNoDescription(context, system_database, "zeros_mt", "Multithreaded version of system.zeros.", true); attach(context, system_database, "databases", "Lists all databases of the current server."); attachNoDescription(context, system_database, "tables", "Lists all tables of the current server."); + attachNoDescription(context, system_database, "detached_tables", "Lists all detached tables of the current server."); attachNoDescription(context, system_database, "columns", "Lists all columns from all tables of the current server."); attach(context, system_database, "functions", "Contains a list of all available ordinary and aggregate functions with their descriptions."); attach(context, system_database, "events", "Contains profiling events and their current value."); diff --git a/tests/integration/test_system_detached_tables/__init__.py b/tests/integration/test_system_detached_tables/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py new file mode 100644 index 00000000000..2e0165deba2 --- /dev/null +++ b/tests/integration/test_system_detached_tables/test.py @@ -0,0 +1,38 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node_default", stay_alive=True) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_system_detached_tables(): + node.query("CREATE TABLE test_table (n Int64) ENGINE=MergeTree ORDER BY n;") + node.query("CREATE TABLE test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n;") + + result = node.query("SELECT * FROM system.detached_tables") + assert result == "" + + node.query("DETACH TABLE test_table") + node.query("DETACH TABLE test_table_perm PERMANENTLY") + + result = node.query("SELECT name FROM system.detached_tables") + assert result == "test_table\ntest_table_perm\n" + + node.restart_clickhouse() + + result = node.query("SELECT name FROM system.detached_tables") + assert result == "test_table_perm\n" + + node.restart_clickhouse() + + result = node.query("SELECT name FROM system.detached_tables") + assert result == "test_table_perm\n" diff --git a/tests/queries/0_stateless/03172_system_detached_tables.reference b/tests/queries/0_stateless/03172_system_detached_tables.reference new file mode 100644 index 00000000000..1cf9e0275c9 --- /dev/null +++ b/tests/queries/0_stateless/03172_system_detached_tables.reference @@ -0,0 +1,5 @@ +test_system_detached_tables test_table 0 +test_system_detached_tables test_table_perm 1 +test_system_detached_tables test_table 0 +test_system_detached_tables test_table_perm 1 +test_system_detached_tables test_table 0 diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql new file mode 100644 index 00000000000..13465d02b80 --- /dev/null +++ b/tests/queries/0_stateless/03172_system_detached_tables.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel + +DROP 
DATABASE IF EXISTS test_system_detached_tables; +CREATE DATABASE IF NOT EXISTS test_system_detached_tables; + +CREATE TABLE test_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT * FROM system.detached_tables; + +DETACH TABLE test_system_detached_tables.test_table; +SELECT database, name, is_permanently FROM system.detached_tables; + +ATTACH TABLE test_system_detached_tables.test_table; + +CREATE TABLE test_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT * FROM system.detached_tables; + +DETACH TABLE test_system_detached_tables.test_table_perm PERMANENTLY; +SELECT database, name, is_permanently FROM system.detached_tables; + +DETACH TABLE test_system_detached_tables.test_table SYNC; +SELECT database, name, is_permanently FROM system.detached_tables; + +SELECT database, name, is_permanently FROM system.detached_tables WHERE name='test_table'; + +DROP DATABASE test_system_detached_tables; + + + From 21936f32a655b1008b6233ee274549b0ce2b33c7 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Fri, 21 Jun 2024 12:32:54 +0000 Subject: [PATCH 02/80] fix style --- src/Databases/DatabaseOrdinary.cpp | 2 +- src/Databases/DatabasesCommon.cpp | 4 ++-- src/Databases/IDatabase.h | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 046cbfa40be..ac40bf4e90e 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -243,7 +243,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables const auto storage_id = table->getStorageID(); SnapshotDetachedTable snapshot_detached_table; - snapshot_detached_table.detabase = storage_id.getDatabaseName(); + snapshot_detached_table.database = storage_id.getDatabaseName(); snapshot_detached_table.table = detached_table_name; snapshot_detached_table.uuid = storage_id.uuid; snapshot_detached_table.is_permanently = true; diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 312c0a4477f..c4cad663878 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -250,7 +250,7 @@ DatabaseDetachedTablesSnapshotIteratorPtr DatabaseWithOwnTablesBase::getDetached if (filter_by_table_name(table_name)) { SnapshotDetachedTable snapshot_detached_table; - snapshot_detached_table.detabase = storage->getStorageID().getDatabaseName(); + snapshot_detached_table.database = storage->getStorageID().getDatabaseName(); snapshot_detached_table.table = table_name; if (storage->getStorageID().hasUUID()) { @@ -290,7 +290,7 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n res = it->second; SnapshotDetachedTable snapshot_detached_table; - snapshot_detached_table.detabase = it->second->getStorageID().getDatabaseName(); + snapshot_detached_table.database = it->second->getStorageID().getDatabaseName(); snapshot_detached_table.table = it->first; if (it->second->getStorageID().hasUUID()) { diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index 7c6b3b3004c..2ce4650798c 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -114,7 +114,7 @@ using DatabaseTablesIteratorPtr = std::unique_ptr; struct SnapshotDetachedTable final { - String detabase; + String database; String table; UUID uuid = UUIDHelpers::Nil; String metadata_path; @@ -148,7 +148,7 @@ public: bool isValid() const { return it != snapshot.end(); } - String database() const { return 
it->second.detabase; } + String database() const { return it->second.database; } String table() const { return it->second.table; } From 7aa7b7977221d5bc9b65d0e09d272ffca96acddc Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Fri, 21 Jun 2024 16:33:09 +0000 Subject: [PATCH 03/80] refactor --- .../system-tables/detached_tables.md | 9 +- src/Databases/DatabaseAtomic.h | 1 - src/Databases/DatabaseOrdinary.cpp | 6 +- src/Storages/System/ReadFromSystemTables.cpp | 138 ---- src/Storages/System/ReadFromSystemTables.h | 47 -- .../System/StorageSystemDetachedTables.cpp | 252 +++++- src/Storages/System/StorageSystemTables.cpp | 761 ++++++++++++++++-- src/Storages/System/TablesBlockSource.cpp | 569 ------------- src/Storages/System/TablesBlockSource.h | 96 --- .../test_system_detached_tables/test.py | 6 +- .../03172_system_detached_tables.sql | 8 +- 11 files changed, 959 insertions(+), 934 deletions(-) delete mode 100644 src/Storages/System/ReadFromSystemTables.cpp delete mode 100644 src/Storages/System/ReadFromSystemTables.h delete mode 100644 src/Storages/System/TablesBlockSource.cpp delete mode 100644 src/Storages/System/TablesBlockSource.h diff --git a/docs/en/operations/system-tables/detached_tables.md b/docs/en/operations/system-tables/detached_tables.md index d86e58d2899..f669c8fd251 100644 --- a/docs/en/operations/system-tables/detached_tables.md +++ b/docs/en/operations/system-tables/detached_tables.md @@ -9,14 +9,14 @@ Columns: - `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in. -- `name` ([String](../../sql-reference/data-types/string.md)) — Table name. +- `table` ([String](../../sql-reference/data-types/string.md)) — Table name. - `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database). -- `is_permanently` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag indicates that the table was detached PERMANENTLY. - - `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system. +- `is_permanently` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag indicates that the table was detached PERMANENTLY. 
+ **Example** @@ -28,9 +28,8 @@ SELECT * FROM system.detached_tables FORMAT Vertical; Row 1: ────── database: base -name: t1 +table: t1 uuid: 81b1c20a-b7c6-4116-a2ce-7583fb6b6736 -data_paths: ['/var/lib/clickhouse/store/81b/81b1c20a-b7c6-4116-a2ce-7583fb6b6736/'] metadata_path: /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql is_permanently: 1 ``` diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 47afc4dc5ef..25c5cdfbde1 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -85,7 +85,6 @@ protected: //TODO store path in DatabaseWithOwnTables::tables using NameToPathMap = std::unordered_map; - NameToPathMap table_name_to_path TSA_GUARDED_BY(mutex); DetachedTables detached_tables TSA_GUARDED_BY(mutex); diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index ac40bf4e90e..7086f6e628e 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -187,7 +187,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables size_t prev_tables_count = metadata.parsed_tables.size(); size_t prev_total_dictionaries = metadata.total_dictionaries; - auto process_metadata = [&metadata, is_startup, local_context, this](const String & file_name) mutable + auto process_metadata = [&metadata, is_startup, local_context, this](const String & file_name) { fs::path path(getMetadataPath()); fs::path file_path(file_name); @@ -227,7 +227,6 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables permanently_detached_tables.push_back(table_name); LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name)); - // @TODO refactoring auto parsed_table_metadata = ParsedTableMetadata{full_path.string(), ast}; const auto & query = parsed_table_metadata.ast->as(); @@ -249,10 +248,9 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables snapshot_detached_table.is_permanently = true; snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table); - snapshot_detached_tables.emplace(detached_table_name, std::move(snapshot_detached_table)); - LOG_TRACE(log, "Add detached table {} to system.detached_tables", detached_table_name); + LOG_TRACE(log, "Add permanently detached table {} to system.detached_tables", detached_table_name); return; } diff --git a/src/Storages/System/ReadFromSystemTables.cpp b/src/Storages/System/ReadFromSystemTables.cpp deleted file mode 100644 index 6e9c6a869d1..00000000000 --- a/src/Storages/System/ReadFromSystemTables.cpp +++ /dev/null @@ -1,138 +0,0 @@ -#include "ReadFromSystemTables.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace DB -{ - -namespace -{ - -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr 
getFilteredTables( - const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, const bool need_detached_tables) -{ - Block sample{ - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - if (need_detached_tables) - { - auto table_it = database->getDetachedTablesIterator(context, {}, false); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->table()); - } - } - else - { - auto table_it = database->getTablesIterator(context); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } - } - } - - Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -} - -ReadFromSystemTables::ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_, - const bool need_detached_tables_) - : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , need_detached_tables(need_detached_tables_) -{ -} - -void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - - const ActionsDAG::Node * predicate = nullptr; - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); - - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context, need_detached_tables); -} - -void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - if (need_detached_tables) - { - pipeline.init(createPipe()); - } - else - { - pipeline.init(createPipe()); - } -} -} diff --git a/src/Storages/System/ReadFromSystemTables.h b/src/Storages/System/ReadFromSystemTables.h deleted file mode 100644 index f8280ffcc8d..00000000000 --- a/src/Storages/System/ReadFromSystemTables.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class ReadFromSystemTables : public SourceStepWithFilter -{ -public: - std::string getName() const override { return "ReadFromSystemTables"; } - 
void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_, - bool need_detached_tables); - - void applyFilters(ActionDAGNodes added_filter_nodes) override; - -private: - std::vector columns_mask; - size_t max_block_size; - const bool need_detached_tables; - - ColumnPtr filtered_databases_column; - ColumnPtr filtered_tables_column; - - template - Pipe createPipe() - { - return Pipe(std::make_shared( - std::move(columns_mask), - getOutputStream().header, - max_block_size, - std::move(filtered_databases_column), - std::move(filtered_tables_column), - context)); - } -}; -} diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp index 1596ac6a046..9805fa2a8cf 100644 --- a/src/Storages/System/StorageSystemDetachedTables.cpp +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -1,28 +1,232 @@ #include "StorageSystemDetachedTables.h" +#include #include #include #include #include +#include +#include #include +#include +#include #include #include #include -#include -#include #include +#include + +#include namespace DB { +namespace +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + MutableColumnPtr column = ColumnString::create(); + + const auto databases = DatabaseCatalog::instance().getDatabases(); + for (const auto & database_name : databases | boost::adaptors::map_keys) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; /// We don't want to show the internal database for temporary tables in system.tables + + column->insert(database_name); + } + + Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; + VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + return block.getByPosition(0).column; +} + +ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) +{ + Block sample{ + ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), + ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; + + MutableColumnPtr database_column = ColumnString::create(); + MutableColumnPtr engine_column; + + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); + if (dag) + { + bool filter_by_engine = false; + for (const auto * input : dag->getInputs()) + if (input->result_name == "engine") + filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + auto table_it = database->getDetachedTablesIterator(context, {}, false); + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->table()); + } + } + + Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + 
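+        /// Run only the name/engine part of the pushed-down predicate over the collected candidates,
+        /// so the block source later visits just the detached tables that can still match the query.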
VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +class DetachedTablesBlockSource : public ISource +{ +public: + DetachedTablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr detached_tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = detached_tables_->size(); + detached_tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + { + detached_tables.insert(detached_tables_->getDataAt(idx).toString()); + } + } + + String getName() const override { return "DetachedTables"; } + +protected: + Chunk generate() override + { + if (done) + return {}; + + MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + continue; + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!detached_tables_it || !detached_tables_it->isValid()) + detached_tables_it = database->getDetachedTablesIterator(context, {}, false); + + for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) + { + const auto detached_table_name = detached_tables_it->table(); + + if (!detached_tables.contains(detached_table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) + continue; + + fillResultColumnsByDetachedTableIterator(result_columns); + ++rows_count; + } + } + + const UInt64 num_rows = result_columns.at(0)->size(); + done = true; + return Chunk(std::move(result_columns), num_rows); + } + +private: + const std::vector columns_mask; + const UInt64 max_block_size; + const ColumnPtr databases; + NameSet detached_tables; + DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; + + void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const + { + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->database()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->table()); + + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->uuid()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->metadataPath()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->isPermanently()); + } +}; + +} + +class ReadFromSystemDetachedTables : public SourceStepWithFilter +{ +public: + std::string getName() const override { return "ReadFromSystemDetachedTables"; } + void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; + + 
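+    /// The pushed-down WHERE clause is handled in applyFilters(): databases are filtered first,
+    /// then the candidate detached tables, and initializePipeline() builds the source from both lists.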
ReadFromSystemDetachedTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_); + + void applyFilters(ActionDAGNodes added_filter_nodes) override; + +private: + std::vector columns_mask; + size_t max_block_size; + + ColumnPtr filtered_databases_column; + ColumnPtr filtered_tables_column; +}; + StorageSystemDetachedTables::StorageSystemDetachedTables(const StorageID & table_id_) : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; auto description = ColumnsDescription{ ColumnDescription{"database", std::make_shared(), "The name of the database the table is in."}, - ColumnDescription{"name", std::make_shared(), "Table name."}, + ColumnDescription{"table", std::make_shared(), "Table name."}, ColumnDescription{"uuid", std::make_shared(), "Table uuid (Atomic database)."}, ColumnDescription{"metadata_path", std::make_shared(), "Path to the table metadata in the file system."}, ColumnDescription{"is_permanently", std::make_shared(), "Table was detached permanently."}, @@ -48,9 +252,47 @@ void StorageSystemDetachedTables::read( auto [columns_mask, res_block] = getQueriedColumnsMaskAndHeader(sample_block, column_names); - auto reading = std::make_unique( - column_names, query_info, storage_snapshot, context, std::move(res_block), std::move(columns_mask), max_block_size, true); + auto reading = std::make_unique( + column_names, query_info, storage_snapshot, context, std::move(res_block), std::move(columns_mask), max_block_size); query_plan.addStep(std::move(reading)); } + +ReadFromSystemDetachedTables::ReadFromSystemDetachedTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_) + : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) +{ +} + +void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_nodes) +{ + SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); + + const ActionsDAG::Node * predicate = nullptr; + if (filter_actions_dag) + predicate = filter_actions_dag->getOutputs().at(0); + + filtered_databases_column = getFilteredDatabases(predicate, context); + filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); +} + +void ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + auto pipe = Pipe(std::make_shared( + std::move(columns_mask), + getOutputStream().header, + max_block_size, + std::move(filtered_databases_column), + std::move(filtered_tables_column), + context)); + pipeline.init(std::move(pipe)); +} } diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 5a8d6315d57..783b899c978 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -1,31 +1,29 @@ -#include "StorageSystemTables.h" - -#include #include -#include +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include #include #include #include +#include +#include +#include +#include +#include 
#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include @@ -34,7 +32,8 @@ namespace DB { -StorageSystemTables::StorageSystemTables(const StorageID & table_id_) : IStorage(table_id_) +StorageSystemTables::StorageSystemTables(const StorageID & table_id_) + : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; @@ -47,13 +46,9 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) : IStorage {"data_paths", std::make_shared(std::make_shared()), "Paths to the table data in the file systems."}, {"metadata_path", std::make_shared(), "Path to the table metadata in the file system."}, {"metadata_modification_time", std::make_shared(), "Time of latest modification of the table metadata."}, - {"metadata_version", - std::make_shared(), - "Metadata version for ReplicatedMergeTree table, 0 for non ReplicatedMergeTree table."}, + {"metadata_version", std::make_shared(), "Metadata version for ReplicatedMergeTree table, 0 for non ReplicatedMergeTree table."}, {"dependencies_database", std::make_shared(std::make_shared()), "Database dependencies."}, - {"dependencies_table", - std::make_shared(std::make_shared()), - "Table dependencies (materialized views the current table)."}, + {"dependencies_table", std::make_shared(std::make_shared()), "Table dependencies (materialized views the current table)."}, {"create_table_query", std::make_shared(), "The query that was used to create the table."}, {"engine_full", std::make_shared(), "Parameters of the table engine."}, {"as_select", std::make_shared(), "SELECT query for view."}, @@ -62,54 +57,676 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) : IStorage {"primary_key", std::make_shared(), "The primary key expression specified in the table."}, {"sampling_key", std::make_shared(), "The sampling key expression specified in the table."}, {"storage_policy", std::make_shared(), "The storage policy."}, - {"total_rows", - std::make_shared(std::make_shared()), - "Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise NULL (including " - "underlying Buffer table)."}, - {"total_bytes", - std::make_shared(std::make_shared()), - "Total number of bytes, if it is possible to quickly determine exact number " - "of bytes for the table on storage, otherwise NULL (does not includes any underlying storage). " - "If the table stores data on disk, returns used space on disk (i.e. compressed). " - "If the table stores data in memory, returns approximated number of used bytes in memory."}, - {"total_bytes_uncompressed", - std::make_shared(std::make_shared()), - "Total number of uncompressed bytes, if it's possible to quickly determine the exact number " - "of bytes from the part checksums for the table on storage, otherwise NULL (does not take underlying storage (if any) into " - "account)."}, + {"total_rows", std::make_shared(std::make_shared()), + "Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise NULL (including underlying Buffer table)." + }, + {"total_bytes", std::make_shared(std::make_shared()), + "Total number of bytes, if it is possible to quickly determine exact number " + "of bytes for the table on storage, otherwise NULL (does not includes any underlying storage). " + "If the table stores data on disk, returns used space on disk (i.e. compressed). " + "If the table stores data in memory, returns approximated number of used bytes in memory." 
+ }, + {"total_bytes_uncompressed", std::make_shared(std::make_shared()), + "Total number of uncompressed bytes, if it's possible to quickly determine the exact number " + "of bytes from the part checksums for the table on storage, otherwise NULL (does not take underlying storage (if any) into account)." + }, {"parts", std::make_shared(std::make_shared()), "The total number of parts in this table."}, - {"active_parts", - std::make_shared(std::make_shared()), - "The number of active parts in this table."}, - {"total_marks", - std::make_shared(std::make_shared()), - "The total number of marks in all parts in this table."}, - {"lifetime_rows", - std::make_shared(std::make_shared()), - "Total number of rows INSERTed since server start (only for Buffer tables)."}, - {"lifetime_bytes", - std::make_shared(std::make_shared()), - "Total number of bytes INSERTed since server start (only for Buffer tables)."}, + {"active_parts", std::make_shared(std::make_shared()), "The number of active parts in this table."}, + {"total_marks", std::make_shared(std::make_shared()), "The total number of marks in all parts in this table."}, + {"lifetime_rows", std::make_shared(std::make_shared()), + "Total number of rows INSERTed since server start (only for Buffer tables)." + }, + {"lifetime_bytes", std::make_shared(std::make_shared()), + "Total number of bytes INSERTed since server start (only for Buffer tables)." + }, {"comment", std::make_shared(), "The comment for the table."}, - {"has_own_data", - std::make_shared(), - "Flag that indicates whether the table itself stores some data on disk or only accesses some other source."}, - {"loading_dependencies_database", - std::make_shared(std::make_shared()), - "Database loading dependencies (list of objects which should be loaded before the current object)."}, - {"loading_dependencies_table", - std::make_shared(std::make_shared()), - "Table loading dependencies (list of objects which should be loaded before the current object)."}, - {"loading_dependent_database", std::make_shared(std::make_shared()), "Dependent loading database."}, - {"loading_dependent_table", std::make_shared(std::make_shared()), "Dependent loading table."}, + {"has_own_data", std::make_shared(), + "Flag that indicates whether the table itself stores some data on disk or only accesses some other source." + }, + {"loading_dependencies_database", std::make_shared(std::make_shared()), + "Database loading dependencies (list of objects which should be loaded before the current object)." + }, + {"loading_dependencies_table", std::make_shared(std::make_shared()), + "Table loading dependencies (list of objects which should be loaded before the current object)." + }, + {"loading_dependent_database", std::make_shared(std::make_shared()), + "Dependent loading database." + }, + {"loading_dependent_table", std::make_shared(std::make_shared()), + "Dependent loading table." 
+ }, }; - description.setAliases({{"table", std::make_shared(), "name"}}); + description.setAliases({ + {"table", std::make_shared(), "name"} + }); storage_metadata.setColumns(std::move(description)); setInMemoryMetadata(storage_metadata); } + +namespace +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + MutableColumnPtr column = ColumnString::create(); + + const auto databases = DatabaseCatalog::instance().getDatabases(); + for (const auto & database_name : databases | boost::adaptors::map_keys) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; /// We don't want to show the internal database for temporary tables in system.tables + + column->insert(database_name); + } + + Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; + VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + return block.getByPosition(0).column; +} + +ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) +{ + Block sample { + ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), + ColumnWithTypeAndName(nullptr, std::make_shared(), "engine") + }; + + MutableColumnPtr database_column = ColumnString::create(); + MutableColumnPtr engine_column; + + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); + if (dag) + { + bool filter_by_engine = false; + for (const auto * input : dag->getInputs()) + if (input->result_name == "engine") + filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->name()); + if (engine_column) + engine_column->insert(table_it->table()->getName()); + } + } + + Block block {ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +/// Avoid heavy operation on tables if we only queried columns that we can get without table object. +/// Otherwise it will require table initialization for Lazy database. 
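+/// For example, `SELECT name, uuid FROM system.tables` only touches columns from the set below,
+/// so for a Lazy database no table object has to be instantiated.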
+bool needTable(const DatabasePtr & database, const Block & header) +{ + if (database->getEngineName() != "Lazy") + return true; + + static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; + for (const auto & column : header.getColumnsWithTypeAndName()) + { + if (columns_without_table.find(column.name) == columns_without_table.end()) + return true; + } + return false; +} + + +class TablesBlockSource : public ISource +{ +public: + TablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = tables_->size(); + tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + tables.insert(tables_->getDataAt(idx).toString()); + } + + String getName() const override { return "Tables"; } + +protected: + Chunk generate() override + { + if (done) + return {}; + + MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + size_t rows_count = 0; + while (rows_count < max_block_size) + { + if (tables_it && !tables_it->isValid()) + ++database_idx; + + while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + { + /// Database was deleted just now or the user has no access. + ++database_idx; + continue; + } + + break; + } + + /// This is for temporary tables. They are output in single block regardless to max_block_size. + if (database_idx >= databases->size()) + { + if (context->hasSessionContext()) + { + Tables external_tables = context->getSessionContext()->getExternalTables(); + + for (auto & table : external_tables) + { + size_t src_index = 0; + size_t res_index = 0; + + // database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // name + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.first); + + // uuid + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getStorageID().uuid); + + // engine + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + // is_temporary + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(1u); + + // data_paths + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_path + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_modification_time + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_version + // Temporary tables does not support replication + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_table + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // create_table_query + if (columns_mask[src_index++]) + { + auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); + ASTPtr ast = temp_db ? 
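+                            /// The internal database for temporary tables may be unavailable, hence the
+                            /// guard; without a CREATE query an empty string is reported below.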
temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + } + + // engine_full + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + const auto & settings = context->getSettingsRef(); + while (src_index < columns_mask.size()) + { + // total_rows + if (src_index == 19 && columns_mask[src_index]) + { + if (auto total_rows = table.second->totalRows(settings)) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + // total_bytes + else if (src_index == 20 && columns_mask[src_index]) + { + if (auto total_bytes = table.second->totalBytes(settings)) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + /// Fill the rest columns with defaults + else if (columns_mask[src_index]) + res_columns[res_index++]->insertDefault(); + src_index++; + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + done = true; + return Chunk(std::move(res_columns), num_rows); + } + + const bool need_to_check_access_for_tables = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!tables_it || !tables_it->isValid()) + tables_it = database->getTablesIterator(context); + + const bool need_table = needTable(database, getPort().getHeader()); + + for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) + { + auto table_name = tables_it->name(); + if (!tables.contains(table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) + continue; + + StoragePtr table = nullptr; + TableLockHolder lock; + if (need_table) + { + table = tables_it->table(); + if (!table) + // Table might have just been removed or detached for Lazy engine (see DatabaseLazy::tryGetTable()) + continue; + + /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) + /// and it's not protected internally by other mutexes + static const size_t DATA_PATHS_INDEX = 5; + if (columns_mask[DATA_PATHS_INDEX]) + { + lock = table->tryLockForShare(context->getCurrentQueryId(), + context->getSettingsRef().lock_acquire_timeout); + if (!lock) + // Table was dropped while acquiring the lock, skipping table + continue; + } + } + ++rows_count; + + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(tables_it->uuid()); + + if (columns_mask[src_index++]) + { + chassert(table != nullptr); + res_columns[res_index++]->insert(table->getName()); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(0u); // is_temporary + + if (columns_mask[src_index++]) + { + chassert(lock != nullptr); + Array table_paths_array; + auto paths = table->getDataPaths(); + table_paths_array.reserve(paths.size()); + for (const String & path : paths) + table_paths_array.push_back(path); + res_columns[res_index++]->insert(table_paths_array); + /// We don't need the lock anymore + lock = nullptr; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); + + if (columns_mask[src_index++]) + 
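+                /// metadata_modification_time: the database engine reports it as time_t,
+                /// cast here for the DateTime column.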
res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); + + StorageMetadataPtr metadata_snapshot; + if (table) + metadata_snapshot = table->getInMemoryMetadataPtr(); + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && table->supportsReplication()) + res_columns[res_index++]->insert(metadata_snapshot->metadata_version); + else + res_columns[res_index++]->insertDefault(); + } + + { + Array views_table_name_array; + Array views_database_name_array; + if (columns_mask[src_index] || columns_mask[src_index + 1]) + { + const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); + + views_table_name_array.reserve(view_ids.size()); + views_database_name_array.reserve(view_ids.size()); + for (const auto & view_id : view_ids) + { + views_table_name_array.push_back(view_id.table_name); + views_database_name_array.push_back(view_id.database_name); + } + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_database_name_array); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_table_name_array); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) + { + ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); + auto * ast_create = ast ? ast->as() : nullptr; + + if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) + { + ast_create->uuid = UUIDHelpers::Nil; + ast_create->to_inner_uuid = UUIDHelpers::Nil; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + + if (columns_mask[src_index++]) + { + String engine_full; + + if (ast_create && ast_create->storage) + { + engine_full = format({context, *ast_create->storage}); + + static const char * const extra_head = " ENGINE = "; + if (startsWith(engine_full, extra_head)) + engine_full = engine_full.substr(strlen(extra_head)); + } + + res_columns[res_index++]->insert(engine_full); + } + + if (columns_mask[src_index++]) + { + String as_select; + if (ast_create && ast_create->select) + as_select = format({context, *ast_create->select}); + res_columns[res_index++]->insert(as_select); + } + } + else + src_index += 3; + + ASTPtr expression_ptr; + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto policy = table ? 
table->getStoragePolicy() : nullptr; + if (policy) + res_columns[res_index++]->insert(policy->getName()); + else + res_columns[res_index++]->insertDefault(); + } + + auto settings = context->getSettingsRef(); + settings.select_sequential_consistency = 0; + if (columns_mask[src_index++]) + { + auto total_rows = table ? table->totalRows(settings) : std::nullopt; + if (total_rows) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes = table->totalBytes(settings); + if (total_bytes) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); + if (total_bytes_uncompressed) + res_columns[res_index++]->insert(*total_bytes_uncompressed); + else + res_columns[res_index++]->insertDefault(); + } + + auto table_merge_tree = std::dynamic_pointer_cast(table); + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + { + res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); + } + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_rows = table ? table->lifetimeRows() : std::nullopt; + if (lifetime_rows) + res_columns[res_index++]->insert(*lifetime_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_bytes = table ? 
table->lifetimeBytes() : std::nullopt; + if (lifetime_bytes) + res_columns[res_index++]->insert(*lifetime_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot) + res_columns[res_index++]->insert(metadata_snapshot->comment); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table) + res_columns[res_index++]->insert(table->storesDataOnDisk()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) + { + auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); + auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); + + Array dependencies_databases; + Array dependencies_tables; + dependencies_databases.reserve(dependencies.size()); + dependencies_tables.reserve(dependencies.size()); + for (const auto & dependency : dependencies) + { + dependencies_databases.push_back(dependency.database_name); + dependencies_tables.push_back(dependency.table_name); + } + + Array dependents_databases; + Array dependents_tables; + dependents_databases.reserve(dependents.size()); + dependents_tables.reserve(dependents.size()); + for (const auto & dependent : dependents) + { + dependents_databases.push_back(dependent.database_name); + dependents_tables.push_back(dependent.table_name); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_tables); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_tables); + + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + return Chunk(std::move(res_columns), num_rows); + } +private: + std::vector columns_mask; + UInt64 max_block_size; + ColumnPtr databases; + NameSet tables; + size_t database_idx = 0; + DatabaseTablesIteratorPtr tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; +}; + +} + +class ReadFromSystemTables : public SourceStepWithFilter +{ +public: + std::string getName() const override { return "ReadFromSystemTables"; } + void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; + + ReadFromSystemTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_) + : SourceStepWithFilter( + DataStream{.header = std::move(sample_block)}, + column_names_, + query_info_, + storage_snapshot_, + context_) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + { + } + + void applyFilters(ActionDAGNodes added_filter_nodes) override; + +private: + std::vector columns_mask; + size_t max_block_size; + + ColumnPtr filtered_databases_column; + ColumnPtr filtered_tables_column; +}; + void StorageSystemTables::read( QueryPlan & query_plan, const Names & column_names, @@ -126,8 +743,28 @@ void StorageSystemTables::read( auto [columns_mask, res_block] = getQueriedColumnsMaskAndHeader(sample_block, column_names); auto reading = std::make_unique( - column_names, query_info, storage_snapshot, 
context, std::move(res_block), std::move(columns_mask), max_block_size, false); + column_names, query_info, storage_snapshot, context, std::move(res_block), std::move(columns_mask), max_block_size); query_plan.addStep(std::move(reading)); } + +void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) +{ + SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); + + const ActionsDAG::Node * predicate = nullptr; + if (filter_actions_dag) + predicate = filter_actions_dag->getOutputs().at(0); + + filtered_databases_column = getFilteredDatabases(predicate, context); + filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); +} + +void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + Pipe pipe(std::make_shared( + std::move(columns_mask), getOutputStream().header, max_block_size, std::move(filtered_databases_column), std::move(filtered_tables_column), context)); + pipeline.init(std::move(pipe)); +} + } diff --git a/src/Storages/System/TablesBlockSource.cpp b/src/Storages/System/TablesBlockSource.cpp deleted file mode 100644 index 4ea5bb53deb..00000000000 --- a/src/Storages/System/TablesBlockSource.cpp +++ /dev/null @@ -1,569 +0,0 @@ -#include "TablesBlockSource.h" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace -{ - -/// Avoid heavy operation on tables if we only queried columns that we can get without table object. -/// Otherwise it will require table initialization for Lazy database. -bool needTable(const DatabasePtr & database, const Block & header) -{ - if (database->getEngineName() != "Lazy") - return true; - - static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; - for (const auto & column : header.getColumnsWithTypeAndName()) - { - if (columns_without_table.find(column.name) == columns_without_table.end()) - return true; - } - return false; -} - -} - -Chunk TablesBlockSource::generate() -{ - if (done) - return {}; - - MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - size_t rows_count = 0; - while (rows_count < max_block_size) - { - if (tables_it && !tables_it->isValid()) - ++database_idx; - - while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - { - /// Database was deleted just now or the user has no access. - ++database_idx; - continue; - } - - break; - } - - /// This is for temporary tables. They are output in single block regardless to max_block_size. 
- if (database_idx >= databases->size()) - { - if (context->hasSessionContext()) - { - Tables external_tables = context->getSessionContext()->getExternalTables(); - - for (auto & table : external_tables) - { - size_t src_index = 0; - size_t res_index = 0; - - // database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // name - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.first); - - // uuid - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getStorageID().uuid); - - // engine - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - // is_temporary - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(1u); - - // data_paths - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_path - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_modification_time - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_version - // Temporary tables does not support replication - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_table - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // create_table_query - if (columns_mask[src_index++]) - { - auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); - ASTPtr ast - = temp_db ? temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; - res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); - } - - // engine_full - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - const auto & settings = context->getSettingsRef(); - while (src_index < columns_mask.size()) - { - // total_rows - if (src_index == 19 && columns_mask[src_index]) - { - if (auto total_rows = table.second->totalRows(settings)) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - // total_bytes - else if (src_index == 20 && columns_mask[src_index]) - { - if (auto total_bytes = table.second->totalBytes(settings)) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - /// Fill the rest columns with defaults - else if (columns_mask[src_index]) - res_columns[res_index++]->insertDefault(); - src_index++; - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - done = true; - return Chunk(std::move(res_columns), num_rows); - } - - const bool need_to_check_access_for_tables - = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!tables_it || !tables_it->isValid()) - tables_it = database->getTablesIterator(context); - - const bool need_table = needTable(database, getPort().getHeader()); - - for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) - { - auto table_name = tables_it->name(); - if (!tables.contains(table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) - continue; - - StoragePtr table = nullptr; - TableLockHolder lock; - if (need_table) - { - table = tables_it->table(); - if (!table) - // Table might have just been removed or detached for Lazy engine (see 
DatabaseLazy::tryGetTable()) - continue; - - /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) - /// and it's not protected internally by other mutexes - static const size_t DATA_PATHS_INDEX = 5; - if (columns_mask[DATA_PATHS_INDEX]) - { - lock = table->tryLockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); - if (!lock) - // Table was dropped while acquiring the lock, skipping table - continue; - } - } - ++rows_count; - - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(tables_it->uuid()); - - if (columns_mask[src_index++]) - { - chassert(table != nullptr); - res_columns[res_index++]->insert(table->getName()); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(0u); // is_temporary - - if (columns_mask[src_index++]) - { - chassert(lock != nullptr); - Array table_paths_array; - auto paths = table->getDataPaths(); - table_paths_array.reserve(paths.size()); - for (const String & path : paths) - table_paths_array.push_back(path); - res_columns[res_index++]->insert(table_paths_array); - /// We don't need the lock anymore - lock = nullptr; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); - - StorageMetadataPtr metadata_snapshot; - if (table) - metadata_snapshot = table->getInMemoryMetadataPtr(); - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && table->supportsReplication()) - res_columns[res_index++]->insert(metadata_snapshot->metadata_version); - else - res_columns[res_index++]->insertDefault(); - } - - { - Array views_table_name_array; - Array views_database_name_array; - if (columns_mask[src_index] || columns_mask[src_index + 1]) - { - const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); - - views_table_name_array.reserve(view_ids.size()); - views_database_name_array.reserve(view_ids.size()); - for (const auto & view_id : view_ids) - { - views_table_name_array.push_back(view_id.table_name); - views_database_name_array.push_back(view_id.database_name); - } - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_database_name_array); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_table_name_array); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) - { - ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); - auto * ast_create = ast ? ast->as() : nullptr; - - if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) - { - ast_create->uuid = UUIDHelpers::Nil; - ast_create->to_inner_uuid = UUIDHelpers::Nil; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(ast ? 
format({context, *ast}) : ""); - - if (columns_mask[src_index++]) - { - String engine_full; - - if (ast_create && ast_create->storage) - { - engine_full = format({context, *ast_create->storage}); - - static const char * const extra_head = " ENGINE = "; - if (startsWith(engine_full, extra_head)) - engine_full = engine_full.substr(strlen(extra_head)); - } - - res_columns[res_index++]->insert(engine_full); - } - - if (columns_mask[src_index++]) - { - String as_select; - if (ast_create && ast_create->select) - as_select = format({context, *ast_create->select}); - res_columns[res_index++]->insert(as_select); - } - } - else - src_index += 3; - - ASTPtr expression_ptr; - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto policy = table ? table->getStoragePolicy() : nullptr; - if (policy) - res_columns[res_index++]->insert(policy->getName()); - else - res_columns[res_index++]->insertDefault(); - } - - auto settings = context->getSettingsRef(); - settings.select_sequential_consistency = 0; - if (columns_mask[src_index++]) - { - auto total_rows = table ? table->totalRows(settings) : std::nullopt; - if (total_rows) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes = table->totalBytes(settings); - if (total_bytes) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); - if (total_bytes_uncompressed) - res_columns[res_index++]->insert(*total_bytes_uncompressed); - else - res_columns[res_index++]->insertDefault(); - } - - auto table_merge_tree = std::dynamic_pointer_cast(table); - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - { - res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); - } - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_rows = table ? 
table->lifetimeRows() : std::nullopt; - if (lifetime_rows) - res_columns[res_index++]->insert(*lifetime_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_bytes = table ? table->lifetimeBytes() : std::nullopt; - if (lifetime_bytes) - res_columns[res_index++]->insert(*lifetime_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot) - res_columns[res_index++]->insert(metadata_snapshot->comment); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table) - res_columns[res_index++]->insert(table->storesDataOnDisk()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) - { - auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); - auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); - - Array dependencies_databases; - Array dependencies_tables; - dependencies_databases.reserve(dependencies.size()); - dependencies_tables.reserve(dependencies.size()); - for (const auto & dependency : dependencies) - { - dependencies_databases.push_back(dependency.database_name); - dependencies_tables.push_back(dependency.table_name); - } - - Array dependents_databases; - Array dependents_tables; - dependents_databases.reserve(dependents.size()); - dependents_tables.reserve(dependents.size()); - for (const auto & dependent : dependents) - { - dependents_databases.push_back(dependent.database_name); - dependents_tables.push_back(dependent.table_name); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_tables); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_tables); - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - return Chunk(std::move(res_columns), num_rows); -} - -Chunk DetachedTablesBlockSource::generate() -{ - if (done) - return {}; - - MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - { - LOG_DEBUG(lg, "Database was deleted just now or the user has no access"); - continue; - } - - const bool need_to_check_access_for_tables - = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!detached_tables_it || !detached_tables_it->isValid()) - detached_tables_it = database->getDetachedTablesIterator(context, {}, false); - - for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) - { - const auto detached_table_name = detached_tables_it->table(); - LOG_DEBUG(lg, "detached_table_name={}", detached_table_name); - - if (!detached_tables.contains(detached_table_name)) - continue; - - if 
(need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) - continue; - - fillResultColumnsByDetachedTableIterator(result_columns); - ++rows_count; - } - } - - const UInt64 num_rows = result_columns.at(0)->size(); - done = true; - return Chunk(std::move(result_columns), num_rows); -} - -void DetachedTablesBlockSource::fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const -{ - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->database()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->table()); - - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->uuid()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->metadataPath()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->isPermanently()); -} -} diff --git a/src/Storages/System/TablesBlockSource.h b/src/Storages/System/TablesBlockSource.h deleted file mode 100644 index 9698fdd494c..00000000000 --- a/src/Storages/System/TablesBlockSource.h +++ /dev/null @@ -1,96 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace DB -{ - -class TablesBlockSource : public ISource -{ -public: - TablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = tables_->size(); - tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - tables.insert(tables_->getDataAt(idx).toString()); - } - - String getName() const override { return "Tables"; } - -protected: - Chunk generate() override; - -private: - std::vector columns_mask; - UInt64 max_block_size; - ColumnPtr databases; - NameSet tables; - size_t database_idx = 0; - DatabaseTablesIteratorPtr tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; -}; - -class DetachedTablesBlockSource : public ISource -{ -public: - DetachedTablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr detached_tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = detached_tables_->size(); - detached_tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - { - detached_tables.insert(detached_tables_->getDataAt(idx).toString()); - } - } - - String getName() const override { return "DetachedTables"; } - -protected: - Chunk generate() override; - -private: - const std::vector columns_mask; - const UInt64 max_block_size; - const ColumnPtr databases; - NameSet detached_tables; - DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; - - // temp log for debug - LoggerPtr lg = getLogger("DetachedTablesBlockSource"); - - void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const; -}; -} diff --git 
a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py index 2e0165deba2..02734d8bf14 100644 --- a/tests/integration/test_system_detached_tables/test.py +++ b/tests/integration/test_system_detached_tables/test.py @@ -24,15 +24,15 @@ def test_system_detached_tables(): node.query("DETACH TABLE test_table") node.query("DETACH TABLE test_table_perm PERMANENTLY") - result = node.query("SELECT name FROM system.detached_tables") + result = node.query("SELECT table FROM system.detached_tables") assert result == "test_table\ntest_table_perm\n" node.restart_clickhouse() - result = node.query("SELECT name FROM system.detached_tables") + result = node.query("SELECT table FROM system.detached_tables") assert result == "test_table_perm\n" node.restart_clickhouse() - result = node.query("SELECT name FROM system.detached_tables") + result = node.query("SELECT table FROM system.detached_tables") assert result == "test_table_perm\n" diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql index 13465d02b80..558dfe4cf18 100644 --- a/tests/queries/0_stateless/03172_system_detached_tables.sql +++ b/tests/queries/0_stateless/03172_system_detached_tables.sql @@ -7,7 +7,7 @@ CREATE TABLE test_system_detached_tables.test_table (n Int64) ENGINE=MergeTree O SELECT * FROM system.detached_tables; DETACH TABLE test_system_detached_tables.test_table; -SELECT database, name, is_permanently FROM system.detached_tables; +SELECT database, table, is_permanently FROM system.detached_tables; ATTACH TABLE test_system_detached_tables.test_table; @@ -15,12 +15,12 @@ CREATE TABLE test_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeT SELECT * FROM system.detached_tables; DETACH TABLE test_system_detached_tables.test_table_perm PERMANENTLY; -SELECT database, name, is_permanently FROM system.detached_tables; +SELECT database, table, is_permanently FROM system.detached_tables; DETACH TABLE test_system_detached_tables.test_table SYNC; -SELECT database, name, is_permanently FROM system.detached_tables; +SELECT database, table, is_permanently FROM system.detached_tables; -SELECT database, name, is_permanently FROM system.detached_tables WHERE name='test_table'; +SELECT database, table, is_permanently FROM system.detached_tables WHERE table='test_table'; DROP DATABASE test_system_detached_tables; From 00f74905a769d06a451c6472a0f461b25266b3be Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Mon, 24 Jun 2024 09:07:23 +0000 Subject: [PATCH 04/80] small up --- src/Databases/DatabaseOrdinary.cpp | 5 ++++- src/Databases/IDatabase.h | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 7086f6e628e..3975eb46a4b 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -244,7 +244,10 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables SnapshotDetachedTable snapshot_detached_table; snapshot_detached_table.database = storage_id.getDatabaseName(); snapshot_detached_table.table = detached_table_name; - snapshot_detached_table.uuid = storage_id.uuid; + if (storage_id.hasUUID()) + { + snapshot_detached_table.uuid = storage_id.uuid; + } snapshot_detached_table.is_permanently = true; snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table); diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index 
2ce4650798c..76d25908fda 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -286,7 +286,7 @@ public: virtual DatabaseDetachedTablesSnapshotIteratorPtr getDetachedTablesIterator( ContextPtr /*context*/, const FilterByNameFunction & /*filter_by_table_name = {}*/, bool /*skip_not_loaded = false*/) const { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "There is no get detached tables for Database{}", getEngineName()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "There is no get detached tables for Database {}", getEngineName()); } /// Returns list of table names. From c7dc99757afec4dbe4ac5b1bcfab037e141839d8 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Wed, 26 Jun 2024 15:39:16 +0000 Subject: [PATCH 05/80] apply some comments --- src/Databases/DatabaseOrdinary.cpp | 31 ++++-------- src/Databases/DatabasesCommon.cpp | 48 +++++++------------ src/Databases/IDatabase.h | 4 +- .../test_system_detached_tables/test.py | 22 +++++---- 4 files changed, 42 insertions(+), 63 deletions(-) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 3975eb46a4b..ef0c981ddad 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -227,31 +227,18 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables permanently_detached_tables.push_back(table_name); LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name)); - auto parsed_table_metadata = ParsedTableMetadata{full_path.string(), ast}; - const auto & query = parsed_table_metadata.ast->as(); - std::lock_guard lock(mutex); - auto [detached_table_name, table] = createTableFromAST( - query, - database_name, - getTableDataPath(query), - std::const_pointer_cast(local_context), - LoadingStrictnessLevel::CREATE); + const auto detached_table_name = create_query->getTable(); - const auto storage_id = table->getStorageID(); - - SnapshotDetachedTable snapshot_detached_table; - snapshot_detached_table.database = storage_id.getDatabaseName(); - snapshot_detached_table.table = detached_table_name; - if (storage_id.hasUUID()) - { - snapshot_detached_table.uuid = storage_id.uuid; - } - snapshot_detached_table.is_permanently = true; - snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table); - - snapshot_detached_tables.emplace(detached_table_name, std::move(snapshot_detached_table)); + snapshot_detached_tables.emplace( + detached_table_name, + SnapshotDetachedTable{ + .database = create_query->getDatabase(), + .table = detached_table_name, + .uuid = create_query->uuid, + .metadata_path = getObjectMetadataPath(detached_table_name), + .is_permanently = true}); LOG_TRACE(log, "Add permanently detached table {} to system.detached_tables", detached_table_name); return; diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 3b54980597c..470625ddd8b 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -2,6 +2,10 @@ #include #include +#include +#include +#include +#include #include #include #include @@ -13,10 +17,6 @@ #include #include #include -#include -#include -#include -#include namespace DB @@ -245,26 +245,15 @@ DatabaseDetachedTablesSnapshotIteratorPtr DatabaseWithOwnTablesBase::getDetached if (!filter_by_table_name) return std::make_unique(snapshot_detached_tables); - SnapshotDetachedTables filtered_tables; - for (const auto & [table_name, storage] : tables) - if (filter_by_table_name(table_name)) + 
SnapshotDetachedTables filtered_detached_tables;
+    for (const auto & [detached_table_name, snapshot] : snapshot_detached_tables)
+        if (filter_by_table_name(detached_table_name))
         {
-            SnapshotDetachedTable snapshot_detached_table;
-            snapshot_detached_table.database = storage->getStorageID().getDatabaseName();
-            snapshot_detached_table.table = table_name;
-            if (storage->getStorageID().hasUUID())
-            {
-                snapshot_detached_table.uuid = storage->getStorageID().uuid;
-            }
-
-            snapshot_detached_table.is_permanently = false;
-            snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table);
-
-            filtered_tables.emplace(table_name, std::move(snapshot_detached_table));
+            filtered_detached_tables.emplace(detached_table_name, snapshot);
         }
 
-    return std::make_unique<DatabaseDetachedTablesSnapshotIterator>(std::move(filtered_tables));
+    return std::make_unique<DatabaseDetachedTablesSnapshotIterator>(std::move(filtered_detached_tables));
 }
 
 bool DatabaseWithOwnTablesBase::empty() const
@@ -289,17 +278,14 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
             backQuote(database_name), backQuote(table_name));
 
     res = it->second;
-    SnapshotDetachedTable snapshot_detached_table;
-    snapshot_detached_table.database = it->second->getStorageID().getDatabaseName();
-    snapshot_detached_table.table = it->first;
-    if (it->second->getStorageID().hasUUID())
-    {
-        snapshot_detached_table.uuid = it->second->getStorageID().uuid;
-    }
-    snapshot_detached_table.is_permanently = false;
-    snapshot_detached_table.metadata_path = getObjectMetadataPath(snapshot_detached_table.table);
-
-    snapshot_detached_tables.emplace(it->first, std::move(snapshot_detached_table));
+    snapshot_detached_tables.emplace(
+        it->first,
+        SnapshotDetachedTable{
+            .database = it->second->getStorageID().getDatabaseName(),
+            .table = it->first,
+            .uuid = it->second->getStorageID().uuid,
+            .metadata_path = getObjectMetadataPath(it->first),
+            .is_permanently = false});
 
     tables.erase(it);
     res->is_detached = true;
diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h
index 76d25908fda..e856dd3d2f0 100644
--- a/src/Databases/IDatabase.h
+++ b/src/Databases/IDatabase.h
@@ -118,7 +118,7 @@ struct SnapshotDetachedTable final
     String table;
     UUID uuid = UUIDHelpers::Nil;
     String metadata_path;
-    bool is_permanently{false};
+    bool is_permanently{};
 };
 
 class DatabaseDetachedTablesSnapshotIterator
@@ -286,7 +286,7 @@ public:
     virtual DatabaseDetachedTablesSnapshotIteratorPtr getDetachedTablesIterator(
         ContextPtr /*context*/, const FilterByNameFunction & /*filter_by_table_name = {}*/, bool /*skip_not_loaded = false*/) const
     {
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "There is no get detached tables for Database {}", getEngineName());
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get detached tables for Database {}", getEngineName());
     }
 
     /// Returns list of table names.
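
Note on the interface above: the detached-tables iterator deliberately mirrors the existing `getTablesIterator` contract. A minimal consumer sketch, illustrative only and assuming nothing beyond the accessors visible in this series (`database()`, `table()`, `uuid()`, `metadataPath()`, `isPermanently()`):

```cpp
// Illustrative sketch, not part of the patch: walk one database's detached
// tables through the new iterator and log each snapshot entry.
void logDetachedTables(const DatabasePtr & database, ContextPtr context)
{
    /// Empty name filter; do not skip not-yet-loaded tables.
    auto it = database->getDetachedTablesIterator(context, {}, /*skip_not_loaded*/ false);
    for (; it->isValid(); it->next())
        LOG_DEBUG(
            getLogger("DetachedTables"),
            "{}.{} uuid={} permanently_detached={} metadata={}",
            it->database(), it->table(), toString(it->uuid()), it->isPermanently(), it->metadataPath());
}
```
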
diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py index 02734d8bf14..bbac5e391f8 100644 --- a/tests/integration/test_system_detached_tables/test.py +++ b/tests/integration/test_system_detached_tables/test.py @@ -18,21 +18,27 @@ def test_system_detached_tables(): node.query("CREATE TABLE test_table (n Int64) ENGINE=MergeTree ORDER BY n;") node.query("CREATE TABLE test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n;") - result = node.query("SELECT * FROM system.detached_tables") - assert result == "" + test_table_uuid = node.query("SELECT uuid FROM system.tables WHERE table='test_table'").rstrip('\n') + test_table_metadata_path = node.query("SELECT metadata_path FROM system.tables WHERE table='test_table'").rstrip('\n') + + test_table_perm_uuid = node.query("SELECT uuid FROM system.tables WHERE table='test_table_perm'").rstrip('\n') + test_table_perm_metadata_path = node.query("SELECT metadata_path FROM system.tables WHERE table='test_table_perm'").rstrip('\n') + + assert "" == node.query("SELECT * FROM system.detached_tables") node.query("DETACH TABLE test_table") node.query("DETACH TABLE test_table_perm PERMANENTLY") - result = node.query("SELECT table FROM system.detached_tables") - assert result == "test_table\ntest_table_perm\n" + querry = "SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables FORMAT Values" + result = node.query(querry) + assert result == f"('default','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" node.restart_clickhouse() - result = node.query("SELECT table FROM system.detached_tables") - assert result == "test_table_perm\n" + result = node.query(querry) + assert result == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" node.restart_clickhouse() - result = node.query("SELECT table FROM system.detached_tables") - assert result == "test_table_perm\n" + result = node.query(querry) + assert result == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" From de8f1fc6e3adb6c8f346b2fda4e34c2240e1bf8e Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Thu, 27 Jun 2024 07:32:56 +0000 Subject: [PATCH 06/80] general logic for tables --- src/Storages/System/ReadFromSystemTables.cpp | 157 ++++ src/Storages/System/ReadFromSystemTables.h | 66 ++ .../System/StorageSystemDetachedTables.cpp | 237 +------ src/Storages/System/StorageSystemTables.cpp | 669 +----------------- src/Storages/System/TablesBlockSource.cpp | 564 +++++++++++++++ src/Storages/System/TablesBlockSource.h | 92 +++ 6 files changed, 894 insertions(+), 891 deletions(-) create mode 100644 src/Storages/System/ReadFromSystemTables.cpp create mode 100644 src/Storages/System/ReadFromSystemTables.h create mode 100644 src/Storages/System/TablesBlockSource.cpp create mode 100644 src/Storages/System/TablesBlockSource.h diff --git a/src/Storages/System/ReadFromSystemTables.cpp b/src/Storages/System/ReadFromSystemTables.cpp new file mode 100644 index 00000000000..638cab1b63b --- /dev/null +++ b/src/Storages/System/ReadFromSystemTables.cpp @@ -0,0 +1,157 @@ +#include "ReadFromSystemTables.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + 
+    MutableColumnPtr column = ColumnString::create();
+
+    const auto databases = DatabaseCatalog::instance().getDatabases();
+    for (const auto & database_name : databases | boost::adaptors::map_keys)
+    {
+        if (database_name == DatabaseCatalog::TEMPORARY_DATABASE)
+            continue; /// We don't want to show the internal database for temporary tables in system.tables
+
+        column->insert(database_name);
+    }
+
+    Block block{ColumnWithTypeAndName(std::move(column), std::make_shared<DataTypeString>(), "database")};
+    VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context);
+    return block.getByPosition(0).column;
+}
+
+ColumnPtr getFilteredTables(
+    const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, const bool need_detached_tables)
+{
+    Block sample{
+        ColumnWithTypeAndName(nullptr, std::make_shared<DataTypeString>(), "name"),
+        ColumnWithTypeAndName(nullptr, std::make_shared<DataTypeString>(), "engine")};
+
+    MutableColumnPtr database_column = ColumnString::create();
+    MutableColumnPtr engine_column;
+
+    auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample);
+    if (dag)
+    {
+        bool filter_by_engine = false;
+        for (const auto * input : dag->getInputs())
+            if (input->result_name == "engine")
+                filter_by_engine = true;
+
+        if (filter_by_engine)
+            engine_column = ColumnString::create();
+    }
+
+    for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx)
+    {
+        const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString();
+        DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name);
+        if (!database)
+            continue;
+
+        if (need_detached_tables)
+        {
+            auto table_it = database->getDetachedTablesIterator(context, {}, false);
+            for (; table_it->isValid(); table_it->next())
+            {
+                database_column->insert(table_it->table());
+            }
+        }
+        else
+        {
+            auto table_it = database->getTablesIterator(context);
+            for (; table_it->isValid(); table_it->next())
+            {
+                database_column->insert(table_it->name());
+                if (engine_column)
+                    engine_column->insert(table_it->table()->getName());
+            }
+        }
+    }
+
+    Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared<DataTypeString>(), "name")};
+    if (engine_column)
+        block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared<DataTypeString>(), "engine"));
+
+    if (dag)
+        VirtualColumnUtils::filterBlockWithDAG(dag, block, context);
+
+    return block.getByPosition(0).column;
+}
+
+}
+
+ReadFromSystemTables::ReadFromSystemTables(
+    const Names & column_names_,
+    const SelectQueryInfo & query_info_,
+    const StorageSnapshotPtr & storage_snapshot_,
+    const ContextPtr & context_,
+    Block sample_block,
+    std::vector<UInt8> columns_mask_,
+    size_t max_block_size_)
+    : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_)
+    , columns_mask(std::move(columns_mask_))
+    , max_block_size(max_block_size_)
+{
+}
+
+void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes)
+{
+    applyFiltersImpl(std::move(added_filter_nodes), false);
+}
+
+void ReadFromSystemTables::applyFiltersImpl(ActionDAGNodes added_filter_nodes, const bool need_detached_tables_)
+{
+    SourceStepWithFilter::applyFilters(std::move(added_filter_nodes));
+
+    const ActionsDAG::Node * predicate = nullptr;
+    if (filter_actions_dag)
+        predicate = filter_actions_dag->getOutputs().at(0);
+
+    filtered_databases_column = getFilteredDatabases(predicate, context);
+    filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context, need_detached_tables_);
+}
+
+void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
+{
+    pipeline.init(createPipe<TablesBlockSource>());
+}
+
+ReadFromSystemDetachedTables::ReadFromSystemDetachedTables(
+    const Names & column_names_,
+    const SelectQueryInfo & query_info_,
+    const StorageSnapshotPtr & storage_snapshot_,
+    const ContextPtr & context_,
+    Block sample_block,
+    std::vector<UInt8> columns_mask_,
+    const size_t max_block_size_)
+    : ReadFromSystemTables(
+        column_names_, query_info_, storage_snapshot_, context_, std::move(sample_block), std::move(columns_mask_), max_block_size_)
+{
+}
+
+void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_nodes)
+{
+    applyFiltersImpl(std::move(added_filter_nodes), true);
+}
+
+void ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
+{
+    pipeline.init(createPipe<DetachedTablesBlockSource>());
+}
+}
diff --git a/src/Storages/System/ReadFromSystemTables.h b/src/Storages/System/ReadFromSystemTables.h
new file mode 100644
index 00000000000..f8f34363159
--- /dev/null
+++ b/src/Storages/System/ReadFromSystemTables.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#include
+#include
+
+namespace DB
+{
+
+class ReadFromSystemTables : public SourceStepWithFilter
+{
+public:
+    std::string getName() const override { return "ReadFromSystemTables"; }
+    void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
+
+    ReadFromSystemTables(
+        const Names & column_names_,
+        const SelectQueryInfo & query_info_,
+        const StorageSnapshotPtr & storage_snapshot_,
+        const ContextPtr & context_,
+        Block sample_block,
+        std::vector<UInt8> columns_mask_,
+        size_t max_block_size_);
+
+    void applyFilters(ActionDAGNodes added_filter_nodes) override;
+
+protected:
+    void applyFiltersImpl(ActionDAGNodes added_filter_nodes, bool need_detached_tables_);
+
+    template <typename T>
+    Pipe createPipe()
+    {
+        return Pipe(std::make_shared<T>(
+            std::move(columns_mask),
+            getOutputStream().header,
+            max_block_size,
+            std::move(filtered_databases_column),
+            std::move(filtered_tables_column),
+            context));
+    }
+
+private:
+    std::vector<UInt8> columns_mask;
+    size_t max_block_size;
+
+    ColumnPtr filtered_databases_column;
+    ColumnPtr filtered_tables_column;
+};
+
+class ReadFromSystemDetachedTables : public ReadFromSystemTables
+{
+public:
+    std::string getName() const override { return "ReadFromSystemDetachedTables"; }
+    void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
+
+    ReadFromSystemDetachedTables(
+        const Names & column_names_,
+        const SelectQueryInfo & query_info_,
+        const StorageSnapshotPtr & storage_snapshot_,
+        const ContextPtr & context_,
+        Block sample_block,
+        std::vector<UInt8> columns_mask_,
+        size_t max_block_size_);
+
+    void applyFilters(ActionDAGNodes added_filter_nodes) override;
+};
+}
diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp
index 9805fa2a8cf..99c15fdae0e 100644
--- a/src/Storages/System/StorageSystemDetachedTables.cpp
+++ b/src/Storages/System/StorageSystemDetachedTables.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -22,204 +23,6 @@
 namespace DB
 {
 
-namespace
-{
-
-ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context)
-{
-    MutableColumnPtr column = ColumnString::create();
-
-    const auto databases = DatabaseCatalog::instance().getDatabases();
-    for (const auto & database_name :
databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) -{ - Block sample{ - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - auto table_it = database->getDetachedTablesIterator(context, {}, false); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->table()); - } - } - - Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -class DetachedTablesBlockSource : public ISource -{ -public: - DetachedTablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr detached_tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = detached_tables_->size(); - detached_tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - { - detached_tables.insert(detached_tables_->getDataAt(idx).toString()); - } - } - - String getName() const override { return "DetachedTables"; } - -protected: - Chunk generate() override - { - if (done) - return {}; - - MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - continue; - - const bool need_to_check_access_for_tables - = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!detached_tables_it || !detached_tables_it->isValid()) - detached_tables_it = database->getDetachedTablesIterator(context, {}, false); 
- - for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) - { - const auto detached_table_name = detached_tables_it->table(); - - if (!detached_tables.contains(detached_table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) - continue; - - fillResultColumnsByDetachedTableIterator(result_columns); - ++rows_count; - } - } - - const UInt64 num_rows = result_columns.at(0)->size(); - done = true; - return Chunk(std::move(result_columns), num_rows); - } - -private: - const std::vector columns_mask; - const UInt64 max_block_size; - const ColumnPtr databases; - NameSet detached_tables; - DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; - - void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const - { - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->database()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->table()); - - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->uuid()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->metadataPath()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->isPermanently()); - } -}; - -} - -class ReadFromSystemDetachedTables : public SourceStepWithFilter -{ -public: - std::string getName() const override { return "ReadFromSystemDetachedTables"; } - void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemDetachedTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_); - - void applyFilters(ActionDAGNodes added_filter_nodes) override; - -private: - std::vector columns_mask; - size_t max_block_size; - - ColumnPtr filtered_databases_column; - ColumnPtr filtered_tables_column; -}; - StorageSystemDetachedTables::StorageSystemDetachedTables(const StorageID & table_id_) : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; @@ -257,42 +60,4 @@ void StorageSystemDetachedTables::read( query_plan.addStep(std::move(reading)); } - -ReadFromSystemDetachedTables::ReadFromSystemDetachedTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_) - : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) -{ -} - -void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - - const ActionsDAG::Node * predicate = nullptr; - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); - - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); -} - -void 
ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - auto pipe = Pipe(std::make_shared( - std::move(columns_mask), - getOutputStream().header, - max_block_size, - std::move(filtered_databases_column), - std::move(filtered_tables_column), - context)); - pipeline.init(std::move(pipe)); -} } diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 783b899c978..40190f53fd3 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -1,29 +1,30 @@ +#include #include -#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include #include #include #include #include #include -#include +#include +#include +#include +#include +#include +#include #include @@ -105,628 +106,6 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) setInMemoryMetadata(storage_metadata); } - -namespace -{ - -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) -{ - Block sample { - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine") - }; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } - } - - Block block {ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -/// Avoid heavy operation on tables if we only queried columns that we can get without table object. 
-/// Otherwise it will require table initialization for Lazy database. -bool needTable(const DatabasePtr & database, const Block & header) -{ - if (database->getEngineName() != "Lazy") - return true; - - static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; - for (const auto & column : header.getColumnsWithTypeAndName()) - { - if (columns_without_table.find(column.name) == columns_without_table.end()) - return true; - } - return false; -} - - -class TablesBlockSource : public ISource -{ -public: - TablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = tables_->size(); - tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - tables.insert(tables_->getDataAt(idx).toString()); - } - - String getName() const override { return "Tables"; } - -protected: - Chunk generate() override - { - if (done) - return {}; - - MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - size_t rows_count = 0; - while (rows_count < max_block_size) - { - if (tables_it && !tables_it->isValid()) - ++database_idx; - - while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - { - /// Database was deleted just now or the user has no access. - ++database_idx; - continue; - } - - break; - } - - /// This is for temporary tables. They are output in single block regardless to max_block_size. 
- if (database_idx >= databases->size()) - { - if (context->hasSessionContext()) - { - Tables external_tables = context->getSessionContext()->getExternalTables(); - - for (auto & table : external_tables) - { - size_t src_index = 0; - size_t res_index = 0; - - // database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // name - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.first); - - // uuid - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getStorageID().uuid); - - // engine - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - // is_temporary - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(1u); - - // data_paths - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_path - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_modification_time - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_version - // Temporary tables does not support replication - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_table - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // create_table_query - if (columns_mask[src_index++]) - { - auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); - ASTPtr ast = temp_db ? temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; - res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); - } - - // engine_full - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - const auto & settings = context->getSettingsRef(); - while (src_index < columns_mask.size()) - { - // total_rows - if (src_index == 19 && columns_mask[src_index]) - { - if (auto total_rows = table.second->totalRows(settings)) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - // total_bytes - else if (src_index == 20 && columns_mask[src_index]) - { - if (auto total_bytes = table.second->totalBytes(settings)) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - /// Fill the rest columns with defaults - else if (columns_mask[src_index]) - res_columns[res_index++]->insertDefault(); - src_index++; - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - done = true; - return Chunk(std::move(res_columns), num_rows); - } - - const bool need_to_check_access_for_tables = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!tables_it || !tables_it->isValid()) - tables_it = database->getTablesIterator(context); - - const bool need_table = needTable(database, getPort().getHeader()); - - for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) - { - auto table_name = tables_it->name(); - if (!tables.contains(table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) - continue; - - StoragePtr table = nullptr; - TableLockHolder lock; - if (need_table) - { - table = tables_it->table(); - if (!table) - // Table might have just been removed or detached for Lazy engine (see 
DatabaseLazy::tryGetTable()) - continue; - - /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) - /// and it's not protected internally by other mutexes - static const size_t DATA_PATHS_INDEX = 5; - if (columns_mask[DATA_PATHS_INDEX]) - { - lock = table->tryLockForShare(context->getCurrentQueryId(), - context->getSettingsRef().lock_acquire_timeout); - if (!lock) - // Table was dropped while acquiring the lock, skipping table - continue; - } - } - ++rows_count; - - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(tables_it->uuid()); - - if (columns_mask[src_index++]) - { - chassert(table != nullptr); - res_columns[res_index++]->insert(table->getName()); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(0u); // is_temporary - - if (columns_mask[src_index++]) - { - chassert(lock != nullptr); - Array table_paths_array; - auto paths = table->getDataPaths(); - table_paths_array.reserve(paths.size()); - for (const String & path : paths) - table_paths_array.push_back(path); - res_columns[res_index++]->insert(table_paths_array); - /// We don't need the lock anymore - lock = nullptr; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); - - StorageMetadataPtr metadata_snapshot; - if (table) - metadata_snapshot = table->getInMemoryMetadataPtr(); - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && table->supportsReplication()) - res_columns[res_index++]->insert(metadata_snapshot->metadata_version); - else - res_columns[res_index++]->insertDefault(); - } - - { - Array views_table_name_array; - Array views_database_name_array; - if (columns_mask[src_index] || columns_mask[src_index + 1]) - { - const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); - - views_table_name_array.reserve(view_ids.size()); - views_database_name_array.reserve(view_ids.size()); - for (const auto & view_id : view_ids) - { - views_table_name_array.push_back(view_id.table_name); - views_database_name_array.push_back(view_id.database_name); - } - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_database_name_array); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_table_name_array); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) - { - ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); - auto * ast_create = ast ? ast->as() : nullptr; - - if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) - { - ast_create->uuid = UUIDHelpers::Nil; - ast_create->to_inner_uuid = UUIDHelpers::Nil; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(ast ? 
format({context, *ast}) : ""); - - if (columns_mask[src_index++]) - { - String engine_full; - - if (ast_create && ast_create->storage) - { - engine_full = format({context, *ast_create->storage}); - - static const char * const extra_head = " ENGINE = "; - if (startsWith(engine_full, extra_head)) - engine_full = engine_full.substr(strlen(extra_head)); - } - - res_columns[res_index++]->insert(engine_full); - } - - if (columns_mask[src_index++]) - { - String as_select; - if (ast_create && ast_create->select) - as_select = format({context, *ast_create->select}); - res_columns[res_index++]->insert(as_select); - } - } - else - src_index += 3; - - ASTPtr expression_ptr; - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto policy = table ? table->getStoragePolicy() : nullptr; - if (policy) - res_columns[res_index++]->insert(policy->getName()); - else - res_columns[res_index++]->insertDefault(); - } - - auto settings = context->getSettingsRef(); - settings.select_sequential_consistency = 0; - if (columns_mask[src_index++]) - { - auto total_rows = table ? table->totalRows(settings) : std::nullopt; - if (total_rows) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes = table->totalBytes(settings); - if (total_bytes) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); - if (total_bytes_uncompressed) - res_columns[res_index++]->insert(*total_bytes_uncompressed); - else - res_columns[res_index++]->insertDefault(); - } - - auto table_merge_tree = std::dynamic_pointer_cast(table); - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - { - res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); - } - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_rows = table ? 
table->lifetimeRows() : std::nullopt; - if (lifetime_rows) - res_columns[res_index++]->insert(*lifetime_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_bytes = table ? table->lifetimeBytes() : std::nullopt; - if (lifetime_bytes) - res_columns[res_index++]->insert(*lifetime_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot) - res_columns[res_index++]->insert(metadata_snapshot->comment); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table) - res_columns[res_index++]->insert(table->storesDataOnDisk()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) - { - auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); - auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); - - Array dependencies_databases; - Array dependencies_tables; - dependencies_databases.reserve(dependencies.size()); - dependencies_tables.reserve(dependencies.size()); - for (const auto & dependency : dependencies) - { - dependencies_databases.push_back(dependency.database_name); - dependencies_tables.push_back(dependency.table_name); - } - - Array dependents_databases; - Array dependents_tables; - dependents_databases.reserve(dependents.size()); - dependents_tables.reserve(dependents.size()); - for (const auto & dependent : dependents) - { - dependents_databases.push_back(dependent.database_name); - dependents_tables.push_back(dependent.table_name); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_tables); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_tables); - - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - return Chunk(std::move(res_columns), num_rows); - } -private: - std::vector columns_mask; - UInt64 max_block_size; - ColumnPtr databases; - NameSet tables; - size_t database_idx = 0; - DatabaseTablesIteratorPtr tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; -}; - -} - -class ReadFromSystemTables : public SourceStepWithFilter -{ -public: - std::string getName() const override { return "ReadFromSystemTables"; } - void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_) - : SourceStepWithFilter( - DataStream{.header = std::move(sample_block)}, - column_names_, - query_info_, - storage_snapshot_, - context_) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - { - } - - void applyFilters(ActionDAGNodes added_filter_nodes) override; - -private: - std::vector columns_mask; - size_t max_block_size; - - ColumnPtr filtered_databases_column; - ColumnPtr filtered_tables_column; -}; - void StorageSystemTables::read( QueryPlan & query_plan, const Names & 
column_names, @@ -747,24 +126,4 @@ void StorageSystemTables::read( query_plan.addStep(std::move(reading)); } - -void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - - const ActionsDAG::Node * predicate = nullptr; - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); - - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); -} - -void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - Pipe pipe(std::make_shared( - std::move(columns_mask), getOutputStream().header, max_block_size, std::move(filtered_databases_column), std::move(filtered_tables_column), context)); - pipeline.init(std::move(pipe)); -} - } diff --git a/src/Storages/System/TablesBlockSource.cpp b/src/Storages/System/TablesBlockSource.cpp new file mode 100644 index 00000000000..d5481856f1b --- /dev/null +++ b/src/Storages/System/TablesBlockSource.cpp @@ -0,0 +1,564 @@ +#include "TablesBlockSource.h" + +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace +{ + +/// Avoid heavy operation on tables if we only queried columns that we can get without table object. +/// Otherwise it will require table initialization for Lazy database. +bool needTable(const DatabasePtr & database, const Block & header) +{ + if (database->getEngineName() != "Lazy") + return true; + + static const std::set columns_without_table = {"database", "name", "uuid", "metadata_modification_time"}; + for (const auto & column : header.getColumnsWithTypeAndName()) + { + if (columns_without_table.find(column.name) == columns_without_table.end()) + return true; + } + return false; +} + +} + +Chunk TablesBlockSource::generate() +{ + if (done) + return {}; + + MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + size_t rows_count = 0; + while (rows_count < max_block_size) + { + if (tables_it && !tables_it->isValid()) + ++database_idx; + + while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + { + /// Database was deleted just now or the user has no access. + ++database_idx; + continue; + } + + break; + } + + /// This is for temporary tables. They are output in single block regardless to max_block_size. 
+ if (database_idx >= databases->size()) + { + if (context->hasSessionContext()) + { + Tables external_tables = context->getSessionContext()->getExternalTables(); + + for (auto & table : external_tables) + { + size_t src_index = 0; + size_t res_index = 0; + + // database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // name + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.first); + + // uuid + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getStorageID().uuid); + + // engine + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + // is_temporary + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(1u); + + // data_paths + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_path + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_modification_time + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_version + // Temporary tables does not support replication + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_table + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // create_table_query + if (columns_mask[src_index++]) + { + auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); + ASTPtr ast + = temp_db ? temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + } + + // engine_full + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + const auto & settings = context->getSettingsRef(); + while (src_index < columns_mask.size()) + { + // total_rows + if (src_index == 19 && columns_mask[src_index]) + { + if (auto total_rows = table.second->totalRows(settings)) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + // total_bytes + else if (src_index == 20 && columns_mask[src_index]) + { + if (auto total_bytes = table.second->totalBytes(settings)) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + /// Fill the rest columns with defaults + else if (columns_mask[src_index]) + res_columns[res_index++]->insertDefault(); + src_index++; + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + done = true; + return Chunk(std::move(res_columns), num_rows); + } + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!tables_it || !tables_it->isValid()) + tables_it = database->getTablesIterator(context); + + const bool need_table = needTable(database, getPort().getHeader()); + + for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) + { + auto table_name = tables_it->name(); + if (!tables.contains(table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) + continue; + + StoragePtr table = nullptr; + TableLockHolder lock; + if (need_table) + { + table = tables_it->table(); + if (!table) + // Table might have just been removed or detached for Lazy engine (see 
DatabaseLazy::tryGetTable()) + continue; + + /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) + /// and it's not protected internally by other mutexes + static const size_t DATA_PATHS_INDEX = 5; + if (columns_mask[DATA_PATHS_INDEX]) + { + lock = table->tryLockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); + if (!lock) + // Table was dropped while acquiring the lock, skipping table + continue; + } + } + ++rows_count; + + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(tables_it->uuid()); + + if (columns_mask[src_index++]) + { + chassert(table != nullptr); + res_columns[res_index++]->insert(table->getName()); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(0u); // is_temporary + + if (columns_mask[src_index++]) + { + chassert(lock != nullptr); + Array table_paths_array; + auto paths = table->getDataPaths(); + table_paths_array.reserve(paths.size()); + for (const String & path : paths) + table_paths_array.push_back(path); + res_columns[res_index++]->insert(table_paths_array); + /// We don't need the lock anymore + lock = nullptr; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); + + StorageMetadataPtr metadata_snapshot; + if (table) + metadata_snapshot = table->getInMemoryMetadataPtr(); + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && table->supportsReplication()) + res_columns[res_index++]->insert(metadata_snapshot->metadata_version); + else + res_columns[res_index++]->insertDefault(); + } + + { + Array views_table_name_array; + Array views_database_name_array; + if (columns_mask[src_index] || columns_mask[src_index + 1]) + { + const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); + + views_table_name_array.reserve(view_ids.size()); + views_database_name_array.reserve(view_ids.size()); + for (const auto & view_id : view_ids) + { + views_table_name_array.push_back(view_id.table_name); + views_database_name_array.push_back(view_id.database_name); + } + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_database_name_array); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_table_name_array); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) + { + ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); + auto * ast_create = ast ? ast->as() : nullptr; + + if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) + { + ast_create->uuid = UUIDHelpers::Nil; + ast_create->to_inner_uuid = UUIDHelpers::Nil; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(ast ? 
format({context, *ast}) : ""); + + if (columns_mask[src_index++]) + { + String engine_full; + + if (ast_create && ast_create->storage) + { + engine_full = format({context, *ast_create->storage}); + + static const char * const extra_head = " ENGINE = "; + if (startsWith(engine_full, extra_head)) + engine_full = engine_full.substr(strlen(extra_head)); + } + + res_columns[res_index++]->insert(engine_full); + } + + if (columns_mask[src_index++]) + { + String as_select; + if (ast_create && ast_create->select) + as_select = format({context, *ast_create->select}); + res_columns[res_index++]->insert(as_select); + } + } + else + src_index += 3; + + ASTPtr expression_ptr; + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto policy = table ? table->getStoragePolicy() : nullptr; + if (policy) + res_columns[res_index++]->insert(policy->getName()); + else + res_columns[res_index++]->insertDefault(); + } + + auto settings = context->getSettingsRef(); + settings.select_sequential_consistency = 0; + if (columns_mask[src_index++]) + { + auto total_rows = table ? table->totalRows(settings) : std::nullopt; + if (total_rows) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes = table->totalBytes(settings); + if (total_bytes) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); + if (total_bytes_uncompressed) + res_columns[res_index++]->insert(*total_bytes_uncompressed); + else + res_columns[res_index++]->insertDefault(); + } + + auto table_merge_tree = std::dynamic_pointer_cast(table); + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + { + res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); + } + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_rows = table ? 
table->lifetimeRows() : std::nullopt; + if (lifetime_rows) + res_columns[res_index++]->insert(*lifetime_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_bytes = table ? table->lifetimeBytes() : std::nullopt; + if (lifetime_bytes) + res_columns[res_index++]->insert(*lifetime_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot) + res_columns[res_index++]->insert(metadata_snapshot->comment); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table) + res_columns[res_index++]->insert(table->storesDataOnDisk()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) + { + auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); + auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); + + Array dependencies_databases; + Array dependencies_tables; + dependencies_databases.reserve(dependencies.size()); + dependencies_tables.reserve(dependencies.size()); + for (const auto & dependency : dependencies) + { + dependencies_databases.push_back(dependency.database_name); + dependencies_tables.push_back(dependency.table_name); + } + + Array dependents_databases; + Array dependents_tables; + dependents_databases.reserve(dependents.size()); + dependents_tables.reserve(dependents.size()); + for (const auto & dependent : dependents) + { + dependents_databases.push_back(dependent.database_name); + dependents_tables.push_back(dependent.table_name); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_tables); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_tables); + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + return Chunk(std::move(res_columns), num_rows); +} + +Chunk DetachedTablesBlockSource::generate() +{ + if (done) + return {}; + + MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + continue; + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!detached_tables_it || !detached_tables_it->isValid()) + detached_tables_it = database->getDetachedTablesIterator(context, {}, false); + + for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) + { + const auto detached_table_name = detached_tables_it->table(); + + if (!detached_tables.contains(detached_table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) + continue; + + 
fillResultColumnsByDetachedTableIterator(result_columns);
+            ++rows_count;
+        }
+    }
+
+    const UInt64 num_rows = result_columns.at(0)->size();
+    done = true;
+    return Chunk(std::move(result_columns), num_rows);
+}
+
+void DetachedTablesBlockSource::fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const
+{
+    size_t src_index = 0;
+    size_t res_index = 0;
+
+    if (columns_mask[src_index++])
+        result_columns[res_index++]->insert(detached_tables_it->database());
+
+    if (columns_mask[src_index++])
+        result_columns[res_index++]->insert(detached_tables_it->table());
+
+    if (columns_mask[src_index++])
+        result_columns[res_index++]->insert(detached_tables_it->uuid());
+
+    if (columns_mask[src_index++])
+        result_columns[res_index++]->insert(detached_tables_it->metadataPath());
+
+    if (columns_mask[src_index++])
+        result_columns[res_index++]->insert(detached_tables_it->isPermanently());
+}
+}
diff --git a/src/Storages/System/TablesBlockSource.h b/src/Storages/System/TablesBlockSource.h
new file mode 100644
index 00000000000..a9f2226dca8
--- /dev/null
+++ b/src/Storages/System/TablesBlockSource.h
@@ -0,0 +1,92 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace DB
+{
+
+class TablesBlockSource : public ISource
+{
+public:
+    TablesBlockSource(
+        std::vector columns_mask_,
+        Block header,
+        UInt64 max_block_size_,
+        ColumnPtr databases_,
+        ColumnPtr tables_,
+        ContextPtr context_)
+        : ISource(std::move(header))
+        , columns_mask(std::move(columns_mask_))
+        , max_block_size(max_block_size_)
+        , databases(std::move(databases_))
+        , context(Context::createCopy(context_))
+    {
+        size_t size = tables_->size();
+        tables.reserve(size);
+        for (size_t idx = 0; idx < size; ++idx)
+            tables.insert(tables_->getDataAt(idx).toString());
+    }
+
+    String getName() const override { return "Tables"; }
+
+protected:
+    Chunk generate() override;
+
+private:
+    std::vector columns_mask;
+    UInt64 max_block_size;
+    ColumnPtr databases;
+    NameSet tables;
+    size_t database_idx = 0;
+    DatabaseTablesIteratorPtr tables_it;
+    ContextPtr context;
+    bool done = false;
+    DatabasePtr database;
+    std::string database_name;
+};
+
+class DetachedTablesBlockSource : public ISource
+{
+public:
+    DetachedTablesBlockSource(
+        std::vector columns_mask_,
+        Block header,
+        UInt64 max_block_size_,
+        ColumnPtr databases_,
+        ColumnPtr detached_tables_,
+        ContextPtr context_)
+        : ISource(std::move(header))
+        , columns_mask(std::move(columns_mask_))
+        , max_block_size(max_block_size_)
+        , databases(std::move(databases_))
+        , context(Context::createCopy(context_))
+    {
+        size_t size = detached_tables_->size();
+        detached_tables.reserve(size);
+        for (size_t idx = 0; idx < size; ++idx)
+        {
+            detached_tables.insert(detached_tables_->getDataAt(idx).toString());
+        }
+    }
+
+    String getName() const override { return "DetachedTables"; }
+
+protected:
+    Chunk generate() override;
+
+private:
+    const std::vector columns_mask;
+    const UInt64 max_block_size;
+    const ColumnPtr databases;
+    NameSet detached_tables;
+    DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it;
+    ContextPtr context;
+    bool done = false;
+    DatabasePtr database;
+    std::string database_name;
+
+    void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const;
+};
+}

From e06387e6da4a89d9aab145dd057b6a99348e3ee8 Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Thu, 27 Jun 2024 07:37:23 +0000
Subject: [PATCH 07/80] fix style

---
 .../test_system_detached_tables/test.py       | 33 +++++++++++++------
 1 file changed, 23 insertions(+), 10 deletions(-)
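As a sketch of the behaviour the integration test that follows pins down (the table names are the test's own; the `default` database and the exact rows are assumptions for illustration, not part of the patch): a plain DETACH appears with `is_permanently = 0` and drops out of the view after a server restart, while DETACH PERMANENTLY appears with `is_permanently = 1` and survives it.

```sql
DETACH TABLE default.test_table;                  -- expected: is_permanently = 0
DETACH TABLE default.test_table_perm PERMANENTLY; -- expected: is_permanently = 1

SELECT database, table, is_permanently
FROM system.detached_tables
WHERE database = 'default';

-- After a server restart, only test_table_perm is expected to remain listed here.
```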
diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py
index bbac5e391f8..45bc70147b6 100644
--- a/tests/integration/test_system_detached_tables/test.py
+++ b/tests/integration/test_system_detached_tables/test.py
@@ -18,11 +18,19 @@ def test_system_detached_tables():
     node.query("CREATE TABLE test_table (n Int64) ENGINE=MergeTree ORDER BY n;")
     node.query("CREATE TABLE test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n;")
 
-    test_table_uuid = node.query("SELECT uuid FROM system.tables WHERE table='test_table'").rstrip('\n')
-    test_table_metadata_path = node.query("SELECT metadata_path FROM system.tables WHERE table='test_table'").rstrip('\n')
+    test_table_uuid = node.query(
+        "SELECT uuid FROM system.tables WHERE table='test_table'"
+    ).rstrip('\n')
+    test_table_metadata_path = node.query(
+        "SELECT metadata_path FROM system.tables WHERE table='test_table'"
+    ).rstrip('\n')
 
-    test_table_perm_uuid = node.query("SELECT uuid FROM system.tables WHERE table='test_table_perm'").rstrip('\n')
-    test_table_perm_metadata_path = node.query("SELECT metadata_path FROM system.tables WHERE table='test_table_perm'").rstrip('\n')
+    test_table_perm_uuid = node.query(
+        "SELECT uuid FROM system.tables WHERE table='test_table_perm'"
+    ).rstrip('\n')
+    test_table_perm_metadata_path = node.query(
+        "SELECT metadata_path FROM system.tables WHERE table='test_table_perm'"
+    ).rstrip('\n')
 
     assert "" == node.query("SELECT * FROM system.detached_tables")
 
@@ -31,12 +39,17 @@ def test_system_detached_tables():
 
     querry = "SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables FORMAT Values"
     result = node.query(querry)
-    assert result == f"('default','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
-
-    node.restart_clickhouse()
-
-    result = node.query(querry)
-    assert result == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
+    assert (
+        result 
+        == f"('default','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
+    )
+    node.restart_clickhouse()
+
+    result = node.query(querry)
+    assert (
+        result
+        == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
+    )
 
     node.restart_clickhouse()

From 1c9ccbae1142d585f7534441a6abc5e75acfdce3 Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Thu, 27 Jun 2024 07:49:07 +0000
Subject: [PATCH 08/80] small up

---
 src/Databases/DatabasesCommon.cpp | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp
index 470625ddd8b..4cf914fea56 100644
--- a/src/Databases/DatabasesCommon.cpp
+++ b/src/Databases/DatabasesCommon.cpp
@@ -270,37 +270,36 @@ StoragePtr DatabaseWithOwnTablesBase::detachTable(ContextPtr /* context_ */, con
 
 StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_name)
 {
-    StoragePtr res;
-
     auto it = tables.find(table_name);
     if (it == tables.end())
         throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(database_name), backQuote(table_name));
-    res = it->second;
+
+    auto table_storage = it->second;
 
     snapshot_detached_tables.emplace(
-        it->first,
+        table_name,
        SnapshotDetachedTable{
             .database = it->second->getStorageID().getDatabaseName(),
-            .table = it->first,
+            .table = table_name,
             .uuid = it->second->getStorageID().uuid,
-            .metadata_path = getObjectMetadataPath(it->first),
+            .metadata_path = getObjectMetadataPath(table_name),
             .is_permanently = false});
 
     tables.erase(it);
-    res->is_detached = true;
+    table_storage->is_detached = true;
 
-    if (res->isSystemStorage() == false)
-        CurrentMetrics::sub(getAttachedCounterForStorage(res), 1);
+    if (table_storage->isSystemStorage() == false)
+        CurrentMetrics::sub(getAttachedCounterForStorage(table_storage), 1);
 
-    auto table_id = res->getStorageID();
+    auto table_id = table_storage->getStorageID();
     if (table_id.hasUUID())
     {
         assert(database_name == DatabaseCatalog::TEMPORARY_DATABASE || getUUID() != UUIDHelpers::Nil);
         DatabaseCatalog::instance().removeUUIDMapping(table_id.uuid);
     }
 
-    return res;
+    return table_storage;
 }
 
 void DatabaseWithOwnTablesBase::attachTable(ContextPtr /* context_ */, const String & table_name, const StoragePtr & table, const String &)
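Since the snapshot entry above records the storage's UUID, a detached Atomic table can be correlated with its re-attached incarnation. A minimal hand-run sketch (the database and table names `db`/`t` are illustrative, not taken from the patch):

```sql
DETACH TABLE db.t;
SELECT uuid FROM system.detached_tables WHERE database = 'db' AND table = 't';

ATTACH TABLE db.t;
-- Expected to return the same UUID: DETACH/ATTACH does not change the table's identity.
SELECT uuid FROM system.tables WHERE database = 'db' AND name = 't';
```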
From 215802e74eb841449af6cb80f25b66a3610707dd Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Thu, 27 Jun 2024 08:28:35 +0000
Subject: [PATCH 09/80] fix style

---
 .../test_system_detached_tables/test.py       | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py
index 45bc70147b6..dbccac92523 100644
--- a/tests/integration/test_system_detached_tables/test.py
+++ b/tests/integration/test_system_detached_tables/test.py
@@ -20,17 +20,17 @@ def test_system_detached_tables():
 
     test_table_uuid = node.query(
         "SELECT uuid FROM system.tables WHERE table='test_table'"
-    ).rstrip('\n')
+    ).rstrip("\n")
     test_table_metadata_path = node.query(
         "SELECT metadata_path FROM system.tables WHERE table='test_table'"
-    ).rstrip('\n')
+    ).rstrip("\n")
 
     test_table_perm_uuid = node.query(
         "SELECT uuid FROM system.tables WHERE table='test_table_perm'"
-    ).rstrip('\n')
+    ).rstrip("\n")
     test_table_perm_metadata_path = node.query(
         "SELECT metadata_path FROM system.tables WHERE table='test_table_perm'"
-    ).rstrip('\n')
+    ).rstrip("\n")
 
     assert "" == node.query("SELECT * FROM system.detached_tables")
 
@@ -40,7 +40,7 @@ def test_system_detached_tables():
     querry = "SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables FORMAT Values"
     result = node.query(querry)
     assert (
-        result 
+        result
         == f"('default','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
     )
     node.restart_clickhouse()
@@ -54,4 +54,7 @@ def test_system_detached_tables():
     node.restart_clickhouse()
 
     result = node.query(querry)
-    assert result == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
+    assert (
+        result
+        == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')"
+    )

From bda41ab03d650978cb6765746d786b25de9ed72c Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Thu, 27 Jun 2024 10:52:04 +0000
Subject: [PATCH 10/80] fix tests for CI

---
 src/Databases/DatabasesCommon.cpp                |  8 ++++----
 src/Databases/IDatabase.h                        |  4 +++-
 src/Databases/TablesLoader.cpp                   | 10 +++++-----
 .../0_stateless/03172_system_detached_tables.sql | 12 ++++++------
 4 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp
index 4cf914fea56..fe0baf30e57 100644
--- a/src/Databases/DatabasesCommon.cpp
+++ b/src/Databases/DatabasesCommon.cpp
@@ -2,10 +2,6 @@
 #include
 #include
-#include
-#include
-#include
-#include
 #include
 #include
 #include
@@ -17,6 +13,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 
 
 namespace DB
diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h
index e856dd3d2f0..ce5a52b1b0f 100644
--- a/src/Databases/IDatabase.h
+++ b/src/Databases/IDatabase.h
@@ -138,7 +138,9 @@ protected:
     }
 
 public:
-    explicit DatabaseDetachedTablesSnapshotIterator(const SnapshotDetachedTables & tables_) : snapshot(tables_), it(snapshot.begin()) { }
+    explicit DatabaseDetachedTablesSnapshotIterator(const SnapshotDetachedTables & tables_) : snapshot(tables_), it(snapshot.begin())
+    {
+    }
 
     explicit DatabaseDetachedTablesSnapshotIterator(SnapshotDetachedTables && tables_) : snapshot(std::move(tables_)), it(snapshot.begin())
     {
diff --git a/src/Databases/TablesLoader.cpp b/src/Databases/TablesLoader.cpp
index 6aa13b7b759..1f70f46da60 100644
--- a/src/Databases/TablesLoader.cpp
+++ b/src/Databases/TablesLoader.cpp
@@ -1,14 +1,14 @@
-#include
-#include
+#include
 #include
 #include
-#include
+#include
+#include
 #include
+#include
 #include
 #include
-#include
 #include
-#include
+#include
 
 
 namespace DB
diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql
index 558dfe4cf18..e361d2f34ca 100644
--- a/tests/queries/0_stateless/03172_system_detached_tables.sql
+++ b/tests/queries/0_stateless/03172_system_detached_tables.sql
@@ -4,23 +4,23 @@ DROP DATABASE IF EXISTS test_system_detached_tables;
 CREATE DATABASE IF NOT EXISTS test_system_detached_tables;
 
 CREATE TABLE test_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n;
-SELECT * FROM system.detached_tables;
+SELECT * FROM system.detached_tables WHERE database='test_system_detached_tables';
 
 DETACH TABLE test_system_detached_tables.test_table;
-SELECT database, table, is_permanently FROM system.detached_tables;
+SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables';
 
 ATTACH TABLE test_system_detached_tables.test_table;
 
 CREATE TABLE test_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n;
-SELECT * FROM system.detached_tables;
+SELECT * FROM system.detached_tables WHERE database='test_system_detached_tables';
 
 DETACH TABLE test_system_detached_tables.test_table_perm PERMANENTLY;
-SELECT database, table, is_permanently FROM system.detached_tables;
+SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables';
 
 DETACH TABLE test_system_detached_tables.test_table SYNC;
-SELECT database, table, is_permanently FROM system.detached_tables;
+SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables';
 
-SELECT database, table, is_permanently FROM system.detached_tables WHERE table='test_table';
+SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables' AND table='test_table';
 
 DROP DATABASE test_system_detached_tables;

From 6d1f37ad3db0078514dd76e857ef10474afd2ed4 Mon Sep 17 00:00:00 2001
From: Konstantin Morozov
Date: Thu, 27 Jun 2024 15:37:27 +0000
Subject: [PATCH 11/80] support lazy db

---
 src/Databases/DatabaseLazy.cpp                | 16 ++++
 .../03172_system_detached_tables.reference    | 16 ++++--
 .../03172_system_detached_tables.sql          | 55 ++++++++++++++-----
 3 files changed, 67 insertions(+), 20 deletions(-)
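In user-visible terms, this patch makes Lazy databases feed the same detached-tables snapshot, so a DETACH/ATTACH round trip against a Lazy database becomes observable through the system table. A hand-run sketch under assumed names (`db_lazy` and `t` are illustrative; the stateless test updated below uses its own `test03172_*` names):

```sql
CREATE DATABASE IF NOT EXISTS db_lazy ENGINE = Lazy(10);
CREATE TABLE db_lazy.t (n UInt64) ENGINE = Log;

DETACH TABLE db_lazy.t;
-- Expected: one row for db_lazy.t with is_permanently = 0.
SELECT database, table, is_permanently FROM system.detached_tables WHERE database = 'db_lazy';

ATTACH TABLE db_lazy.t;
-- Expected: no rows, since attaching erases the snapshot entry again.
SELECT database, table, is_permanently FROM system.detached_tables WHERE database = 'db_lazy';
```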
diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp
index 233db07cd68..3c398e50371 100644
--- a/src/Databases/DatabaseLazy.cpp
+++ b/src/Databases/DatabaseLazy.cpp
@@ -187,6 +187,13 @@ void DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_n
 
     it->second.expiration_iterator = cache_expiration_queue.emplace(cache_expiration_queue.end(), current_time, table_name);
 
+    LOG_DEBUG(log, "Remove detached table {} from snapshot on attach.", backQuote(table_name));
+    if (snapshot_detached_tables.contains(table_name))
+    {
+        LOG_DEBUG(log, "Clean info about detached table {} from snapshot.", backQuote(table_name));
+        snapshot_detached_tables.erase(table_name);
+    }
+
     CurrentMetrics::add(CurrentMetrics::AttachedTable, 1);
 }
 
@@ -203,6 +210,15 @@ StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & ta
         if (it->second.expiration_iterator != cache_expiration_queue.end())
             cache_expiration_queue.erase(it->second.expiration_iterator);
         tables_cache.erase(it);
+        LOG_DEBUG(log, "Add info for detached table {} to snapshot.", backQuote(table_name));
+        snapshot_detached_tables.emplace(
+            table_name,
+            SnapshotDetachedTable{
+                .database = res->getStorageID().database_name,
+                .table = res->getStorageID().table_name,
+                .uuid = res->getStorageID().uuid,
+                .metadata_path = getObjectMetadataPath(table_name),
+                .is_permanently = false});
 
         CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1);
     }
diff --git a/tests/queries/0_stateless/03172_system_detached_tables.reference b/tests/queries/0_stateless/03172_system_detached_tables.reference
index 1cf9e0275c9..ce19daa4cdc 100644
--- a/tests/queries/0_stateless/03172_system_detached_tables.reference
+++ b/tests/queries/0_stateless/03172_system_detached_tables.reference
@@ -1,5 +1,11 @@
-test_system_detached_tables test_table 0
-test_system_detached_tables test_table_perm 1
-test_system_detached_tables test_table 0
-test_system_detached_tables test_table_perm 1
-test_system_detached_tables test_table 0
+database atomic tests
+test03172_system_detached_tables test_table 0
+test03172_system_detached_tables test_table_perm 1
+test03172_system_detached_tables test_table 0
+test03172_system_detached_tables test_table_perm 1
+test03172_system_detached_tables test_table 0
+-----------------------
+database lazy tests
+test03172_system_detached_tables_lazy test_table 0
+test03172_system_detached_tables_lazy test_table_perm 1
+DROP TABLE
diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql
index e361d2f34ca..916821d996b 100644
--- a/tests/queries/0_stateless/03172_system_detached_tables.sql
+++ b/tests/queries/0_stateless/03172_system_detached_tables.sql
@@ -1,28 +1,53 @@
 -- Tags: no-parallel
 
+SELECT 'database atomic tests';
-DROP DATABASE IF EXISTS test_system_detached_tables;
-CREATE DATABASE IF NOT EXISTS test_system_detached_tables;
+DROP DATABASE IF EXISTS test03172_system_detached_tables;
+CREATE DATABASE IF NOT EXISTS test03172_system_detached_tables;
 
-CREATE TABLE test_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n;
-SELECT * FROM system.detached_tables WHERE database='test_system_detached_tables';
+CREATE TABLE test03172_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n;
+SELECT * FROM system.detached_tables WHERE database='test03172_system_detached_tables';
 
-DETACH TABLE test_system_detached_tables.test_table;
-SELECT database, table, is_permanently
FROM system.detached_tables WHERE database='test_system_detached_tables'; +DETACH TABLE test03172_system_detached_tables.test_table; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; -ATTACH TABLE test_system_detached_tables.test_table; +ATTACH TABLE test03172_system_detached_tables.test_table; -CREATE TABLE test_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n; -SELECT * FROM system.detached_tables WHERE database='test_system_detached_tables'; +CREATE TABLE test03172_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT * FROM system.detached_tables WHERE database='test03172_system_detached_tables'; -DETACH TABLE test_system_detached_tables.test_table_perm PERMANENTLY; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables'; +DETACH TABLE test03172_system_detached_tables.test_table_perm PERMANENTLY; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; -DETACH TABLE test_system_detached_tables.test_table SYNC; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables'; +DETACH TABLE test03172_system_detached_tables.test_table SYNC; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test_system_detached_tables' AND table='test_table'; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables' AND table='test_table'; -DROP DATABASE test_system_detached_tables; +DROP DATABASE test03172_system_detached_tables SYNC; +SELECT '-----------------------'; +SELECT 'database lazy tests'; +DROP DATABASE IF EXISTS test03172_system_detached_tables_lazy; +CREATE DATABASE test03172_system_detached_tables_lazy Engine=Lazy(10); +CREATE TABLE test03172_system_detached_tables_lazy.test_table (number UInt64) engine=Log; +INSERT INTO test03172_system_detached_tables_lazy.test_table SELECT * FROM numbers(100); +DETACH TABLE test03172_system_detached_tables_lazy.test_table; + +CREATE TABLE test03172_system_detached_tables_lazy.test_table_perm (number UInt64) engine=Log; +INSERT INTO test03172_system_detached_tables_lazy.test_table_perm SELECT * FROM numbers(100); +DETACH table test03172_system_detached_tables_lazy.test_table_perm PERMANENTLY; + +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; + +ATTACH TABLE test03172_system_detached_tables_lazy.test_table; +ATTACH TABLE test03172_system_detached_tables_lazy.test_table_perm; + +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; + +SELECT 'DROP TABLE'; +DROP TABLE test03172_system_detached_tables_lazy.test_table SYNC; +DROP TABLE test03172_system_detached_tables_lazy.test_table_perm SYNC; + +DROP DATABASE test03172_system_detached_tables_lazy SYNC; From 062198b95642a52ec0c67bbc25e7d5da45814b33 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Thu, 27 Jun 2024 15:44:37 +0000 Subject: [PATCH 12/80] add checking and exception --- src/Databases/DatabaseOnDisk.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 
25e5347e017..67c9114e793 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -22,6 +22,7 @@ #include #include #include +#include "Common/Exception.h" #include #include #include @@ -309,7 +310,11 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri FS::createFile(detached_permanently_flag); std::lock_guard lock(mutex); - snapshot_detached_tables.at(table_name).is_permanently = true; + if (!snapshot_detached_tables.contains(table_name)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + } + snapshot_detached_tables[table_name].is_permanently = true; } catch (Exception & e) { From 818e8597a22cf87c560d7986818f8a094cd7db1f Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Fri, 28 Jun 2024 12:27:40 +0000 Subject: [PATCH 13/80] Revert "general logic for tables" This reverts commit de8f1fc6e3adb6c8f346b2fda4e34c2240e1bf8e. --- src/Storages/System/ReadFromSystemTables.cpp | 157 ---- src/Storages/System/ReadFromSystemTables.h | 66 -- .../System/StorageSystemDetachedTables.cpp | 237 ++++++- src/Storages/System/StorageSystemTables.cpp | 669 +++++++++++++++++- src/Storages/System/TablesBlockSource.cpp | 564 --------------- src/Storages/System/TablesBlockSource.h | 92 --- 6 files changed, 891 insertions(+), 894 deletions(-) delete mode 100644 src/Storages/System/ReadFromSystemTables.cpp delete mode 100644 src/Storages/System/ReadFromSystemTables.h delete mode 100644 src/Storages/System/TablesBlockSource.cpp delete mode 100644 src/Storages/System/TablesBlockSource.h diff --git a/src/Storages/System/ReadFromSystemTables.cpp b/src/Storages/System/ReadFromSystemTables.cpp deleted file mode 100644 index 638cab1b63b..00000000000 --- a/src/Storages/System/ReadFromSystemTables.cpp +++ /dev/null @@ -1,157 +0,0 @@ -#include "ReadFromSystemTables.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace DB -{ - -namespace -{ - -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables( - const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, const bool need_detached_tables) -{ - Block sample{ - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); 
++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - if (need_detached_tables) - { - auto table_it = database->getDetachedTablesIterator(context, {}, false); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->table()); - } - } - else - { - auto table_it = database->getTablesIterator(context); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } - } - } - - Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -} - -ReadFromSystemTables::ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_) - : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) -{ -} - -void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - applyFiltersImpl(std::move(added_filter_nodes), false); -} - -void ReadFromSystemTables::applyFiltersImpl(ActionDAGNodes added_filter_nodes, const bool need_detached_tables_) -{ - SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - - const ActionsDAG::Node * predicate = nullptr; - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); - - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context, need_detached_tables_); -} - -void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - pipeline.init(createPipe()); -} - -ReadFromSystemDetachedTables::ReadFromSystemDetachedTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - const size_t max_block_size_) - : ReadFromSystemTables( - column_names_, query_info_, storage_snapshot_, context_, std::move(sample_block), std::move(columns_mask_), max_block_size_) -{ -} - -void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_nodes) -{ - applyFiltersImpl(std::move(added_filter_nodes), true); -} - -void ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ - pipeline.init(createPipe()); -} -} diff --git a/src/Storages/System/ReadFromSystemTables.h b/src/Storages/System/ReadFromSystemTables.h deleted file mode 100644 index f8f34363159..00000000000 --- a/src/Storages/System/ReadFromSystemTables.h +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class ReadFromSystemTables : public SourceStepWithFilter -{ -public: - std::string getName() const override { return "ReadFromSystemTables"; } - void 
initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_); - - void applyFilters(ActionDAGNodes added_filter_nodes) override; - -protected: - void applyFiltersImpl(ActionDAGNodes added_filter_nodes, bool need_detached_tables_); - - template - Pipe createPipe() - { - return Pipe(std::make_shared( - std::move(columns_mask), - getOutputStream().header, - max_block_size, - std::move(filtered_databases_column), - std::move(filtered_tables_column), - context)); - } - -private: - std::vector columns_mask; - size_t max_block_size; - - ColumnPtr filtered_databases_column; - ColumnPtr filtered_tables_column; -}; - -class ReadFromSystemDetachedTables : public ReadFromSystemTables -{ -public: - std::string getName() const override { return "ReadFromSystemDetachedTables"; } - void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - - ReadFromSystemDetachedTables( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - Block sample_block, - std::vector columns_mask_, - size_t max_block_size_); - - void applyFilters(ActionDAGNodes added_filter_nodes) override; -}; -} diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp index 99c15fdae0e..9805fa2a8cf 100644 --- a/src/Storages/System/StorageSystemDetachedTables.cpp +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -13,7 +13,6 @@ #include #include #include -#include #include #include @@ -23,6 +22,204 @@ namespace DB { +namespace +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + MutableColumnPtr column = ColumnString::create(); + + const auto databases = DatabaseCatalog::instance().getDatabases(); + for (const auto & database_name : databases | boost::adaptors::map_keys) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; /// We don't want to show the internal database for temporary tables in system.tables + + column->insert(database_name); + } + + Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; + VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + return block.getByPosition(0).column; +} + +ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) +{ + Block sample{ + ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), + ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; + + MutableColumnPtr database_column = ColumnString::create(); + MutableColumnPtr engine_column; + + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); + if (dag) + { + bool filter_by_engine = false; + for (const auto * input : dag->getInputs()) + if (input->result_name == "engine") + filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = 
DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + auto table_it = database->getDetachedTablesIterator(context, {}, false); + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->table()); + } + } + + Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +class DetachedTablesBlockSource : public ISource +{ +public: + DetachedTablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr detached_tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = detached_tables_->size(); + detached_tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + { + detached_tables.insert(detached_tables_->getDataAt(idx).toString()); + } + } + + String getName() const override { return "DetachedTables"; } + +protected: + Chunk generate() override + { + if (done) + return {}; + + MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + continue; + + const bool need_to_check_access_for_tables + = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!detached_tables_it || !detached_tables_it->isValid()) + detached_tables_it = database->getDetachedTablesIterator(context, {}, false); + + for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) + { + const auto detached_table_name = detached_tables_it->table(); + + if (!detached_tables.contains(detached_table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) + continue; + + fillResultColumnsByDetachedTableIterator(result_columns); + ++rows_count; + } + } + + const UInt64 num_rows = result_columns.at(0)->size(); + done = true; + return Chunk(std::move(result_columns), num_rows); + } + +private: + const std::vector columns_mask; + const UInt64 max_block_size; + const ColumnPtr databases; + NameSet detached_tables; + DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; + + void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const + { + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->database()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->table()); + + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->uuid()); + + if 
(columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->metadataPath()); + + if (columns_mask[src_index++]) + result_columns[res_index++]->insert(detached_tables_it->isPermanently()); + } +}; + +} + +class ReadFromSystemDetachedTables : public SourceStepWithFilter +{ +public: + std::string getName() const override { return "ReadFromSystemDetachedTables"; } + void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; + + ReadFromSystemDetachedTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_); + + void applyFilters(ActionDAGNodes added_filter_nodes) override; + +private: + std::vector columns_mask; + size_t max_block_size; + + ColumnPtr filtered_databases_column; + ColumnPtr filtered_tables_column; +}; + StorageSystemDetachedTables::StorageSystemDetachedTables(const StorageID & table_id_) : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; @@ -60,4 +257,42 @@ void StorageSystemDetachedTables::read( query_plan.addStep(std::move(reading)); } + +ReadFromSystemDetachedTables::ReadFromSystemDetachedTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_) + : SourceStepWithFilter(DataStream{.header = std::move(sample_block)}, column_names_, query_info_, storage_snapshot_, context_) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) +{ +} + +void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_nodes) +{ + SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); + + const ActionsDAG::Node * predicate = nullptr; + if (filter_actions_dag) + predicate = filter_actions_dag->getOutputs().at(0); + + filtered_databases_column = getFilteredDatabases(predicate, context); + filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); +} + +void ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + auto pipe = Pipe(std::make_shared( + std::move(columns_mask), + getOutputStream().header, + max_block_size, + std::move(filtered_databases_column), + std::move(filtered_tables_column), + context)); + pipeline.init(std::move(pipe)); +} } diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 40190f53fd3..783b899c978 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -1,30 +1,29 @@ -#include #include -#include -#include -#include +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include #include #include #include +#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include -#include +#include #include @@ -106,6 +105,628 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) setInMemoryMetadata(storage_metadata); } + +namespace +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + MutableColumnPtr column = ColumnString::create(); + + const auto databases = 
DatabaseCatalog::instance().getDatabases(); + for (const auto & database_name : databases | boost::adaptors::map_keys) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; /// We don't want to show the internal database for temporary tables in system.tables + + column->insert(database_name); + } + + Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; + VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + return block.getByPosition(0).column; +} + +ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) +{ + Block sample { + ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), + ColumnWithTypeAndName(nullptr, std::make_shared(), "engine") + }; + + MutableColumnPtr database_column = ColumnString::create(); + MutableColumnPtr engine_column; + + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); + if (dag) + { + bool filter_by_engine = false; + for (const auto * input : dag->getInputs()) + if (input->result_name == "engine") + filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->name()); + if (engine_column) + engine_column->insert(table_it->table()->getName()); + } + } + + Block block {ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +/// Avoid heavy operation on tables if we only queried columns that we can get without table object. +/// Otherwise it will require table initialization for Lazy database. 
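+/// For example, `SELECT name FROM system.tables` only touches the metadata
+/// columns enumerated below, so tables in a Lazy database can stay unloaded;
+/// as soon as a column such as `engine` or `total_rows` is requested, the
+/// table object itself has to be created.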
+bool needTable(const DatabasePtr & database, const Block & header) +{ + if (database->getEngineName() != "Lazy") + return true; + + static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; + for (const auto & column : header.getColumnsWithTypeAndName()) + { + if (columns_without_table.find(column.name) == columns_without_table.end()) + return true; + } + return false; +} + + +class TablesBlockSource : public ISource +{ +public: + TablesBlockSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr tables_, + ContextPtr context_) + : ISource(std::move(header)) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + { + size_t size = tables_->size(); + tables.reserve(size); + for (size_t idx = 0; idx < size; ++idx) + tables.insert(tables_->getDataAt(idx).toString()); + } + + String getName() const override { return "Tables"; } + +protected: + Chunk generate() override + { + if (done) + return {}; + + MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + size_t rows_count = 0; + while (rows_count < max_block_size) + { + if (tables_it && !tables_it->isValid()) + ++database_idx; + + while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (!database) + { + /// Database was deleted just now or the user has no access. + ++database_idx; + continue; + } + + break; + } + + /// This is for temporary tables. They are output in single block regardless to max_block_size. + if (database_idx >= databases->size()) + { + if (context->hasSessionContext()) + { + Tables external_tables = context->getSessionContext()->getExternalTables(); + + for (auto & table : external_tables) + { + size_t src_index = 0; + size_t res_index = 0; + + // database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // name + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.first); + + // uuid + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getStorageID().uuid); + + // engine + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + // is_temporary + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(1u); + + // data_paths + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_path + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_modification_time + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // metadata_version + // Temporary tables does not support replication + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_database + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // dependencies_table + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); + + // create_table_query + if (columns_mask[src_index++]) + { + auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); + ASTPtr ast = temp_db ? 
temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + } + + // engine_full + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table.second->getName()); + + const auto & settings = context->getSettingsRef(); + while (src_index < columns_mask.size()) + { + // total_rows + if (src_index == 19 && columns_mask[src_index]) + { + if (auto total_rows = table.second->totalRows(settings)) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + // total_bytes + else if (src_index == 20 && columns_mask[src_index]) + { + if (auto total_bytes = table.second->totalBytes(settings)) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + /// Fill the rest columns with defaults + else if (columns_mask[src_index]) + res_columns[res_index++]->insertDefault(); + src_index++; + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + done = true; + return Chunk(std::move(res_columns), num_rows); + } + + const bool need_to_check_access_for_tables = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + if (!tables_it || !tables_it->isValid()) + tables_it = database->getTablesIterator(context); + + const bool need_table = needTable(database, getPort().getHeader()); + + for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) + { + auto table_name = tables_it->name(); + if (!tables.contains(table_name)) + continue; + + if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) + continue; + + StoragePtr table = nullptr; + TableLockHolder lock; + if (need_table) + { + table = tables_it->table(); + if (!table) + // Table might have just been removed or detached for Lazy engine (see DatabaseLazy::tryGetTable()) + continue; + + /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) + /// and it's not protected internally by other mutexes + static const size_t DATA_PATHS_INDEX = 5; + if (columns_mask[DATA_PATHS_INDEX]) + { + lock = table->tryLockForShare(context->getCurrentQueryId(), + context->getSettingsRef().lock_acquire_timeout); + if (!lock) + // Table was dropped while acquiring the lock, skipping table + continue; + } + } + ++rows_count; + + size_t src_index = 0; + size_t res_index = 0; + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(table_name); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(tables_it->uuid()); + + if (columns_mask[src_index++]) + { + chassert(table != nullptr); + res_columns[res_index++]->insert(table->getName()); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(0u); // is_temporary + + if (columns_mask[src_index++]) + { + chassert(lock != nullptr); + Array table_paths_array; + auto paths = table->getDataPaths(); + table_paths_array.reserve(paths.size()); + for (const String & path : paths) + table_paths_array.push_back(path); + res_columns[res_index++]->insert(table_paths_array); + /// We don't need the lock anymore + lock = nullptr; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); + + if (columns_mask[src_index++]) + 
res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); + + StorageMetadataPtr metadata_snapshot; + if (table) + metadata_snapshot = table->getInMemoryMetadataPtr(); + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && table->supportsReplication()) + res_columns[res_index++]->insert(metadata_snapshot->metadata_version); + else + res_columns[res_index++]->insertDefault(); + } + + { + Array views_table_name_array; + Array views_database_name_array; + if (columns_mask[src_index] || columns_mask[src_index + 1]) + { + const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); + + views_table_name_array.reserve(view_ids.size()); + views_database_name_array.reserve(view_ids.size()); + for (const auto & view_id : view_ids) + { + views_table_name_array.push_back(view_id.table_name); + views_database_name_array.push_back(view_id.database_name); + } + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_database_name_array); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(views_table_name_array); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) + { + ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); + auto * ast_create = ast ? ast->as() : nullptr; + + if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) + { + ast_create->uuid = UUIDHelpers::Nil; + ast_create->to_inner_uuid = UUIDHelpers::Nil; + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); + + if (columns_mask[src_index++]) + { + String engine_full; + + if (ast_create && ast_create->storage) + { + engine_full = format({context, *ast_create->storage}); + + static const char * const extra_head = " ENGINE = "; + if (startsWith(engine_full, extra_head)) + engine_full = engine_full.substr(strlen(extra_head)); + } + + res_columns[res_index++]->insert(engine_full); + } + + if (columns_mask[src_index++]) + { + String as_select; + if (ast_create && ast_create->select) + as_select = format({context, *ast_create->select}); + res_columns[res_index++]->insert(as_select); + } + } + else + src_index += 3; + + ASTPtr expression_ptr; + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) + res_columns[res_index++]->insert(format({context, *expression_ptr})); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto policy = table ? 
table->getStoragePolicy() : nullptr; + if (policy) + res_columns[res_index++]->insert(policy->getName()); + else + res_columns[res_index++]->insertDefault(); + } + + auto settings = context->getSettingsRef(); + settings.select_sequential_consistency = 0; + if (columns_mask[src_index++]) + { + auto total_rows = table ? table->totalRows(settings) : std::nullopt; + if (total_rows) + res_columns[res_index++]->insert(*total_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes = table->totalBytes(settings); + if (total_bytes) + res_columns[res_index++]->insert(*total_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); + if (total_bytes_uncompressed) + res_columns[res_index++]->insert(*total_bytes_uncompressed); + else + res_columns[res_index++]->insertDefault(); + } + + auto table_merge_tree = std::dynamic_pointer_cast(table); + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table_merge_tree) + { + res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); + } + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_rows = table ? table->lifetimeRows() : std::nullopt; + if (lifetime_rows) + res_columns[res_index++]->insert(*lifetime_rows); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + auto lifetime_bytes = table ? 
table->lifetimeBytes() : std::nullopt; + if (lifetime_bytes) + res_columns[res_index++]->insert(*lifetime_bytes); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (metadata_snapshot) + res_columns[res_index++]->insert(metadata_snapshot->comment); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index++]) + { + if (table) + res_columns[res_index++]->insert(table->storesDataOnDisk()); + else + res_columns[res_index++]->insertDefault(); + } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) + { + auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); + auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); + + Array dependencies_databases; + Array dependencies_tables; + dependencies_databases.reserve(dependencies.size()); + dependencies_tables.reserve(dependencies.size()); + for (const auto & dependency : dependencies) + { + dependencies_databases.push_back(dependency.database_name); + dependencies_tables.push_back(dependency.table_name); + } + + Array dependents_databases; + Array dependents_tables; + dependents_databases.reserve(dependents.size()); + dependents_tables.reserve(dependents.size()); + for (const auto & dependent : dependents) + { + dependents_databases.push_back(dependent.database_name); + dependents_tables.push_back(dependent.table_name); + } + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependencies_tables); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(dependents_tables); + + } + } + } + + UInt64 num_rows = res_columns.at(0)->size(); + return Chunk(std::move(res_columns), num_rows); + } +private: + std::vector columns_mask; + UInt64 max_block_size; + ColumnPtr databases; + NameSet tables; + size_t database_idx = 0; + DatabaseTablesIteratorPtr tables_it; + ContextPtr context; + bool done = false; + DatabasePtr database; + std::string database_name; +}; + +} + +class ReadFromSystemTables : public SourceStepWithFilter +{ +public: + std::string getName() const override { return "ReadFromSystemTables"; } + void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; + + ReadFromSystemTables( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + Block sample_block, + std::vector columns_mask_, + size_t max_block_size_) + : SourceStepWithFilter( + DataStream{.header = std::move(sample_block)}, + column_names_, + query_info_, + storage_snapshot_, + context_) + , columns_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + { + } + + void applyFilters(ActionDAGNodes added_filter_nodes) override; + +private: + std::vector columns_mask; + size_t max_block_size; + + ColumnPtr filtered_databases_column; + ColumnPtr filtered_tables_column; +}; + void StorageSystemTables::read( QueryPlan & query_plan, const Names & column_names, @@ -126,4 +747,24 @@ void StorageSystemTables::read( query_plan.addStep(std::move(reading)); } + +void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) +{ + 
SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); + + const ActionsDAG::Node * predicate = nullptr; + if (filter_actions_dag) + predicate = filter_actions_dag->getOutputs().at(0); + + filtered_databases_column = getFilteredDatabases(predicate, context); + filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); +} + +void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + Pipe pipe(std::make_shared( + std::move(columns_mask), getOutputStream().header, max_block_size, std::move(filtered_databases_column), std::move(filtered_tables_column), context)); + pipeline.init(std::move(pipe)); +} + } diff --git a/src/Storages/System/TablesBlockSource.cpp b/src/Storages/System/TablesBlockSource.cpp deleted file mode 100644 index d5481856f1b..00000000000 --- a/src/Storages/System/TablesBlockSource.cpp +++ /dev/null @@ -1,564 +0,0 @@ -#include "TablesBlockSource.h" - -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace -{ - -/// Avoid heavy operation on tables if we only queried columns that we can get without table object. -/// Otherwise it will require table initialization for Lazy database. -bool needTable(const DatabasePtr & database, const Block & header) -{ - if (database->getEngineName() != "Lazy") - return true; - - static const std::set columns_without_table = {"database", "name", "uuid", "metadata_modification_time"}; - for (const auto & column : header.getColumnsWithTypeAndName()) - { - if (columns_without_table.find(column.name) == columns_without_table.end()) - return true; - } - return false; -} - -} - -Chunk TablesBlockSource::generate() -{ - if (done) - return {}; - - MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - size_t rows_count = 0; - while (rows_count < max_block_size) - { - if (tables_it && !tables_it->isValid()) - ++database_idx; - - while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - { - /// Database was deleted just now or the user has no access. - ++database_idx; - continue; - } - - break; - } - - /// This is for temporary tables. They are output in single block regardless to max_block_size. 
- if (database_idx >= databases->size()) - { - if (context->hasSessionContext()) - { - Tables external_tables = context->getSessionContext()->getExternalTables(); - - for (auto & table : external_tables) - { - size_t src_index = 0; - size_t res_index = 0; - - // database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // name - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.first); - - // uuid - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getStorageID().uuid); - - // engine - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - // is_temporary - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(1u); - - // data_paths - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_path - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_modification_time - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // metadata_version - // Temporary tables does not support replication - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_database - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // dependencies_table - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // create_table_query - if (columns_mask[src_index++]) - { - auto temp_db = DatabaseCatalog::instance().getDatabaseForTemporaryTables(); - ASTPtr ast - = temp_db ? temp_db->tryGetCreateTableQuery(table.second->getStorageID().getTableName(), context) : nullptr; - res_columns[res_index++]->insert(ast ? format({context, *ast}) : ""); - } - - // engine_full - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table.second->getName()); - - const auto & settings = context->getSettingsRef(); - while (src_index < columns_mask.size()) - { - // total_rows - if (src_index == 19 && columns_mask[src_index]) - { - if (auto total_rows = table.second->totalRows(settings)) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - // total_bytes - else if (src_index == 20 && columns_mask[src_index]) - { - if (auto total_bytes = table.second->totalBytes(settings)) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - /// Fill the rest columns with defaults - else if (columns_mask[src_index]) - res_columns[res_index++]->insertDefault(); - src_index++; - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - done = true; - return Chunk(std::move(res_columns), num_rows); - } - - const bool need_to_check_access_for_tables - = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!tables_it || !tables_it->isValid()) - tables_it = database->getTablesIterator(context); - - const bool need_table = needTable(database, getPort().getHeader()); - - for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) - { - auto table_name = tables_it->name(); - if (!tables.contains(table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) - continue; - - StoragePtr table = nullptr; - TableLockHolder lock; - if (need_table) - { - table = tables_it->table(); - if (!table) - // Table might have just been removed or detached for Lazy engine (see 
DatabaseLazy::tryGetTable()) - continue; - - /// The only column that requires us to hold a shared lock is data_paths as rename might alter them (on ordinary tables) - /// and it's not protected internally by other mutexes - static const size_t DATA_PATHS_INDEX = 5; - if (columns_mask[DATA_PATHS_INDEX]) - { - lock = table->tryLockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); - if (!lock) - // Table was dropped while acquiring the lock, skipping table - continue; - } - } - ++rows_count; - - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(table_name); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(tables_it->uuid()); - - if (columns_mask[src_index++]) - { - chassert(table != nullptr); - res_columns[res_index++]->insert(table->getName()); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(0u); // is_temporary - - if (columns_mask[src_index++]) - { - chassert(lock != nullptr); - Array table_paths_array; - auto paths = table->getDataPaths(); - table_paths_array.reserve(paths.size()); - for (const String & path : paths) - table_paths_array.push_back(path); - res_columns[res_index++]->insert(table_paths_array); - /// We don't need the lock anymore - lock = nullptr; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(database->getObjectMetadataPath(table_name)); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(static_cast(database->getObjectMetadataModificationTime(table_name))); - - StorageMetadataPtr metadata_snapshot; - if (table) - metadata_snapshot = table->getInMemoryMetadataPtr(); - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && table->supportsReplication()) - res_columns[res_index++]->insert(metadata_snapshot->metadata_version); - else - res_columns[res_index++]->insertDefault(); - } - - { - Array views_table_name_array; - Array views_database_name_array; - if (columns_mask[src_index] || columns_mask[src_index + 1]) - { - const auto view_ids = DatabaseCatalog::instance().getDependentViews(StorageID(database_name, table_name)); - - views_table_name_array.reserve(view_ids.size()); - views_database_name_array.reserve(view_ids.size()); - for (const auto & view_id : view_ids) - { - views_table_name_array.push_back(view_id.table_name); - views_database_name_array.push_back(view_id.database_name); - } - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_database_name_array); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(views_table_name_array); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2]) - { - ASTPtr ast = database->tryGetCreateTableQuery(table_name, context); - auto * ast_create = ast ? ast->as() : nullptr; - - if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) - { - ast_create->uuid = UUIDHelpers::Nil; - ast_create->to_inner_uuid = UUIDHelpers::Nil; - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(ast ? 
format({context, *ast}) : ""); - - if (columns_mask[src_index++]) - { - String engine_full; - - if (ast_create && ast_create->storage) - { - engine_full = format({context, *ast_create->storage}); - - static const char * const extra_head = " ENGINE = "; - if (startsWith(engine_full, extra_head)) - engine_full = engine_full.substr(strlen(extra_head)); - } - - res_columns[res_index++]->insert(engine_full); - } - - if (columns_mask[src_index++]) - { - String as_select; - if (ast_create && ast_create->select) - as_select = format({context, *ast_create->select}); - res_columns[res_index++]->insert(as_select); - } - } - else - src_index += 3; - - ASTPtr expression_ptr; - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPrimaryKey().expression_list_ast)) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSamplingKeyAST())) - res_columns[res_index++]->insert(format({context, *expression_ptr})); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto policy = table ? table->getStoragePolicy() : nullptr; - if (policy) - res_columns[res_index++]->insert(policy->getName()); - else - res_columns[res_index++]->insertDefault(); - } - - auto settings = context->getSettingsRef(); - settings.select_sequential_consistency = 0; - if (columns_mask[src_index++]) - { - auto total_rows = table ? table->totalRows(settings) : std::nullopt; - if (total_rows) - res_columns[res_index++]->insert(*total_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes = table->totalBytes(settings); - if (total_bytes) - res_columns[res_index++]->insert(*total_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto total_bytes_uncompressed = table->totalBytesUncompressed(settings); - if (total_bytes_uncompressed) - res_columns[res_index++]->insert(*total_bytes_uncompressed); - else - res_columns[res_index++]->insertDefault(); - } - - auto table_merge_tree = std::dynamic_pointer_cast(table); - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getAllPartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - res_columns[res_index++]->insert(table_merge_tree->getActivePartsCount()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table_merge_tree) - { - res_columns[res_index++]->insert(table_merge_tree->getTotalMarksCount()); - } - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_rows = table ? 
table->lifetimeRows() : std::nullopt; - if (lifetime_rows) - res_columns[res_index++]->insert(*lifetime_rows); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - auto lifetime_bytes = table ? table->lifetimeBytes() : std::nullopt; - if (lifetime_bytes) - res_columns[res_index++]->insert(*lifetime_bytes); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (metadata_snapshot) - res_columns[res_index++]->insert(metadata_snapshot->comment); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index++]) - { - if (table) - res_columns[res_index++]->insert(table->storesDataOnDisk()); - else - res_columns[res_index++]->insertDefault(); - } - - if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) - { - auto dependencies = DatabaseCatalog::instance().getLoadingDependencies(StorageID{database_name, table_name}); - auto dependents = DatabaseCatalog::instance().getLoadingDependents(StorageID{database_name, table_name}); - - Array dependencies_databases; - Array dependencies_tables; - dependencies_databases.reserve(dependencies.size()); - dependencies_tables.reserve(dependencies.size()); - for (const auto & dependency : dependencies) - { - dependencies_databases.push_back(dependency.database_name); - dependencies_tables.push_back(dependency.table_name); - } - - Array dependents_databases; - Array dependents_tables; - dependents_databases.reserve(dependents.size()); - dependents_tables.reserve(dependents.size()); - for (const auto & dependent : dependents) - { - dependents_databases.push_back(dependent.database_name); - dependents_tables.push_back(dependent.table_name); - } - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependencies_tables); - - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_databases); - if (columns_mask[src_index++]) - res_columns[res_index++]->insert(dependents_tables); - } - } - } - - UInt64 num_rows = res_columns.at(0)->size(); - return Chunk(std::move(res_columns), num_rows); -} - -Chunk DetachedTablesBlockSource::generate() -{ - if (done) - return {}; - - MutableColumns result_columns = getPort().getHeader().cloneEmptyColumns(); - - const auto access = context->getAccess(); - const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) - { - database_name = databases->getDataAt(database_idx).toString(); - database = DatabaseCatalog::instance().tryGetDatabase(database_name); - - if (!database) - continue; - - const bool need_to_check_access_for_tables - = need_to_check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); - - if (!detached_tables_it || !detached_tables_it->isValid()) - detached_tables_it = database->getDetachedTablesIterator(context, {}, false); - - for (; rows_count < max_block_size && detached_tables_it->isValid(); detached_tables_it->next()) - { - const auto detached_table_name = detached_tables_it->table(); - - if (!detached_tables.contains(detached_table_name)) - continue; - - if (need_to_check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, detached_table_name)) - continue; - - 
fillResultColumnsByDetachedTableIterator(result_columns); - ++rows_count; - } - } - - const UInt64 num_rows = result_columns.at(0)->size(); - done = true; - return Chunk(std::move(result_columns), num_rows); -} - -void DetachedTablesBlockSource::fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const -{ - size_t src_index = 0; - size_t res_index = 0; - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->database()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->table()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->uuid()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->metadataPath()); - - if (columns_mask[src_index++]) - result_columns[res_index++]->insert(detached_tables_it->isPermanently()); -} -} diff --git a/src/Storages/System/TablesBlockSource.h b/src/Storages/System/TablesBlockSource.h deleted file mode 100644 index a9f2226dca8..00000000000 --- a/src/Storages/System/TablesBlockSource.h +++ /dev/null @@ -1,92 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace DB -{ - -class TablesBlockSource : public ISource -{ -public: - TablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = tables_->size(); - tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - tables.insert(tables_->getDataAt(idx).toString()); - } - - String getName() const override { return "Tables"; } - -protected: - Chunk generate() override; - -private: - std::vector columns_mask; - UInt64 max_block_size; - ColumnPtr databases; - NameSet tables; - size_t database_idx = 0; - DatabaseTablesIteratorPtr tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; -}; - -class DetachedTablesBlockSource : public ISource -{ -public: - DetachedTablesBlockSource( - std::vector columns_mask_, - Block header, - UInt64 max_block_size_, - ColumnPtr databases_, - ColumnPtr detached_tables_, - ContextPtr context_) - : ISource(std::move(header)) - , columns_mask(std::move(columns_mask_)) - , max_block_size(max_block_size_) - , databases(std::move(databases_)) - , context(Context::createCopy(context_)) - { - size_t size = detached_tables_->size(); - detached_tables.reserve(size); - for (size_t idx = 0; idx < size; ++idx) - { - detached_tables.insert(detached_tables_->getDataAt(idx).toString()); - } - } - - String getName() const override { return "DetachedTables"; } - -protected: - Chunk generate() override; - -private: - const std::vector columns_mask; - const UInt64 max_block_size; - const ColumnPtr databases; - NameSet detached_tables; - DatabaseDetachedTablesSnapshotIteratorPtr detached_tables_it; - ContextPtr context; - bool done = false; - DatabasePtr database; - std::string database_name; - - void fillResultColumnsByDetachedTableIterator(MutableColumns & result_columns) const; -}; -} From 51d293beb9dbbe915c6bedb92613833db0240450 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Fri, 28 Jun 2024 15:29:28 +0000 Subject: [PATCH 14/80] general logic in detail --- .../System/StorageSystemDetachedTables.cpp | 68 +----- 
src/Storages/System/StorageSystemTables.cpp | 230 ++++++++++-------- src/Storages/System/StorageSystemTables.h | 9 + 3 files changed, 139 insertions(+), 168 deletions(-) diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp index 9805fa2a8cf..560056b891a 100644 --- a/src/Storages/System/StorageSystemDetachedTables.cpp +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -25,69 +26,6 @@ namespace DB namespace { -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) -{ - Block sample{ - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - auto table_it = database->getDetachedTablesIterator(context, {}, false); - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->table()); - } - } - - Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - class DetachedTablesBlockSource : public ISource { public: @@ -280,8 +218,8 @@ void ReadFromSystemDetachedTables::applyFilters(ActionDAGNodes added_filter_node if (filter_actions_dag) predicate = filter_actions_dag->getOutputs().at(0); - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); + filtered_databases_column = detail::getFilteredDatabases(predicate, context); + filtered_tables_column = detail::getFilteredTables(predicate, filtered_databases_column, context, true); } void ReadFromSystemDetachedTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) diff --git a/src/Storages/System/StorageSystemTables.cpp 
b/src/Storages/System/StorageSystemTables.cpp index 783b899c978..dd7f1a6fdc8 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -1,29 +1,30 @@ +#include + +#include #include -#include +#include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include #include #include #include #include #include -#include +#include +#include +#include +#include +#include +#include #include @@ -31,6 +32,117 @@ namespace DB { +namespace +{ + +/// Avoid heavy operation on tables if we only queried columns that we can get without table object. +/// Otherwise it will require table initialization for Lazy database. +bool needTable(const DatabasePtr & database, const Block & header) +{ + if (database->getEngineName() != "Lazy") + return true; + + static const std::set columns_without_table = {"database", "name", "uuid", "metadata_modification_time"}; + for (const auto & column : header.getColumnsWithTypeAndName()) + { + if (columns_without_table.find(column.name) == columns_without_table.end()) + return true; + } + return false; +} + +template +void fillColumns(T table_it, MutableColumnPtr & database_column, MutableColumnPtr & engine_column) +{ + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->name()); + if (engine_column) + engine_column->insert(table_it->table()->getName()); + } +} + +} + +namespace detail +{ +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) +{ + MutableColumnPtr column = ColumnString::create(); + + const auto databases = DatabaseCatalog::instance().getDatabases(); + for (const auto & database_name : databases | boost::adaptors::map_keys) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; /// We don't want to show the internal database for temporary tables in system.tables + + column->insert(database_name); + } + + Block block{ColumnWithTypeAndName(std::move(column), std::make_shared(), "database")}; + VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + return block.getByPosition(0).column; +} + +ColumnPtr getFilteredTables( + const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, const bool is_detached) +{ + Block sample{ + ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), + ColumnWithTypeAndName(nullptr, std::make_shared(), "engine")}; + + MutableColumnPtr database_column = ColumnString::create(); + MutableColumnPtr engine_column; + + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); + if (dag) + { + bool filter_by_engine = false; + for (const auto * input : dag->getInputs()) + if (input->result_name == "engine") + filter_by_engine = true; + + if (filter_by_engine) + engine_column = ColumnString::create(); + } + + for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) + { + const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); + DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (!database) + continue; + + if (is_detached) + { + auto table_it = database->getDetachedTablesIterator(context, {}, false); + for (; table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->table()); + } + } + else + { + for (auto table_it = 
database->getTablesIterator(context); table_it->isValid(); table_it->next()) + { + database_column->insert(table_it->name()); + if (engine_column) + engine_column->insert(table_it->table()->getName()); + } + } + } + + Block block{ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; + if (engine_column) + block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); + + if (dag) + VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + + return block.getByPosition(0).column; +} + +} StorageSystemTables::StorageSystemTables(const StorageID & table_id_) : IStorage(table_id_) @@ -105,92 +217,6 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) setInMemoryMetadata(storage_metadata); } - -namespace -{ - -ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context) -{ - MutableColumnPtr column = ColumnString::create(); - - const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & database_name : databases | boost::adaptors::map_keys) - { - if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; /// We don't want to show the internal database for temporary tables in system.tables - - column->insert(database_name); - } - - Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); - return block.getByPosition(0).column; -} - -ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context) -{ - Block sample { - ColumnWithTypeAndName(nullptr, std::make_shared(), "name"), - ColumnWithTypeAndName(nullptr, std::make_shared(), "engine") - }; - - MutableColumnPtr database_column = ColumnString::create(); - MutableColumnPtr engine_column; - - auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(predicate, &sample); - if (dag) - { - bool filter_by_engine = false; - for (const auto * input : dag->getInputs()) - if (input->result_name == "engine") - filter_by_engine = true; - - if (filter_by_engine) - engine_column = ColumnString::create(); - } - - for (size_t database_idx = 0; database_idx < filtered_databases_column->size(); ++database_idx) - { - const auto & database_name = filtered_databases_column->getDataAt(database_idx).toString(); - DatabasePtr database = DatabaseCatalog::instance().tryGetDatabase(database_name); - if (!database) - continue; - - for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } - } - - Block block {ColumnWithTypeAndName(std::move(database_column), std::make_shared(), "name")}; - if (engine_column) - block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); - - if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); - - return block.getByPosition(0).column; -} - -/// Avoid heavy operation on tables if we only queried columns that we can get without table object. -/// Otherwise it will require table initialization for Lazy database. 
-bool needTable(const DatabasePtr & database, const Block & header) -{ - if (database->getEngineName() != "Lazy") - return true; - - static const std::set columns_without_table = { "database", "name", "uuid", "metadata_modification_time" }; - for (const auto & column : header.getColumnsWithTypeAndName()) - { - if (columns_without_table.find(column.name) == columns_without_table.end()) - return true; - } - return false; -} - - class TablesBlockSource : public ISource { public: @@ -690,8 +716,6 @@ private: std::string database_name; }; -} - class ReadFromSystemTables : public SourceStepWithFilter { public: @@ -756,8 +780,8 @@ void ReadFromSystemTables::applyFilters(ActionDAGNodes added_filter_nodes) if (filter_actions_dag) predicate = filter_actions_dag->getOutputs().at(0); - filtered_databases_column = getFilteredDatabases(predicate, context); - filtered_tables_column = getFilteredTables(predicate, filtered_databases_column, context); + filtered_databases_column = detail::getFilteredDatabases(predicate, context); + filtered_tables_column = detail::getFilteredTables(predicate, filtered_databases_column, context, false); } void ReadFromSystemTables::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) diff --git a/src/Storages/System/StorageSystemTables.h b/src/Storages/System/StorageSystemTables.h index 87cdf1b6a8e..d6e3996b8e3 100644 --- a/src/Storages/System/StorageSystemTables.h +++ b/src/Storages/System/StorageSystemTables.h @@ -8,6 +8,15 @@ namespace DB class Context; +namespace detail +{ + +ColumnPtr getFilteredDatabases(const ActionsDAG::Node * predicate, ContextPtr context); +ColumnPtr +getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr & filtered_databases_column, ContextPtr context, bool is_detached); + +} + /** Implements the system table `tables`, which allows you to get information about all tables. */ From 488767e601e95cdf81b30b34c70e4e4ba18510e0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 02:38:42 +0200 Subject: [PATCH 15/80] Uninteresting changes --- programs/local/LocalServer.cpp | 1 + src/Client/LocalConnection.h | 1 + src/Interpreters/Squashing.cpp | 96 ++++++++++++++++++---------------- src/Interpreters/Squashing.h | 10 +--- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index b33e1595056..310c366b023 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -422,6 +422,7 @@ void LocalServer::connect() { connection_parameters = ConnectionParameters(getClientConfiguration(), "localhost"); + /// This is needed for table function input(...). 
ReadBuffer * in; auto table_file = getClientConfiguration().getString("table-file", "-"); if (table_file == "-" || table_file == "stdin") diff --git a/src/Client/LocalConnection.h b/src/Client/LocalConnection.h index fb6fa1b55eb..b424c5b5aa3 100644 --- a/src/Client/LocalConnection.h +++ b/src/Client/LocalConnection.h @@ -175,4 +175,5 @@ private: ReadBuffer * in; }; + } diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index f8b6a6542cc..858debacf0c 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -5,11 +5,60 @@ namespace DB { + namespace ErrorCodes { extern const int LOGICAL_ERROR; } +namespace +{ + +Chunk squashImpl(std::vector & input_chunks) +{ + Chunk accumulated_chunk; + std::vector mutable_columns = {}; + size_t rows = 0; + for (const Chunk & chunk : input_chunks) + rows += chunk.getNumRows(); + + { + auto & first_chunk = input_chunks[0]; + Columns columns = first_chunk.detachColumns(); + for (auto & column : columns) + { + mutable_columns.push_back(IColumn::mutate(std::move(column))); + mutable_columns.back()->reserve(rows); + } + } + + for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above + { + Columns columns = input_chunks[i].detachColumns(); + for (size_t j = 0, size = mutable_columns.size(); j < size; ++j) + { + const auto source_column = columns[j]; + + mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size()); + } + } + accumulated_chunk.setColumns(std::move(mutable_columns), rows); + return accumulated_chunk; +} + +const ChunksToSquash * getInfoFromChunk(const Chunk & chunk) +{ + const auto & info = chunk.getChunkInfo(); + const auto * agg_info = typeid_cast(info.get()); + + if (!agg_info) + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no ChunksToSquash in ChunkInfoPtr"); + + return agg_info; +} + +} + Squashing::Squashing(Block header_, size_t min_block_size_rows_, size_t min_block_size_bytes_) : header(header_) , min_block_size_rows(min_block_size_rows_) @@ -27,8 +76,8 @@ Chunk Squashing::squash(Chunk && input_chunk) if (!input_chunk.hasChunkInfo()) return Chunk(); - const auto *info = getInfoFromChunk(input_chunk); - return squash(info->chunks); + const auto * info = getInfoFromChunk(input_chunk); + return squashImpl(info->chunks); } Chunk Squashing::add(Chunk && input_chunk) @@ -95,49 +144,6 @@ Chunk Squashing::convertToChunk(std::vector && chunks) const return Chunk(header.cloneEmptyColumns(), 0, info); } -Chunk Squashing::squash(std::vector & input_chunks) -{ - Chunk accumulated_chunk; - std::vector mutable_columns = {}; - size_t rows = 0; - for (const Chunk & chunk : input_chunks) - rows += chunk.getNumRows(); - - { - auto & first_chunk = input_chunks[0]; - Columns columns = first_chunk.detachColumns(); - for (auto & column : columns) - { - mutable_columns.push_back(IColumn::mutate(std::move(column))); - mutable_columns.back()->reserve(rows); - } - } - - for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above - { - Columns columns = input_chunks[i].detachColumns(); - for (size_t j = 0, size = mutable_columns.size(); j < size; ++j) - { - const auto source_column = columns[j]; - - mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size()); - } - } - accumulated_chunk.setColumns(std::move(mutable_columns), rows); - return accumulated_chunk; -} - -const ChunksToSquash* Squashing::getInfoFromChunk(const Chunk & chunk) -{ - const auto& info = chunk.getChunkInfo(); - const auto * 
agg_info = typeid_cast(info.get()); - - if (!agg_info) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no ChunksToSquash in ChunkInfoPtr"); - - return agg_info; -} - void Squashing::expandCurrentSize(size_t rows, size_t bytes) { accumulated_size.rows += rows; diff --git a/src/Interpreters/Squashing.h b/src/Interpreters/Squashing.h index d76cca60e41..bb4cd7bed14 100644 --- a/src/Interpreters/Squashing.h +++ b/src/Interpreters/Squashing.h @@ -36,12 +36,8 @@ public: static Chunk squash(Chunk && input_chunk); Chunk flush(); - bool isDataLeft() - { - return !chunks_to_merge_vec.empty(); - } - Block header; + private: struct CurrentSize { @@ -55,10 +51,6 @@ private: CurrentSize accumulated_size; - static const ChunksToSquash * getInfoFromChunk(const Chunk & chunk); - - static Chunk squash(std::vector & input_chunks); - void expandCurrentSize(size_t rows, size_t bytes); void changeCurrentSize(size_t rows, size_t bytes); bool isEnoughSize(size_t rows, size_t bytes) const; From f76bd8ed776bc34829a7ff2de394936dc6035d01 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Tue, 2 Jul 2024 14:58:04 +0000 Subject: [PATCH 16/80] apply some comments --- src/Databases/DatabaseAtomic.cpp | 6 ------ src/Databases/DatabaseAtomic.h | 3 --- src/Databases/DatabaseOnDisk.cpp | 9 ++++++--- src/Databases/TablesLoader.cpp | 10 +++++----- src/Storages/System/StorageSystemTables.cpp | 12 ------------ .../0_stateless/03172_system_detached_tables.sql | 2 +- 6 files changed, 12 insertions(+), 30 deletions(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index ddc30627cde..b8d7315fc0f 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -437,12 +437,6 @@ DatabaseAtomic::getTablesIterator(ContextPtr local_context, const IDatabase::Fil return std::make_unique(std::move(typeid_cast(*base_iter))); } -DatabaseDetachedTablesSnapshotIteratorPtr DatabaseAtomic::getDetachedTablesIterator( - ContextPtr local_context, const IDatabase::FilterByNameFunction & filter_by_table_name, const bool skip_not_loaded) const -{ - return DatabaseOrdinary::getDetachedTablesIterator(local_context, filter_by_table_name, skip_not_loaded); -} - UUID DatabaseAtomic::tryGetTableUUID(const String & table_name) const { if (auto table = tryGetTable(table_name, getContext())) diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 25c5cdfbde1..4a4ccfa2573 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -49,9 +49,6 @@ public: DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override; - DatabaseDetachedTablesSnapshotIteratorPtr - getDetachedTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name, bool skip_not_loaded) const override; - void beforeLoadingMetadata(ContextMutablePtr context, LoadingStrictnessLevel mode) override; LoadTaskPtr startupDatabaseAsync(AsyncLoader & async_loader, LoadJobSet startup_after, LoadingStrictnessLevel mode) override; diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 67c9114e793..61de0727ba6 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -22,8 +22,8 @@ #include #include #include -#include "Common/Exception.h" #include +#include #include #include #include @@ -310,11 +310,14 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri 
FS::createFile(detached_permanently_flag); std::lock_guard lock(mutex); - if (!snapshot_detached_tables.contains(table_name)) + if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); } - snapshot_detached_tables[table_name].is_permanently = true; + else + { + it->second.is_permanently = true; + } } catch (Exception & e) { diff --git a/src/Databases/TablesLoader.cpp b/src/Databases/TablesLoader.cpp index 1f70f46da60..6aa13b7b759 100644 --- a/src/Databases/TablesLoader.cpp +++ b/src/Databases/TablesLoader.cpp @@ -1,14 +1,14 @@ -#include +#include +#include #include #include -#include -#include -#include #include +#include #include #include -#include #include +#include +#include namespace DB diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index dd7f1a6fdc8..43b761d84b1 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -50,18 +50,6 @@ bool needTable(const DatabasePtr & database, const Block & header) } return false; } - -template -void fillColumns(T table_it, MutableColumnPtr & database_column, MutableColumnPtr & engine_column) -{ - for (; table_it->isValid(); table_it->next()) - { - database_column->insert(table_it->name()); - if (engine_column) - engine_column->insert(table_it->table()->getName()); - } -} - } namespace detail diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql index 916821d996b..64f30a9d275 100644 --- a/tests/queries/0_stateless/03172_system_detached_tables.sql +++ b/tests/queries/0_stateless/03172_system_detached_tables.sql @@ -2,7 +2,7 @@ SELECT 'database atomic tests'; DROP DATABASE IF EXISTS test03172_system_detached_tables; -CREATE DATABASE IF NOT EXISTS test03172_system_detached_tables; +CREATE DATABASE IF NOT EXISTS test03172_system_detached_tables ENGINE=Atomic; CREATE TABLE test03172_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n; SELECT * FROM system.detached_tables WHERE database='test03172_system_detached_tables'; From 192ada5ed93f93c17f26671336c6c62e53a1b669 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Wed, 3 Jul 2024 08:37:50 +0000 Subject: [PATCH 17/80] apply comments --- src/Databases/DatabaseAtomic.cpp | 1 - src/Storages/System/StorageSystemDetachedTables.cpp | 13 +++++++++---- .../03172_system_detached_tables.reference | 4 ++-- .../0_stateless/03172_system_detached_tables.sql | 4 ++-- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index b8d7315fc0f..65672f0583c 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -103,7 +103,6 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, auto table_id = table->getStorageID(); assertDetachedTableNotInUse(table_id.uuid); DatabaseOrdinary::attachTableUnlocked(name, table); - detached_tables.erase(table_id.uuid); table_name_to_path.emplace(std::make_pair(name, relative_table_path)); } diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp index 560056b891a..158edb7818f 100644 --- a/src/Storages/System/StorageSystemDetachedTables.cpp +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -31,12 +31,12 @@ class 
DetachedTablesBlockSource : public ISource public: DetachedTablesBlockSource( std::vector columns_mask_, - Block header, + Block header_, UInt64 max_block_size_, ColumnPtr databases_, ColumnPtr detached_tables_, ContextPtr context_) - : ISource(std::move(header)) + : ISource(std::move(header_)) , columns_mask(std::move(columns_mask_)) , max_block_size(max_block_size_) , databases(std::move(databases_)) @@ -63,7 +63,9 @@ protected: const auto access = context->getAccess(); const bool need_to_check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - for (size_t database_idx = 0, rows_count = 0; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) + size_t database_idx = 0; + size_t rows_count = 0; + for (; database_idx < databases->size() && rows_count < max_block_size; ++database_idx) { database_name = databases->getDataAt(database_idx).toString(); database = DatabaseCatalog::instance().tryGetDatabase(database_name); @@ -92,8 +94,11 @@ protected: } } + if (databases->size() == database_idx && max_block_size != rows_count) + { + done = true; + } const UInt64 num_rows = result_columns.at(0)->size(); - done = true; return Chunk(std::move(result_columns), num_rows); } diff --git a/tests/queries/0_stateless/03172_system_detached_tables.reference b/tests/queries/0_stateless/03172_system_detached_tables.reference index ce19daa4cdc..83d1ff13942 100644 --- a/tests/queries/0_stateless/03172_system_detached_tables.reference +++ b/tests/queries/0_stateless/03172_system_detached_tables.reference @@ -6,6 +6,6 @@ test03172_system_detached_tables test_table_perm 1 test03172_system_detached_tables test_table 0 ----------------------- database lazy tests -test03172_system_detached_tables_lazy test_table 0 -test03172_system_detached_tables_lazy test_table_perm 1 +before attach test03172_system_detached_tables_lazy test_table 0 +before attach test03172_system_detached_tables_lazy test_table_perm 1 DROP TABLE diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql index 64f30a9d275..1a3c2d7cc0f 100644 --- a/tests/queries/0_stateless/03172_system_detached_tables.sql +++ b/tests/queries/0_stateless/03172_system_detached_tables.sql @@ -39,12 +39,12 @@ CREATE TABLE test03172_system_detached_tables_lazy.test_table_perm (number UInt6 INSERT INTO test03172_system_detached_tables_lazy.test_table_perm SELECT * FROM numbers(100); DETACH table test03172_system_detached_tables_lazy.test_table_perm PERMANENTLY; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; +SELECT 'before attach', database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; ATTACH TABLE test03172_system_detached_tables_lazy.test_table; ATTACH TABLE test03172_system_detached_tables_lazy.test_table_perm; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; +SELECT 'after attach', database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; SELECT 'DROP TABLE'; DROP TABLE test03172_system_detached_tables_lazy.test_table SYNC; From 7616f58a0bed80525385879ac3a2a4dcdf2707f4 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Wed, 3 Jul 2024 11:36:04 +0000 Subject: [PATCH 18/80] add tests for another db engines --- .../configs/remote_servers.xml | 13 ++++ 
.../test_system_detached_tables/test.py | 69 +++++++++++++------ 2 files changed, 61 insertions(+), 21 deletions(-) create mode 100644 tests/integration/test_system_detached_tables/configs/remote_servers.xml diff --git a/tests/integration/test_system_detached_tables/configs/remote_servers.xml b/tests/integration/test_system_detached_tables/configs/remote_servers.xml new file mode 100644 index 00000000000..8b579724282 --- /dev/null +++ b/tests/integration/test_system_detached_tables/configs/remote_servers.xml @@ -0,0 +1,13 @@ + + + + + true + + replica1 + 9000 + + + + + \ No newline at end of file diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py index dbccac92523..f2bb5d71e85 100644 --- a/tests/integration/test_system_detached_tables/test.py +++ b/tests/integration/test_system_detached_tables/test.py @@ -2,8 +2,12 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node_default", stay_alive=True) - +node = cluster.add_instance("replica1", + with_zookeeper=True, + main_configs=["configs/remote_servers.xml"], + macros={"replica": "replica1"}, + stay_alive=True, +) @pytest.fixture(scope="module", autouse=True) def start_cluster(): @@ -13,10 +17,25 @@ def start_cluster(): finally: cluster.shutdown() +@pytest.mark.parametrize( + "db_name,db_engine,table_engine,table_engine_params", + [ + pytest.param( + "test_db_atomic", "Atomic", "MergeTree", "ORDER BY n", id="Atomic db with MergeTree table" + ), + pytest.param( + "test_db_lazy", "Lazy(60)", "Log", "", id="Lazy db with Log table" + ), + pytest.param( + "test_db_repl", "Replicated('/clickhouse/tables/test_table', 'shard1', 'replica1')", "ReplicatedMergeTree", "ORDER BY n", id="Replicated db with ReplicatedMergeTree table" + ), + ], +) +def test_system_detached_tables(start_cluster, db_name, db_engine, table_engine, table_engine_params): + node.query(f"CREATE DATABASE IF NOT EXISTS {db_name} ENGINE={db_engine};") -def test_system_detached_tables(): - node.query("CREATE TABLE test_table (n Int64) ENGINE=MergeTree ORDER BY n;") - node.query("CREATE TABLE test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n;") + node.query(f"CREATE TABLE {db_name}.test_table (n Int64) ENGINE={table_engine} {table_engine_params};") + node.query(f"CREATE TABLE {db_name}.test_table_perm (n Int64) ENGINE={table_engine} {table_engine_params};") test_table_uuid = node.query( "SELECT uuid FROM system.tables WHERE table='test_table'" @@ -32,29 +51,37 @@ def test_system_detached_tables(): "SELECT metadata_path FROM system.tables WHERE table='test_table_perm'" ).rstrip("\n") - assert "" == node.query("SELECT * FROM system.detached_tables") + assert "" == node.query(f"SELECT * FROM system.detached_tables WHERE database='{db_name}'") - node.query("DETACH TABLE test_table") - node.query("DETACH TABLE test_table_perm PERMANENTLY") + node.query(f"SET database_replicated_always_detach_permanently=1; DETACH TABLE {db_name}.test_table") + node.query(f"DETACH TABLE {db_name}.test_table_perm PERMANENTLY") - querry = "SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables FORMAT Values" + querry = f"SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables WHERE database='{db_name}' FORMAT Values" result = node.query(querry) - assert ( - result - == 
f"('default','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" - ) + + if db_engine.startswith("Repl"): + expected_before_restart = f"('{db_name}','test_table',1,'{test_table_uuid}','{test_table_metadata_path}'),('{db_name}','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" + else: + expected_before_restart = f"('{db_name}','test_table',0,'{test_table_uuid}','{test_table_metadata_path}'),('{db_name}','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" + + assert result == expected_before_restart + + if db_engine.startswith("Lazy"): + return + node.restart_clickhouse() + if db_engine.startswith("Repl"): + expected_after_restart = expected_before_restart + else: + expected_after_restart = f"('{db_name}','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" + result = node.query(querry) - assert ( - result - == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" - ) + assert result == expected_after_restart node.restart_clickhouse() result = node.query(querry) - assert ( - result - == f"('default','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" - ) + assert result == expected_after_restart + + node.query(f"DROP DATABASE {db_name}") From 1bb1e0189207c458550ab677983e3efeebad23b1 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Wed, 3 Jul 2024 12:12:09 +0000 Subject: [PATCH 19/80] fix style --- .../test_system_detached_tables/test.py | 42 ++++++++++++++----- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py index f2bb5d71e85..dfb55dfd99b 100644 --- a/tests/integration/test_system_detached_tables/test.py +++ b/tests/integration/test_system_detached_tables/test.py @@ -2,7 +2,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("replica1", +node = cluster.add_instance( + "replica1", with_zookeeper=True, main_configs=["configs/remote_servers.xml"], macros={"replica": "replica1"}, @@ -17,25 +18,42 @@ def start_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize( "db_name,db_engine,table_engine,table_engine_params", [ pytest.param( - "test_db_atomic", "Atomic", "MergeTree", "ORDER BY n", id="Atomic db with MergeTree table" + "test_db_atomic", + "Atomic", + "MergeTree", + "ORDER BY n", + id="Atomic db with MergeTree table", ), pytest.param( - "test_db_lazy", "Lazy(60)", "Log", "", id="Lazy db with Log table" + "test_db_lazy", + "Lazy(60)", + "Log", + "", + id="Lazy db with Log table", ), pytest.param( - "test_db_repl", "Replicated('/clickhouse/tables/test_table', 'shard1', 'replica1')", "ReplicatedMergeTree", "ORDER BY n", id="Replicated db with ReplicatedMergeTree table" + "test_db_repl", + "Replicated('/clickhouse/tables/test_table','shard1', 'replica1')", + "ReplicatedMergeTree", + "ORDER BY n", + id="Replicated db with ReplicatedMergeTree table", ), ], ) def test_system_detached_tables(start_cluster, db_name, db_engine, table_engine, table_engine_params): node.query(f"CREATE DATABASE IF NOT EXISTS {db_name} ENGINE={db_engine};") - node.query(f"CREATE TABLE {db_name}.test_table (n Int64) ENGINE={table_engine} {table_engine_params};") - node.query(f"CREATE TABLE {db_name}.test_table_perm (n Int64) 
ENGINE={table_engine} {table_engine_params};") + node.query( + f"CREATE TABLE {db_name}.test_table (n Int64) ENGINE={table_engine} {table_engine_params};" + ) + node.query( + f"CREATE TABLE {db_name}.test_table_perm (n Int64) ENGINE={table_engine} {table_engine_params};" + ) test_table_uuid = node.query( "SELECT uuid FROM system.tables WHERE table='test_table'" @@ -51,14 +69,18 @@ def test_system_detached_tables(start_cluster, db_name, db_engine, table_engine, "SELECT metadata_path FROM system.tables WHERE table='test_table_perm'" ).rstrip("\n") - assert "" == node.query(f"SELECT * FROM system.detached_tables WHERE database='{db_name}'") + assert "" == node.query( + f"SELECT * FROM system.detached_tables WHERE database='{db_name}'" + ) - node.query(f"SET database_replicated_always_detach_permanently=1; DETACH TABLE {db_name}.test_table") + node.query( + f"SET database_replicated_always_detach_permanently=1; DETACH TABLE {db_name}.test_table" + ) node.query(f"DETACH TABLE {db_name}.test_table_perm PERMANENTLY") querry = f"SELECT database, table, is_permanently, uuid, metadata_path FROM system.detached_tables WHERE database='{db_name}' FORMAT Values" result = node.query(querry) - + if db_engine.startswith("Repl"): expected_before_restart = f"('{db_name}','test_table',1,'{test_table_uuid}','{test_table_metadata_path}'),('{db_name}','test_table_perm',1,'{test_table_perm_uuid}','{test_table_perm_metadata_path}')" else: @@ -68,7 +90,7 @@ def test_system_detached_tables(start_cluster, db_name, db_engine, table_engine, if db_engine.startswith("Lazy"): return - + node.restart_clickhouse() if db_engine.startswith("Repl"): From f964b0b6ab48bd92a784a5c719817e2c82fa1f4a Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Wed, 3 Jul 2024 12:22:32 +0000 Subject: [PATCH 20/80] fix style again --- tests/integration/test_system_detached_tables/test.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_system_detached_tables/test.py b/tests/integration/test_system_detached_tables/test.py index dfb55dfd99b..2eb870efcbc 100644 --- a/tests/integration/test_system_detached_tables/test.py +++ b/tests/integration/test_system_detached_tables/test.py @@ -10,6 +10,7 @@ node = cluster.add_instance( stay_alive=True, ) + @pytest.fixture(scope="module", autouse=True) def start_cluster(): try: @@ -38,14 +39,16 @@ def start_cluster(): ), pytest.param( "test_db_repl", - "Replicated('/clickhouse/tables/test_table','shard1', 'replica1')", - "ReplicatedMergeTree", + "Replicated('/clickhouse/tables/test_table','shard1', 'replica1')", + "ReplicatedMergeTree", "ORDER BY n", id="Replicated db with ReplicatedMergeTree table", ), ], ) -def test_system_detached_tables(start_cluster, db_name, db_engine, table_engine, table_engine_params): +def test_system_detached_tables( + start_cluster, db_name, db_engine, table_engine, table_engine_params +): node.query(f"CREATE DATABASE IF NOT EXISTS {db_name} ENGINE={db_engine};") node.query( From cc3e166dec6fde41c3903d486e4e18ba9b2991b2 Mon Sep 17 00:00:00 2001 From: Konstantin Morozov Date: Thu, 4 Jul 2024 12:11:15 +0000 Subject: [PATCH 21/80] apply comment --- src/Storages/System/StorageSystemDetachedTables.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemDetachedTables.cpp b/src/Storages/System/StorageSystemDetachedTables.cpp index 158edb7818f..56c5e49b467 100644 --- a/src/Storages/System/StorageSystemDetachedTables.cpp +++ b/src/Storages/System/StorageSystemDetachedTables.cpp @@ -94,7 
+94,7 @@ protected:
             }
         }
 
-        if (databases->size() == database_idx && max_block_size != rows_count)
+        if (databases->size() == database_idx && (!detached_tables_it || !detached_tables_it->isValid()))
         {
             done = true;
         }

From 678c472236f8479455cdfcd697ed2a822f68a6b4 Mon Sep 17 00:00:00 2001
From: Aleksei Filatov
Date: Wed, 10 Jul 2024 14:42:13 +0300
Subject: [PATCH 22/80] Close log files in watchdog

---
 src/Daemon/BaseDaemon.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp
index 48f76769a09..f82f02c9d9d 100644
--- a/src/Daemon/BaseDaemon.cpp
+++ b/src/Daemon/BaseDaemon.cpp
@@ -1305,6 +1305,10 @@ void BaseDaemon::setupWatchdog()
         int status = 0;
         do
         {
+            // Close log files to prevent keeping descriptors of unlinked rotated files.
+            // On next log write files will be reopened.
+            closeLogs(logger());
+
             if (-1 != waitpid(pid, &status, WUNTRACED | WCONTINUED) || errno == ECHILD)
             {
                 if (WIFSTOPPED(status))

From 2d1f45bb985debfb20037cb5f7dba6fb4f6903d8 Mon Sep 17 00:00:00 2001
From: Igor Nikonov
Date: Thu, 11 Jul 2024 08:32:00 +0000
Subject: [PATCH 23/80] Remove redundant code

---
 src/Client/IConnections.h                   |  2 --
 src/Client/MultiplexedConnections.cpp       | 11 +++++------
 src/Interpreters/ClientInfo.cpp             |  4 ++--
 src/Interpreters/ClientInfo.h               |  2 +-
 src/Interpreters/Context.cpp                |  7 -------
 src/Interpreters/Context.h                  |  1 -
 src/Processors/QueryPlan/ReadFromRemote.cpp | 13 ++++++-------
 7 files changed, 14 insertions(+), 26 deletions(-)

diff --git a/src/Client/IConnections.h b/src/Client/IConnections.h
index ebc71511834..09211de53b0 100644
--- a/src/Client/IConnections.h
+++ b/src/Client/IConnections.h
@@ -54,8 +54,6 @@ public:
 
     struct ReplicaInfo
     {
-        bool collaborate_with_initiator{false};
-        size_t all_replicas_count{0};
         size_t number_of_current_replica{0};
     };
 
diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp
index bcef286ecbc..866338bee8d 100644
--- a/src/Client/MultiplexedConnections.cpp
+++ b/src/Client/MultiplexedConnections.cpp
@@ -141,13 +141,12 @@ void MultiplexedConnections::sendQuery(
             modified_settings.group_by_two_level_threshold = 0;
             modified_settings.group_by_two_level_threshold_bytes = 0;
         }
+    }
 
-        if (replica_info)
-        {
-            client_info.collaborate_with_initiator = true;
-            client_info.count_participating_replicas = replica_info->all_replicas_count;
-            client_info.number_of_current_replica = replica_info->number_of_current_replica;
-        }
+    if (replica_info)
+    {
+        client_info.collaborate_with_initiator = true;
+        client_info.number_of_current_replica = replica_info->number_of_current_replica;
     }
 
     /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting.
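The ClientInfo hunks that follow keep serializing the now-unused replica counter under the name `obsolete_count_participating_replicas` instead of deleting it, because the field still occupies a slot in the versioned wire format: after the parallel-replicas revision check, an older peer expects three values, so dropping the write would shift every field behind it. Below is a minimal standalone sketch of that pattern; the struct name is illustrative only, and plain text streams stand in for ClickHouse's VarUInt encoding.

```cpp
#include <cassert>
#include <cstdint>
#include <istream>
#include <ostream>
#include <sstream>

/// Illustrative stand-in for the real ClientInfo fields; not ClickHouse code.
struct ReplicaWireInfo
{
    bool collaborate_with_initiator = false;
    uint64_t obsolete_count_participating_replicas = 0; /// unused, kept so the on-wire field order stays stable
    uint64_t number_of_current_replica = 0;

    void write(std::ostream & out) const
    {
        /// The obsolete slot is still written: an old reader expects three values here.
        out << collaborate_with_initiator << ' '
            << obsolete_count_participating_replicas << ' '
            << number_of_current_replica;
    }

    void read(std::istream & in)
    {
        in >> collaborate_with_initiator
           >> obsolete_count_participating_replicas /// read, then simply ignored
           >> number_of_current_replica;
    }
};

int main()
{
    ReplicaWireInfo sent;
    sent.collaborate_with_initiator = true;
    sent.number_of_current_replica = 2;

    std::stringstream wire;
    sent.write(wire);

    ReplicaWireInfo received;
    received.read(wire);
    assert(received.collaborate_with_initiator);
    assert(received.number_of_current_replica == 2);
}
```

The same reasoning explains why only `Context::setReplicaInfo` and the struct fields are removed in this patch while the (de)serialization code is merely renamed.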
diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp
index ce1efb61cc0..39fdef23baa 100644
--- a/src/Interpreters/ClientInfo.cpp
+++ b/src/Interpreters/ClientInfo.cpp
@@ -95,7 +95,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
     if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS)
     {
         writeVarUInt(static_cast<UInt64>(collaborate_with_initiator), out);
-        writeVarUInt(count_participating_replicas, out);
+        writeVarUInt(obsolete_count_participating_replicas, out);
         writeVarUInt(number_of_current_replica, out);
     }
 }
@@ -185,7 +185,7 @@ void ClientInfo::read(ReadBuffer & in, UInt64 client_protocol_revision)
         UInt64 value;
         readVarUInt(value, in);
         collaborate_with_initiator = static_cast<bool>(value);
-        readVarUInt(count_participating_replicas, in);
+        readVarUInt(obsolete_count_participating_replicas, in);
         readVarUInt(number_of_current_replica, in);
     }
 }
diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h
index 3054667e264..ca32b4c5cfa 100644
--- a/src/Interpreters/ClientInfo.h
+++ b/src/Interpreters/ClientInfo.h
@@ -127,7 +127,7 @@ public:
     /// For parallel processing on replicas
     bool collaborate_with_initiator{false};
-    UInt64 count_participating_replicas{0};
+    UInt64 obsolete_count_participating_replicas{0};
     UInt64 number_of_current_replica{0};
 
     enum class BackgroundOperationType : uint8_t
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index f2626696492..0d60f7dc5f4 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -4941,13 +4941,6 @@ void Context::setConnectionClientVersion(UInt64 client_version_major, UInt64 cli
     client_info.connection_tcp_protocol_version = client_tcp_protocol_version;
 }
 
-void Context::setReplicaInfo(bool collaborate_with_initiator, size_t all_replicas_count, size_t number_of_current_replica)
-{
-    client_info.collaborate_with_initiator = collaborate_with_initiator;
-    client_info.count_participating_replicas = all_replicas_count;
-    client_info.number_of_current_replica = number_of_current_replica;
-}
-
 void Context::increaseDistributedDepth()
 {
     ++client_info.distributed_depth;
diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h
index 8c5492bcbc8..5cd1153ab44 100644
--- a/src/Interpreters/Context.h
+++ b/src/Interpreters/Context.h
@@ -696,7 +696,6 @@ public:
     void setInitialQueryStartTime(std::chrono::time_point<std::chrono::system_clock> initial_query_start_time);
     void setQuotaClientKey(const String & quota_key);
     void setConnectionClientVersion(UInt64 client_version_major, UInt64 client_version_minor, UInt64 client_version_patch, unsigned client_tcp_protocol_version);
-    void setReplicaInfo(bool collaborate_with_initiator, size_t all_replicas_count, size_t number_of_current_replica);
     void increaseDistributedDepth();
     const OpenTelemetry::TracingContext & getClientTraceContext() const { return client_info.client_trace_context; }
     OpenTelemetry::TracingContext & getClientTraceContext() { return client_info.client_trace_context; }
diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp
index 84c2515e8ca..8ebd95f27e5 100644
--- a/src/Processors/QueryPlan/ReadFromRemote.cpp
+++ b/src/Processors/QueryPlan/ReadFromRemote.cpp
@@ -411,8 +411,8 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder
     auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(current_settings);
     const auto & shard = cluster->getShardsInfo().at(0);
-    size_t all_replicas_count = current_settings.max_parallel_replicas;
-    if (all_replicas_count > shard.getAllNodeCount())
+    size_t max_replicas_to_use = current_settings.max_parallel_replicas;
+    if (max_replicas_to_use > shard.getAllNodeCount())
     {
         LOG_INFO(
             getLogger("ReadFromParallelRemoteReplicasStep"),
@@ -420,14 +420,14 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder
             "Will use the latter number to execute the query.",
             current_settings.max_parallel_replicas,
             shard.getAllNodeCount());
-        all_replicas_count = shard.getAllNodeCount();
+        max_replicas_to_use = shard.getAllNodeCount();
     }
 
     std::vector shuffled_pool;
-    if (all_replicas_count < shard.getAllNodeCount())
+    if (max_replicas_to_use < shard.getAllNodeCount())
     {
         shuffled_pool = shard.pool->getShuffledPools(current_settings);
-        shuffled_pool.resize(all_replicas_count);
+        shuffled_pool.resize(max_replicas_to_use);
     }
     else
     {
@@ -437,11 +437,10 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder
         shuffled_pool = shard.pool->getShuffledPools(current_settings, priority_func);
     }
 
-    for (size_t i=0; i < all_replicas_count; ++i)
+    for (size_t i=0; i < max_replicas_to_use; ++i)
     {
         IConnections::ReplicaInfo replica_info
         {
-            .all_replicas_count = all_replicas_count, /// we should use this number specifically because efficiency of data distribution by consistent hash depends on it.
             .number_of_current_replica = i,
         };

From f85be2b452e9d62f766cee3d170dacd1906e04b7 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 11 Jul 2024 12:17:59 +0000
Subject: [PATCH 24/80] Add --memory-usage option to client in non interactive
 mode

---
 src/Client/ClientBase.cpp       | 22 ++++++++++++++++++++--
 src/Common/ProgressIndication.h | 10 +++++-----
 2 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index 5d472ba99b9..b5de348d583 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -2069,9 +2069,18 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
             progress_indication.writeFinalProgress();
         output_stream << std::endl << std::endl;
     }
-    else if (getClientConfiguration().getBool("print-time-to-stderr", false))
+    else
     {
-        error_stream << progress_indication.elapsedSeconds() << "\n";
+        const auto & config = getClientConfiguration();
+        if (config.getBool("print-time-to-stderr", false))
+            error_stream << progress_indication.elapsedSeconds() << "\n";
+
+        const auto & print_memory_mode = config.getString("print-memory-to-stderr", "");
+        auto peak_memory_usage = std::max(progress_indication.getMemoryUsage().peak, 0);
+        if (print_memory_mode == "default")
+            error_stream << peak_memory_usage << "\n";
+        else if (print_memory_mode == "readable")
+            error_stream << formatReadableSizeWithBinarySuffix(peak_memory_usage) << "\n";
     }
 
     if (!is_interactive && getClientConfiguration().getBool("print-num-processed-rows", false))
@@ -3035,6 +3044,7 @@ void ClientBase::init(int argc, char ** argv)
         ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.")
         ("wait_for_suggestions_to_load", "Load suggestion data synchonously.")
         ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)")
+        ("memory-usage", po::value<std::string>()->implicit_value("default")->default_value("none"), "print memory usage to stderr in non-interactive mode (for benchmarks). Values: 'none', 'default', 'readable'")
 
         ("echo", "in batch mode, print query before execution")
@@ -3120,6 +3130,14 @@ void ClientBase::init(int argc, char ** argv)
     /// Output execution time to stderr in batch mode.
     if (options.count("time"))
         getClientConfiguration().setBool("print-time-to-stderr", true);
+    if (options.count("memory-usage"))
+    {
+        const auto & memory_usage_mode = options["memory-usage"].as<std::string>();
+        if (memory_usage_mode != "none" && memory_usage_mode != "default" && memory_usage_mode != "readable")
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown memory-usage mode: {}", memory_usage_mode);
+        getClientConfiguration().setString("print-memory-to-stderr", memory_usage_mode);
+    }
+
     if (options.count("query"))
         queries = options["query"].as<std::vector<std::string>>();
     if (options.count("query_id"))
diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h
index ae39fb49bcc..c7f61a7fba9 100644
--- a/src/Common/ProgressIndication.h
+++ b/src/Common/ProgressIndication.h
@@ -71,11 +71,6 @@ public:
     /// How much seconds passed since query execution start.
     double elapsedSeconds() const { return getElapsedNanoseconds() / 1e9; }
 
-    void updateThreadEventData(HostToTimesMap & new_hosts_data);
-
-private:
-    double getCPUUsage();
-
     struct MemoryUsage
     {
         UInt64 total = 0;
@@ -85,6 +80,11 @@ private:
 
     MemoryUsage getMemoryUsage() const;
 
+    void updateThreadEventData(HostToTimesMap & new_hosts_data);
+
+private:
+    double getCPUUsage();
+
     UInt64 getElapsedNanoseconds() const;
 
     /// This flag controls whether to show the progress bar. We start showing it after

From b9cd1bddd7c8612e3b43172ea8670a7e904fe237 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 11 Jul 2024 12:20:37 +0000
Subject: [PATCH 25/80] upd doc

---
 docs/en/interfaces/cli.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index e18ff6f1a3f..63b7353d092 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -185,6 +185,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--format, -f` – Use the specified default format to output the result.
 - `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
+- `--memory-usage` – If specified, print the query memory usage to ‘stderr’ in non-interactive mode.
 - `--stacktrace` – If specified, also print the stack trace if an exception occurs.
 - `--config-file` – The name of the configuration file.
 - `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
@@ -339,7 +340,7 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000
 Connect to one of provides hosts: `192.168.1.15`, `192.168.1.25`.
 
 ``` bash
-clickhouse-client clickhouse://192.168.1.15,192.168.1.25 
+clickhouse-client clickhouse://192.168.1.15,192.168.1.25
 ```
 
 ### Configuration Files {#configuration_files}
@@ -367,7 +368,7 @@ Example of a config file:
 ```
 
 Or the same config in a YAML format:
- 
+
 ```yaml
 user: username
 password: 'password'

From 05c3692e1dfcacffc9de62bac9a53a0e28f4bea7 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 11 Jul 2024 12:22:03 +0000
Subject: [PATCH 26/80] add 03203_client_benchmark_options

---
 .../03203_client_benchmark_options.reference       |  6 ++++++
 .../0_stateless/03203_client_benchmark_options.sh  | 13 +++++++++++++
 2 files changed, 19 insertions(+)
 create mode 100644 tests/queries/0_stateless/03203_client_benchmark_options.reference
 create mode 100755 tests/queries/0_stateless/03203_client_benchmark_options.sh

diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.reference b/tests/queries/0_stateless/03203_client_benchmark_options.reference
new file mode 100644
index 00000000000..fd2996b1c78
--- /dev/null
+++ b/tests/queries/0_stateless/03203_client_benchmark_options.reference
@@ -0,0 +1,6 @@
+Ok
+Ok
+Ok
+Ok
+Ok
+Ok
diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.sh b/tests/queries/0_stateless/03203_client_benchmark_options.sh
new file mode 100755
index 00000000000..a9b9d69822b
--- /dev/null
+++ b/tests/queries/0_stateless/03203_client_benchmark_options.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1 | grep -q "^2\." && echo "Ok" || echo "Fail"
+${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1 | grep -q "^2\." && echo "Ok" || echo "Fail"
+${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9]\+$" && echo "Ok" || echo "Fail"
+${CLICKHOUSE_CLIENT} --memory-usage=none -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" # expected no output
+${CLICKHOUSE_CLIENT} --memory-usage=default -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9]\+$" && echo "Ok" || echo "Fail"
+${CLICKHOUSE_CLIENT} --memory-usage=readable -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9].*B$" && echo "Ok" || echo "Fail"
+${CLICKHOUSE_CLIENT} --memory-usage=unknown -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "BAD_ARGUMENTS" && echo "Ok" || echo "Fail"

From fa2270fd59c0de91d45ac50f2064d31f568935e4 Mon Sep 17 00:00:00 2001
From: Nikolay Degterinsky
Date: Thu, 11 Jul 2024 22:35:39 +0000
Subject: [PATCH 27/80] Fix VALID UNTIL clause resetting after restart

---
 .../Access/InterpreterCreateUserQuery.cpp     | 17 ++++++++++++++-
 .../integration/test_user_valid_until/test.py | 21 ++++++++++++++++++-
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/src/Interpreters/Access/InterpreterCreateUserQuery.cpp b/src/Interpreters/Access/InterpreterCreateUserQuery.cpp
index 32c51b745c7..a09de7e688a 100644
--- a/src/Interpreters/Access/InterpreterCreateUserQuery.cpp
+++ b/src/Interpreters/Access/InterpreterCreateUserQuery.cpp
@@ -259,7 +259,22 @@ void InterpreterCreateUserQuery::updateUserFromQuery(User & user, const ASTCreat
     if (query.auth_data)
         auth_data = AuthenticationData::fromAST(*query.auth_data, {}, !query.attach);
 
-    updateUserFromQueryImpl(user, query, auth_data, {}, {}, {}, {}, {}, allow_no_password, allow_plaintext_password, true);
+    std::optional<time_t> valid_until;
+    if (query.valid_until)
+    {
+        const String valid_until_str = checkAndGetLiteralArgument<String>(query.valid_until, "valid_until");
+        time_t time = 0;
+
+        if (valid_until_str != "infinity")
+        {
+            ReadBufferFromString in(valid_until_str);
+            readDateTimeText(time, in);
+        }
+
+        valid_until = time;
+    }
+
+    updateUserFromQueryImpl(user, query, auth_data, {}, {}, {}, {}, valid_until, allow_no_password, allow_plaintext_password, true);
 }
 
 void registerInterpreterCreateUserQuery(InterpreterFactory & factory)
diff --git a/tests/integration/test_user_valid_until/test.py b/tests/integration/test_user_valid_until/test.py
index d6d5bf8b18e..39ca5997067 100644
--- a/tests/integration/test_user_valid_until/test.py
+++ b/tests/integration/test_user_valid_until/test.py
@@ -5,7 +5,7 @@ from time import sleep
 from helpers.cluster import ClickHouseCluster
 
 cluster = ClickHouseCluster(__file__)
-node = cluster.add_instance("node")
+node = cluster.add_instance("node", stay_alive=True)
 
 
 @pytest.fixture(scope="module")
@@ -84,3 +84,22 @@ def test_details(started_cluster):
         node.query("SHOW CREATE USER user_details_time_only")
         == f"CREATE USER user_details_time_only VALID UNTIL \\'{until_year}-01-01 22:03:40\\'\n"
     )
+
+
+def test_restart(started_cluster):
+    node.query("CREATE USER user_restart VALID UNTIL '06/11/2010 08:03:20 Z+3'")
+
+    assert (
+        node.query("SHOW CREATE USER user_restart")
+        == "CREATE USER user_restart VALID UNTIL \\'2010-11-06 05:03:20\\'\n"
+    )
+
+    node.restart_clickhouse()
+
+    assert (
+        node.query("SHOW CREATE USER user_restart")
+        == "CREATE USER user_restart VALID UNTIL \\'2010-11-06 05:03:20\\'\n"
+    )
+
+    error = "Authentication failed"
+    assert error in node.query_and_get_error("SELECT 1", user="user_restart")

From 705134413ffd321a59c1c2c82af11a5edddc962c Mon Sep 17 00:00:00 2001
From: vdimir
Date: Fri, 12 Jul 2024 12:00:39 +0200
Subject: [PATCH 28/80] Update docs/en/interfaces/cli.md

---
 docs/en/interfaces/cli.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index 63b7353d092..849b3d4b486 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -185,7 +185,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--format, -f` – Use the specified default format to output the result.
 - `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
-- `--memory-usage` – If specified, print the query memory usage to ‘stderr’ in non-interactive mode.
+- `--memory-usage` – If specified, print memory usage to ‘stderr’ in non-interactive mode. Possible values: 'none' - do not print memory usage, 'default' - print number of bytes, 'readable' - print memory usage in human-readable format.
 - `--stacktrace` – If specified, also print the stack trace if an exception occurs.
 - `--config-file` – The name of the configuration file.
 - `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
"AttachQuery" : "CreateQuery"; + String database = getDatabase(); + if (!database.empty()) + res += (delim + getDatabase()); + res += (delim + getTable()); + return res; +} + void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { frame.need_parens = false; diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 6fbf045915b..9e4364b1f25 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -136,7 +136,7 @@ public: bool create_or_replace{false}; /** Get the text that identifies this element. */ - String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + getDatabase()) + delim + getTable(); } + String getID(char delim) const override; ASTPtr clone() const override; From 24545caa3a7898e754d7b1f002c95132dc6a2d8b Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 15 Jul 2024 00:52:31 +0000 Subject: [PATCH 31/80] Move parsing into a separate function --- .../Access/InterpreterCreateUserQuery.cpp | 59 ++++++++++--------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/src/Interpreters/Access/InterpreterCreateUserQuery.cpp b/src/Interpreters/Access/InterpreterCreateUserQuery.cpp index a09de7e688a..855aa36b159 100644 --- a/src/Interpreters/Access/InterpreterCreateUserQuery.cpp +++ b/src/Interpreters/Access/InterpreterCreateUserQuery.cpp @@ -114,6 +114,34 @@ namespace else if (query.grantees) user.grantees = *query.grantees; } + + time_t getValidUntilFromAST(ASTPtr valid_until, ContextPtr context) + { + if (context) + valid_until = evaluateConstantExpressionAsLiteral(valid_until, context); + + const String valid_until_str = checkAndGetLiteralArgument(valid_until, "valid_until"); + + if (valid_until_str == "infinity") + return 0; + + time_t time = 0; + ReadBufferFromString in(valid_until_str); + + if (context) + { + const auto & time_zone = DateLUT::instance(""); + const auto & utc_time_zone = DateLUT::instance("UTC"); + + parseDateTimeBestEffort(time, in, time_zone, utc_time_zone); + } + else + { + readDateTimeText(time, in); + } + + return time; + } } BlockIO InterpreterCreateUserQuery::execute() @@ -134,23 +162,7 @@ BlockIO InterpreterCreateUserQuery::execute() std::optional valid_until; if (query.valid_until) - { - const ASTPtr valid_until_literal = evaluateConstantExpressionAsLiteral(query.valid_until, getContext()); - const String valid_until_str = checkAndGetLiteralArgument(valid_until_literal, "valid_until"); - - time_t time = 0; - - if (valid_until_str != "infinity") - { - const auto & time_zone = DateLUT::instance(""); - const auto & utc_time_zone = DateLUT::instance("UTC"); - - ReadBufferFromString in(valid_until_str); - parseDateTimeBestEffort(time, in, time_zone, utc_time_zone); - } - - valid_until = time; - } + valid_until = getValidUntilFromAST(query.valid_until, getContext()); std::optional default_roles_from_query; if (query.default_roles) @@ -261,18 +273,7 @@ void InterpreterCreateUserQuery::updateUserFromQuery(User & user, const ASTCreat std::optional valid_until; if (query.valid_until) - { - const String valid_until_str = checkAndGetLiteralArgument(query.valid_until, "valid_until"); - time_t time = 0; - - if (valid_until_str != "infinity") - { - ReadBufferFromString in(valid_until_str); - readDateTimeText(time, in); - } - - valid_until = time; - } + valid_until = getValidUntilFromAST(query.valid_until, {}); updateUserFromQueryImpl(user, query, auth_data, {}, {}, {}, {}, valid_until, 
allow_no_password, allow_plaintext_password, true);
}

From 09345e6b595952fd2172468c039c7f3bcb3d9494 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 15 Jul 2024 03:33:24 +0200
Subject: [PATCH 32/80] Fix bad test `apache_arrow_abort`

---
 tests/queries/0_stateless/02834_apache_arrow_abort.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/02834_apache_arrow_abort.sql b/tests/queries/0_stateless/02834_apache_arrow_abort.sql
index 47e1c5d3951..cb0eaebe554 100644
--- a/tests/queries/0_stateless/02834_apache_arrow_abort.sql
+++ b/tests/queries/0_stateless/02834_apache_arrow_abort.sql
@@ -1,4 +1,4 @@
--- Tags: no-fasttest
+-- Tags: no-fasttest, no-tsan, no-asan, no-msan, no-ubsan
 -- This test depends on internet access, but it does not matter, because it only has to check that there is no abort due to a bug in the Apache Arrow library.
 SET optimize_trivial_insert_select=1;
 INSERT INTO TABLE FUNCTION url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet') SELECT * FROM url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet'); -- { serverError CANNOT_WRITE_TO_OSTREAM, RECEIVED_ERROR_FROM_REMOTE_IO_SERVER, POCO_EXCEPTION }

From 41327260bad798494fef0914b51d95fa85043057 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 15 Jul 2024 03:47:35 +0200
Subject: [PATCH 33/80] Fix MSan report in GRPC

---
 contrib/grpc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/grpc b/contrib/grpc
index f5b7fdc2dff..1716359d2e2 160000
--- a/contrib/grpc
+++ b/contrib/grpc
@@ -1 +1 @@
-Subproject commit f5b7fdc2dff09ada06dbf6c75df298fb40f898df
+Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db

From d76139699178b5403255150b0774a12332728238 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 15 Jul 2024 04:08:10 +0200
Subject: [PATCH 34/80] Fix test

---
 .../0_stateless/02903_parameterized_view_explain_ast.reference | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference
index 6ee8d0c3d23..27222968b1a 100644
--- a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference
+++ b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference
@@ -1,4 +1,4 @@
-CreateQuery  numbers_pv (children 2)
+CreateQuery numbers_pv (children 2)
 Identifier numbers_pv
 SelectWithUnionQuery (children 1)
 ExpressionList (children 1)

From e346535641bff8998c126f5fb74f02d63014090a Mon Sep 17 00:00:00 2001
From: Han Fei
Date: Mon, 15 Jul 2024 16:27:53 +0200
Subject: [PATCH 35/80] fix log in keeper tcp handler

---
 src/Server/KeeperTCPHandler.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp
index 4849f5827c1..aa79d8814a7 100644
--- a/src/Server/KeeperTCPHandler.cpp
+++ b/src/Server/KeeperTCPHandler.cpp
@@ -623,7 +623,7 @@ void KeeperTCPHandler::updateStats(Coordination::ZooKeeperResponsePtr & response
     {
         LOG_INFO(
             log,
             "Total time to process a request took too long ({}ms).\nRequest info: {}",
-            elapsed,
+            elapsed_ms,
             request->toString(/*short_format=*/true));
     }

From dec5931262ab4dfe541abfca3fb1e7b67f3d9a5e Mon Sep 17 00:00:00 2001
From: Han Fei
Date: Mon, 15 Jul 2024 16:43:41 +0200
Subject: [PATCH 36/80] log session id

---
 src/Server/KeeperTCPHandler.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp
index aa79d8814a7..b61df45133a 100644
--- a/src/Server/KeeperTCPHandler.cpp
+++ b/src/Server/KeeperTCPHandler.cpp
@@ -622,7 +622,8 @@ void KeeperTCPHandler::updateStats(Coordination::ZooKeeperResponsePtr & response
     {
         LOG_INFO(
             log,
-            "Total time to process a request took too long ({}ms).\nRequest info: {}",
+            "Total time to process a request in session {} took too long ({}ms).\nRequest info: {}",
+            session_id,
             elapsed_ms,
             request->toString(/*short_format=*/true));
     }

From 38027abbecfc7b60594ed1073ce9104133152aa9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?=
Date: Mon, 8 Jul 2024 13:06:50 +0000
Subject: [PATCH 37/80] Fix bug in `numbers` when both limit and offset are used but the index cannot be used

---
 src/Storages/System/StorageSystemNumbers.cpp | 5 +++++
 .../03203_system_numbers_limit_and_offset.reference | 3 +++
 .../0_stateless/03203_system_numbers_limit_and_offset.sql | 5 +++++
 3 files changed, 13 insertions(+)
 create mode 100644 tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
 create mode 100644 tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql

diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp
index da700a7a4e9..6ddfb5e3213 100644
--- a/src/Storages/System/StorageSystemNumbers.cpp
+++ b/src/Storages/System/StorageSystemNumbers.cpp
@@ -38,6 +38,11 @@ void StorageSystemNumbers::read(
     size_t max_block_size,
     size_t num_streams)
 {
+    if (limit && *limit < max_block_size)
+    {
+        max_block_size = static_cast(*limit);
+        multithreaded = false;
+    }
     query_plan.addStep(std::make_unique(
         column_names, query_info, storage_snapshot, context, shared_from_this(), max_block_size, num_streams));
 }

diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
new file mode 100644
index 00000000000..d55fa7c71fc
--- /dev/null
+++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
@@ -0,0 +1,3 @@
+9900
+9910
+9920

diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
new file mode 100644
index 00000000000..6b7f998c4d4
--- /dev/null
+++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
@@ -0,0 +1,5 @@
+SELECT number FROM numbers_mt(10000)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+LIMIT 990, 3;
+

From a177bb63077353e15f6780ae775c9519f2795695 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?=
Date: Mon, 8 Jul 2024 16:14:20 +0000
Subject: [PATCH 38/80] Really fix the bug

---
 .../QueryPlan/ReadFromSystemNumbersStep.cpp | 35 +++++++------------
 src/Storages/System/StorageSystemNumbers.cpp | 5 ---
 ..._system_numbers_limit_and_offset.reference | 4 +++
 .../03203_system_numbers_limit_and_offset.sql | 34 +++++++++++++++++-
 4 files changed, 49 insertions(+), 29 deletions(-)

diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp
index a3ae035afdd..049fe762640 100644
--- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp
+++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp
@@ -39,15 +39,10 @@ inline void iotaWithStepOptimized(T * begin, size_t count, T first_value, T step
 
 class NumbersSource : public ISource
 {
 public:
-    NumbersSource(UInt64
block_size_, UInt64 offset_, std::optional limit_, UInt64 chunk_step_, const std::string & column_name, UInt64 step_) - : ISource(createHeader(column_name)) - , block_size(block_size_) - , next(offset_) - , chunk_step(chunk_step_) - , step(step_) + NumbersSource( + UInt64 block_size_, UInt64 offset_, std::optional end_, UInt64 chunk_step_, const std::string & column_name, UInt64 step_) + : ISource(createHeader(column_name)), block_size(block_size_), next(offset_), chunk_step(chunk_step_), end(end_), step(step_) { - if (limit_.has_value()) - end = limit_.value() + offset_; } String getName() const override { return "Numbers"; } @@ -79,7 +74,6 @@ protected: next += chunk_step; progress(column->size(), column->byteSize()); - return {Columns{std::move(column)}, real_block_size}; } @@ -549,13 +543,21 @@ Pipe ReadFromSystemNumbersStep::makePipe() return pipe; } + const auto end = std::invoke( + [&]() -> std::optional + { + if (numbers_storage.limit.has_value()) + return *(numbers_storage.limit) + numbers_storage.offset; + return {}; + }); + /// Fall back to NumbersSource for (size_t i = 0; i < num_streams; ++i) { auto source = std::make_shared( max_block_size, numbers_storage.offset + i * max_block_size * numbers_storage.step, - numbers_storage.limit, + end, num_streams * max_block_size * numbers_storage.step, numbers_storage.column_name, numbers_storage.step); @@ -571,19 +573,6 @@ Pipe ReadFromSystemNumbersStep::makePipe() pipe.addSource(std::move(source)); } - if (numbers_storage.limit) - { - size_t i = 0; - auto storage_limit = (*numbers_storage.limit - 1) / numbers_storage.step + 1; - /// This formula is how to split 'limit' elements to 'num_streams' chunks almost uniformly. - pipe.addSimpleTransform( - [&](const Block & header) - { - ++i; - return std::make_shared(header, storage_limit * i / num_streams - storage_limit * (i - 1) / num_streams, 0); - }); - } - return pipe; } diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp index 6ddfb5e3213..da700a7a4e9 100644 --- a/src/Storages/System/StorageSystemNumbers.cpp +++ b/src/Storages/System/StorageSystemNumbers.cpp @@ -38,11 +38,6 @@ void StorageSystemNumbers::read( size_t max_block_size, size_t num_streams) { - if (limit && *limit < max_block_size) - { - max_block_size = static_cast(*limit); - multithreaded = false; - } query_plan.addStep(std::make_unique( column_names, query_info, storage_snapshot, context, shared_from_this(), max_block_size, num_streams)); } diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference index d55fa7c71fc..05deae711c6 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference @@ -1,3 +1,7 @@ +case 1 9900 9910 9920 +case 2 +9990 +18679 31 diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql index 6b7f998c4d4..1a1133c8a7f 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql @@ -1,5 +1,37 @@ +SELECT 'case 1'; SELECT number FROM numbers_mt(10000) WHERE (number % 10) = 0 ORDER BY number ASC LIMIT 990, 3; - +SELECT 'case 2'; +SELECT number FROM numbers_mt(10000) +WHERE (number % 10) = 0 +ORDER BY number ASC +LIMIT 999, 20 SETTINGS max_block_size = 31; +--- The 
following query was buggy before, so let's use it as a test case +WITH + toUInt64(-1) AS umax, + toUInt8(ceil(log10(umax))) AS max_digits, + 9 * max_digits AS max_digits_sum, + (x -> ((x > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < x) AND ((x % y) = 0)), range(toUInt64(sqrt(x)) + 1))) = 0))) AS is_prime_slow +SELECT + num, + ds +FROM +( + WITH x -> arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(x)))) AS digits_sum + SELECT + 1 + (number * 2) AS num, + digits_sum(num) AS ds + FROM numbers_mt(10000) + WHERE ds IN ( + WITH x -> ((x > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < x) AND ((x % y) = 0)), range(toUInt64(sqrt(x)) + 1))) = 0)) AS is_prime_slow + SELECT number + FROM numbers(max_digits_sum + 1) + WHERE is_prime_slow(number) + ) +) +WHERE is_prime_slow(num) +ORDER BY num ASC +LIMIT 998, 1 +SETTINGS max_block_size = 64, max_threads=16; From 8b78cf1c6139931132ff84bdf1ae495eef9714be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 8 Jul 2024 16:22:12 +0000 Subject: [PATCH 39/80] Add one more test case --- .../03203_system_numbers_limit_and_offset.reference | 12 ++++++++++++ .../03203_system_numbers_limit_and_offset.sql | 8 ++++++++ 2 files changed, 20 insertions(+) diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference index 05deae711c6..e95864a6fc9 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference @@ -5,3 +5,15 @@ case 1 case 2 9990 18679 31 +0 +10 +20 +30 +40 +50 +60 +70 +80 +90 +100 +110 diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql index 1a1133c8a7f..dc9e06b806f 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql @@ -3,11 +3,13 @@ SELECT number FROM numbers_mt(10000) WHERE (number % 10) = 0 ORDER BY number ASC LIMIT 990, 3; + SELECT 'case 2'; SELECT number FROM numbers_mt(10000) WHERE (number % 10) = 0 ORDER BY number ASC LIMIT 999, 20 SETTINGS max_block_size = 31; + --- The following query was buggy before, so let's use it as a test case WITH toUInt64(-1) AS umax, @@ -35,3 +37,9 @@ WHERE is_prime_slow(num) ORDER BY num ASC LIMIT 998, 1 SETTINGS max_block_size = 64, max_threads=16; + +SELECT number +FROM numbers_mt(120) +WHERE (number % 10) = 0 +ORDER BY number ASC +SETTINGS max_block_size = 31, max_threads = 11 From 2442473b253c4d8cf8f4474065b196bd4be4782c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 10 Jul 2024 11:12:28 +0000 Subject: [PATCH 40/80] Fix `generate_series` --- .../QueryPlan/ReadFromSystemNumbersStep.cpp | 50 +++++++++++++++---- .../02970_generate_series.reference | 1 + .../0_stateless/02970_generate_series.sql | 1 + 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 049fe762640..67c228d43f4 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -36,12 +36,32 @@ inline void iotaWithStepOptimized(T * begin, size_t count, T first_value, T step iotaWithStep(begin, count, first_value, step); } +/// The range is defined as [start, end) +UInt64 
itemCountInRange(UInt64 start, UInt64 end, UInt64 step) +{ + const auto range_count = end - start; + if (step == 1) + return range_count; + + return (range_count - 1) / step + 1; +} + class NumbersSource : public ISource { public: NumbersSource( - UInt64 block_size_, UInt64 offset_, std::optional end_, UInt64 chunk_step_, const std::string & column_name, UInt64 step_) - : ISource(createHeader(column_name)), block_size(block_size_), next(offset_), chunk_step(chunk_step_), end(end_), step(step_) + UInt64 block_size_, + UInt64 offset_, + std::optional end_, + const std::string & column_name, + UInt64 step_in_chunk_, + UInt64 step_between_chunks_) + : ISource(createHeader(column_name)) + , block_size(block_size_) + , next(offset_) + , end(end_) + , step_in_chunk(step_in_chunk_) + , step_between_chunks(step_between_chunks_) { } String getName() const override { return "Numbers"; } @@ -59,7 +79,10 @@ protected: { if (end.value() <= next) return {}; - real_block_size = std::min(block_size, end.value() - next); + + auto max_items_to_generate = itemCountInRange(next, *end, step_in_chunk); + + real_block_size = std::min(block_size, max_items_to_generate); } auto column = ColumnUInt64::create(real_block_size); ColumnUInt64::Container & vec = column->getData(); @@ -69,9 +92,9 @@ protected: UInt64 * current_end = &vec[real_block_size]; - iotaWithStepOptimized(pos, static_cast(current_end - pos), curr, step); + iotaWithStepOptimized(pos, static_cast(current_end - pos), curr, step_in_chunk); - next += chunk_step; + next += step_between_chunks; progress(column->size(), column->byteSize()); return {Columns{std::move(column)}, real_block_size}; @@ -80,9 +103,9 @@ protected: private: UInt64 block_size; UInt64 next; - UInt64 chunk_step; std::optional end; /// not included - UInt64 step; + UInt64 step_in_chunk; + UInt64 step_between_chunks; }; struct RangeWithStep @@ -552,19 +575,24 @@ Pipe ReadFromSystemNumbersStep::makePipe() }); /// Fall back to NumbersSource + /// Range in a single block + const auto block_range = max_block_size * numbers_storage.step; + /// Step between chunks in a single source. + /// It is bigger than block_range in case of multiple threads, because we have to account for other sources as well. 
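+    /// Illustrative numbers (an assumption for this note, not taken from the patch):
+    /// with max_block_size = 8, numbers_storage.step = 1 and num_streams = 2, we get
+    /// block_range = 8 and step_between_chunks = 16, so source 0 emits [0, 8), [16, 24), ...
+    /// while source 1 emits [8, 16), [24, 32), ..., covering the sequence without overlap.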
+ const auto step_between_chunks = num_streams * block_range; for (size_t i = 0; i < num_streams; ++i) { auto source = std::make_shared( max_block_size, - numbers_storage.offset + i * max_block_size * numbers_storage.step, + numbers_storage.offset + i * block_range, end, - num_streams * max_block_size * numbers_storage.step, numbers_storage.column_name, - numbers_storage.step); + numbers_storage.step, + step_between_chunks); if (numbers_storage.limit && i == 0) { - auto rows_appr = (*numbers_storage.limit - 1) / numbers_storage.step + 1; + auto rows_appr = itemCountInRange(numbers_storage.offset, *numbers_storage.limit, numbers_storage.step); if (limit > 0 && limit < rows_appr) rows_appr = query_info_limit; source->addTotalRowsApprox(rows_appr); diff --git a/tests/queries/0_stateless/02970_generate_series.reference b/tests/queries/0_stateless/02970_generate_series.reference index 6e6f3c81587..f8a8e1891b7 100644 --- a/tests/queries/0_stateless/02970_generate_series.reference +++ b/tests/queries/0_stateless/02970_generate_series.reference @@ -5,6 +5,7 @@ 501 50 17928 +17928 0 10 13 diff --git a/tests/queries/0_stateless/02970_generate_series.sql b/tests/queries/0_stateless/02970_generate_series.sql index 0844714b3a6..edae884a561 100644 --- a/tests/queries/0_stateless/02970_generate_series.sql +++ b/tests/queries/0_stateless/02970_generate_series.sql @@ -5,6 +5,7 @@ SELECT count() FROM generate_series(7, 77, 10); SELECT count() FROM generate_series(0, 1000, 2); SELECT count() FROM generate_series(0, 999, 20); SELECT sum(generate_series) FROM generate_series(4, 1008, 4) WHERE generate_series % 7 = 1; +SELECT sum(generate_series) FROM generate_series(4, 1008, 4) WHERE generate_series % 7 = 1 SETTINGS max_block_size = 71; SELECT * FROM generate_series(5, 4); SELECT * FROM generate_series(0, 0); From a92eb1a4c38be7868fd0cb80261ae882c8fbb70e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 10 Jul 2024 11:20:05 +0000 Subject: [PATCH 41/80] Avoid creating unnecessary sources --- src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 67c228d43f4..da8a6933d6f 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -582,6 +582,10 @@ Pipe ReadFromSystemNumbersStep::makePipe() const auto step_between_chunks = num_streams * block_range; for (size_t i = 0; i < num_streams; ++i) { + const auto source_start = numbers_storage.offset + i * block_range; + if (numbers_storage.limit.has_value() && *numbers_storage.limit < source_start) + break; + auto source = std::make_shared( max_block_size, numbers_storage.offset + i * block_range, From 17352b6ed1cdcad359d2f17a96040f7fdaba9422 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 10 Jul 2024 12:06:11 +0000 Subject: [PATCH 42/80] Fix the case when offset is bigger than limit --- src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp | 8 +++++--- .../03203_system_numbers_limit_and_offset.reference | 1 + .../0_stateless/03203_system_numbers_limit_and_offset.sql | 8 +++++++- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index da8a6933d6f..b070bbe739b 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ 
b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp
@@ -582,13 +582,15 @@ Pipe ReadFromSystemNumbersStep::makePipe()
     const auto step_between_chunks = num_streams * block_range;
     for (size_t i = 0; i < num_streams; ++i)
     {
-        const auto source_start = numbers_storage.offset + i * block_range;
-        if (numbers_storage.limit.has_value() && *numbers_storage.limit < source_start)
+        const auto source_offset = i * block_range;
+        if (numbers_storage.limit.has_value() && *numbers_storage.limit < source_offset)
             break;
 
+        const auto source_start = numbers_storage.offset + source_offset;
+
         auto source = std::make_shared(
             max_block_size,
-            numbers_storage.offset + i * block_range,
+            source_start,
             end,
             numbers_storage.column_name,
             numbers_storage.step,

diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
index e95864a6fc9..960459f42ac 100644
--- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
+++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference
@@ -17,3 +17,4 @@ case 2
 90
 100
 110
+4250

diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
index dc9e06b806f..e258b0bd2d2 100644
--- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
+++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
@@ -42,4 +42,10 @@ SELECT number
 FROM numbers_mt(120)
 WHERE (number % 10) = 0
 ORDER BY number ASC
-SETTINGS max_block_size = 31, max_threads = 11
+SETTINGS max_block_size = 31, max_threads = 11;
+
+SELECT number
+FROM numbers_mt(4242, 9)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+SETTINGS max_block_size = 31, max_threads = 11;

From 6be9d7b2544ccb5642999b53409c0d9326163a62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?=
Date: Thu, 11 Jul 2024 13:40:44 +0200
Subject: [PATCH 43/80] Make the tests work with the old analyzer

---
 .../0_stateless/03203_system_numbers_limit_and_offset.sql | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
index e258b0bd2d2..b72ab4a98fd 100644
--- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
+++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql
@@ -21,10 +21,10 @@ SELECT
     ds
 FROM
 (
-    WITH x -> arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(x)))) AS digits_sum
+    WITH arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(num)))) AS digits_sum
     SELECT
         1 + (number * 2) AS num,
-        digits_sum(num) AS ds
+        digits_sum AS ds
     FROM numbers_mt(10000)
     WHERE ds IN (
         WITH x -> ((x > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < x) AND ((x % y) = 0)), range(toUInt64(sqrt(x)) + 1))) = 0)) AS is_prime_slow
         SELECT number

From 35eabcc9d1cf3a5339c20ea34d006edfc858ae68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?=
Date: Sat, 13 Jul 2024 16:01:15 +0000
Subject: [PATCH 44/80] Split test cases

---
 ...ystem_numbers_limit_and_offset_complex.reference} | 6 ------
 ...3203_system_numbers_limit_and_offset_complex.sql} | 12 ------------
 ..._system_numbers_limit_and_offset_simple.reference | 6 ++++++
 .../03203_system_numbers_limit_and_offset_simple.sql | 11 +++++++++++
 4 files changed, 17 insertions(+), 18 deletions(-)
 rename
tests/queries/0_stateless/{03203_system_numbers_limit_and_offset.reference => 03203_system_numbers_limit_and_offset_complex.reference} (60%) rename tests/queries/0_stateless/{03203_system_numbers_limit_and_offset.sql => 03203_system_numbers_limit_and_offset_complex.sql} (82%) create mode 100644 tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.reference create mode 100644 tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.sql diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.reference similarity index 60% rename from tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference rename to tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.reference index 960459f42ac..d38b21d2d05 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.reference +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.reference @@ -1,9 +1,3 @@ -case 1 -9900 -9910 -9920 -case 2 -9990 18679 31 0 10 diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql similarity index 82% rename from tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql rename to tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql index b72ab4a98fd..8e8620361fa 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset.sql +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql @@ -1,15 +1,3 @@ -SELECT 'case 1'; -SELECT number FROM numbers_mt(10000) -WHERE (number % 10) = 0 -ORDER BY number ASC -LIMIT 990, 3; - -SELECT 'case 2'; -SELECT number FROM numbers_mt(10000) -WHERE (number % 10) = 0 -ORDER BY number ASC -LIMIT 999, 20 SETTINGS max_block_size = 31; - --- The following query was buggy before, so let's use it as a test case WITH toUInt64(-1) AS umax, diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.reference b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.reference new file mode 100644 index 00000000000..6aad3ee0b4b --- /dev/null +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.reference @@ -0,0 +1,6 @@ +case 1 +9900 +9910 +9920 +case 2 +9990 diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.sql new file mode 100644 index 00000000000..df01ddf4312 --- /dev/null +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_simple.sql @@ -0,0 +1,11 @@ +SELECT 'case 1'; +SELECT number FROM numbers_mt(10000) +WHERE (number % 10) = 0 +ORDER BY number ASC +LIMIT 990, 3; + +SELECT 'case 2'; +SELECT number FROM numbers_mt(10000) +WHERE (number % 10) = 0 +ORDER BY number ASC +LIMIT 999, 20 SETTINGS max_block_size = 31; From 8e469af2a41b444bbeb58b63fa0ece7d883553c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 13 Jul 2024 20:36:01 +0000 Subject: [PATCH 45/80] Make query really work with old analyzer --- ...ystem_numbers_limit_and_offset_complex.sql | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql index 
8e8620361fa..756e08da27d 100644 --- a/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql +++ b/tests/queries/0_stateless/03203_system_numbers_limit_and_offset_complex.sql @@ -1,27 +1,26 @@ --- The following query was buggy before, so let's use it as a test case WITH - toUInt64(-1) AS umax, - toUInt8(ceil(log10(umax))) AS max_digits, - 9 * max_digits AS max_digits_sum, - (x -> ((x > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < x) AND ((x % y) = 0)), range(toUInt64(sqrt(x)) + 1))) = 0))) AS is_prime_slow + (num > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < num) AND ((num % y) = 0)), range(toUInt64(sqrt(num)) + 1))) = 0) AS is_prime_slow SELECT num, - ds + ds, FROM ( - WITH arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(num)))) AS digits_sum + WITH + arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(num)))) AS digits_sum SELECT 1 + (number * 2) AS num, digits_sum AS ds FROM numbers_mt(10000) WHERE ds IN ( - WITH x -> ((x > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < x) AND ((x % y) = 0)), range(toUInt64(sqrt(x)) + 1))) = 0)) AS is_prime_slow + WITH + (number > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < number) AND ((number % y) = 0)), range(toUInt64(sqrt(number)) + 1))) = 0) AS is_prime_slow SELECT number - FROM numbers(max_digits_sum + 1) - WHERE is_prime_slow(number) + FROM numbers(180 + 1) + WHERE is_prime_slow ) ) -WHERE is_prime_slow(num) +WHERE is_prime_slow ORDER BY num ASC LIMIT 998, 1 SETTINGS max_block_size = 64, max_threads=16; From 51c1936b7f15327a1685472b41178f607f1c8670 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 15 Jul 2024 18:28:06 +0200 Subject: [PATCH 46/80] Correctly track memory for Allocator::realloc --- src/Common/Allocator.cpp | 12 +++-- .../configs/async_metrics_no.xml | 21 ++++++++ tests/integration/test_memory_limit/test.py | 52 +++++++++++++++++++ 3 files changed, 82 insertions(+), 3 deletions(-) create mode 100644 tests/integration/test_memory_limit/configs/async_metrics_no.xml create mode 100644 tests/integration/test_memory_limit/test.py diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index bfc85559fe8..c8373e74501 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -187,10 +187,10 @@ void * Allocator::realloc(void * buf, size_t old_size, #if USE_GWP_ASAN if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) { + auto trace_alloc = CurrentMemoryTracker::alloc(new_size); if (void * ptr = GWPAsan::GuardedAlloc.allocate(new_size, alignment)) { auto trace_free = CurrentMemoryTracker::free(old_size); - auto trace_alloc = CurrentMemoryTracker::alloc(new_size); trace_free.onFree(buf, old_size); memcpy(ptr, buf, std::min(old_size, new_size)); @@ -231,13 +231,17 @@ void * Allocator::realloc(void * buf, size_t old_size, if (alignment <= MALLOC_MIN_ALIGNMENT) { /// Resize malloc'd memory region with no special alignment requirement. 
- auto trace_free = CurrentMemoryTracker::free(old_size); + /// Realloc can do 2 possible things: + /// - expand existing memory region + /// - allocate new memory block and free the old one + /// Because we don't know which option will be picked we need to make sure there is enough + /// memory for all options auto trace_alloc = CurrentMemoryTracker::alloc(new_size); - trace_free.onFree(buf, old_size); void * new_buf = ::realloc(buf, new_size); if (nullptr == new_buf) { + [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size); throw DB::ErrnoException( DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot realloc from {} to {}", @@ -246,6 +250,8 @@ void * Allocator::realloc(void * buf, size_t old_size, } buf = new_buf; + auto trace_free = CurrentMemoryTracker::free(old_size); + trace_free.onFree(buf, old_size); trace_alloc.onAlloc(buf, new_size); if constexpr (clear_memory) diff --git a/tests/integration/test_memory_limit/configs/async_metrics_no.xml b/tests/integration/test_memory_limit/configs/async_metrics_no.xml new file mode 100644 index 00000000000..96cae3bf387 --- /dev/null +++ b/tests/integration/test_memory_limit/configs/async_metrics_no.xml @@ -0,0 +1,21 @@ + + + 86400 + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/test_memory_limit/test.py b/tests/integration/test_memory_limit/test.py new file mode 100644 index 00000000000..292d3ad5719 --- /dev/null +++ b/tests/integration/test_memory_limit/test.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +import logging +import time +import pytest +from helpers.cluster import ClickHouseCluster +from multiprocessing.dummy import Pool + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=[ + "configs/async_metrics_no.xml", + ], + env_variables={ + "MALLOC_CONF": "dirty_decay_ms:0" + } +) + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + +def test_multiple_queries(): + p = Pool(15) + + def run_query(node): + try: + node.query("SELECT * FROM system.numbers GROUP BY number") + except Exception as ex: + print("Exception", ex) + raise ex + + tasks = [] + for i in range(30): + tasks.append(p.apply_async(run_query, (node, ))) + time.sleep(i * 0.1) + + + for task in tasks: + try: + task.get() + except Exception as ex: + print("Exception", ex) + + + # test that we didn't kill the server + node.query("SELECT 1") From 7b4c6a201bbc000b1a3482c772a9c6762141c6ee Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 15 Jul 2024 19:05:23 +0200 Subject: [PATCH 47/80] Limit the memory please --- tests/integration/test_memory_limit/__init__.py | 0 tests/integration/test_memory_limit/test.py | 11 +++++------ 2 files changed, 5 insertions(+), 6 deletions(-) create mode 100644 tests/integration/test_memory_limit/__init__.py diff --git a/tests/integration/test_memory_limit/__init__.py b/tests/integration/test_memory_limit/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_memory_limit/test.py b/tests/integration/test_memory_limit/test.py index 292d3ad5719..942cf04f896 100644 --- a/tests/integration/test_memory_limit/test.py +++ b/tests/integration/test_memory_limit/test.py @@ -12,11 +12,11 @@ node = cluster.add_instance( main_configs=[ "configs/async_metrics_no.xml", ], - env_variables={ - "MALLOC_CONF": "dirty_decay_ms:0" - } + mem_limit="4g", + env_variables={"MALLOC_CONF": "dirty_decay_ms:0"}, ) + @pytest.fixture(scope="module", 
autouse=True) def start_cluster(): try: @@ -25,6 +25,7 @@ def start_cluster(): finally: cluster.shutdown() + def test_multiple_queries(): p = Pool(15) @@ -37,16 +38,14 @@ def test_multiple_queries(): tasks = [] for i in range(30): - tasks.append(p.apply_async(run_query, (node, ))) + tasks.append(p.apply_async(run_query, (node,))) time.sleep(i * 0.1) - for task in tasks: try: task.get() except Exception as ex: print("Exception", ex) - # test that we didn't kill the server node.query("SELECT 1") From 9adc5b9ba4650dc87d361bc88f7323fdac776125 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 15 Jul 2024 19:23:33 +0200 Subject: [PATCH 48/80] more fixes --- src/Common/Allocator.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index c8373e74501..bcc5d187047 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -190,9 +190,6 @@ void * Allocator::realloc(void * buf, size_t old_size, auto trace_alloc = CurrentMemoryTracker::alloc(new_size); if (void * ptr = GWPAsan::GuardedAlloc.allocate(new_size, alignment)) { - auto trace_free = CurrentMemoryTracker::free(old_size); - trace_free.onFree(buf, old_size); - memcpy(ptr, buf, std::min(old_size, new_size)); free(buf, old_size); trace_alloc.onAlloc(buf, new_size); @@ -209,6 +206,7 @@ void * Allocator::realloc(void * buf, size_t old_size, } else { + [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size); ProfileEvents::increment(ProfileEvents::GWPAsanAllocateFailed); } } From 04735f42297c6526b837e537c74ccbf0d38c0581 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 02:27:58 +0200 Subject: [PATCH 49/80] Fix clang tidy --- src/Interpreters/Context.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index fc1e87e7b7e..2602afd8b78 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include From 68aedc219187b0ffe89a3382c7f8136085e72ecf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 03:58:55 +0200 Subject: [PATCH 50/80] Fix something in Fast Test --- docker/test/fasttest/run.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 0d975d64010..58c436b469b 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -9,7 +9,11 @@ trap 'kill $(jobs -pr) ||:' EXIT stage=${stage:-} # Compiler version, normally set by Dockerfile -export LLVM_VERSION=${LLVM_VERSION:-17} +export LLVM_VERSION=${LLVM_VERSION:-18} + +# For some reason it is needed for tests with 'expect' but I don't know, why +groupadd --system --gid 1000 clickhouse +useradd --system --gid 1000 --uid 1000 clickhouse # A variable to pass additional flags to CMake. 
# Here we explicitly default it to nothing so that bash doesn't complain about From 26f8b81081dd75941bb72b00642391529b9d5bda Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Tue, 16 Jul 2024 10:54:00 +0800 Subject: [PATCH 51/80] update trusted contributors --- tests/ci/lambda_shared_package/lambda_shared/pr.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ci/lambda_shared_package/lambda_shared/pr.py b/tests/ci/lambda_shared_package/lambda_shared/pr.py index e981e28a454..e3aceaab0de 100644 --- a/tests/ci/lambda_shared_package/lambda_shared/pr.py +++ b/tests/ci/lambda_shared_package/lambda_shared/pr.py @@ -45,6 +45,7 @@ TRUSTED_CONTRIBUTORS = { "k-morozov", # Konstantin Morozov, Yandex Cloud "justindeguzman", # ClickHouse, Inc "jrdi", # ClickHouse contributor, TinyBird + "XuJia0210", # ClickHouse, Inc ] } From 919bbf57a7bca538f4a75c0e63af10555b75aa44 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:07:39 +0200 Subject: [PATCH 52/80] Fix hashing of empty tuples --- src/Functions/FunctionsHashing.h | 20 ++++++++++++---- .../03205_hashing_empty_tuples.reference | 22 ++++++++++++++++++ .../03205_hashing_empty_tuples.sql | 23 +++++++++++++++++++ 3 files changed, 60 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/03205_hashing_empty_tuples.reference create mode 100644 tests/queries/0_stateless/03205_hashing_empty_tuples.sql diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index 27717ea3611..95c54ac9528 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -1184,7 +1184,7 @@ private: if (icolumn->size() != vec_to.size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Argument column '{}' size {} doesn't match result column size {} of function {}", - icolumn->getName(), icolumn->size(), vec_to.size(), getName()); + icolumn->getName(), icolumn->size(), vec_to.size(), getName()); if constexpr (Keyed) if (key_cols.size() != vec_to.size() && key_cols.size() != 1) @@ -1223,6 +1223,9 @@ private: else executeGeneric(key_cols, icolumn, vec_to); } + /// Return a fixed random-looking magic number when input is empty. + static constexpr auto filler = 0xe28dbde7fe22e41c; + void executeForArgument(const KeyColumnsType & key_cols, const IDataType * type, const IColumn * column, typename ColumnVector::Container & vec_to, bool & is_first) const { /// Flattening of tuples. 
@@ -1231,6 +1234,11 @@ private: const auto & tuple_columns = tuple->getColumns(); const DataTypes & tuple_types = typeid_cast(*type).getElements(); size_t tuple_size = tuple_columns.size(); + + if (0 == tuple_size && is_first) + for (auto & hash : vec_to) + hash = static_cast(filler); + for (size_t i = 0; i < tuple_size; ++i) executeForArgument(key_cols, tuple_types[i].get(), tuple_columns[i].get(), vec_to, is_first); } @@ -1239,6 +1247,11 @@ private: const auto & tuple_columns = tuple_const->getColumns(); const DataTypes & tuple_types = typeid_cast(*type).getElements(); size_t tuple_size = tuple_columns.size(); + + if (0 == tuple_size && is_first) + for (auto & hash : vec_to) + hash = static_cast(filler); + for (size_t i = 0; i < tuple_size; ++i) { auto tmp = ColumnConst::create(tuple_columns[i], column->size()); @@ -1300,10 +1313,7 @@ public: constexpr size_t first_data_argument = Keyed; if (arguments.size() <= first_data_argument) - { - /// Return a fixed random-looking magic number when input is empty - vec_to.assign(input_rows_count, static_cast(0xe28dbde7fe22e41c)); - } + vec_to.assign(input_rows_count, static_cast(filler)); KeyColumnsType key_cols{}; if constexpr (Keyed) diff --git a/tests/queries/0_stateless/03205_hashing_empty_tuples.reference b/tests/queries/0_stateless/03205_hashing_empty_tuples.reference new file mode 100644 index 00000000000..e24b5809aee --- /dev/null +++ b/tests/queries/0_stateless/03205_hashing_empty_tuples.reference @@ -0,0 +1,22 @@ +16324913028386710556 +16324913028386710556 +5049034479224883533 +7385293435322750976 +12248912094175844631 +5049034479224883533 +5887129541803688833 +5887129541803688833 +13747979201178469747 +5887129541803688833 +15520217392480966957 +16324913028386710556 +16324913028386710556 +5049034479224883533 +7385293435322750976 +12248912094175844631 +5049034479224883533 +5887129541803688833 +5887129541803688833 +13747979201178469747 +5887129541803688833 +15520217392480966957 diff --git a/tests/queries/0_stateless/03205_hashing_empty_tuples.sql b/tests/queries/0_stateless/03205_hashing_empty_tuples.sql new file mode 100644 index 00000000000..4a97f30ced3 --- /dev/null +++ b/tests/queries/0_stateless/03205_hashing_empty_tuples.sql @@ -0,0 +1,23 @@ +SELECT sipHash64(()); +SELECT sipHash64((), ()); +SELECT sipHash64((), 1); +SELECT sipHash64(1, ()); +SELECT sipHash64(1, (), 1); +SELECT sipHash64((), 1, ()); +SELECT sipHash64((), (1, 2)); +SELECT sipHash64((), (1, 2)); +SELECT sipHash64((1, 2), ()); +SELECT sipHash64((), (1, 2), ()); +SELECT sipHash64((1, 2), (), (3, 4)); + +SELECT sipHash64(materialize(())); +SELECT sipHash64(materialize(()), materialize(())); +SELECT sipHash64(materialize(()), 1); +SELECT sipHash64(1, materialize(())); +SELECT sipHash64(1, materialize(()), 1); +SELECT sipHash64((), 1, materialize(())); +SELECT sipHash64(materialize(()), (1, 2)); +SELECT sipHash64(materialize(()), (1, 2)); +SELECT sipHash64((1, 2), materialize(())); +SELECT sipHash64(materialize(()), (1, 2), ()); +SELECT sipHash64((1, 2), materialize(()), (3, 4)); From 2c62a6b5283c68aac604a97591571e05e6877fab Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:10:26 +0200 Subject: [PATCH 53/80] Update pr.py --- .../lambda_shared_package/lambda_shared/pr.py | 21 +------------------ 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/tests/ci/lambda_shared_package/lambda_shared/pr.py b/tests/ci/lambda_shared_package/lambda_shared/pr.py index e3aceaab0de..d970662d38b 100644 --- a/tests/ci/lambda_shared_package/lambda_shared/pr.py 
+++ b/tests/ci/lambda_shared_package/lambda_shared/pr.py @@ -3,48 +3,29 @@ import re from typing import Tuple -# Individual trusted contirbutors who are not in any trusted organization. +# Individual trusted contributors who are not in any trusted organization. # Can be changed in runtime: we will append users that we learned to be in # a trusted org, to save GitHub API calls. TRUSTED_CONTRIBUTORS = { e.lower() for e in [ - "achimbab", # Kakao corp "Algunenano", # Raúl Marín, ClickHouse, Inc "amosbird", "azat", # SEMRush "bharatnc", # Many contributions. - "bobrik", # Seasoned contributor, CloudFlare "cwurm", # ClickHouse, Inc "den-crane", # Documentation contributor - "hagen1778", # Roman Khavronenko, seasoned contributor - "hczhcz", - "hexiaoting", # Seasoned contributor "ildus", # adjust, ex-pgpro - "javisantana", # a Spanish ClickHouse enthusiast, ex-Carto - "kreuzerkrieg", - "nikvas0", "nvartolomei", # Seasoned contributor, CloudFlare - "spongedu", # Seasoned contributor "taiyang-li", "ucasFL", # Amos Bird's friend "vdimir", # ClickHouse, Inc - "YiuRULE", - "zlobober", # Developer of YT - "ilejn", # Arenadata, responsible for Kerberized Kafka "thomoco", # ClickHouse, Inc - "BoloniniD", # Seasoned contributor, HSE "tonickkozlov", # Cloudflare "tylerhannan", # ClickHouse, Inc - "myrrc", # Mike Kot, DoubleCloud "thevar1able", # ClickHouse, Inc - "aalexfvk", - "MikhailBurdukov", "tsolodov", # ClickHouse, Inc - "kitaisreal", - "k-morozov", # Konstantin Morozov, Yandex Cloud "justindeguzman", # ClickHouse, Inc - "jrdi", # ClickHouse contributor, TinyBird "XuJia0210", # ClickHouse, Inc ] } From 5d96b684a640ef7cc0f911d25ab8fbde4db9f590 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Tue, 16 Jul 2024 11:30:13 +0800 Subject: [PATCH 54/80] update format --- tests/ci/lambda_shared_package/lambda_shared/pr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/lambda_shared_package/lambda_shared/pr.py b/tests/ci/lambda_shared_package/lambda_shared/pr.py index d970662d38b..9320cc4ce8c 100644 --- a/tests/ci/lambda_shared_package/lambda_shared/pr.py +++ b/tests/ci/lambda_shared_package/lambda_shared/pr.py @@ -26,7 +26,7 @@ TRUSTED_CONTRIBUTORS = { "thevar1able", # ClickHouse, Inc "tsolodov", # ClickHouse, Inc "justindeguzman", # ClickHouse, Inc - "XuJia0210", # ClickHouse, Inc + "XuJia0210", # ClickHouse, Inc ] } From 1d2f3a1ed5274764a2a051994ebd69464b6b1212 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:56:26 +0200 Subject: [PATCH 55/80] Update pr.py --- tests/ci/lambda_shared_package/lambda_shared/pr.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/ci/lambda_shared_package/lambda_shared/pr.py b/tests/ci/lambda_shared_package/lambda_shared/pr.py index 9320cc4ce8c..95130fc2a0f 100644 --- a/tests/ci/lambda_shared_package/lambda_shared/pr.py +++ b/tests/ci/lambda_shared_package/lambda_shared/pr.py @@ -9,7 +9,6 @@ from typing import Tuple TRUSTED_CONTRIBUTORS = { e.lower() for e in [ - "Algunenano", # Raúl Marín, ClickHouse, Inc "amosbird", "azat", # SEMRush "bharatnc", # Many contributions. 
@@ -19,11 +18,9 @@ TRUSTED_CONTRIBUTORS = { "nvartolomei", # Seasoned contributor, CloudFlare "taiyang-li", "ucasFL", # Amos Bird's friend - "vdimir", # ClickHouse, Inc "thomoco", # ClickHouse, Inc "tonickkozlov", # Cloudflare "tylerhannan", # ClickHouse, Inc - "thevar1able", # ClickHouse, Inc "tsolodov", # ClickHouse, Inc "justindeguzman", # ClickHouse, Inc "XuJia0210", # ClickHouse, Inc From e292358ade930e2779792365407fa229cd37d079 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 06:12:42 +0200 Subject: [PATCH 56/80] Update run.sh --- docker/test/fasttest/run.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 58c436b469b..57e7b6b2f56 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -11,9 +11,9 @@ stage=${stage:-} # Compiler version, normally set by Dockerfile export LLVM_VERSION=${LLVM_VERSION:-18} -# For some reason it is needed for tests with 'expect' but I don't know, why -groupadd --system --gid 1000 clickhouse -useradd --system --gid 1000 --uid 1000 clickhouse +# For some reason, it is needed for tests with 'expect', but I don't know, why. +sudo groupadd --system --gid 1000 clickhouse +sudo useradd --system --gid 1000 --uid 1000 clickhouse # A variable to pass additional flags to CMake. # Here we explicitly default it to nothing so that bash doesn't complain about From 3a8507b6eb906c71177e810a25878bf645147165 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 16 Jul 2024 09:04:38 +0200 Subject: [PATCH 57/80] Fix test --- .../queries/0_stateless/02231_buffer_aggregate_states_leak.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql index f1f7c876ba6..3d6b69fe161 100644 --- a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql +++ b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql @@ -29,7 +29,7 @@ from in_02231 group by key; set optimize_trivial_insert_select = 1; -insert into in_02231 select * from numbers(10e6) settings max_memory_usage='310Mi', max_threads=1; +insert into in_02231 select * from numbers(10e6) settings max_memory_usage='400Mi', max_threads=1; drop table buffer_02231; drop table out_02231; From a9b8c2a29a69d85be827048f02ca85a84ec7afca Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 16 Jul 2024 10:48:43 +0200 Subject: [PATCH 58/80] CI: Do not block Tests_3 unless MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI --- tests/ci/functional_test_check.py | 28 +++++++++++- tests/ci/integration_test_check.py | 19 ++++++++ tests/ci/merge_pr.py | 70 ++++-------------------------- 3 files changed, 54 insertions(+), 63 deletions(-) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 41c7ed963c9..74dd4d8fbd7 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -17,9 +17,19 @@ from download_release_packages import download_last_release from env_helper import REPO_COPY, REPORT_PATH, TEMP_PATH from get_robot_token import get_parameter_from_ssm from pr_info import PRInfo -from report import ERROR, SUCCESS, JobReport, StatusType, TestResults, read_test_results +from report import ( + ERROR, + SUCCESS, + JobReport, + StatusType, + TestResults, + read_test_results, + FAILURE, +) from stopwatch import Stopwatch from tee_popen import TeePopen +from ci_config import CI +from ci_utils import Utils 
NO_CHANGES_MSG = "Nothing to run" @@ -351,7 +361,23 @@ def main(): additional_files=additional_logs, ).dump(to_file=args.report_to_file if args.report_to_file else None) + should_block_ci = False if state != SUCCESS: + should_block_ci = True + + if state == FAILURE and CI.is_required(check_name): + failed_cnt = Utils.get_failed_tests_number(description) + print( + f"Job status is [{state}] with [{failed_cnt}] failed test cases. status description [{description}]" + ) + if ( + failed_cnt + and failed_cnt <= CI.MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI + ): + print(f"Won't block the CI workflow") + should_block_ci = False + + if should_block_ci: sys.exit(1) diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 80ac1935d95..6245f0490fc 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -23,10 +23,13 @@ from report import ( TestResult, TestResults, read_test_results, + FAILURE, ) from stopwatch import Stopwatch import integration_tests_runner as runner +from ci_config import CI +from ci_utils import Utils def get_json_params_dict( @@ -233,7 +236,23 @@ def main(): additional_files=additional_logs, ).dump(to_file=args.report_to_file if args.report_to_file else None) + should_block_ci = False if state != SUCCESS: + should_block_ci = True + + if state == FAILURE and CI.is_required(check_name): + failed_cnt = Utils.get_failed_tests_number(description) + print( + f"Job status is [{state}] with [{failed_cnt}] failed test cases. status description [{description}]" + ) + if ( + failed_cnt + and failed_cnt <= CI.MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI + ): + print(f"Won't block the CI workflow") + should_block_ci = False + + if should_block_ci: sys.exit(1) diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index 94456506879..4b57467ebdc 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -27,7 +27,6 @@ from report import SUCCESS, FAILURE from env_helper import GITHUB_UPSTREAM_REPOSITORY, GITHUB_REPOSITORY from synchronizer_utils import SYNC_BRANCH_PREFIX from ci_config import CI -from ci_utils import Utils # The team name for accepted approvals TEAM_NAME = getenv("GITHUB_TEAM_NAME", "core") @@ -249,74 +248,21 @@ def main(): repo = gh.get_repo(args.repo) if args.set_ci_status: + # set Mergeable check status and exit assert args.wf_status in (FAILURE, SUCCESS) - # set mergeable check status and exit commit = get_commit(gh, args.pr_info.sha) statuses = get_commit_filtered_statuses(commit) - max_failed_tests_per_job = 0 - job_name_with_max_failures = None - total_failed_tests = 0 - failed_to_get_info = False has_failed_statuses = False for status in statuses: - if not CI.is_required(status.context) or status.context in ( - CI.StatusNames.SYNC, - CI.StatusNames.PR_CHECK, - ): - # CI.StatusNames.SYNC or CI.StatusNames.PR_CHECK should not be checked - continue print(f"Check status [{status.context}], [{status.state}]") - if status.state == FAILURE: + if not CI.is_required(status.context) and status.state != SUCCESS: has_failed_statuses = True - failed_cnt = Utils.get_failed_tests_number(status.description) - if failed_cnt is None: - failed_to_get_info = True - print( - f"WARNING: failed to get number of failed tests from [{status.description}]" - ) - else: - if failed_cnt > max_failed_tests_per_job: - job_name_with_max_failures = status.context - max_failed_tests_per_job = failed_cnt - total_failed_tests += failed_cnt - print( - f"Failed test cases in [{status.context}] is [{failed_cnt}], total failures 
[{total_failed_tests}]" - ) - elif status.state != SUCCESS and status.context not in ( - CI.StatusNames.SYNC, - CI.StatusNames.PR_CHECK, - ): - # do not block CI on failures in (CI.StatusNames.SYNC, CI.StatusNames.PR_CHECK) - has_failed_statuses = True - print( - f"Unexpected status for [{status.context}]: [{status.state}] - block further testing" - ) - failed_to_get_info = True - - can_continue = True - if total_failed_tests > CI.MAX_TOTAL_FAILURES_BEFORE_BLOCKING_CI: - print( - f"Required check has [{total_failed_tests}] failed - block further testing" - ) - can_continue = False - if max_failed_tests_per_job > CI.MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI: - print( - f"Job [{job_name_with_max_failures}] has [{max_failed_tests_per_job}] failures - block further testing" - ) - can_continue = False - if failed_to_get_info: - print("Unexpected commit status state - block further testing") - can_continue = False - if args.wf_status != SUCCESS and not has_failed_statuses: - # workflow failed but reason is unknown as no failed statuses present - can_continue = False - print( - "WARNING: Either the runner is faulty or the operating status is unknown. The first is self-healing, the second requires investigation." - ) if args.wf_status == SUCCESS or has_failed_statuses: - # do not set mergeable check status if args.wf_status == failure, apparently it has died runners and is to be restarted + # set Mergeable check if workflow is successful (green) + # or if we have GH statuses with failures (red) + # to avoid false-green on a died runner state = trigger_mergeable_check( commit, statuses, @@ -333,10 +279,10 @@ def main(): print( "Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status" ) - - if not can_continue: + if args.wf_status == SUCCESS and not has_failed_statuses: + sys.exit(0) + else: sys.exit(1) - sys.exit(0) # An ugly and not nice fix to patch the wrong organization URL, # see https://github.com/PyGithub/PyGithub/issues/2395#issuecomment-1378629710 From 2018c5e760c148a1b5bc3a6b967fbb29125c5f84 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 16 Jul 2024 14:55:14 +0200 Subject: [PATCH 59/80] Disable sanitizer --- tests/integration/test_memory_limit/test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/test_memory_limit/test.py b/tests/integration/test_memory_limit/test.py index 942cf04f896..6d6745711da 100644 --- a/tests/integration/test_memory_limit/test.py +++ b/tests/integration/test_memory_limit/test.py @@ -27,6 +27,9 @@ def start_cluster(): def test_multiple_queries(): + if node.is_built_with_sanitizer(): + return + p = Pool(15) def run_query(node): From 2af361eb14617d067732795424c22a6f511c9f18 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 16 Jul 2024 16:18:41 +0200 Subject: [PATCH 60/80] minor fix --- tests/ci/merge_pr.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index 4b57467ebdc..b005be472c7 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -256,7 +256,12 @@ def main(): has_failed_statuses = False for status in statuses: print(f"Check status [{status.context}], [{status.state}]") - if not CI.is_required(status.context) and status.state != SUCCESS: + if ( + not CI.is_required(status.context) + and status.context not in (CI.StatusNames.SYNC, CI.StatusNames.PR_CHECK) + and status.state != SUCCESS + ): + print(f"WARNING: Failed status [{status.context}], [{status.state}]") has_failed_statuses = True if args.wf_status == 
SUCCESS or has_failed_statuses: From db7fb78a04280179b7ca09f588c957d2e0eab6be Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 16 Jul 2024 19:49:21 +0200 Subject: [PATCH 61/80] major fix --- tests/ci/ci_config.py | 6 ++++-- tests/ci/merge_pr.py | 6 +----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 9b9ddee5326..f5e39f343b2 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -415,7 +415,8 @@ class CI: JobNames.INTEGRATION_TEST_FLAKY: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True, - reference_job_name=JobNames.INTEGRATION_TEST_TSAN, + # TODO: approach with reference job names does not work because digest may not be calculated if job skipped in wf + # reference_job_name=JobNames.INTEGRATION_TEST_TSAN, ), JobNames.COMPATIBILITY_TEST: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -460,7 +461,8 @@ class CI: required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True, timeout=3600, - reference_job_name=JobNames.STATELESS_TEST_RELEASE, + # TODO: approach with reference job names does not work because digest may not be calculated if job skipped in wf + # reference_job_name=JobNames.STATELESS_TEST_RELEASE, ), JobNames.JEPSEN_KEEPER: JobConfig( required_builds=[BuildNames.BINARY_RELEASE], diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index b005be472c7..4d8facafb84 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -256,11 +256,7 @@ def main(): has_failed_statuses = False for status in statuses: print(f"Check status [{status.context}], [{status.state}]") - if ( - not CI.is_required(status.context) - and status.context not in (CI.StatusNames.SYNC, CI.StatusNames.PR_CHECK) - and status.state != SUCCESS - ): + if CI.is_required(status.context) and status.state != SUCCESS: print(f"WARNING: Failed status [{status.context}], [{status.state}]") has_failed_statuses = True From ea3a333a0711a33ad9193e67939b5f8a970834ea Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Tue, 16 Jul 2024 22:25:45 +0200 Subject: [PATCH 62/80] Fix formatting error --- docs/en/sql-reference/functions/other-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 260457b3be1..5e2802eb08b 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -86,7 +86,7 @@ Returns the fully qualified domain name of the ClickHouse server. fqdn(); ``` -Aliases: `fullHostName`, 'FQDN'. +Aliases: `fullHostName`, `FQDN`. **Returned value** From 8c0ade413cca3ea63b47909c54c80de76c1e2a26 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Tue, 16 Jul 2024 22:28:12 +0200 Subject: [PATCH 63/80] Update other-functions.md --- docs/en/sql-reference/functions/other-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 5e2802eb08b..b7e4094f30e 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -76,7 +76,7 @@ WHERE macro = 'test'; └───────┴──────────────┘ ``` -## FQDN +## fqdn Returns the fully qualified domain name of the ClickHouse server. 
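For illustration, a minimal usage sketch of the `fqdn` function whose documentation is touched above. This is a sketch only: the returned value is entirely host-dependent, and the hostname below is a hypothetical example.

```sql
-- fullHostName is documented above as an alias of fqdn, so both columns match.
SELECT fqdn(), fullHostName();
```

On a host named `clickhouse.example.com` (hypothetical), both columns would contain that string.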
From d579e9f64f1142934eeb8628f9b2764b5a022ec9 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Tue, 16 Jul 2024 23:12:02 +0200
Subject: [PATCH 64/80] Fix logical error in PrometheusRequestHandler.

---
 src/Server/PrometheusRequestHandler.cpp | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp
index 1f3e038a1f5..87c106c3fc0 100644
--- a/src/Server/PrometheusRequestHandler.cpp
+++ b/src/Server/PrometheusRequestHandler.cpp
@@ -30,15 +30,8 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe
         response.setContentType("text/plain; version=0.0.4; charset=UTF-8");

         WriteBufferFromHTTPServerResponse wb(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout, write_event);
-        try
-        {
-            metrics_writer->write(wb);
-            wb.finalize();
-        }
-        catch (...)
-        {
-            wb.finalize();
-        }
+        metrics_writer->write(wb);
+        wb.finalize();
     }
     catch (...)
     {

From 9ff6de4eba0e82ff6dc2c1d007d4866637e1d0b2 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 16 Jul 2024 23:17:45 +0200
Subject: [PATCH 65/80] Remove unused header

---
 src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp
index b9edff39b82..a904b29e12f 100644
--- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp
+++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include

From 5ca7241865f863d68b3a3b96011b9238e49ac3f8 Mon Sep 17 00:00:00 2001
From: Max K
Date: Wed, 17 Jul 2024 00:23:14 +0200
Subject: [PATCH 66/80] CI: AWS ASG decrease capacity from runners

---
 tests/ci/worker/init_runner.sh | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh
index ec7793db2aa..91bffa876b2 100644
--- a/tests/ci/worker/init_runner.sh
+++ b/tests/ci/worker/init_runner.sh
@@ -50,7 +50,7 @@ set -uo pipefail
 # set accordingly to a runner role #
 ####################################

-echo "Running init script"
+echo "Running init script v1"

 export DEBIAN_FRONTEND=noninteractive
 export RUNNER_HOME=/home/ubuntu/actions-runner
@@ -90,7 +90,6 @@ terminate_delayed() {
     # IF `sleep` IS CHANGED, CHANGE ANOTHER VALUE IN `pgrep`
     sleep=13.14159265358979323846
     echo "Going to terminate the runner's instance in $sleep seconds"
-    INSTANCE_ID=$(ec2metadata --instance-id)
     # We execute it with `at` to not have it as an orphan process, but launched independently
     # GH Runners kill all remaining processes
     echo "sleep '$sleep'; aws ec2 terminate-instances --instance-ids $INSTANCE_ID" | at now || \
@@ -111,11 +110,17 @@ declare -f terminate_delayed >> /tmp/actions-hooks/common.sh
 terminate_and_exit() {
     # Terminate instance and exit from the script instantly
     echo "Going to terminate the runner's instance"
-    INSTANCE_ID=$(ec2metadata --instance-id)
     aws ec2 terminate-instances --instance-ids "$INSTANCE_ID"
     exit 0
 }

+terminate_decrease_and_exit() {
+    # Terminate the instance, decrement the ASG desired capacity, and exit from the script instantly
+    echo "Going to terminate the runner's instance and decrease asg capacity"
+    aws autoscaling terminate-instance-in-auto-scaling-group --instance-id "$INSTANCE_ID" --should-decrement-desired-capacity
+    exit 0
+}
+
 declare -f terminate_and_exit >> /tmp/actions-hooks/common.sh

 check_spot_instance_is_old() { 
@@ -324,7 +329,7 @@ while true; do sudo -u ubuntu ./config.sh remove --token "$(get_runner_token)" \ || continue echo "Runner didn't launch or have assigned jobs after ${RUNNER_AGE} seconds, shutting down" - terminate_and_exit + terminate_decrease_and_exit fi fi else From a1a802cee9f1e75cfdaed7c86aac63fd4e3010db Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 17 Jul 2024 00:32:13 +0200 Subject: [PATCH 67/80] fix deployment script --- tests/ci/worker/deploy-runner-init.sh | 4 +++- tests/ci/worker/init_runner.sh | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/ci/worker/deploy-runner-init.sh b/tests/ci/worker/deploy-runner-init.sh index 06edede48fa..96fbd82a99c 100755 --- a/tests/ci/worker/deploy-runner-init.sh +++ b/tests/ci/worker/deploy-runner-init.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -e + usage() { echo "Usage: $0 ENVIRONMENT" >&2 echo "Valid values for ENVIRONMENT: staging, production" >&2 @@ -55,7 +57,7 @@ EOF body() { local first_line - first_line=$(sed -n '/^# THE SCRIPT START$/{=;q}' "$SOURCE_SCRIPT") + first_line=$(sed -n '/^# THE SCRIPT START$/{=;q;}' "$SOURCE_SCRIPT") if [ -z "$first_line" ]; then echo "The pattern '# THE SCRIPT START' is not found in $SOURCE_SCRIPT" >&2 exit 1 diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh index 91bffa876b2..d6cdb6d9c57 100644 --- a/tests/ci/worker/init_runner.sh +++ b/tests/ci/worker/init_runner.sh @@ -50,7 +50,7 @@ set -uo pipefail # set accordingly to a runner role # #################################### -echo "Running init script v1" +echo "Running init v1" export DEBIAN_FRONTEND=noninteractive export RUNNER_HOME=/home/ubuntu/actions-runner From 0d1789292bc480c33c0f2b2ee54058976dea74fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 02:11:09 +0200 Subject: [PATCH 68/80] Add sudo for convenience --- docker/test/util/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index f13bb576f79..babddbd228c 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -17,6 +17,7 @@ ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18 RUN apt-get update \ && apt-get install \ + sudo \ apt-transport-https \ apt-utils \ ca-certificates \ From c913a36ed6580e87ebb641cb3024c7182725d8ea Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 02:13:57 +0200 Subject: [PATCH 69/80] Minor modifications --- docker/test/fasttest/Dockerfile | 5 ++++- docker/test/fasttest/run.sh | 4 ---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 2512268be0f..72e33226f38 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -26,7 +26,10 @@ RUN apt-get update \ zstd \ --yes --no-install-recommends \ && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \ + && groupadd --system --gid 1000 clickhouse \ + && useradd --system --gid 1000 --uid 1000 clickhouse +# ^ For some reason, groupadd and useradd are needed for tests with 'expect', but I don't know, why. 
COPY requirements.txt / RUN pip3 install --no-cache-dir -r /requirements.txt diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 57e7b6b2f56..970bf12a81a 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -11,10 +11,6 @@ stage=${stage:-} # Compiler version, normally set by Dockerfile export LLVM_VERSION=${LLVM_VERSION:-18} -# For some reason, it is needed for tests with 'expect', but I don't know, why. -sudo groupadd --system --gid 1000 clickhouse -sudo useradd --system --gid 1000 --uid 1000 clickhouse - # A variable to pass additional flags to CMake. # Here we explicitly default it to nothing so that bash doesn't complain about # it being undefined. Also read it as array so that we can pass an empty list From a40edea5c99ac290a3b04a015065c0cad5266ab0 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Wed, 17 Jul 2024 09:37:28 +0800 Subject: [PATCH 70/80] trigger CI From 60f45cb993ccf15812605d51b6e7ce8489c98d05 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 03:52:35 +0200 Subject: [PATCH 71/80] A fix --- docker/test/fasttest/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 72e33226f38..4cac2ee6135 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \ && groupadd --system --gid 1000 clickhouse \ - && useradd --system --gid 1000 --uid 1000 clickhouse + && useradd --system --gid 1000 --uid 1000 -m clickhouse # ^ For some reason, groupadd and useradd are needed for tests with 'expect', but I don't know, why. COPY requirements.txt / From 968d28f1f4bae02eae2f7b96c774ddfd9656c308 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 07:24:46 +0200 Subject: [PATCH 72/80] Remove outdated articles from the docs --- docs/en/sql-reference/data-types/lowcardinality.md | 3 +-- docs/zh/changelog/index.md | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index 133ac2bd72e..a92f6f4dce6 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -56,7 +56,6 @@ Functions: ## Related content -- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://altinity.com/blog/2020-5-20-reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer) -- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf) - Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) - Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) +- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). 
[Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf) diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md index c91d8bcf4d1..cd77a8c03cf 100644 --- a/docs/zh/changelog/index.md +++ b/docs/zh/changelog/index.md @@ -356,7 +356,7 @@ sidebar_label: "\u53D8\u66F4\u65E5\u5FD7" #### 新功能 {#new-feature-1} -- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。 这个新功能是由Altinity的特殊要求添加到错误修正版本中的。 +- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。 [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) ### ClickHouse版本v20.1.2.4,2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22} From 005256429fb4d6a9aec88b3bb5662382166f6f9b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 17 Jul 2024 11:49:15 +0200 Subject: [PATCH 73/80] Don't run test 03198_dynamic_read_subcolumns in azure --- tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql index 27fca179580..155009cc2fe 100644 --- a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql +++ b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql @@ -1,4 +1,4 @@ --- Tags: no-random-settings, no-s3-storage +-- Tags: no-random-settings, no-object-storage SET allow_experimental_dynamic_type = 1; DROP TABLE IF EXISTS test_dynamic; From 2958803255f0f646147eca4f9432f72496318bbf Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 17 Jul 2024 11:26:38 +0200 Subject: [PATCH 74/80] do not decrease from lambda --- tests/ci/autoscale_runners_lambda/app.py | 77 ++++++++++--------- .../test_autoscale.py | 25 ++++-- 2 files changed, 60 insertions(+), 42 deletions(-) diff --git a/tests/ci/autoscale_runners_lambda/app.py b/tests/ci/autoscale_runners_lambda/app.py index 520abcb41bb..aed5cf3a8dd 100644 --- a/tests/ci/autoscale_runners_lambda/app.py +++ b/tests/ci/autoscale_runners_lambda/app.py @@ -99,7 +99,8 @@ def set_capacity( continue raise ValueError("Queue status is not in ['in_progress', 'queued']") - scale_down, scale_up = get_scales(runner_type) + #scale_down, scale_up = get_scales(runner_type) + _, scale_up = get_scales(runner_type) # With lyfecycle hooks some instances are actually free because some of # them are in 'Terminating:Wait' state effective_capacity = max( @@ -110,7 +111,7 @@ def set_capacity( # How much nodes are free (positive) or need to be added (negative) capacity_reserve = effective_capacity - running - queued stop = False - if capacity_reserve < 0: + if capacity_reserve <= 0: # This part is about scaling up capacity_deficit = -capacity_reserve # It looks that we are still OK, since no queued jobs exist @@ -158,41 +159,43 @@ def set_capacity( ) return - # Now we will calculate if we need to scale down - stop = stop or asg["DesiredCapacity"] == asg["MinSize"] - new_capacity = asg["DesiredCapacity"] - (capacity_reserve // scale_down) - new_capacity = max(new_capacity, asg["MinSize"]) - new_capacity = min(new_capacity, asg["MaxSize"]) - stop = stop or asg["DesiredCapacity"] == new_capacity - if stop: - logging.info( - "Do not decrease ASG %s capacity, current capacity=%s, effective " - "capacity=%s, minimum capacity=%s, running jobs=%s, queue size=%s", - asg["AutoScalingGroupName"], - asg["DesiredCapacity"], - 
effective_capacity, - asg["MinSize"], - running, - queued, - ) - return - - logging.info( - "The ASG %s capacity will be decreased to %s, current capacity=%s, effective " - "capacity=%s, minimum capacity=%s, running jobs=%s, queue size=%s", - asg["AutoScalingGroupName"], - new_capacity, - asg["DesiredCapacity"], - effective_capacity, - asg["MinSize"], - running, - queued, - ) - if not dry_run: - client.set_desired_capacity( - AutoScalingGroupName=asg["AutoScalingGroupName"], - DesiredCapacity=new_capacity, - ) + # FIXME: try decreasing capacity from runners that finished their jobs and have no job assigned + # IMPORTANT: Runner init script must be of version that supports ASG decrease + # # Now we will calculate if we need to scale down + # stop = stop or asg["DesiredCapacity"] == asg["MinSize"] + # new_capacity = asg["DesiredCapacity"] - (capacity_reserve // scale_down) + # new_capacity = max(new_capacity, asg["MinSize"]) + # new_capacity = min(new_capacity, asg["MaxSize"]) + # stop = stop or asg["DesiredCapacity"] == new_capacity + # if stop: + # logging.info( + # "Do not decrease ASG %s capacity, current capacity=%s, effective " + # "capacity=%s, minimum capacity=%s, running jobs=%s, queue size=%s", + # asg["AutoScalingGroupName"], + # asg["DesiredCapacity"], + # effective_capacity, + # asg["MinSize"], + # running, + # queued, + # ) + # return + # + # logging.info( + # "The ASG %s capacity will be decreased to %s, current capacity=%s, effective " + # "capacity=%s, minimum capacity=%s, running jobs=%s, queue size=%s", + # asg["AutoScalingGroupName"], + # new_capacity, + # asg["DesiredCapacity"], + # effective_capacity, + # asg["MinSize"], + # running, + # queued, + # ) + # if not dry_run: + # client.set_desired_capacity( + # AutoScalingGroupName=asg["AutoScalingGroupName"], + # DesiredCapacity=new_capacity, + # ) def main(dry_run: bool = True) -> None: diff --git a/tests/ci/autoscale_runners_lambda/test_autoscale.py b/tests/ci/autoscale_runners_lambda/test_autoscale.py index 3aeab49ffc7..d1a1f9b358f 100644 --- a/tests/ci/autoscale_runners_lambda/test_autoscale.py +++ b/tests/ci/autoscale_runners_lambda/test_autoscale.py @@ -97,19 +97,34 @@ class TestSetCapacity(unittest.TestCase): ), TestCase("lower-min", 10, 5, 20, [Queue("queued", 5, "lower-min")], 10), # Decrease capacity - TestCase("w/reserve", 1, 13, 20, [Queue("queued", 5, "w/reserve")], 5), + # FIXME: Tests changed for lambda that can only scale up + # TestCase("w/reserve", 1, 13, 20, [Queue("queued", 5, "w/reserve")], 5), + TestCase("w/reserve", 1, 13, 20, [Queue("queued", 5, "w/reserve")], -1), + # TestCase( + # "style-checker", 1, 13, 20, [Queue("queued", 5, "style-checker")], 5 + # ), TestCase( - "style-checker", 1, 13, 20, [Queue("queued", 5, "style-checker")], 5 + "style-checker", 1, 13, 20, [Queue("queued", 5, "style-checker")], -1 ), - TestCase("w/reserve", 1, 23, 20, [Queue("queued", 17, "w/reserve")], 17), - TestCase("decrease", 1, 13, 20, [Queue("in_progress", 3, "decrease")], 3), + # TestCase("w/reserve", 1, 23, 20, [Queue("queued", 17, "w/reserve")], 17), + TestCase("w/reserve", 1, 23, 20, [Queue("queued", 17, "w/reserve")], -1), + # TestCase("decrease", 1, 13, 20, [Queue("in_progress", 3, "decrease")], 3), + TestCase("decrease", 1, 13, 20, [Queue("in_progress", 3, "decrease")], -1), + # TestCase( + # "style-checker", + # 1, + # 13, + # 20, + # [Queue("in_progress", 5, "style-checker")], + # 5, + # ), TestCase( "style-checker", 1, 13, 20, [Queue("in_progress", 5, "style-checker")], - 5, + -1, ), ) for t in test_cases: 
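
Taken together, PATCH 66 and PATCH 74 move scale-down out of the lambda: the autoscaler now only grows the group, and idle runners shrink it themselves via `terminate-instance-in-auto-scaling-group`. Below is a rough sketch of the scale-up arithmetic that remains; the deficit-to-instances step (`ceil(deficit / scale_up)`) is an assumption for illustration, since that part of `set_capacity()` is not shown in the hunks above.

```python
import math


def plan_capacity(desired: int, effective_capacity: int, running: int,
                  queued: int, scale_up: int, max_size: int) -> int:
    # How many nodes are free (positive) or missing (negative).
    capacity_reserve = effective_capacity - running - queued
    if capacity_reserve > 0:
        # Headroom exists; after PATCH 74 the lambda never shrinks the group.
        return desired
    capacity_deficit = -capacity_reserve
    # Assumed formula: grow by a scale_up-th of the deficit per invocation,
    # rounded up, so bursts are absorbed gradually.
    new_capacity = desired + math.ceil(capacity_deficit / scale_up)
    return min(new_capacity, max_size)


# 13 running + 5 queued against 10 effective nodes -> deficit of 8,
# so with scale_up=5 the group grows by ceil(8/5) = 2 instances.
print(plan_capacity(desired=10, effective_capacity=10, running=13,
                    queued=5, scale_up=5, max_size=20))
```

Note that PATCH 74 also changes the branch condition from `< 0` to `<= 0`, so a reserve of exactly zero takes the scale-up path as well (with a deficit of zero it simply leaves the capacity unchanged, as in the model above).
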
From c40473ff80c0cdf09b85b7dda695a41ee2df0112 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 17 Jul 2024 10:50:52 +0000 Subject: [PATCH 75/80] Automatic style fix --- tests/ci/autoscale_runners_lambda/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/autoscale_runners_lambda/app.py b/tests/ci/autoscale_runners_lambda/app.py index aed5cf3a8dd..c790a5081dd 100644 --- a/tests/ci/autoscale_runners_lambda/app.py +++ b/tests/ci/autoscale_runners_lambda/app.py @@ -99,7 +99,7 @@ def set_capacity( continue raise ValueError("Queue status is not in ['in_progress', 'queued']") - #scale_down, scale_up = get_scales(runner_type) + # scale_down, scale_up = get_scales(runner_type) _, scale_up = get_scales(runner_type) # With lyfecycle hooks some instances are actually free because some of # them are in 'Terminating:Wait' state From 3ade6de95df356adfaf7f6a6e3fb22d2545c6c50 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 17 Jul 2024 13:13:51 +0200 Subject: [PATCH 76/80] Add description about no-object-storage tag --- tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql index 155009cc2fe..5d473064c68 100644 --- a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql +++ b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql @@ -1,4 +1,5 @@ -- Tags: no-random-settings, no-object-storage +-- Tag no-object-storage: this test relies on the number of opened files in MergeTree that can differ in object storages SET allow_experimental_dynamic_type = 1; DROP TABLE IF EXISTS test_dynamic; From 245bc587ae5d9cb6751153b7bf24dcf88487d5dd Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Wed, 17 Jul 2024 17:57:28 +0200 Subject: [PATCH 77/80] add 'fqdn' to list --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 4e7bf7f686e..b4b18f9bbf6 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1653,6 +1653,7 @@ formated formatschema formatter formatters +fqdn frac freezed fromDaysSinceYearZero From d5345c5aae8c8598a043311e58318d22c5396392 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 18:31:10 +0200 Subject: [PATCH 78/80] Update Squashing.cpp --- src/Interpreters/Squashing.cpp | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 4af7d57f5a0..49e95f202dd 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -17,38 +17,6 @@ namespace ErrorCodes namespace { -Chunk squashImpl(std::vector & input_chunks) -{ - Chunk accumulated_chunk; - std::vector mutable_columns = {}; - size_t rows = 0; - for (const Chunk & chunk : input_chunks) - rows += chunk.getNumRows(); - - { - auto & first_chunk = input_chunks[0]; - Columns columns = first_chunk.detachColumns(); - for (auto & column : columns) - { - mutable_columns.push_back(IColumn::mutate(std::move(column))); - mutable_columns.back()->reserve(rows); - } - } - - for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above - { - Columns columns = 
input_chunks[i].detachColumns(); - for (size_t j = 0, size = mutable_columns.size(); j < size; ++j) - { - const auto source_column = columns[j]; - - mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size()); - } - } - accumulated_chunk.setColumns(std::move(mutable_columns), rows); - return accumulated_chunk; -} - const ChunksToSquash * getInfoFromChunk(const Chunk & chunk) { const auto & info = chunk.getChunkInfo(); From 37dd7756434c4a2fa780fc187505c67e92be6f45 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 18:31:27 +0200 Subject: [PATCH 79/80] Update Squashing.cpp --- src/Interpreters/Squashing.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 49e95f202dd..3a7f28ed837 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -14,22 +14,6 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -namespace -{ - -const ChunksToSquash * getInfoFromChunk(const Chunk & chunk) -{ - const auto & info = chunk.getChunkInfo(); - const auto * agg_info = typeid_cast(info.get()); - - if (!agg_info) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no ChunksToSquash in ChunkInfoPtr"); - - return agg_info; -} - -} - Squashing::Squashing(Block header_, size_t min_block_size_rows_, size_t min_block_size_bytes_) : min_block_size_rows(min_block_size_rows_) , min_block_size_bytes(min_block_size_bytes_) From c57ce063a346698b550134762277c81e12072d88 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 17 Jul 2024 18:37:38 +0100 Subject: [PATCH 80/80] impl --- .../02044_url_glob_parallel_connection_refused.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh b/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh index 7e8579f7cbe..b4b0ee8a023 100755 --- a/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh +++ b/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh @@ -8,7 +8,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) i=0 retries=5 # Connecting to wrong address and checking for race condition +# http_max_tries is limited to 2 because with the default 10 retries the execution time might go as high as around 3 minutes (because of exponential back-off). +# because of that we might see wrong 'tests hung' reports depending on how close to the end of tests run this particular test was executed. +# proper fix should be implemented in https://github.com/ClickHouse/ClickHouse/issues/66656 while [[ $i -lt $retries ]]; do - timeout 5s ${CLICKHOUSE_CLIENT} --max_threads 10 --query "SELECT * FROM url('http://128.0.0.{1..10}:${CLICKHOUSE_PORT_HTTP}/?query=SELECT+sleep(1)', TSV, 'x UInt8')" --format Null 2>/dev/null + timeout 5s ${CLICKHOUSE_CLIENT} --max_threads 10 --http_max_tries 2 --query "SELECT * FROM url('http://128.0.0.{1..10}:${CLICKHOUSE_PORT_HTTP}/?query=SELECT+sleep(1)', TSV, 'x UInt8')" --format Null 2>/dev/null ((++i)) done
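
The `--http_max_tries 2` comment in PATCH 80 rests on back-off arithmetic that is easy to reproduce. A small sketch follows, assuming a 100 ms base delay that doubles per attempt with a 60 s per-step cap; these parameters are illustrative, and the actual retry settings of ClickHouse's HTTP read buffer may differ.

```python
def total_backoff_seconds(tries: int, base: float = 0.1, cap: float = 60.0) -> float:
    # Sum of sleeps before each retry: base, 2*base, 4*base, ... capped per step.
    return sum(min(base * 2 ** attempt, cap) for attempt in range(tries))


for tries in (2, 10):
    print(f"http_max_tries={tries}: ~{total_backoff_seconds(tries):.1f}s spent backing off")
```

With the defaults assumed here, ten tries already accumulate roughly 102 s of sleep per URL before any connect timeouts are counted, which is how a glob over ten unreachable addresses can approach the three-minute mark the comment warns about; two tries keep the back-off under a second.
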