Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)

Merge pull request #8476 from ClickHouse/table_name_in_istorage

Add StorageID with optional UUID to IStorage

Commit: afb003cb89
@@ -993,7 +993,7 @@ bool TCPHandler::receiveData(bool scalar)
         if (!(storage = query_context->tryGetExternalTable(name)))
         {
             NamesAndTypesList columns = block.getNamesAndTypesList();
-            storage = StorageMemory::create("_external", name, ColumnsDescription{columns}, ConstraintsDescription{});
+            storage = StorageMemory::create(StorageID("_external", name), ColumnsDescription{columns}, ConstraintsDescription{});
             storage->startup();
             query_context->addExternalTable(name, storage);
         }
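For reference: StorageID itself is consumed throughout this diff but never defined in it. Below is a simplified sketch of the shape implied by its uses in this commit (fields database_name, table_name, an optional uuid, and getNameForLogs()); this is a reconstruction, not the real Storages/StorageID.h — the constructor signature is an assumption and the uuid is modelled as a plain string for brevity (the real field is a UUID type).

    #include <string>

    // Hedged sketch: field names match the diff, everything else is assumed.
    struct StorageID
    {
        std::string database_name;
        std::string table_name;
        std::string uuid;  // optional; empty while the table has no UUID

        StorageID(std::string db, std::string table, std::string uuid_ = {})
            : database_name(std::move(db)), table_name(std::move(table)), uuid(std::move(uuid_)) {}

        std::string getNameForLogs() const
        {
            return database_name + "." + table_name
                + (uuid.empty() ? "" : " (" + uuid + ")");
        }
    };

With this shape, the call above reads naturally: StorageID("_external", name) packs the temporary-table namespace and the table name into one value instead of two positional strings.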
@@ -160,7 +160,7 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header,
     /// Create table
     NamesAndTypesList columns = sample_block.getNamesAndTypesList();
-    StoragePtr storage = StorageMemory::create("_external", data.second, ColumnsDescription{columns}, ConstraintsDescription{});
+    StoragePtr storage = StorageMemory::create(StorageID("_external", data.second), ColumnsDescription{columns}, ConstraintsDescription{});
     storage->startup();
     context.addExternalTable(data.second, storage);
     BlockOutputStreamPtr output = storage->write(ASTPtr(), context);
@@ -47,7 +47,8 @@ std::ostream & operator<<(std::ostream & stream, const IDataType & what)
 std::ostream & operator<<(std::ostream & stream, const IStorage & what)
 {
-    stream << "IStorage(name = " << what.getName() << ", tableName = " << what.getTableName() << ") {"
+    auto table_id = what.getStorageID();
+    stream << "IStorage(name = " << what.getName() << ", tableName = " << table_id.table_name << ") {"
            << what.getColumns().getAllPhysical().toString() << "}";
     return stream;
 }
@@ -18,7 +18,7 @@ namespace DB
 {

 PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
-    const String & database, const String & table, const StoragePtr & storage_,
+    const StoragePtr & storage_,
     const Context & context_, const ASTPtr & query_ptr_, bool no_destination)
     : storage(storage_), context(context_), query_ptr(query_ptr_)
 {
@@ -32,47 +32,44 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
     /// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks
     bool disable_deduplication_for_children = !no_destination && storage->supportsDeduplication();

-    if (!table.empty())
+    auto table_id = storage->getStorageID();
+    Dependencies dependencies = context.getDependencies(table_id);
+
+    /// We need special context for materialized views insertions
+    if (!dependencies.empty())
     {
-        Dependencies dependencies = context.getDependencies(database, table);
+        views_context = std::make_unique<Context>(context);
+        // Do not deduplicate insertions into MV if the main insertion is Ok
+        if (disable_deduplication_for_children)
+            views_context->getSettingsRef().insert_deduplicate = false;
+    }

-        /// We need special context for materialized views insertions
-        if (!dependencies.empty())
+    for (const auto & database_table : dependencies)
+    {
+        auto dependent_table = context.getTable(database_table);
+
+        ASTPtr query;
+        BlockOutputStreamPtr out;
+
+        if (auto * materialized_view = dynamic_cast<const StorageMaterializedView *>(dependent_table.get()))
         {
-            views_context = std::make_unique<Context>(context);
-            // Do not deduplicate insertions into MV if the main insertion is Ok
-            if (disable_deduplication_for_children)
-                views_context->getSettingsRef().insert_deduplicate = false;
+            StoragePtr inner_table = materialized_view->getTargetTable();
+            auto inner_table_id = inner_table->getStorageID();
+            query = materialized_view->getInnerQuery();
+            std::unique_ptr<ASTInsertQuery> insert = std::make_unique<ASTInsertQuery>();
+            insert->database = inner_table_id.database_name;
+            insert->table = inner_table_id.table_name;
+            ASTPtr insert_query_ptr(insert.release());
+            InterpreterInsertQuery interpreter(insert_query_ptr, *views_context);
+            BlockIO io = interpreter.execute();
+            out = io.out;
         }
+        else if (dynamic_cast<const StorageLiveView *>(dependent_table.get()))
+            out = std::make_shared<PushingToViewsBlockOutputStream>(dependent_table, *views_context, ASTPtr(), true);
+        else
+            out = std::make_shared<PushingToViewsBlockOutputStream>(dependent_table, *views_context, ASTPtr());

-        for (const auto & database_table : dependencies)
-        {
-            auto dependent_table = context.getTable(database_table.first, database_table.second);
-
-            ASTPtr query;
-            BlockOutputStreamPtr out;
-
-            if (auto * materialized_view = dynamic_cast<const StorageMaterializedView *>(dependent_table.get()))
-            {
-                StoragePtr inner_table = materialized_view->getTargetTable();
-                query = materialized_view->getInnerQuery();
-                std::unique_ptr<ASTInsertQuery> insert = std::make_unique<ASTInsertQuery>();
-                insert->database = inner_table->getDatabaseName();
-                insert->table = inner_table->getTableName();
-                ASTPtr insert_query_ptr(insert.release());
-                InterpreterInsertQuery interpreter(insert_query_ptr, *views_context);
-                BlockIO io = interpreter.execute();
-                out = io.out;
-            }
-            else if (dynamic_cast<const StorageLiveView *>(dependent_table.get()))
-                out = std::make_shared<PushingToViewsBlockOutputStream>(
-                    database_table.first, database_table.second, dependent_table, *views_context, ASTPtr(), true);
-            else
-                out = std::make_shared<PushingToViewsBlockOutputStream>(
-                    database_table.first, database_table.second, dependent_table, *views_context, ASTPtr());
-
-            views.emplace_back(ViewInfo{std::move(query), database_table.first, database_table.second, std::move(out)});
-        }
+        views.emplace_back(ViewInfo{std::move(query), database_table, std::move(out)});
     }

     /* Do not push to destination table if the flag is set */
@@ -161,7 +158,7 @@ void PushingToViewsBlockOutputStream::writePrefix()
     }
     catch (Exception & ex)
     {
-        ex.addMessage("while write prefix to view " + view.database + "." + view.table);
+        ex.addMessage("while write prefix to view " + view.table_id.getNameForLogs());
         throw;
     }
 }
@@ -180,7 +177,7 @@ void PushingToViewsBlockOutputStream::writeSuffix()
     }
     catch (Exception & ex)
     {
-        ex.addMessage("while write prefix to view " + view.database + "." + view.table);
+        ex.addMessage("while write prefix to view " + view.table_id.getNameForLogs());
         throw;
     }
 }
@@ -223,7 +220,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
         /// InterpreterSelectQuery will do processing of alias columns.
         Context local_context = *views_context;
         local_context.addViewSource(
-            StorageValues::create(storage->getDatabaseName(), storage->getTableName(), storage->getColumns(),
+            StorageValues::create(storage->getStorageID(), storage->getColumns(),
                                   block));
         select.emplace(view.query, local_context, SelectQueryOptions());
         in = std::make_shared<MaterializingBlockInputStream>(select->execute().in);
@@ -250,7 +247,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
     }
     catch (Exception & ex)
     {
-        ex.addMessage("while pushing to view " + backQuoteIfNeed(view.database) + "." + backQuoteIfNeed(view.table));
+        ex.addMessage("while pushing to view " + view.table_id.getNameForLogs());
         throw;
     }
 }
@@ -17,8 +17,7 @@ class ReplicatedMergeTreeBlockOutputStream;
 class PushingToViewsBlockOutputStream : public IBlockOutputStream
 {
 public:
-    PushingToViewsBlockOutputStream(
-        const String & database, const String & table, const StoragePtr & storage_,
+    PushingToViewsBlockOutputStream(const StoragePtr & storage_,
         const Context & context_, const ASTPtr & query_ptr_, bool no_destination = false);

     Block getHeader() const override;
@@ -39,8 +38,7 @@ private:
     struct ViewInfo
     {
         ASTPtr query;
-        String database;
-        String table;
+        StorageID table_id;
         BlockOutputStreamPtr out;
     };
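The ViewInfo change is the whole refactoring in miniature: two loose strings collapse into one identifier. A self-contained before/after sketch, with stand-in types (IAST, IBlockOutputStream, StorageID below are placeholders, not ClickHouse's real classes):

    #include <memory>
    #include <string>

    struct IAST {};                                   // stand-in
    using ASTPtr = std::shared_ptr<IAST>;
    struct IBlockOutputStream {};                     // stand-in
    using BlockOutputStreamPtr = std::shared_ptr<IBlockOutputStream>;
    struct StorageID { std::string database_name, table_name, uuid; };

    struct ViewInfoBefore   // identity spread over two unrelated strings
    {
        ASTPtr query;
        std::string database;
        std::string table;
        BlockOutputStreamPtr out;
    };

    struct ViewInfoAfter    // one value that call sites cannot tear apart
    {
        ASTPtr query;
        StorageID table_id;
        BlockOutputStreamPtr out;
    };

Call sites such as view.table_id.getNameForLogs() in the writePrefix()/writeSuffix() hunks above follow directly from this shape.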
@@ -52,7 +52,7 @@ Tables DatabaseDictionary::listTables(const Context & context, const FilterByNam
             auto dict_name = dict_ptr->getName();
             const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
             auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
-            tables[dict_name] = StorageDictionary::create(getDatabaseName(), dict_name, ColumnsDescription{columns}, context, true, dict_name);
+            tables[dict_name] = StorageDictionary::create(StorageID(getDatabaseName(), dict_name), ColumnsDescription{columns}, context, true, dict_name);
         }
     }
     return tables;
@@ -74,7 +74,7 @@ StoragePtr DatabaseDictionary::tryGetTable(
     {
         const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
         auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
-        return StorageDictionary::create(getDatabaseName(), table_name, ColumnsDescription{columns}, context, true, table_name);
+        return StorageDictionary::create(StorageID(getDatabaseName(), table_name), ColumnsDescription{columns}, context, true, table_name);
     }

     return {};
@@ -132,9 +132,9 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
     {
         /// init create query.

-        create_table_query->table = storage->getTableName();
-        create_table_query->database = storage->getDatabaseName();
+        auto table_id = storage->getStorageID();
+        create_table_query->table = table_id.table_name;
+        create_table_query->database = table_id.database_name;

         for (const auto & column_type_and_name : storage->getColumns().getOrdinary())
         {
@@ -144,7 +144,7 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
             columns_expression_list->children.emplace_back(column_declaration);
         }

-        auto mysql_table_name = std::make_shared<ASTLiteral>(storage->getTableName());
+        auto mysql_table_name = std::make_shared<ASTLiteral>(table_id.table_name);
         auto storage_engine_arguments = table_storage_define->as<ASTStorage>()->engine->arguments;
         storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
     }
@@ -239,7 +239,7 @@ void DatabaseMySQL::fetchLatestTablesStructureIntoCache(const std::map<String, U
         }

         local_tables_cache[table_name] = std::make_pair(table_modification_time, StorageMySQL::create(
-            database_name, table_name, std::move(mysql_pool), database_name_in_mysql, table_name,
+            StorageID(database_name, table_name), std::move(mysql_pool), database_name_in_mysql, table_name,
             false, "", ColumnsDescription{columns_name_and_type}, ConstraintsDescription{}, global_context));
     }
 }
@@ -68,9 +68,12 @@ std::pair<String, StoragePtr> createTableFromAST(
         ast_create_query.table,
         StorageFactory::instance().get(
             ast_create_query,
-            table_data_path_relative, ast_create_query.table, database_name, context, context.getGlobalContext(),
-            columns, constraints,
-            true, has_force_restore_data_flag)
+            table_data_path_relative,
+            context,
+            context.getGlobalContext(),
+            columns,
+            constraints,
+            has_force_restore_data_flag)
     };
 }
@@ -222,7 +222,7 @@ StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & contex
     {
         const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
         auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
-        return StorageDictionary::create(database_name, table_name, ColumnsDescription{columns}, context, true, dict_name);
+        return StorageDictionary::create(StorageID(database_name, table_name), ColumnsDescription{columns}, context, true, dict_name);
     }
     return nullptr;
 }
@@ -739,49 +739,50 @@ void Context::checkDatabaseAccessRightsImpl(const std::string & database_name) c
         throw Exception("Access denied to database " + database_name + " for user " + client_info.current_user , ErrorCodes::DATABASE_ACCESS_DENIED);
 }

-void Context::addDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
+void Context::addDependencyUnsafe(const StorageID & from, const StorageID & where)
 {
-    checkDatabaseAccessRightsImpl(from.first);
-    checkDatabaseAccessRightsImpl(where.first);
+    checkDatabaseAccessRightsImpl(from.database_name);
+    checkDatabaseAccessRightsImpl(where.database_name);
     shared->view_dependencies[from].insert(where);

     // Notify table of dependencies change
-    auto table = tryGetTable(from.first, from.second);
+    auto table = tryGetTable(from);
     if (table != nullptr)
         table->updateDependencies();
 }

-void Context::addDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
+void Context::addDependency(const StorageID & from, const StorageID & where)
 {
     auto lock = getLock();
     addDependencyUnsafe(from, where);
 }

-void Context::removeDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
+void Context::removeDependencyUnsafe(const StorageID & from, const StorageID & where)
 {
-    checkDatabaseAccessRightsImpl(from.first);
-    checkDatabaseAccessRightsImpl(where.first);
+    checkDatabaseAccessRightsImpl(from.database_name);
+    checkDatabaseAccessRightsImpl(where.database_name);
     shared->view_dependencies[from].erase(where);

     // Notify table of dependencies change
-    auto table = tryGetTable(from.first, from.second);
+    auto table = tryGetTable(from);
     if (table != nullptr)
         table->updateDependencies();
 }

-void Context::removeDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
+void Context::removeDependency(const StorageID & from, const StorageID & where)
 {
     auto lock = getLock();
     removeDependencyUnsafe(from, where);
 }

-Dependencies Context::getDependencies(const String & database_name, const String & table_name) const
+Dependencies Context::getDependencies(const StorageID & from) const
 {
     auto lock = getLock();

-    String db = resolveDatabase(database_name, current_database);
+    String db = resolveDatabase(from.database_name, current_database);

-    if (database_name.empty() && tryGetExternalTable(table_name))
+    if (from.database_name.empty() && tryGetExternalTable(from.table_name))
     {
         /// Table is temporary. Access granted.
     }
@@ -790,7 +791,7 @@ Dependencies Context::getDependencies(const String & database_name, const String
         checkDatabaseAccessRightsImpl(db);
     }

-    ViewDependencies::const_iterator iter = shared->view_dependencies.find(DatabaseAndTableName(db, table_name));
+    ViewDependencies::const_iterator iter = shared->view_dependencies.find(StorageID(db, from.table_name, from.uuid));
     if (iter == shared->view_dependencies.end())
         return {};
@@ -919,24 +920,32 @@ StoragePtr Context::tryGetExternalTable(const String & table_name) const
     return jt->second.first;
 }


 StoragePtr Context::getTable(const String & database_name, const String & table_name) const
 {
+    return getTable(StorageID(database_name, table_name));
+}
+
+StoragePtr Context::getTable(const StorageID & table_id) const
+{
     std::optional<Exception> exc;
-    auto res = getTableImpl(database_name, table_name, &exc);
+    auto res = getTableImpl(table_id, &exc);
     if (!res)
         throw *exc;
     return res;
 }


 StoragePtr Context::tryGetTable(const String & database_name, const String & table_name) const
 {
-    return getTableImpl(database_name, table_name, {});
+    return getTableImpl(StorageID(database_name, table_name), {});
 }

+StoragePtr Context::tryGetTable(const StorageID & table_id) const
+{
+    return getTableImpl(table_id, {});
+}
+

-StoragePtr Context::getTableImpl(const String & database_name, const String & table_name, std::optional<Exception> * exception) const
+StoragePtr Context::getTableImpl(const StorageID & table_id, std::optional<Exception> * exception) const
 {
     String db;
     DatabasePtr database;
@@ -944,14 +953,14 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta
     {
         auto lock = getLock();

-        if (database_name.empty())
+        if (table_id.database_name.empty())
         {
-            StoragePtr res = tryGetExternalTable(table_name);
+            StoragePtr res = tryGetExternalTable(table_id.table_name);
             if (res)
                 return res;
         }

-        db = resolveDatabase(database_name, current_database);
+        db = resolveDatabase(table_id.database_name, current_database);
         checkDatabaseAccessRightsImpl(db);

         Databases::const_iterator it = shared->databases.find(db);
@@ -965,11 +974,11 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta
         database = it->second;
     }

-    auto table = database->tryGetTable(*this, table_name);
+    auto table = database->tryGetTable(*this, table_id.table_name);
     if (!table)
     {
         if (exception)
-            exception->emplace("Table " + backQuoteIfNeed(db) + "." + backQuoteIfNeed(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
+            exception->emplace("Table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
         return {};
     }
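Taken together, the three hunks above reduce table lookup to one StorageID-based implementation with thin string wrappers kept for compatibility. A self-contained toy model of that flow (ToyContext, Table and this StorageID are stand-ins that mirror the diff's names, not ClickHouse's real classes):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct StorageID
    {
        std::string database_name;
        std::string table_name;
        std::string getNameForLogs() const { return database_name + "." + table_name; }
    };

    struct Table {};
    using StoragePtr = std::shared_ptr<Table>;

    struct ToyContext
    {
        std::map<std::string, StoragePtr> external_tables;                  // session temporaries
        std::map<std::pair<std::string, std::string>, StoragePtr> tables;   // (db, table) -> storage
        std::string current_database = "default";

        StoragePtr tryGetTable(const StorageID & id) const
        {
            if (id.database_name.empty())          // empty database => try temporary tables first
            {
                auto et = external_tables.find(id.table_name);
                if (et != external_tables.end())
                    return et->second;
            }
            std::string db = id.database_name.empty() ? current_database : id.database_name;
            auto it = tables.find({db, id.table_name});
            return it == tables.end() ? nullptr : it->second;
        }

        // Legacy two-string overload survives only as a wrapper, as in the diff.
        StoragePtr tryGetTable(const std::string & db, const std::string & table) const
        {
            return tryGetTable(StorageID{db, table});
        }
    };

    int main()
    {
        ToyContext ctx;
        ctx.tables[{"default", "events"}] = std::make_shared<Table>();
        std::cout << (ctx.tryGetTable(StorageID{"", "events"}) ? "found\n" : "missing\n");
    }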
@@ -80,6 +80,7 @@ class ICompressionCodec;
 class AccessControlManager;
 class SettingsConstraints;
 class RemoteHostFilter;
+struct StorageID;
 class IDisk;
 using DiskPtr = std::shared_ptr<IDisk>;
 class DiskSelector;
@@ -96,12 +97,9 @@ class CompiledExpressionCache;

 #endif

-/// (database name, table name)
-using DatabaseAndTableName = std::pair<String, String>;
-
 /// Table -> set of table-views that make SELECT from it.
-using ViewDependencies = std::map<DatabaseAndTableName, std::set<DatabaseAndTableName>>;
-using Dependencies = std::vector<DatabaseAndTableName>;
+using ViewDependencies = std::map<StorageID, std::set<StorageID>>;
+using Dependencies = std::vector<StorageID>;

 using TableAndCreateAST = std::pair<StoragePtr, ASTPtr>;
 using TableAndCreateASTs = std::map<String, TableAndCreateAST>;
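One consequence of keying std::map and std::set by StorageID: the type must supply a strict weak ordering, where the old DatabaseAndTableName got std::pair's for free. The diff does not show that operator; a plausible sketch is a lexicographic comparison over the identifying fields (the real implementation in StorageID.h may well differ, for instance in how an empty UUID is treated):

    #include <map>
    #include <set>
    #include <string>
    #include <tuple>
    #include <vector>

    struct StorageID
    {
        std::string database_name, table_name, uuid;

        // Assumed ordering, for illustration only.
        bool operator<(const StorageID & rhs) const
        {
            return std::tie(database_name, table_name, uuid)
                 < std::tie(rhs.database_name, rhs.table_name, rhs.uuid);
        }
    };

    using ViewDependencies = std::map<StorageID, std::set<StorageID>>;
    using Dependencies = std::vector<StorageID>;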
@@ -256,13 +254,13 @@ public:
     ClientInfo & getClientInfo() { return client_info; }
     const ClientInfo & getClientInfo() const { return client_info; }

-    void addDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
-    void removeDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
-    Dependencies getDependencies(const String & database_name, const String & table_name) const;
+    void addDependency(const StorageID & from, const StorageID & where);
+    void removeDependency(const StorageID & from, const StorageID & where);
+    Dependencies getDependencies(const StorageID & from) const;

     /// Functions where we can lock the context manually
-    void addDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
-    void removeDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
+    void addDependencyUnsafe(const StorageID & from, const StorageID & where);
+    void removeDependencyUnsafe(const StorageID & from, const StorageID & where);

     /// Checking the existence of the table/database. Database can be empty - in this case the current database is used.
     bool isTableExist(const String & database_name, const String & table_name) const;
@@ -288,7 +286,9 @@ public:
     Tables getExternalTables() const;
     StoragePtr tryGetExternalTable(const String & table_name) const;
     StoragePtr getTable(const String & database_name, const String & table_name) const;
+    StoragePtr getTable(const StorageID & table_id) const;
     StoragePtr tryGetTable(const String & database_name, const String & table_name) const;
+    StoragePtr tryGetTable(const StorageID & table_id) const;
     void addExternalTable(const String & table_name, const StoragePtr & storage, const ASTPtr & ast = {});
     void addScalar(const String & name, const Block & block);
     bool hasScalar(const String & name) const;
@@ -594,7 +594,7 @@ private:

     EmbeddedDictionaries & getEmbeddedDictionariesImpl(bool throw_on_error) const;

-    StoragePtr getTableImpl(const String & database_name, const String & table_name, std::optional<Exception> * exception) const;
+    StoragePtr getTableImpl(const StorageID & table_id, std::optional<Exception> * exception) const;

     SessionKey getSessionKey(const String & session_id) const;
@@ -622,8 +622,13 @@ void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper)

     if (auto query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(rewritten_ast.get()); query_with_table)
     {
-        String database = query_with_table->database.empty() ? context.getCurrentDatabase() : query_with_table->database;
-        StoragePtr storage = context.tryGetTable(database, query_with_table->table);
+        StoragePtr storage;
+        if (!query_with_table->table.empty())
+        {
+            /// It's not CREATE DATABASE
+            String database = query_with_table->database.empty() ? context.getCurrentDatabase() : query_with_table->database;
+            storage = context.tryGetTable(database, query_with_table->table);
+        }

         /// For some reason we check consistency of cluster definition only
         /// in case of ALTER query, but not in case of CREATE/DROP etc.
@@ -95,7 +95,7 @@ public:
         Block sample = interpreter->getSampleBlock();
         NamesAndTypesList columns = sample.getNamesAndTypesList();

-        StoragePtr external_storage = StorageMemory::create("_external", external_table_name, ColumnsDescription{columns}, ConstraintsDescription{});
+        StoragePtr external_storage = StorageMemory::create(StorageID("_external", external_table_name), ColumnsDescription{columns}, ConstraintsDescription{});
         external_storage->startup();

         /** We replace the subquery with the name of the temporary table.
@@ -638,13 +638,10 @@ bool InterpreterCreateQuery::doCreateTable(const ASTCreateQuery & create,
     {
         res = StorageFactory::instance().get(create,
             database ? database->getTableDataPath(create) : "",
-            table_name,
-            create.database,
             context,
             context.getGlobalContext(),
             properties.columns,
             properties.constraints,
-            create.attach,
             false);
     }
@@ -79,13 +79,14 @@ BlockIO InterpreterDropQuery::executeToTable(

     if (database_and_table.first && database_and_table.second)
     {
+        auto table_id = database_and_table.second->getStorageID();
         if (kind == ASTDropQuery::Kind::Detach)
         {
             database_and_table.second->shutdown();
             /// If table was already dropped by anyone, an exception will be thrown
             auto table_lock = database_and_table.second->lockExclusively(context.getCurrentQueryId());
             /// Drop table from memory, don't touch data and metadata
-            database_and_table.first->detachTable(database_and_table.second->getTableName());
+            database_and_table.first->detachTable(table_id.table_name);
         }
         else if (kind == ASTDropQuery::Kind::Truncate)
         {
@@ -107,7 +108,7 @@ BlockIO InterpreterDropQuery::executeToTable(

             const std::string metadata_file_without_extension =
                 database_and_table.first->getMetadataPath()
-                + escapeForFileName(database_and_table.second->getTableName());
+                + escapeForFileName(table_id.table_name);

             const auto prev_metadata_name = metadata_file_without_extension + ".sql";
             const auto drop_metadata_name = metadata_file_without_extension + ".sql.tmp_drop";
@@ -131,7 +132,7 @@ BlockIO InterpreterDropQuery::executeToTable(
             String table_data_path_relative = database_and_table.first->getTableDataPath(table_name);

             /// Delete table metadata and table itself from memory
-            database_and_table.first->removeTable(context, database_and_table.second->getTableName());
+            database_and_table.first->removeTable(context, table_id.table_name);
             database_and_table.second->is_dropped = true;

             /// If it is not virtual database like Dictionary then drop remaining data dir
@@ -111,7 +111,7 @@ BlockIO InterpreterInsertQuery::execute()
     if (table->noPushingToViews() && !no_destination)
         out = table->write(query_ptr, context);
     else
-        out = std::make_shared<PushingToViewsBlockOutputStream>(query.database, query.table, table, context, query_ptr, no_destination);
+        out = std::make_shared<PushingToViewsBlockOutputStream>(table, context, query_ptr, no_destination);

     /// Do not squash blocks if it is a sync INSERT into Distributed, since it lead to double bufferization on client and server side.
     /// Client-side bufferization might cause excessive timeouts (especially in case of big blocks).
@@ -116,14 +116,11 @@ namespace ErrorCodes
     extern const int INVALID_WITH_FILL_EXPRESSION;
 }

-namespace
-{
-
 /// Assumes `storage` is set and the table filter (row-level security) is not empty.
-String generateFilterActions(ExpressionActionsPtr & actions, const Context & context, const StoragePtr & storage, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {})
+String InterpreterSelectQuery::generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns) const
 {
-    const auto & db_name = storage->getDatabaseName();
-    const auto & table_name = storage->getTableName();
+    const auto & db_name = table_id.getDatabaseName();
+    const auto & table_name = table_id.getTableName();

     /// TODO: implement some AST builders for this kind of stuff
     ASTPtr query_ast = std::make_shared<ASTSelectQuery>();
@@ -153,17 +150,15 @@ String generateFilterActions(ExpressionActionsPtr & actions, const Context & con
     table_expr->children.push_back(table_expr->database_and_table_name);

     /// Using separate expression analyzer to prevent any possible alias injection
-    auto syntax_result = SyntaxAnalyzer(context).analyze(query_ast, storage->getColumns().getAllPhysical());
-    SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context);
-    ExpressionActionsChain new_chain(context);
+    auto syntax_result = SyntaxAnalyzer(*context).analyze(query_ast, storage->getColumns().getAllPhysical());
+    SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, *context);
+    ExpressionActionsChain new_chain(*context);
     analyzer.appendSelect(new_chain, false);
     actions = new_chain.getLastActions();

     return expr_list->children.at(0)->getColumnName();
 }

-}
-
 InterpreterSelectQuery::InterpreterSelectQuery(
     const ASTPtr & query_ptr_,
     const Context & context_,
@@ -292,7 +287,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     if (auto view_source = context->getViewSource())
     {
         auto & storage_values = static_cast<const StorageValues &>(*view_source);
-        if (storage_values.getDatabaseName() == database_name && storage_values.getTableName() == table_name)
+        auto tmp_table_id = storage_values.getStorageID();
+        if (tmp_table_id.database_name == database_name && tmp_table_id.table_name == table_name)
         {
             /// Read from view source.
             storage = context->getViewSource();
@@ -308,7 +304,10 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     }

     if (storage)
+    {
         table_lock = storage->lockStructureForShare(false, context->getInitialQueryId());
+        table_id = storage->getStorageID();
+    }

     auto analyze = [&] ()
     {
@@ -370,11 +369,11 @@ InterpreterSelectQuery::InterpreterSelectQuery(
             source_header = storage->getSampleBlockForColumns(required_columns);

             /// Fix source_header for filter actions.
-            auto row_policy_filter = context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER);
+            auto row_policy_filter = context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER);
             if (row_policy_filter)
             {
                 filter_info = std::make_shared<FilterInfo>();
-                filter_info->column_name = generateFilterActions(filter_info->actions, *context, storage, row_policy_filter, required_columns);
+                filter_info->column_name = generateFilterActions(filter_info->actions, row_policy_filter, required_columns);
                 source_header = storage->getSampleBlockForColumns(filter_info->actions->getRequiredColumns());
             }
         }
@@ -424,9 +423,9 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     sanitizeBlock(result_header);

     /// Remove limits for some tables in the `system` database.
-    if (storage && (storage->getDatabaseName() == "system"))
+    if (storage && (table_id.getDatabaseName() == "system"))
     {
-        String table_name = storage->getTableName();
+        String table_name = table_id.getTableName();
         if ((table_name == "quotas") || (table_name == "quota_usage") || (table_name == "one"))
         {
             options.ignore_quota = true;
@@ -510,7 +509,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl()

     /// PREWHERE optimization.
     /// Turn off, if the table filter (row-level security) is applied.
-    if (!context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER))
+    if (!context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER))
     {
         auto optimize_prewhere = [&](auto & merge_tree)
         {
@@ -1366,12 +1365,12 @@ void InterpreterSelectQuery::executeFetchColumns(
     if (storage)
     {
         /// Append columns from the table filter to required
-        auto row_policy_filter = context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER);
+        auto row_policy_filter = context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER);
         if (row_policy_filter)
         {
             auto initial_required_columns = required_columns;
             ExpressionActionsPtr actions;
-            generateFilterActions(actions, *context, storage, row_policy_filter, initial_required_columns);
+            generateFilterActions(actions, row_policy_filter, initial_required_columns);
             auto required_columns_from_filter = actions->getRequiredColumns();

             for (const auto & column : required_columns_from_filter)
@@ -13,6 +13,7 @@
 #include <Storages/SelectQueryInfo.h>
 #include <Storages/TableStructureLockHolder.h>
 #include <Storages/ReadInOrderOptimizer.h>
+#include <Storages/StorageID.h>

 #include <Processors/QueryPipeline.h>
 #include <Columns/FilterDescription.h>
@@ -244,6 +245,8 @@ private:
     void executeSubqueriesInSetsAndJoins(QueryPipeline & pipeline, std::unordered_map<String, SubqueryForSet> & subqueries_for_sets);
     void executeMergeSorted(QueryPipeline & pipeline, const SortDescription & sort_description, UInt64 limit);

+    String generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {}) const;
+
     /// Add ConvertingBlockInputStream to specified header.
     void unifyStreams(Pipeline & pipeline, Block header);
@@ -293,6 +296,7 @@ private:

     /// Table from where to read data, if not subquery.
     StoragePtr storage;
+    StorageID table_id = StorageID::createEmpty(); /// Will be initialized if storage is not nullptr
     TableStructureReadLockHolder table_lock;

     /// Used when we read from prepared input, not table or subquery.
@@ -299,13 +299,10 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const String & database_nam

     StoragePtr table = StorageFactory::instance().get(create,
         database->getTableDataPath(create),
-        table_name,
-        database_name,
         system_context,
         system_context.getGlobalContext(),
         columns,
         constraints,
-        create.attach,
         false);

     database->createTable(system_context, table_name, table, create_ast);
@@ -110,7 +110,8 @@ bool PartLog::addNewParts(Context & current_context, const PartLog::MutableDataP

     try
     {
-        part_log = current_context.getPartLog(parts.front()->storage.getDatabaseName()); // assume parts belong to the same table
+        auto table_id = parts.front()->storage.getStorageID();
+        part_log = current_context.getPartLog(table_id.database_name); // assume parts belong to the same table
         if (!part_log)
             return false;
@@ -122,8 +123,8 @@ bool PartLog::addNewParts(Context & current_context, const PartLog::MutableDataP
         elem.event_time = time(nullptr);
         elem.duration_ms = elapsed_ns / 1000000;

-        elem.database_name = part->storage.getDatabaseName();
-        elem.table_name = part->storage.getTableName();
+        elem.database_name = table_id.database_name;
+        elem.table_name = table_id.table_name;
         elem.partition_id = part->info.partition_id;
         elem.part_name = part->name;
         elem.path_on_disk = part->getFullPath();
@@ -38,12 +38,9 @@ public:
     std::string getRemoteDatabaseName() const { return remote_database; }
     std::string getRemoteTableName() const { return remote_table; }

-    std::string getTableName() const override { return ""; }
-    std::string getDatabaseName() const override { return ""; }
-
 protected:
     StorageDistributedFake(const std::string & remote_database_, const std::string & remote_table_, size_t shard_count_)
-        : remote_database(remote_database_), remote_table(remote_table_), shard_count(shard_count_)
+        : IStorage({"", ""}), remote_database(remote_database_), remote_table(remote_table_), shard_count(shard_count_)
     {
     }
@@ -2,6 +2,7 @@

 #include <Parsers/IAST.h>
 #include <Parsers/ASTQueryWithOutput.h>
+#include <Core/UUID.h>

 namespace DB
@@ -15,6 +16,7 @@ class ASTQueryWithTableAndOutput : public ASTQueryWithOutput
 public:
     String database;
     String table;
+    UUID uuid;
     bool temporary{false};

 protected:
@@ -689,7 +689,7 @@ bool StorageDistributedDirectoryMonitor::maybeMarkAsBroken(const std::string & f

 std::string StorageDistributedDirectoryMonitor::getLoggerName() const
 {
-    return storage.table_name + '.' + storage.getName() + ".DirectoryMonitor";
+    return storage.getStorageID().getFullTableName() + ".DirectoryMonitor";
 }

 void StorageDistributedDirectoryMonitor::updatePath()
@@ -90,7 +90,7 @@ void DistributedBlockOutputStream::write(const Block & block)
         if (ordinary_block.has(col.name))
         {
             ordinary_block.erase(col.name);
-            LOG_DEBUG(log, storage.getTableName()
+            LOG_DEBUG(log, storage.getStorageID().getNameForLogs()
                 << ": column " + col.name + " will be removed, "
                 << "because it is MATERIALIZED");
         }
@@ -515,7 +515,7 @@ void DistributedBlockOutputStream::writeAsyncImpl(const Block & block, const siz
     else
     {
         if (shard_info.dir_name_for_internal_replication.empty())
-            throw Exception("Directory name for async inserts is empty, table " + storage.getTableName(), ErrorCodes::LOGICAL_ERROR);
+            throw Exception("Directory name for async inserts is empty, table " + storage.getStorageID().getNameForLogs(), ErrorCodes::LOGICAL_ERROR);

         writeToShard(block, {shard_info.dir_name_for_internal_replication});
     }
@@ -30,7 +30,7 @@ namespace ErrorCodes
     extern const int NOT_IMPLEMENTED;
 }

-IStorage::IStorage(ColumnsDescription virtuals_) : virtuals(std::move(virtuals_))
+IStorage::IStorage(StorageID storage_id_, ColumnsDescription virtuals_) : storage_id(std::move(storage_id_)), virtuals(std::move(virtuals_))
 {
 }
@@ -177,7 +177,7 @@ void IStorage::check(const Names & column_names, bool include_virtuals) const
 {
     if (columns_map.end() == columns_map.find(name))
         throw Exception(
-            "There is no column with name " + backQuote(name) + " in table " + getTableName() + ". There are columns: " + list_of_columns,
+            "There is no column with name " + backQuote(name) + " in table " + getStorageID().getNameForLogs() + ". There are columns: " + list_of_columns,
             ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);

     if (unique_names.end() != unique_names.find(name))
@@ -339,7 +339,7 @@ TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_
 void IStorage::lockNewDataStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id)
 {
     if (!lock_holder.alter_intention_lock)
-        throw Exception("Alter intention lock for table " + getTableName() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);

     lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id);
 }
@@ -347,7 +347,7 @@ void IStorage::lockNewDataStructureExclusively(TableStructureWriteLockHolder & l
 void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id)
 {
     if (!lock_holder.alter_intention_lock)
-        throw Exception("Alter intention lock for table " + getTableName() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);

     if (!lock_holder.new_data_structure_lock)
         lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id);
@@ -383,14 +383,11 @@ void IStorage::alter(
     const Context & context,
     TableStructureWriteLockHolder & table_lock_holder)
 {
-    const String database_name = getDatabaseName();
-    const String table_name = getTableName();
-
     lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());
+
+    auto table_id = getStorageID();
     StorageInMemoryMetadata metadata = getInMemoryMetadata();
     params.apply(metadata);
-    context.getDatabase(database_name)->alterTable(context, table_name, metadata);
+    context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
     setColumns(std::move(metadata.columns));
 }
@@ -425,4 +422,17 @@ BlockInputStreams IStorage::read(
     return res;
 }

+StorageID IStorage::getStorageID() const
+{
+    std::lock_guard<std::mutex> lock(id_mutex);
+    return storage_id;
+}
+
+void IStorage::renameInMemory(const String & new_database_name, const String & new_table_name)
+{
+    std::lock_guard<std::mutex> lock(id_mutex);
+    storage_id.database_name = new_database_name;
+    storage_id.table_name = new_table_name;
+}
+
 }
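Note the concurrency contract these two new methods establish: getStorageID() returns a copy taken under id_mutex, so a caller's snapshot stays internally consistent even if renameInMemory() runs concurrently; returning a reference into the guarded state would be a race. A minimal self-contained sketch of the same pattern (StorageBase is a stand-in, not the real IStorage):

    #include <iostream>
    #include <mutex>
    #include <string>

    struct StorageID { std::string database_name, table_name; };

    class StorageBase
    {
    public:
        explicit StorageBase(StorageID id) : storage_id(std::move(id)) {}

        StorageID getStorageID() const             // copy out under the lock
        {
            std::lock_guard<std::mutex> lock(id_mutex);
            return storage_id;
        }

        void renameInMemory(const std::string & db, const std::string & table)
        {
            std::lock_guard<std::mutex> lock(id_mutex);
            storage_id.database_name = db;
            storage_id.table_name = table;
        }

    private:
        StorageID storage_id;
        mutable std::mutex id_mutex;               // mutable: the getter is const
    };

    int main()
    {
        StorageBase s(StorageID{"db1", "t1"});
        s.renameInMemory("db2", "t2");
        std::cout << s.getStorageID().database_name << "\n";  // prints db2
    }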
@@ -6,6 +6,7 @@
 #include <Databases/IDatabase.h>
 #include <Interpreters/CancellationCode.h>
 #include <Storages/IStorage_fwd.h>
+#include <Storages/StorageID.h>
 #include <Storages/SelectQueryInfo.h>
 #include <Storages/TableStructureLockHolder.h>
 #include <Storages/CheckResults.h>
@@ -75,8 +76,9 @@ struct ColumnSize
 class IStorage : public std::enable_shared_from_this<IStorage>, public TypePromotion<IStorage>
 {
 public:
-    IStorage() = default;
-    explicit IStorage(ColumnsDescription virtuals_);
+    IStorage() = delete;
+    explicit IStorage(StorageID storage_id_) : storage_id(std::move(storage_id_)) {}
+    IStorage(StorageID id_, ColumnsDescription virtuals_);

     virtual ~IStorage() = default;
     IStorage(const IStorage &) = delete;
@@ -86,8 +88,7 @@ public:
     virtual std::string getName() const = 0;

     /// The name of the table.
-    virtual std::string getTableName() const = 0;
-    virtual std::string getDatabaseName() const { return {}; }
+    StorageID getStorageID() const;

     /// Returns true if the storage receives data from a remote server or servers.
     virtual bool isRemote() const { return false; }
@@ -165,6 +166,8 @@ protected: /// still thread-unsafe part.


 private:
+    StorageID storage_id;
+    mutable std::mutex id_mutex;
     ColumnsDescription columns; /// combined real and virtual columns
     const ColumnsDescription virtuals = {};
     IndicesDescription indices;
@@ -303,12 +306,18 @@ public:
      * In this function, you need to rename the directory with the data, if any.
      * Called when the table structure is locked for write.
      */
-    virtual void rename(const String & /*new_path_to_table_data*/, const String & /*new_database_name*/, const String & /*new_table_name*/,
+    virtual void rename(const String & /*new_path_to_table_data*/, const String & new_database_name, const String & new_table_name,
                         TableStructureWriteLockHolder &)
     {
-        throw Exception("Method rename is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
+        renameInMemory(new_database_name, new_table_name);
     }

+    /**
+     * Just updates names of database and table without moving any data on disk
+     * Can be called directly only from DatabaseAtomic.
+     */
+    virtual void renameInMemory(const String & new_database_name, const String & new_table_name);
+
     /** ALTER tables in the form of column changes that do not affect the change to Storage or its parameters.
      * This method must fully execute the ALTER query, taking care of the locks itself.
      * To update the table metadata on disk, this method should call InterpreterAlterQuery::updateMetadata.
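The base-class default flips meaning here: rename() used to throw NOT_IMPLEMENTED, so every storage supporting RENAME had to override it; now the default performs a metadata-only rename, and only storages with data on disk still need their own override. That is why StorageKafka's rename() override is deleted later in this commit. A toy illustration of the flip (StorageSketch and InMemoryStorageSketch are stand-ins, not the real hierarchy):

    #include <string>

    struct StorageID { std::string database_name, table_name; };

    class StorageSketch
    {
    public:
        virtual ~StorageSketch() = default;

        // New default: delegate to the in-memory rename (was: throw NOT_IMPLEMENTED).
        virtual void rename(const std::string & /*new_path_to_table_data*/,
                            const std::string & new_db, const std::string & new_table)
        {
            renameInMemory(new_db, new_table);
        }

        virtual void renameInMemory(const std::string & new_db, const std::string & new_table)
        {
            id.database_name = new_db;
            id.table_name = new_table;
        }

    protected:
        StorageID id{"default", "t"};
    };

    // A storage with no on-disk directory now inherits a working RENAME for free.
    class InMemoryStorageSketch : public StorageSketch {};

    int main()
    {
        InMemoryStorageSketch s;
        s.rename("/ignored/path", "new_db", "new_table");  // no override needed
        return 0;
    }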
@@ -75,8 +75,7 @@ namespace
 }

 StorageKafka::StorageKafka(
-    const std::string & table_name_,
-    const std::string & database_name_,
+    const StorageID & table_id_,
     Context & context_,
     const ColumnsDescription & columns_,
     const String & brokers_,
@@ -89,14 +88,12 @@ StorageKafka::StorageKafka(
     UInt64 max_block_size_,
     size_t skip_broken_,
     bool intermediate_commit_)
-    : IStorage(
+    : IStorage(table_id_,
         ColumnsDescription({{"_topic", std::make_shared<DataTypeString>()},
                             {"_key", std::make_shared<DataTypeString>()},
                             {"_offset", std::make_shared<DataTypeUInt64>()},
                             {"_partition", std::make_shared<DataTypeUInt64>()},
                             {"_timestamp", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())}}, true))
-    , table_name(table_name_)
-    , database_name(database_name_)
     , global_context(context_.getGlobalContext())
     , kafka_context(Context(global_context))
     , topics(global_context.getMacros()->expand(topics_))
@@ -107,7 +104,7 @@ StorageKafka::StorageKafka(
     , schema_name(global_context.getMacros()->expand(schema_name_))
     , num_consumers(num_consumers_)
     , max_block_size(max_block_size_)
-    , log(&Logger::get("StorageKafka (" + table_name_ + ")"))
+    , log(&Logger::get("StorageKafka (" + table_id_.table_name + ")"))
     , semaphore(0, num_consumers_)
     , skip_broken(skip_broken_)
     , intermediate_commit(intermediate_commit_)
@@ -195,14 +192,6 @@ void StorageKafka::shutdown()
     task->deactivate();
 }


-void StorageKafka::rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
-{
-    table_name = new_table_name;
-    database_name = new_database_name;
-}
-
-
 void StorageKafka::updateDependencies()
 {
     task->activateAndSchedule();
@@ -303,17 +292,17 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & conf)
     }
 }

-bool StorageKafka::checkDependencies(const String & current_database_name, const String & current_table_name)
+bool StorageKafka::checkDependencies(const StorageID & table_id)
 {
     // Check if all dependencies are attached
-    auto dependencies = global_context.getDependencies(current_database_name, current_table_name);
+    auto dependencies = global_context.getDependencies(table_id);
     if (dependencies.size() == 0)
         return true;

     // Check the dependencies are ready?
     for (const auto & db_tab : dependencies)
     {
-        auto table = global_context.tryGetTable(db_tab.first, db_tab.second);
+        auto table = global_context.tryGetTable(db_tab);
         if (!table)
             return false;
@@ -323,7 +312,7 @@ bool StorageKafka::checkDependencies(const String & current_database_name, const
             return false;

         // Check all its dependencies
-        if (!checkDependencies(db_tab.first, db_tab.second))
+        if (!checkDependencies(db_tab))
             return false;
     }
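checkDependencies() recurses through the view graph, and with StorageID each level now passes one value instead of threading two strings. A self-contained model of the traversal, with a toy dependency map (Deps) standing in for Context and a deliberately simplified two-field StorageID:

    #include <map>
    #include <set>
    #include <string>
    #include <tuple>

    struct StorageID
    {
        std::string database_name, table_name;
        bool operator<(const StorageID & rhs) const
        {
            return std::tie(database_name, table_name)
                 < std::tie(rhs.database_name, rhs.table_name);
        }
    };

    using Deps = std::map<StorageID, std::set<StorageID>>;

    // True only if every transitive dependent view is currently attached.
    bool checkDependencies(const StorageID & id, const Deps & deps,
                           const std::set<StorageID> & attached)
    {
        auto it = deps.find(id);
        if (it == deps.end())
            return true;                  // nothing depends on this table
        for (const auto & dep : it->second)
        {
            if (attached.count(dep) == 0)
                return false;             // a dependent view is detached
            if (!checkDependencies(dep, deps, attached))
                return false;             // and recurse into its dependents
        }
        return true;
    }

    int main()
    {
        Deps deps{{{"db", "kafka"}, {{"db", "mv"}}}};
        std::set<StorageID> attached{{"db", "kafka"}, {"db", "mv"}};
        return checkDependencies({"db", "kafka"}, deps, attached) ? 0 : 1;
    }

(The real method also checks per-table readiness between the attachment test and the recursion; that step is elided here.)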
@@ -334,13 +323,14 @@ void StorageKafka::threadFunc()
 {
     try
     {
+        auto table_id = getStorageID();
         // Check if at least one direct dependency is attached
-        auto dependencies = global_context.getDependencies(database_name, table_name);
+        auto dependencies = global_context.getDependencies(table_id);

         // Keep streaming as long as there are attached views and streaming is not cancelled
         while (!stream_cancelled && num_created_consumers > 0 && dependencies.size() > 0)
         {
-            if (!checkDependencies(database_name, table_name))
+            if (!checkDependencies(table_id))
                 break;

             LOG_DEBUG(log, "Started streaming to " << dependencies.size() << " attached views");
@@ -363,14 +353,15 @@ void StorageKafka::threadFunc()

 bool StorageKafka::streamToViews()
 {
-    auto table = global_context.getTable(database_name, table_name);
+    auto table_id = getStorageID();
+    auto table = global_context.getTable(table_id);
     if (!table)
-        throw Exception("Engine table " + backQuote(database_name) + "." + backQuote(table_name) + " doesn't exist.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Engine table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::LOGICAL_ERROR);

     // Create an INSERT query for streaming data
     auto insert = std::make_shared<ASTInsertQuery>();
-    insert->database = database_name;
-    insert->table = table_name;
+    insert->database = table_id.database_name;
+    insert->table = table_id.table_name;

     const Settings & settings = global_context.getSettingsRef();
     size_t block_size = max_block_size;
@@ -643,7 +634,7 @@ void registerStorageKafka(StorageFactory & factory)
         }

         return StorageKafka::create(
-            args.table_name, args.database_name, args.context, args.columns,
+            args.table_id, args.context, args.columns,
             brokers, group, topics, format, row_delimiter, schema, num_consumers, max_block_size, skip_broken, intermediate_commit);
     });
 }
@@ -28,8 +28,6 @@ class StorageKafka : public ext::shared_ptr_helper<StorageKafka>, public IStorag
     friend struct ext::shared_ptr_helper<StorageKafka>;
 public:
     std::string getName() const override { return "Kafka"; }
-    std::string getTableName() const override { return table_name; }
-    std::string getDatabaseName() const override { return database_name; }

     bool supportsSettings() const override { return true; }
     bool noPushingToViews() const override { return true; }
@@ -49,8 +47,6 @@ public:
         const ASTPtr & query,
         const Context & context) override;

-    void rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;
-
     void updateDependencies() override;

     void pushReadBuffer(ConsumerBufferPtr buf);
@@ -66,19 +62,22 @@ public:

 protected:
     StorageKafka(
-        const std::string & table_name_,
-        const std::string & database_name_,
+        const StorageID & table_id_,
         Context & context_,
         const ColumnsDescription & columns_,
-        const String & brokers_, const String & group_, const Names & topics_,
-        const String & format_name_, char row_delimiter_, const String & schema_name_,
-        size_t num_consumers_, UInt64 max_block_size_, size_t skip_broken,
+        const String & brokers_,
+        const String & group_,
+        const Names & topics_,
+        const String & format_name_,
+        char row_delimiter_,
+        const String & schema_name_,
+        size_t num_consumers_,
+        UInt64 max_block_size_,
+        size_t skip_broken,
         bool intermediate_commit_);

 private:
     // Configuration and state
-    String table_name;
-    String database_name;
     Context global_context;
     Context kafka_context;
     Names topics;
@@ -116,7 +115,7 @@ private:

     void threadFunc();
     bool streamToViews();
-    bool checkDependencies(const String & database_name, const String & table_name);
+    bool checkDependencies(const StorageID & table_id);
 };

 }
@@ -12,21 +12,19 @@ class StorageBlocks : public IStorage
  * Used by Live Views to complete stored query based on the mergeable blocks.
  */
 public:
-    StorageBlocks(const std::string & database_name_, const std::string & table_name_,
+    StorageBlocks(const StorageID & table_id_,
         const ColumnsDescription & columns_, BlockInputStreams streams_,
         QueryProcessingStage::Enum to_stage_)
-        : database_name(database_name_), table_name(table_name_), streams(streams_), to_stage(to_stage_)
+        : IStorage(table_id_), streams(streams_), to_stage(to_stage_)
     {
         setColumns(columns_);
     }
-    static StoragePtr createStorage(const std::string & database_name, const std::string & table_name,
+    static StoragePtr createStorage(const StorageID & table_id,
         const ColumnsDescription & columns, BlockInputStreams streams, QueryProcessingStage::Enum to_stage)
     {
-        return std::make_shared<StorageBlocks>(database_name, table_name, columns, streams, to_stage);
+        return std::make_shared<StorageBlocks>(table_id, columns, streams, to_stage);
     }
     std::string getName() const override { return "Blocks"; }
-    std::string getTableName() const override { return table_name; }
-    std::string getDatabaseName() const override { return database_name; }
     QueryProcessingStage::Enum getQueryProcessingStage(const Context & /*context*/) const override { return to_stage; }

     BlockInputStreams read(
@@ -41,8 +39,6 @@ public:
     }

 private:
-    std::string database_name;
-    std::string table_name;
     Block res_block;
     BlockInputStreams streams;
     QueryProcessingStage::Enum to_stage;
@@ -13,7 +13,6 @@ limitations under the License. */
 #include <Parsers/ASTCreateQuery.h>
 #include <Parsers/ASTWatchQuery.h>
 #include <Parsers/ASTDropQuery.h>
-#include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Interpreters/Context.h>
 #include <Interpreters/InterpreterDropQuery.h>
@@ -54,20 +53,15 @@ namespace ErrorCodes
     extern const int SUPPORT_IS_DISABLED;
 }

-static void extractDependentTable(ASTPtr & query, String & select_database_name, String & select_table_name, const String & table_name, ASTPtr & inner_subquery)
+static StorageID extractDependentTable(ASTPtr & query, Context & context, const String & table_name, ASTPtr & inner_subquery)
 {
     ASTSelectQuery & select_query = typeid_cast<ASTSelectQuery &>(*query);
-    auto db_and_table = getDatabaseAndTable(select_query, 0);
-    ASTPtr subquery = extractTableExpression(select_query, 0);
-
-    if (!db_and_table && !subquery)
-    {
-        return;
-    }

-    if (db_and_table)
+    if (auto db_and_table = getDatabaseAndTable(select_query, 0))
     {
-        select_table_name = db_and_table->table;
+        String select_database_name = context.getCurrentDatabase();
+        String select_table_name = db_and_table->table;

         if (db_and_table->database.empty())
         {
@@ -79,20 +73,27 @@ static void extractDependentTable(ASTPtr & query, String & select_database_name,
             select_database_name = db_and_table->database;

         select_query.replaceDatabaseAndTable("", table_name + "_blocks");
+        return StorageID(select_database_name, select_table_name);
     }
-    else if (auto * ast_select = subquery->as<ASTSelectWithUnionQuery>())
+    else if (auto subquery = extractTableExpression(select_query, 0))
     {
+        auto * ast_select = subquery->as<ASTSelectWithUnionQuery>();
+        if (!ast_select)
+            throw Exception("Logical error while creating StorageLiveView."
+                            " Could not retrieve table name from select query.",
+                            DB::ErrorCodes::LOGICAL_ERROR);
         if (ast_select->list_of_selects->children.size() != 1)
             throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW);

         inner_subquery = ast_select->list_of_selects->children.at(0)->clone();

-        extractDependentTable(ast_select->list_of_selects->children.at(0), select_database_name, select_table_name, table_name, inner_subquery);
+        return extractDependentTable(ast_select->list_of_selects->children.at(0), context, table_name, inner_subquery);
     }
     else
-        throw Exception("Logical error while creating StorageLiveView."
-                        " Could not retrieve table name from select query.",
-                        DB::ErrorCodes::LOGICAL_ERROR);
+    {
+        /// If the table is not specified - use the table `system.one`
+        return StorageID("system", "one");
+    }
 }

 MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(const Context & context)
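The rewrite of extractDependentTable() shows a second payoff of StorageID: the resolved table (with the system.one fallback folded in) is returned by value instead of mutated through two out-parameters. A minimal model of the new control flow, with a toy stand-in (resolveDependentTable) for the AST inspection:

    #include <iostream>
    #include <string>

    struct StorageID { std::string database_name, table_name; };

    // 'db'/'table' play the role of getDatabaseAndTable(); empty means absent.
    StorageID resolveDependentTable(const std::string & db, const std::string & table,
                                    const std::string & current_database)
    {
        if (!table.empty())
            return StorageID{db.empty() ? current_database : db, table};
        /// If the table is not specified - use the table `system.one`
        return StorageID{"system", "one"};
    }

    int main()
    {
        auto id = resolveDependentTable("", "clicks", "default");
        std::cout << id.database_name << "." << id.table_name << "\n";  // default.clicks
    }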
@@ -138,10 +139,11 @@ BlockInputStreamPtr StorageLiveView::completeQuery(BlockInputStreams from)
     auto block_context = std::make_unique<Context>(global_context);
     block_context->makeQueryContext();

-    auto blocks_storage = StorageBlocks::createStorage(database_name, table_name, parent_storage->getColumns(),
+    auto blocks_storage_id = getBlocksStorageID();
+    auto blocks_storage = StorageBlocks::createStorage(blocks_storage_id, parent_storage->getColumns(),
         std::move(from), QueryProcessingStage::WithMergeableState);

-    block_context->addExternalTable(table_name + "_blocks", blocks_storage);
+    block_context->addExternalTable(blocks_storage_id.table_name, blocks_storage);

     InterpreterSelectQuery select(inner_blocks_query->clone(), *block_context, StoragePtr(), SelectQueryOptions(QueryProcessingStage::Complete));
     BlockInputStreamPtr data = std::make_shared<MaterializingBlockInputStream>(select.execute().in);
@ -193,6 +195,8 @@ void StorageLiveView::writeIntoLiveView(
|
||||
}
|
||||
}
|
||||
|
||||
auto blocks_storage_id = live_view.getBlocksStorageID();
|
||||
|
||||
if (!is_block_processed)
|
||||
{
|
||||
ASTPtr mergeable_query = live_view.getInnerQuery();
|
||||
@ -202,7 +206,7 @@ void StorageLiveView::writeIntoLiveView(
|
||||
|
||||
BlockInputStreams streams = {std::make_shared<OneBlockInputStream>(block)};
|
||||
|
||||
auto blocks_storage = StorageBlocks::createStorage(live_view.database_name, live_view.table_name,
|
||||
auto blocks_storage = StorageBlocks::createStorage(blocks_storage_id,
|
||||
live_view.getParentStorage()->getColumns(), std::move(streams), QueryProcessingStage::FetchColumns);
|
||||
|
||||
InterpreterSelectQuery select_block(mergeable_query, context, blocks_storage,
|
||||
@ -232,13 +236,11 @@ void StorageLiveView::writeIntoLiveView(
|
||||
|
||||
|
||||
StorageLiveView::StorageLiveView(
|
||||
const String & table_name_,
|
||||
const String & database_name_,
|
||||
const StorageID & table_id_,
|
||||
Context & local_context,
|
||||
const ASTCreateQuery & query,
|
||||
const ColumnsDescription & columns_)
|
||||
: table_name(table_name_),
|
||||
database_name(database_name_), global_context(local_context.getGlobalContext())
|
||||
: IStorage(table_id_), global_context(local_context.getGlobalContext())
|
||||
{
|
||||
live_view_context = std::make_unique<Context>(global_context);
|
||||
live_view_context->makeQueryContext();
|
||||
@ -249,7 +251,6 @@ StorageLiveView::StorageLiveView(
|
||||
throw Exception("SELECT query is not specified for " + getName(), ErrorCodes::INCORRECT_QUERY);
|
||||
|
||||
/// Default value, if only table name exist in the query
|
||||
select_database_name = local_context.getCurrentDatabase();
|
||||
if (query.select->list_of_selects->children.size() != 1)
|
||||
throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW);
|
||||
|
||||
@ -258,20 +259,11 @@ StorageLiveView::StorageLiveView(
|
||||
|
||||
InterpreterSelectQuery(inner_blocks_query, *live_view_context, SelectQueryOptions().modify().analyze());
|
||||
|
||||
extractDependentTable(inner_blocks_query, select_database_name, select_table_name, table_name, inner_subquery);
|
||||
select_table_id = extractDependentTable(inner_blocks_query, global_context, table_id_.table_name, inner_subquery);
|
||||
|
||||
/// If the table is not specified - use the table `system.one`
|
||||
if (select_table_name.empty())
|
||||
{
|
||||
select_database_name = "system";
|
||||
select_table_name = "one";
|
||||
}
|
||||
global_context.addDependency(select_table_id, table_id_);
|
||||
|
||||
global_context.addDependency(
|
||||
DatabaseAndTableName(select_database_name, select_table_name),
|
||||
DatabaseAndTableName(database_name, table_name));
|
||||
|
||||
parent_storage = local_context.getTable(select_database_name, select_table_name);
|
||||
parent_storage = local_context.getTable(select_table_id);
|
||||
|
||||
is_temporary = query.temporary;
|
||||
temporary_live_view_timeout = local_context.getSettingsRef().temporary_live_view_timeout.totalSeconds();
|
||||
@ -366,11 +358,12 @@ bool StorageLiveView::getNewBlocks()

void StorageLiveView::checkTableCanBeDropped() const
{
Dependencies dependencies = global_context.getDependencies(database_name, table_name);
auto table_id = getStorageID();
Dependencies dependencies = global_context.getDependencies(table_id);
if (!dependencies.empty())
{
DatabaseAndTableName database_and_table_name = dependencies.front();
throw Exception("Table has dependency " + database_and_table_name.first + "." + database_and_table_name.second, ErrorCodes::TABLE_WAS_NOT_DROPPED);
StorageID dependent_table_id = dependencies.front();
throw Exception("Table has dependency " + dependent_table_id.getNameForLogs(), ErrorCodes::TABLE_WAS_NOT_DROPPED);
}
}

@ -381,6 +374,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
if (storage->shutdown_called)
return;

auto table_id = storage->getStorageID();
{
while (1)
{

@ -392,7 +386,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
return;
if (storage->hasUsers())
return;
if (!storage->global_context.getDependencies(storage->database_name, storage->table_name).empty())
if (!storage->global_context.getDependencies(table_id).empty())
continue;
drop_table = true;
}

@ -402,14 +396,14 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co

if (drop_table)
{
if (storage->global_context.tryGetTable(storage->database_name, storage->table_name))
if (storage->global_context.tryGetTable(table_id))
{
try
{
/// We create and execute `drop` query for this table
auto drop_query = std::make_shared<ASTDropQuery>();
drop_query->database = storage->database_name;
drop_query->table = storage->table_name;
drop_query->database = table_id.database_name;
drop_query->table = table_id.table_name;
drop_query->kind = ASTDropQuery::Kind::Drop;
ASTPtr ast_drop_query = drop_query;
InterpreterDropQuery drop_interpreter(ast_drop_query, storage->global_context);

@ -417,6 +411,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}

@ -493,9 +488,8 @@ StorageLiveView::~StorageLiveView()

void StorageLiveView::drop(TableStructureWriteLockHolder &)
{
global_context.removeDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
auto table_id = getStorageID();
global_context.removeDependency(select_table_id, table_id);

std::lock_guard lock(mutex);
is_dropped = true;

@ -623,7 +617,7 @@ void registerStorageLiveView(StorageFactory & factory)
if (!args.attach && !args.local_context.getSettingsRef().allow_experimental_live_view)
throw Exception("Experimental LIVE VIEW feature is not enabled (the setting 'allow_experimental_live_view')", ErrorCodes::SUPPORT_IS_DISABLED);

return StorageLiveView::create(args.table_name, args.database_name, args.local_context, args.query, args.columns);
return StorageLiveView::create(args.table_id, args.local_context, args.query, args.columns);
});
}
@ -48,10 +48,11 @@ friend class LiveViewBlockOutputStream;
public:
~StorageLiveView() override;
String getName() const override { return "LiveView"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }
String getSelectDatabaseName() const { return select_database_name; }
String getSelectTableName() const { return select_table_name; }
StorageID getSelectTableID() const { return select_table_id; }
StorageID getBlocksStorageID() const
{
return StorageID("", getStorageID().table_name + "_blocks");
}
StoragePtr getParentStorage() const { return parent_storage; }

NameAndTypePair getColumn(const String & column_name) const override;

@ -170,10 +171,7 @@ public:
const Context & context);

private:
String select_database_name;
String select_table_name;
String table_name;
String database_name;
StorageID select_table_id = StorageID::createEmpty(); /// Will be initialized in constructor
ASTPtr inner_query; /// stored query : SELECT * FROM ( SELECT a FROM A)
ASTPtr inner_subquery; /// stored query's innermost subquery if any
ASTPtr inner_blocks_query; /// query over the mergeable blocks to produce final result

@ -210,8 +208,7 @@ private:
UInt64 temporary_live_view_timeout;

StorageLiveView(
const String & table_name_,
const String & database_name_,
const StorageID & table_id_,
Context & local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns
@ -111,8 +111,7 @@ namespace

MergeTreeData::MergeTreeData(
const String & database_,
const String & table_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,

@ -122,17 +121,16 @@ MergeTreeData::MergeTreeData(
bool require_part_metadata_,
bool attach,
BrokenPartCallback broken_part_callback_)
: global_context(context_)
: IStorage(table_id_)
, global_context(context_)
, merging_params(merging_params_)
, partition_by_ast(metadata.partition_by_ast)
, sample_by_ast(metadata.sample_by_ast)
, settings_ast(metadata.settings_ast)
, require_part_metadata(require_part_metadata_)
, database_name(database_)
, table_name(table_)
, relative_data_path(relative_data_path_)
, broken_part_callback(broken_part_callback_)
, log_name(database_name + "." + table_name)
, log_name(table_id_.getNameForLogs())
, log(&Logger::get(log_name))
, storage_settings(std::move(storage_settings_))
, storage_policy(context_.getStoragePolicy(getSettings()->storage_policy))

@ -1257,7 +1255,9 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa

/// Data parts are still alive (since DataPartsVector holds shared_ptrs) and contain useful metainformation for logging
/// NOTE: There is no need to log parts deletion somewhere else, all deleting parts pass through this function and pass away
if (auto part_log = global_context.getPartLog(database_name))

auto table_id = getStorageID();
if (auto part_log = global_context.getPartLog(table_id.database_name))
{
PartLogElement part_log_elem;

@ -1265,8 +1265,8 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa
part_log_elem.event_time = time(nullptr);
part_log_elem.duration_ms = 0;

part_log_elem.database_name = database_name;
part_log_elem.table_name = table_name;
part_log_elem.database_name = table_id.database_name;
part_log_elem.table_name = table_id.table_name;

for (auto & part : parts)
{

@ -1341,8 +1341,7 @@ void MergeTreeData::rename(
global_context.dropCaches();

relative_data_path = new_table_path;
table_name = new_table_name;
database_name = new_database_name;
renameInMemory(new_database_name, new_table_name);
}

void MergeTreeData::dropAllData()

@ -3525,7 +3524,8 @@ MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(IStorage * sour
{
MergeTreeData * src_data = dynamic_cast<MergeTreeData *>(source_table);
if (!src_data)
throw Exception("Table " + table_name + " supports attachPartitionFrom only for MergeTree family of table engines."
throw Exception("Table " + source_table->getStorageID().getNameForLogs() +
" supports attachPartitionFrom only for MergeTree family of table engines."
" Got " + source_table->getName(), ErrorCodes::NOT_IMPLEMENTED);

if (getColumns().getAllPhysical().sizeOfDifference(src_data->getColumns().getAllPhysical()))

@ -3691,7 +3691,8 @@ void MergeTreeData::writePartLog(
const MergeListEntry * merge_entry)
try
{
auto part_log = global_context.getPartLog(database_name);
auto table_id = getStorageID();
auto part_log = global_context.getPartLog(table_id.database_name);
if (!part_log)
return;

@ -3706,8 +3707,8 @@ try
/// TODO: Stop stopwatch in outer code to exclude ZK timings and so on
part_log_elem.duration_ms = elapsed_ns / 1000000;

part_log_elem.database_name = database_name;
part_log_elem.table_name = table_name;
part_log_elem.database_name = table_id.database_name;
part_log_elem.table_name = table_id.table_name;
part_log_elem.partition_id = MergeTreePartInfo::fromPartName(new_part_name, format_version).partition_id;
part_log_elem.part_name = new_part_name;
@ -330,7 +330,7 @@ public:
///
/// require_part_metadata - should checksums.txt and columns.txt exist in the part directory.
/// attach - whether the existing table is attached or the new table is created.
MergeTreeData(const String & database_, const String & table_,
MergeTreeData(const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,

@ -396,9 +396,6 @@ public:
|| column_name == "_sample_factor";
}

String getDatabaseName() const override { return database_name; }
String getTableName() const override { return table_name; }

/// Load the set of data parts from disk. Call once - immediately after the object is created.
void loadDataParts(bool skip_sanity_checks);

@ -787,8 +784,6 @@ protected:

bool require_part_metadata;

String database_name;
String table_name;
String relative_data_path;
@ -25,7 +25,7 @@ static const auto ALTER_ERROR_SLEEP_MS = 10 * 1000;
ReplicatedMergeTreeAlterThread::ReplicatedMergeTreeAlterThread(StorageReplicatedMergeTree & storage_)
: storage(storage_)
, zk_node_cache([&] { return storage.getZooKeeper(); })
, log_name(storage.database_name + "." + storage.table_name + " (ReplicatedMergeTreeAlterThread)")
, log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeAlterThread)")
, log(&Logger::get(log_name))
{
task = storage_.global_context.getSchedulePool().createTask(log_name, [this]{ run(); });

@ -19,7 +19,7 @@ namespace ErrorCodes

ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_)
: storage(storage_)
, log_name(storage.database_name + "." + storage.table_name + " (ReplicatedMergeTreeCleanupThread)")
, log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeCleanupThread)")
, log(&Logger::get(log_name))
{
task = storage.global_context.getSchedulePool().createTask(log_name, [this]{ run(); });

@ -24,7 +24,7 @@ static const auto PART_CHECK_ERROR_SLEEP_MS = 5 * 1000;

ReplicatedMergeTreePartCheckThread::ReplicatedMergeTreePartCheckThread(StorageReplicatedMergeTree & storage_)
: storage(storage_)
, log_name(storage.database_name + "." + storage.table_name + " (ReplicatedMergeTreePartCheckThread)")
, log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreePartCheckThread)")
, log(&Logger::get(log_name))
{
task = storage.global_context.getSchedulePool().createTask(log_name, [this] { run(); });

@ -40,7 +40,7 @@ static String generateActiveNodeIdentifier()

ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_)
: storage(storage_)
, log_name(storage.database_name + "." + storage.table_name + " (ReplicatedMergeTreeRestartingThread)")
, log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeRestartingThread)")
, log(&Logger::get(log_name))
, active_node_identifier(generateActiveNodeIdentifier())
{
@ -18,8 +18,6 @@ class StorageFromMergeTreeDataPart : public ext::shared_ptr_helper<StorageFromMe
friend struct ext::shared_ptr_helper<StorageFromMergeTreeDataPart>;
public:
String getName() const override { return "FromMergeTreeDataPart"; }
String getTableName() const override { return part->storage.getTableName() + " (part " + part->name + ")"; }
String getDatabaseName() const override { return part->storage.getDatabaseName(); }

BlockInputStreams read(
const Names & column_names,

@ -51,7 +49,8 @@ public:

protected:
StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
: IStorage(part_->storage.getVirtuals()), part(part_)
: IStorage(getIDFromPart(part_), part_->storage.getVirtuals())
, part(part_)
{
setColumns(part_->storage.getColumns());
setIndices(part_->storage.getIndices());

@ -59,6 +58,12 @@ protected:

private:
MergeTreeData::DataPartPtr part;

static StorageID getIDFromPart(const MergeTreeData::DataPartPtr & part_)
{
auto table_id = part_->storage.getStorageID();
return StorageID(table_id.database_name, table_id.table_name + " (part " + part_->name + ")");
}
};

}
@ -656,12 +656,12 @@ static StoragePtr create(const StorageFactory::Arguments & args)
};
if (replicated)
return StorageReplicatedMergeTree::create(
zookeeper_path, replica_name, args.attach, args.database_name, args.table_name, args.relative_data_path,
zookeeper_path, replica_name, args.attach, args.table_id, args.relative_data_path,
metadata, args.context, date_column_name, merging_params, std::move(storage_settings),
args.has_force_restore_data_flag);
else
return StorageMergeTree::create(
args.database_name, args.table_name, args.relative_data_path, metadata, args.attach, args.context,
args.table_id, args.relative_data_path, metadata, args.attach, args.context,
date_column_name, merging_params, std::move(storage_settings),
args.has_force_restore_data_flag);
}
@ -56,18 +56,27 @@ namespace ErrorCodes
}

StorageBuffer::StorageBuffer(const std::string & database_name_, const std::string & table_name_,
const ColumnsDescription & columns_, const ConstraintsDescription & constraints_,
StorageBuffer::StorageBuffer(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
Context & context_,
size_t num_shards_, const Thresholds & min_thresholds_, const Thresholds & max_thresholds_,
const String & destination_database_, const String & destination_table_, bool allow_materialized_)
:
table_name(table_name_), database_name(database_name_), global_context(context_),
num_shards(num_shards_), buffers(num_shards_),
min_thresholds(min_thresholds_), max_thresholds(max_thresholds_),
destination_database(destination_database_), destination_table(destination_table_),
no_destination(destination_database.empty() && destination_table.empty()),
allow_materialized(allow_materialized_), log(&Logger::get("StorageBuffer (" + table_name + ")"))
size_t num_shards_,
const Thresholds & min_thresholds_,
const Thresholds & max_thresholds_,
const String & destination_database_,
const String & destination_table_,
bool allow_materialized_)
: IStorage(table_id_)
, global_context(context_)
, num_shards(num_shards_), buffers(num_shards_)
, min_thresholds(min_thresholds_)
, max_thresholds(max_thresholds_)
, destination_database(destination_database_)
, destination_table(destination_table_)
, no_destination(destination_database.empty() && destination_table.empty())
, allow_materialized(allow_materialized_)
, log(&Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")"))
{
setColumns(columns_);
setConstraints(constraints_);

@ -716,15 +725,15 @@ void StorageBuffer::alter(const AlterCommands & params, const Context & context,
{
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());

const String database_name_ = getDatabaseName();
const String table_name_ = getTableName();
auto table_id = getStorageID();
checkAlterIsPossible(params, context.getSettingsRef());

/// So that no blocks of the old structure remain.
optimize({} /*query*/, {} /*partition_id*/, false /*final*/, false /*deduplicate*/, context);

StorageInMemoryMetadata metadata = getInMemoryMetadata();
params.apply(metadata);
context.getDatabase(database_name_)->alterTable(context, table_name_, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
setColumns(std::move(metadata.columns));
}

@ -763,8 +772,9 @@ void registerStorageBuffer(StorageFactory & factory)
UInt64 max_bytes = applyVisitor(FieldVisitorConvertToNumber<UInt64>(), engine_args[8]->as<ASTLiteral &>().value);

return StorageBuffer::create(
args.database_name,
args.table_name, args.columns, args.constraints,
args.table_id,
args.columns,
args.constraints,
args.context,
num_buckets,
StorageBuffer::Thresholds{min_time, min_rows, min_bytes},
@ -53,8 +53,6 @@ public:
};

std::string getName() const override { return "Buffer"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const override;

@ -73,12 +71,6 @@ public:
void shutdown() override;
bool optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context) override;

void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override
{
table_name = new_table_name;
database_name = new_database_name;
}

bool supportsSampling() const override { return true; }
bool supportsPrewhere() const override
{

@ -102,9 +94,6 @@ public:
~StorageBuffer() override;

private:
String table_name;
String database_name;

Context global_context;

struct Buffer

@ -147,11 +136,17 @@ protected:
/** num_shards - the level of internal parallelism (the number of independent buffers)
* The buffer is flushed if all minimum thresholds or at least one of the maximum thresholds are exceeded.
*/
StorageBuffer(const std::string & database_name_, const std::string & table_name_,
const ColumnsDescription & columns_, const ConstraintsDescription & constraints_,
StorageBuffer(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
Context & context_,
size_t num_shards_, const Thresholds & min_thresholds_, const Thresholds & max_thresholds_,
const String & destination_database_, const String & destination_table_, bool allow_materialized_);
size_t num_shards_,
const Thresholds & min_thresholds_,
const Thresholds & max_thresholds_,
const String & destination_database_,
const String & destination_table_,
bool allow_materialized_);
};

}
@ -24,16 +24,14 @@ namespace ErrorCodes

StorageDictionary::StorageDictionary(
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const Context & context,
bool attach,
const String & dictionary_name_)
: table_name(table_name_),
database_name(database_name_),
dictionary_name(dictionary_name_),
logger(&Poco::Logger::get("StorageDictionary"))
: IStorage(table_id_)
, dictionary_name(dictionary_name_)
, logger(&Poco::Logger::get("StorageDictionary"))
{
setColumns(columns_);

@ -108,7 +106,7 @@ void registerStorageDictionary(StorageFactory & factory)
String dictionary_name = args.engine_args[0]->as<ASTLiteral &>().value.safeGet<String>();

return StorageDictionary::create(
args.database_name, args.table_name, args.columns, args.context, args.attach, dictionary_name);
args.table_id, args.columns, args.context, args.attach, dictionary_name);
});
}
|
@ -24,8 +24,6 @@ class StorageDictionary : public ext::shared_ptr_helper<StorageDictionary>, publ
|
||||
friend struct ext::shared_ptr_helper<StorageDictionary>;
|
||||
public:
|
||||
std::string getName() const override { return "Dictionary"; }
|
||||
std::string getTableName() const override { return table_name; }
|
||||
std::string getDatabaseName() const override { return database_name; }
|
||||
|
||||
BlockInputStreams read(const Names & column_names,
|
||||
const SelectQueryInfo & query_info,
|
||||
@ -59,8 +57,6 @@ public:
|
||||
private:
|
||||
using Ptr = MultiVersion<IDictionaryBase>::Version;
|
||||
|
||||
String table_name;
|
||||
String database_name;
|
||||
String dictionary_name;
|
||||
Poco::Logger * logger;
|
||||
|
||||
@ -68,8 +64,7 @@ private:
|
||||
|
||||
protected:
|
||||
StorageDictionary(
|
||||
const String & database_name_,
|
||||
const String & table_name_,
|
||||
const StorageID & table_id_,
|
||||
const ColumnsDescription & columns_,
|
||||
const Context & context,
|
||||
bool attach,
|
||||
|
@ -205,8 +205,7 @@ static ExpressionActionsPtr buildShardingKeyExpression(const ASTPtr & sharding_k
}

StorageDistributed::StorageDistributed(
const String & database_name_,
const String & table_name_,
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & remote_database_,

@ -216,13 +215,18 @@ StorageDistributed::StorageDistributed(
const ASTPtr & sharding_key_,
const String & relative_data_path_,
bool attach_)
: IStorage(ColumnsDescription({
{"_shard_num", std::make_shared<DataTypeUInt32>()},
}, true)),
table_name(table_name_), database_name(database_name_),
remote_database(remote_database_), remote_table(remote_table_),
global_context(context_), cluster_name(global_context.getMacros()->expand(cluster_name_)), has_sharding_key(sharding_key_),
path(relative_data_path_.empty() ? "" : (context_.getPath() + relative_data_path_))
: IStorage(id_,
ColumnsDescription(
{
{"_shard_num", std::make_shared<DataTypeUInt32>()},
},
true))
, remote_database(remote_database_)
, remote_table(remote_table_)
, global_context(context_)
, cluster_name(global_context.getMacros()->expand(cluster_name_))
, has_sharding_key(sharding_key_)
, path(relative_data_path_.empty() ? "" : (context_.getPath() + relative_data_path_))
{
setColumns(columns_);
setConstraints(constraints_);

@ -237,15 +241,14 @@ StorageDistributed::StorageDistributed(
if (!attach_ && !cluster_name.empty())
{
size_t num_local_shards = global_context.getCluster(cluster_name)->getLocalShardCount();
if (num_local_shards && remote_database == database_name && remote_table == table_name)
throw Exception("Distributed table " + table_name + " looks at itself", ErrorCodes::INFINITE_LOOP);
if (num_local_shards && remote_database == id_.database_name && remote_table == id_.table_name)
throw Exception("Distributed table " + id_.table_name + " looks at itself", ErrorCodes::INFINITE_LOOP);
}
}

StorageDistributed::StorageDistributed(
const String & database_name_,
const String & table_name_,
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
ASTPtr remote_table_function_ptr_,

@ -254,34 +257,34 @@ StorageDistributed::StorageDistributed(
const ASTPtr & sharding_key_,
const String & relative_data_path_,
bool attach)
: StorageDistributed(database_name_, table_name_, columns_, constraints_, String{}, String{}, cluster_name_, context_, sharding_key_, relative_data_path_, attach)
: StorageDistributed(id_, columns_, constraints_, String{}, String{}, cluster_name_, context_, sharding_key_, relative_data_path_, attach)
{
remote_table_function_ptr = remote_table_function_ptr_;
remote_table_function_ptr = std::move(remote_table_function_ptr_);
}

StoragePtr StorageDistributed::createWithOwnCluster(
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & remote_database_, /// database on remote servers.
const String & remote_table_, /// The name of the table on the remote servers.
ClusterPtr owned_cluster_,
const Context & context_)
{
auto res = create(String{}, table_name_, columns_, ConstraintsDescription{}, remote_database_, remote_table_, String{}, context_, ASTPtr(), String(), false);
res->owned_cluster = owned_cluster_;
auto res = create(table_id_, columns_, ConstraintsDescription{}, remote_database_, remote_table_, String{}, context_, ASTPtr(), String(), false);
res->owned_cluster = std::move(owned_cluster_);
return res;
}

StoragePtr StorageDistributed::createWithOwnCluster(
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
ASTPtr & remote_table_function_ptr_,
ClusterPtr & owned_cluster_,
const Context & context_)
{
auto res = create(String{}, table_name_, columns_, ConstraintsDescription{}, remote_table_function_ptr_, String{}, context_, ASTPtr(), String(), false);
auto res = create(table_id_, columns_, ConstraintsDescription{}, remote_table_function_ptr_, String{}, context_, ASTPtr(), String(), false);
res->owned_cluster = owned_cluster_;
return res;
}
@ -342,17 +345,18 @@ BlockInputStreams StorageDistributed::read(
if (has_sharding_key)
{
auto smaller_cluster = skipUnusedShards(cluster, query_info);
auto table_id = getStorageID();

if (smaller_cluster)
{
cluster = smaller_cluster;
LOG_DEBUG(log, "Reading from " << database_name << "." << table_name << ": "
LOG_DEBUG(log, "Reading from " << table_id.getNameForLogs() << ": "
"Skipping irrelevant shards - the query will be sent to the following shards of the cluster (shard numbers): "
" " << makeFormattedListOfShards(cluster));
}
else
{
LOG_DEBUG(log, "Reading from " << database_name << "." << table_name << ": "
LOG_DEBUG(log, "Reading from " << table_id.getNameForLogs() << ": "
"Unable to figure out irrelevant shards from WHERE/PREWHERE clauses - the query will be sent to all shards of the cluster");
}
}

@ -410,13 +414,12 @@ void StorageDistributed::checkAlterIsPossible(const AlterCommands & commands, co
void StorageDistributed::alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder)
{
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());
auto table_id = getStorageID();

const String current_database_name = getDatabaseName();
const String current_table_name = getTableName();

checkAlterIsPossible(params, context.getSettingsRef());
StorageInMemoryMetadata metadata = getInMemoryMetadata();
params.apply(metadata);
context.getDatabase(current_database_name)->alterTable(context, current_table_name, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
setColumns(std::move(metadata.columns));
}

@ -610,8 +613,6 @@ void StorageDistributed::flushClusterNodesAllData()
void StorageDistributed::rename(const String & new_path_to_table_data, const String & new_database_name, const String & new_table_name,
TableStructureWriteLockHolder &)
{
table_name = new_table_name;
database_name = new_database_name;
if (!path.empty())
{
auto new_path = global_context.getPath() + new_path_to_table_data;

@ -621,6 +622,7 @@ void StorageDistributed::rename(const String & new_path_to_table_data, const Str
for (auto & node : cluster_nodes_data)
node.second.directory_monitor->updatePath();
}
renameInMemory(new_database_name, new_table_name);
}

@ -674,7 +676,7 @@ void registerStorageDistributed(StorageFactory & factory)
}

return StorageDistributed::create(
args.database_name, args.table_name, args.columns, args.constraints,
args.table_id, args.columns, args.constraints,
remote_database, remote_table, cluster_name,
args.context, sharding_key, args.relative_data_path,
args.attach);
@ -37,7 +37,7 @@ public:
~StorageDistributed() override;

static StoragePtr createWithOwnCluster(
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & remote_database_, /// database on remote servers.
const String & remote_table_, /// The name of the table on the remote servers.

@ -45,15 +45,13 @@ public:
const Context & context_);

static StoragePtr createWithOwnCluster(
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
ASTPtr & remote_table_function_ptr_, /// Table function ptr.
ClusterPtr & owned_cluster_,
const Context & context_);

std::string getName() const override { return "Distributed"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

bool supportsSampling() const override { return true; }
bool supportsFinal() const override { return true; }

@ -117,8 +115,6 @@ public:

ActionLock getActionLock(StorageActionBlockType type) override;

String table_name;
String database_name;
String remote_database;
String remote_table;
ASTPtr remote_table_function_ptr;

@ -161,8 +157,7 @@ public:

protected:
StorageDistributed(
const String & database_name_,
const String & table_name_,
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & remote_database_,

@ -174,8 +169,7 @@ protected:
bool attach_);

StorageDistributed(
const String & database_name,
const String & table_name_,
const StorageID & id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
ASTPtr remote_table_function_ptr_,
@ -5,6 +5,7 @@
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <IO/WriteHelpers.h>
#include <Storages/StorageID.h>

namespace DB
{

@ -41,13 +42,10 @@ void StorageFactory::registerStorage(const std::string & name, Creator creator)
StoragePtr StorageFactory::get(
const ASTCreateQuery & query,
const String & relative_data_path,
const String & table_name,
const String & database_name,
Context & local_context,
Context & context,
const ColumnsDescription & columns,
const ConstraintsDescription & constraints,
bool attach,
bool has_force_restore_data_flag) const
{
String name;

@ -151,13 +149,12 @@ StoragePtr StorageFactory::get(
.storage_def = storage_def,
.query = query,
.relative_data_path = relative_data_path,
.table_name = table_name,
.database_name = database_name,
.table_id = StorageID(query.database, query.table, query.uuid),
.local_context = local_context,
.context = context,
.columns = columns,
.constraints = constraints,
.attach = attach,
.attach = query.attach,
.has_force_restore_data_flag = has_force_restore_data_flag
};
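
A brief illustrative note, assuming `query` is the ASTCreateQuery shown above: the factory now derives table identity, including the optional UUID, from the AST itself rather than from separately passed name strings.

/// Illustrative sketch only: a CREATE/ATTACH query carrying a UUID
/// yields an ID for which hasUUID() is true.
StorageID table_id(query.database, query.table, query.uuid);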

@ -15,6 +15,7 @@ namespace DB
class Context;
class ASTCreateQuery;
class ASTStorage;
struct StorageID;

/** Allows to create a table by the name and parameters of the engine.

@ -36,8 +37,7 @@ public:
/// Path to table data.
/// Relative to <path> from server config (possibly <path> of some <disk> of some <volume> for *MergeTree)
const String & relative_data_path;
const String & table_name;
const String & database_name;
const StorageID & table_id;
Context & local_context;
Context & context;
const ColumnsDescription & columns;

@ -51,13 +51,10 @@ public:
StoragePtr get(
const ASTCreateQuery & query,
const String & relative_data_path,
const String & table_name,
const String & database_name,
Context & local_context,
Context & context,
const ColumnsDescription & columns,
const ConstraintsDescription & constraints,
bool attach,
bool has_force_restore_data_flag) const;

/// Register a table engine by its name.
@ -182,9 +182,17 @@ StorageFile::StorageFile(const std::string & relative_table_dir_path, CommonArgu
}

StorageFile::StorageFile(CommonArguments args)
: IStorage(ColumnsDescription({{"_path", std::make_shared<DataTypeString>()}, {"_file", std::make_shared<DataTypeString>()}}, true))
, table_name(args.table_name), database_name(args.database_name), format_name(args.format_name)
, compression_method(args.compression_method), base_path(args.context.getPath())
: IStorage(args.table_id,
ColumnsDescription({
{"_path", std::make_shared<DataTypeString>()},
{"_file", std::make_shared<DataTypeString>()}
},
true /// all_virtuals
)
)
, format_name(args.format_name)
, compression_method(args.compression_method)
, base_path(args.context.getPath())
{
if (args.format_name != "Distributed")
setColumns(args.columns);

@ -360,7 +368,7 @@ public:
else
{
if (storage.paths.size() != 1)
throw Exception("Table '" + storage.table_name + "' is in readonly mode because of globs in filepath", ErrorCodes::DATABASE_ACCESS_DENIED);
throw Exception("Table '" + storage.getStorageID().getNameForLogs() + "' is in readonly mode because of globs in filepath", ErrorCodes::DATABASE_ACCESS_DENIED);
write_buf = wrapWriteBufferWithCompressionMethod(
std::make_unique<WriteBufferFromFile>(storage.paths[0], DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT),
compression_method, 3);

@ -412,17 +420,17 @@ BlockOutputStreamPtr StorageFile::write(
Strings StorageFile::getDataPaths() const
{
if (paths.empty())
throw Exception("Table '" + table_name + "' is in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
throw Exception("Table '" + getStorageID().getNameForLogs() + "' is in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
return paths;
}

void StorageFile::rename(const String & new_path_to_table_data, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
if (!is_db_table)
throw Exception("Can't rename table '" + table_name + "' binded to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED);
throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " binded to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED);

if (paths.size() != 1)
throw Exception("Can't rename table '" + table_name + "' in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);

std::unique_lock<std::shared_mutex> lock(rwlock);

@ -431,14 +439,13 @@ void StorageFile::rename(const String & new_path_to_table_data, const String & n
Poco::File(paths[0]).renameTo(path_new);

paths[0] = std::move(path_new);
table_name = new_table_name;
database_name = new_database_name;
renameInMemory(new_database_name, new_table_name);
}

void StorageFile::truncate(const ASTPtr & /*query*/, const Context & /* context */, TableStructureWriteLockHolder &)
{
if (paths.size() != 1)
throw Exception("Can't truncate table '" + table_name + "' in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
throw Exception("Can't truncate table '" + getStorageID().getNameForLogs() + "' in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);

std::unique_lock<std::shared_mutex> lock(rwlock);

@ -473,7 +480,7 @@ void registerStorageFile(StorageFactory & factory)
String format_name = engine_args[0]->as<ASTLiteral &>().value.safeGet<String>();

String compression_method;
StorageFile::CommonArguments common_args{args.database_name, args.table_name, format_name, compression_method,
StorageFile::CommonArguments common_args{args.table_id, format_name, compression_method,
args.columns, args.constraints, args.context};

if (engine_args.size() == 1) /// Table in database
@ -23,8 +23,6 @@ class StorageFile : public ext::shared_ptr_helper<StorageFile>, public IStorage
friend struct ext::shared_ptr_helper<StorageFile>;
public:
std::string getName() const override { return "File"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,

@ -46,8 +44,7 @@ public:

struct CommonArguments
{
const std::string & database_name;
const std::string & table_name;
const StorageID & table_id;
const std::string & format_name;
const std::string & compression_method;
const ColumnsDescription & columns;

@ -71,8 +68,6 @@ protected:
private:
explicit StorageFile(CommonArguments args);

std::string table_name;
std::string database_name;
std::string format_name;

int table_fd = -1;
@ -36,18 +36,22 @@ namespace ErrorCodes
}

StorageHDFS::StorageHDFS(const String & uri_,
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
Context & context_,
const String & compression_method_ = "")
: IStorage(ColumnsDescription({{"_path", std::make_shared<DataTypeString>()}, {"_file", std::make_shared<DataTypeString>()}}, true))
: IStorage(table_id_,
ColumnsDescription({
{"_path", std::make_shared<DataTypeString>()},
{"_file", std::make_shared<DataTypeString>()}
},
true /// all_virtuals
)
)
, uri(uri_)
, format_name(format_name_)
, table_name(table_name_)
, database_name(database_name_)
, context(context_)
, compression_method(compression_method_)
{

@ -259,12 +263,6 @@ BlockInputStreams StorageHDFS::read(
return narrowBlockInputStreams(result, num_streams);
}

void StorageHDFS::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
table_name = new_table_name;
database_name = new_database_name;
}

BlockOutputStreamPtr StorageHDFS::write(const ASTPtr & /*query*/, const Context & /*context*/)
{
return std::make_shared<HDFSBlockOutputStream>(uri,

@ -299,7 +297,7 @@ void registerStorageHDFS(StorageFactory & factory)
compression_method = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
} else compression_method = "auto";

return StorageHDFS::create(url, args.database_name, args.table_name, format_name, args.columns, args.constraints, args.context, compression_method);
return StorageHDFS::create(url, args.table_id, format_name, args.columns, args.constraints, args.context, compression_method);
});
}
@ -18,8 +18,6 @@ class StorageHDFS : public ext::shared_ptr_helper<StorageHDFS>, public IStorage
friend struct ext::shared_ptr_helper<StorageHDFS>;
public:
String getName() const override { return "HDFS"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

BlockInputStreams read(const Names & column_names,
const SelectQueryInfo & query_info,

@ -30,12 +28,9 @@ public:

BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override;

void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;

protected:
StorageHDFS(const String & uri_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,

@ -45,8 +40,6 @@ protected:
private:
String uri;
String format_name;
String table_name;
String database_name;
Context & context;
String compression_method;
dbms/src/Storages/StorageID.h (new file, 98 lines)

@ -0,0 +1,98 @@
#pragma once
#include <Core/Types.h>
#include <Core/UUID.h>
#include <Common/quoteString.h>
#include <IO/WriteHelpers.h>
#include <tuple>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

static constexpr char const * TABLE_WITH_UUID_NAME_PLACEHOLDER = "_";

struct StorageID
{
String database_name;
String table_name;
UUID uuid = UUID{UInt128(0, 0)};

StorageID(const String & database, const String & table, UUID uuid_ = UUID{UInt128(0, 0)})
: database_name(database), table_name(table), uuid(uuid_)
{
assertNotEmpty();
}

String getDatabaseName() const
{
assertNotEmpty();
return database_name;
}

String getTableName() const
{
assertNotEmpty();
return table_name;
}

String getFullTableName() const
{
assertNotEmpty();
return (database_name.empty() ? "" : database_name + ".") + table_name;
}

String getNameForLogs() const
{
assertNotEmpty();
return (database_name.empty() ? "" : backQuoteIfNeed(database_name) + ".") + backQuoteIfNeed(table_name)
+ (hasUUID() ? " (UUID " + toString(uuid) + ")" : "");
}

bool empty() const
{
return table_name.empty() && !hasUUID();
}

bool hasUUID() const
{
return uuid != UUID{UInt128(0, 0)};
}

bool operator<(const StorageID & rhs) const
{
assertNotEmpty();
/// It's needed for ViewDependencies
if (!hasUUID() && !rhs.hasUUID())
/// If both IDs don't have UUID, compare them like pair of strings
return std::tie(database_name, table_name) < std::tie(rhs.database_name, rhs.table_name);
else if (hasUUID() && rhs.hasUUID())
/// If both IDs have UUID, compare UUIDs and ignore database and table name
return uuid < rhs.uuid;
else
/// All IDs without UUID are less than all IDs with UUID
return !hasUUID();
}

void assertNotEmpty() const
{
if (empty())
throw Exception("Both table name and UUID are empty", ErrorCodes::LOGICAL_ERROR);
if (table_name == TABLE_WITH_UUID_NAME_PLACEHOLDER && !hasUUID())
throw Exception("Table name was replaced with placeholder, but UUID is Nil", ErrorCodes::LOGICAL_ERROR);
if (table_name.empty() && !database_name.empty())
throw Exception("Table name is empty, but database name is not", ErrorCodes::LOGICAL_ERROR);
}

/// Avoid implicit construction of empty StorageID. However, it's needed for deferred initialization.
static StorageID createEmpty() { return {}; }

private:
StorageID() = default;
};

}
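
A minimal usage sketch of the struct defined above (not part of the commit; database and table names are illustrative):

StorageID id("db", "visits");
id.getFullTableName();   /// "db.visits"
id.getNameForLogs();     /// "db.visits", back-quoted if needed; appends " (UUID ...)" once a UUID is set
id.hasUUID();            /// false: uuid defaults to UUID{UInt128(0, 0)}

/// Ordering (used by ViewDependencies): two UUID-less IDs compare as
/// (database_name, table_name) pairs, two UUID-bearing IDs compare by UUID,
/// and UUID-less IDs sort before UUID-bearing ones.
bool less = StorageID("db", "a") < StorageID("db", "b");   /// true

/// The public constructor rejects empty IDs; StorageID::createEmpty() is the
/// explicit escape hatch for deferred initialization.
StorageID deferred = StorageID::createEmpty();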

@ -15,8 +15,8 @@ namespace ErrorCodes
extern const int INVALID_USAGE_OF_INPUT;
}

StorageInput::StorageInput(const String &table_name_, const ColumnsDescription & columns_)
: IStorage(columns_), table_name(table_name_)
StorageInput::StorageInput(const String & table_name_, const ColumnsDescription & columns_)
: IStorage({"", table_name_}, columns_)
{
setColumns(columns_);
}

@ -13,7 +13,6 @@ class StorageInput : public ext::shared_ptr_helper<StorageInput>, public IStorag
friend struct ext::shared_ptr_helper<StorageInput>;
public:
String getName() const override { return "Input"; }
String getTableName() const override { return table_name; }

/// A table will read from this stream.
void setInputStream(BlockInputStreamPtr input_stream_);

@ -27,7 +26,6 @@ public:
unsigned num_streams) override;

private:
String table_name;
BlockInputStreamPtr input_stream;

protected:

@ -30,8 +30,7 @@ namespace ErrorCodes

StorageJoin::StorageJoin(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const Names & key_names_,
bool use_nulls_,
SizeLimits limits_,

@ -41,7 +40,7 @@ StorageJoin::StorageJoin(
const ConstraintsDescription & constraints_,
bool overwrite,
const Context & context_)
: StorageSetOrJoinBase{relative_path_, database_name_, table_name_, columns_, constraints_, context_}
: StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_}
, key_names(key_names_)
, use_nulls(use_nulls_)
, limits(limits_)

@ -72,11 +71,11 @@ void StorageJoin::truncate(const ASTPtr &, const Context &, TableStructureWriteL
HashJoinPtr StorageJoin::getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) const
{
if (kind != analyzed_join->kind() || strictness != analyzed_join->strictness())
throw Exception("Table " + backQuote(table_name) + " has incompatible type of JOIN.", ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN);
throw Exception("Table " + getStorageID().getNameForLogs() + " has incompatible type of JOIN.", ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN);

if ((analyzed_join->forceNullableRight() && !use_nulls) ||
(!analyzed_join->forceNullableRight() && isLeftOrFull(analyzed_join->kind()) && use_nulls))
throw Exception("Table " + backQuote(table_name) + " needs the same join_use_nulls setting as present in LEFT or FULL JOIN.",
throw Exception("Table " + getStorageID().getNameForLogs() + " needs the same join_use_nulls setting as present in LEFT or FULL JOIN.",
ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN);

/// TODO: check key columns

@ -200,8 +199,7 @@ void registerStorageJoin(StorageFactory & factory)

return StorageJoin::create(
args.relative_data_path,
args.database_name,
args.table_name,
args.table_id,
key_names,
join_use_nulls,
SizeLimits{max_rows_in_join, max_bytes_in_join, join_overflow_mode},

@ -62,8 +62,7 @@ private:
protected:
StorageJoin(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const Names & key_names_,
bool use_nulls_,
SizeLimits limits_,
@ -413,14 +413,15 @@ void LogBlockOutputStream::writeMarks(MarksForColumns && marks)
StorageLog::StorageLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
size_t max_compress_block_size_)
: disk(std::move(disk_)), table_path(relative_path_), database_name(database_name_), table_name(table_name_),
max_compress_block_size(max_compress_block_size_),
file_checker(disk, table_path + "sizes.json")
: IStorage(table_id_)
, disk(std::move(disk_))
, table_path(relative_path_)
, max_compress_block_size(max_compress_block_size_)
, file_checker(disk, table_path + "sizes.json")
{
setColumns(columns_);
setConstraints(constraints_);

@ -512,14 +513,13 @@ void StorageLog::rename(const String & new_path_to_table_data, const String & ne
disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
database_name = new_database_name;
table_name = new_table_name;
file_checker.setPath(table_path + "sizes.json");

for (auto & file : files)
file.second.data_file_path = table_path + fileName(file.second.data_file_path);

marks_file_path = table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME;
renameInMemory(new_database_name, new_table_name);
}

void StorageLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &)

@ -633,7 +633,7 @@ void registerStorageLog(StorageFactory & factory)
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageLog::create(
args.context.getDefaultDisk(), args.relative_data_path, args.database_name, args.table_name, args.columns, args.constraints,
args.context.getDefaultDisk(), args.relative_data_path, args.table_id, args.columns, args.constraints,
args.context.getSettings().max_compress_block_size);
});
}

@ -23,8 +23,6 @@ class StorageLog : public ext::shared_ptr_helper<StorageLog>, public IStorage

public:
String getName() const override { return "Log"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,

@ -55,9 +53,8 @@ protected:
*/
StorageLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const std::string & relative_path_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
size_t max_compress_block_size_);

@ -87,8 +84,6 @@ private:

DiskPtr disk;
String table_path;
String database_name;
String table_name;

mutable std::shared_mutex rwlock;

@ -113,9 +108,6 @@ private:
/// You can not call with a write locked `rwlock`.
void loadMarks();

/// The order of adding files should not change: it corresponds to the order of the columns in the marks file.
void addFile(const String & column_name, const IDataType & type, size_t level = 0);

/** For normal columns, the number of rows in the block is specified in the marks.
* For array columns and nested structures, there is more than one group of marks that correspond to different files
* - for elements (file name.bin) - the total number of array elements in the block is specified,
@ -28,6 +28,7 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int INCORRECT_QUERY;
|
||||
extern const int QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW;
|
||||
extern const int QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW;
|
||||
}
|
||||
|
||||
static inline String generateInnerTableName(const String & table_name)
|
||||
@ -35,40 +36,35 @@ static inline String generateInnerTableName(const String & table_name)
|
||||
return ".inner." + table_name;
|
||||
}
|
||||
|
||||
static void extractDependentTable(ASTSelectQuery & query, String & select_database_name, String & select_table_name)
|
||||
static StorageID extractDependentTableFromSelectQuery(ASTSelectQuery & query, Context & context, bool add_default_db = true)
|
||||
{
|
||||
auto db_and_table = getDatabaseAndTable(query, 0);
|
||||
ASTPtr subquery = extractTableExpression(query, 0);
|
||||
|
||||
if (!db_and_table && !subquery)
|
||||
return;
|
||||
|
||||
if (db_and_table)
|
||||
if (add_default_db)
|
||||
{
|
||||
select_table_name = db_and_table->table;
|
||||
|
||||
if (db_and_table->database.empty())
|
||||
{
|
||||
db_and_table->database = select_database_name;
|
||||
AddDefaultDatabaseVisitor visitor(select_database_name);
|
||||
visitor.visit(query);
|
||||
}
|
||||
else
|
||||
select_database_name = db_and_table->database;
|
||||
AddDefaultDatabaseVisitor visitor(context.getCurrentDatabase(), nullptr);
|
||||
visitor.visit(query);
|
||||
}
|
||||
else if (auto * ast_select = subquery->as<ASTSelectWithUnionQuery>())
|
||||
|
||||
if (auto db_and_table = getDatabaseAndTable(query, 0))
|
||||
{
|
||||
return StorageID(db_and_table->database, db_and_table->table/*, db_and_table->uuid*/);
|
||||
}
|
||||
else if (auto subquery = extractTableExpression(query, 0))
|
||||
{
|
||||
auto * ast_select = subquery->as<ASTSelectWithUnionQuery>();
|
||||
if (!ast_select)
|
||||
throw Exception("Logical error while creating StorageMaterializedView. "
|
||||
"Could not retrieve table name from select query.",
|
||||
DB::ErrorCodes::LOGICAL_ERROR);
|
||||
if (ast_select->list_of_selects->children.size() != 1)
|
||||
throw Exception("UNION is not supported for MATERIALIZED VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW);
|
||||
throw Exception("UNION is not supported for MATERIALIZED VIEW",
|
||||
ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW);
|
||||
|
||||
auto & inner_query = ast_select->list_of_selects->children.at(0);
|
||||
|
||||
extractDependentTable(inner_query->as<ASTSelectQuery &>(), select_database_name, select_table_name);
|
||||
return extractDependentTableFromSelectQuery(inner_query->as<ASTSelectQuery &>(), context, false);
|
||||
}
|
||||
else
|
||||
throw Exception("Logical error while creating StorageMaterializedView."
|
||||
" Could not retrieve table name from select query.",
|
||||
DB::ErrorCodes::LOGICAL_ERROR);
|
||||
return StorageID::createEmpty();
|
||||
}
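
The signature change above replaces two String out-parameters with a StorageID return value. A hedged before/after recap of the call site, assembled from the constructor diff further down (not additional API):

/// Before: the caller seeds the database name and receives both parts by reference.
String select_database_name = local_context.getCurrentDatabase();
String select_table_name;
extractDependentTable(select_query, select_database_name, select_table_name);

/// After: a single value; an empty StorageID now encodes "the SELECT has no dependent table".
StorageID select_table_id = extractDependentTableFromSelectQuery(select_query, local_context);
if (!select_table_id.empty())
    global_context.addDependency(select_table_id, table_id_);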

@ -94,61 +90,47 @@ static void checkAllowedQueries(const ASTSelectQuery & query)

StorageMaterializedView::StorageMaterializedView(
const String & table_name_,
const String & database_name_,
const StorageID & table_id_,
Context & local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns_,
bool attach_)
: table_name(table_name_),
database_name(database_name_), global_context(local_context.getGlobalContext())
: IStorage(table_id_), global_context(local_context.getGlobalContext())
{
setColumns(columns_);

if (!query.select)
throw Exception("SELECT query is not specified for " + getName(), ErrorCodes::INCORRECT_QUERY);

if (!query.storage && query.to_table.empty())
/// If the destination table is not set, use inner table
has_inner_table = query.to_table.empty();
if (has_inner_table && !query.storage)
throw Exception(
"You must specify where to save results of a MaterializedView query: either ENGINE or an existing table in a TO clause",
ErrorCodes::INCORRECT_QUERY);

/// Default value, if only table name exist in the query
select_database_name = local_context.getCurrentDatabase();
if (query.select->list_of_selects->children.size() != 1)
throw Exception("UNION is not supported for MATERIALIZED VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW);

inner_query = query.select->list_of_selects->children.at(0);

auto & select_query = inner_query->as<ASTSelectQuery &>();
extractDependentTable(select_query, select_database_name, select_table_name);
select_table_id = extractDependentTableFromSelectQuery(select_query, local_context);
checkAllowedQueries(select_query);

if (!select_table_name.empty())
global_context.addDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));

// If the destination table is not set, use inner table
if (!query.to_table.empty())
if (!has_inner_table)
target_table_id = StorageID(query.to_database, query.to_table);
else if (attach_)
{
target_database_name = query.to_database;
target_table_name = query.to_table;
/// If there is an ATTACH request, then the internal table must already be created.
target_table_id = StorageID(table_id_.database_name, generateInnerTableName(table_id_.table_name));
}
else
{
target_database_name = database_name;
target_table_name = generateInnerTableName(table_name);
has_inner_table = true;
}

/// If there is an ATTACH request, then the internal table must already be connected.
if (!attach_ && has_inner_table)
{
/// We will create a query to create an internal table.
auto manual_create_query = std::make_shared<ASTCreateQuery>();
manual_create_query->database = target_database_name;
manual_create_query->table = target_table_name;
manual_create_query->database = table_id_.database_name;
manual_create_query->table = generateInnerTableName(table_id_.table_name);

auto new_columns_list = std::make_shared<ASTColumns>();
new_columns_list->set(new_columns_list->columns, query.columns_list->columns->ptr());
@ -156,24 +138,15 @@ StorageMaterializedView::StorageMaterializedView(
manual_create_query->set(manual_create_query->columns_list, new_columns_list);
manual_create_query->set(manual_create_query->storage, query.storage->ptr());

/// Execute the query.
try
{
InterpreterCreateQuery create_interpreter(manual_create_query, local_context);
create_interpreter.setInternal(true);
create_interpreter.execute();
}
catch (...)
{
/// In case of any error we should remove dependency to the view.
if (!select_table_name.empty())
global_context.removeDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
InterpreterCreateQuery create_interpreter(manual_create_query, local_context);
create_interpreter.setInternal(true);
create_interpreter.execute();

throw;
}
target_table_id = global_context.getTable(manual_create_query->database, manual_create_query->table)->getStorageID();
}

if (!select_table_id.empty())
global_context.addDependency(select_table_id, table_id_);
}

NameAndTypePair StorageMaterializedView::getColumn(const String & column_name) const
@ -220,14 +193,14 @@ BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const
}

static void executeDropQuery(ASTDropQuery::Kind kind, Context & global_context, const String & target_database_name, const String & target_table_name)
static void executeDropQuery(ASTDropQuery::Kind kind, Context & global_context, const StorageID & target_table_id)
{
if (global_context.tryGetTable(target_database_name, target_table_name))
if (global_context.tryGetTable(target_table_id))
{
/// We create and execute `drop` query for internal table.
auto drop_query = std::make_shared<ASTDropQuery>();
drop_query->database = target_database_name;
drop_query->table = target_table_name;
drop_query->database = target_table_id.database_name;
drop_query->table = target_table_id.table_name;
drop_query->kind = kind;
ASTPtr ast_drop_query = drop_query;
InterpreterDropQuery drop_interpreter(ast_drop_query, global_context);
@ -238,25 +211,25 @@ static void executeDropQuery(ASTDropQuery::Kind kind, Context & global_context,

void StorageMaterializedView::drop(TableStructureWriteLockHolder &)
{
global_context.removeDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
auto table_id = getStorageID();
if (!select_table_id.empty())
global_context.removeDependency(select_table_id, table_id);

if (has_inner_table && tryGetTargetTable())
executeDropQuery(ASTDropQuery::Kind::Drop, global_context, target_database_name, target_table_name);
executeDropQuery(ASTDropQuery::Kind::Drop, global_context, target_table_id);
}

void StorageMaterializedView::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &)
{
if (has_inner_table)
executeDropQuery(ASTDropQuery::Kind::Truncate, global_context, target_database_name, target_table_name);
executeDropQuery(ASTDropQuery::Kind::Truncate, global_context, target_table_id);
}

void StorageMaterializedView::checkStatementCanBeForwarded() const
{
if (!has_inner_table)
throw Exception(
"MATERIALIZED VIEW targets existing table " + target_database_name + "." + target_table_name + ". "
"MATERIALIZED VIEW targets existing table " + target_table_id.getNameForLogs() + ". "
+ "Execute the statement directly on it.", ErrorCodes::INCORRECT_QUERY);
}

@ -278,71 +251,54 @@ void StorageMaterializedView::mutate(const MutationCommands & commands, const Co
getTargetTable()->mutate(commands, context);
}

static void executeRenameQuery(Context & global_context, const String & database_name, const String & table_original_name, const String & new_table_name)
{
if (global_context.tryGetTable(database_name, table_original_name))
{
auto rename = std::make_shared<ASTRenameQuery>();

ASTRenameQuery::Table from;
from.database = database_name;
from.table = table_original_name;

ASTRenameQuery::Table to;
to.database = database_name;
to.table = new_table_name;

ASTRenameQuery::Element elem;
elem.from = from;
elem.to = to;

rename->elements.emplace_back(elem);

InterpreterRenameQuery(rename, global_context).execute();
}
}

void StorageMaterializedView::rename(
const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
if (has_inner_table && tryGetTargetTable())
{
String new_target_table_name = generateInnerTableName(new_table_name);
executeRenameQuery(global_context, target_database_name, target_table_name, new_target_table_name);
target_table_name = new_target_table_name;
auto new_target_table_name = generateInnerTableName(new_table_name);
auto rename = std::make_shared<ASTRenameQuery>();

ASTRenameQuery::Table from;
from.database = target_table_id.database_name;
from.table = target_table_id.table_name;

ASTRenameQuery::Table to;
to.database = target_table_id.database_name;
to.table = new_target_table_name;

ASTRenameQuery::Element elem;
elem.from = from;
elem.to = to;
rename->elements.emplace_back(elem);

InterpreterRenameQuery(rename, global_context).execute();
target_table_id.table_name = new_target_table_name;
}

auto lock = global_context.getLock();

global_context.removeDependencyUnsafe(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));

table_name = new_table_name;
database_name = new_database_name;

global_context.addDependencyUnsafe(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
if (!select_table_id.empty())
global_context.removeDependencyUnsafe(select_table_id, getStorageID());
IStorage::renameInMemory(new_database_name, new_table_name);
if (!select_table_id.empty())
global_context.addDependencyUnsafe(select_table_id, getStorageID());
}

void StorageMaterializedView::shutdown()
{
/// Make sure the dependency is removed after DETACH TABLE
global_context.removeDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
if (!select_table_id.empty())
global_context.removeDependency(select_table_id, getStorageID());
}

StoragePtr StorageMaterializedView::getTargetTable() const
{
return global_context.getTable(target_database_name, target_table_name);
return global_context.getTable(target_table_id);
}

StoragePtr StorageMaterializedView::tryGetTargetTable() const
{
return global_context.tryGetTable(target_database_name, target_table_name);
return global_context.tryGetTable(target_table_id);
}

Strings StorageMaterializedView::getDataPaths() const
@ -389,7 +345,7 @@ void registerStorageMaterializedView(StorageFactory & factory)
{
/// Pass local_context here to convey setting for inner table
return StorageMaterializedView::create(
args.table_name, args.database_name, args.local_context, args.query,
args.table_id, args.local_context, args.query,
args.columns, args.attach);
});
}
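
One detail worth noting in the rename() hunk above: getStorageID() is called once before and once after IStorage::renameInMemory(), because renameInMemory() updates the storage's own StorageID; the dependency edge therefore has to be removed under the old id and re-added under the new one. A self-contained sketch of that re-keying idea, using stand-in types (the real Context keeps richer dependency state; the details below are assumptions):

#include <map>
#include <set>
#include <string>
#include <utility>

using StorageIDKey = std::pair<std::string, std::string>;  /// {database, table} stand-in

struct DependencyGraph
{
    std::map<StorageIDKey, std::set<StorageIDKey>> edges;
    void remove(const StorageIDKey & from, const StorageIDKey & to) { edges[from].erase(to); }
    void add(const StorageIDKey & from, const StorageIDKey & to) { edges[from].insert(to); }
};

/// Remove the edge keyed by the old name, rename, then re-add keyed by the new name.
void renameKeepingDependency(DependencyGraph & deps, const StorageIDKey & select_table_id,
                             StorageIDKey & self, const std::string & new_db, const std::string & new_table)
{
    deps.remove(select_table_id, self);   /// edge is still keyed by the old id
    self = {new_db, new_table};           /// what renameInMemory() does to the StorageID
    deps.add(select_table_id, self);      /// re-key under the new id
}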

@ -14,8 +14,6 @@ class StorageMaterializedView : public ext::shared_ptr_helper<StorageMaterialize
friend struct ext::shared_ptr_helper<StorageMaterializedView>;
public:
std::string getName() const override { return "MaterializedView"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

ASTPtr getInnerQuery() const { return inner_query->clone(); }

@ -68,12 +66,11 @@ public:
Strings getDataPaths() const override;

private:
String select_database_name;
String select_table_name;
String target_database_name;
String target_table_name;
String table_name;
String database_name;
/// Can be empty if SELECT query doesn't contain table
StorageID select_table_id = StorageID::createEmpty();
/// Will be initialized in constructor
StorageID target_table_id = StorageID::createEmpty();

ASTPtr inner_query;
Context & global_context;
bool has_inner_table = false;
@ -82,8 +79,7 @@ private:

protected:
StorageMaterializedView(
const String & table_name_,
const String & database_name_,
const StorageID & table_id_,
Context & local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns_,

@ -74,8 +74,8 @@ private:
};

StorageMemory::StorageMemory(String database_name_, String table_name_, ColumnsDescription columns_description_, ConstraintsDescription constraints_)
: database_name(std::move(database_name_)), table_name(std::move(table_name_))
StorageMemory::StorageMemory(const StorageID & table_id_, ColumnsDescription columns_description_, ConstraintsDescription constraints_)
: IStorage(table_id_)
{
setColumns(std::move(columns_description_));
setConstraints(std::move(constraints_));
@ -145,7 +145,7 @@ void registerStorageMemory(StorageFactory & factory)
"Engine " + args.engine_name + " doesn't support any arguments (" + toString(args.engine_args.size()) + " given)",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageMemory::create(args.database_name, args.table_name, args.columns, args.constraints);
return StorageMemory::create(args.table_id, args.columns, args.constraints);
});
}

@ -25,8 +25,6 @@ friend struct ext::shared_ptr_helper<StorageMemory>;

public:
String getName() const override { return "Memory"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

size_t getSize() const { return data.size(); }

@ -44,23 +42,14 @@ public:

void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override;

void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override
{
table_name = new_table_name;
database_name = new_database_name;
}

private:
String database_name;
String table_name;

/// The data itself. `list` - so that when inserted to the end, the existing iterators are not invalidated.
BlocksList data;

std::mutex mutex;

protected:
StorageMemory(String database_name_, String table_name_, ColumnsDescription columns_description_, ConstraintsDescription constraints_);
StorageMemory(const StorageID & table_id_, ColumnsDescription columns_description_, ConstraintsDescription constraints_);
};

}

@ -7,19 +7,16 @@
#include <DataStreams/ConcatBlockInputStream.h>
#include <DataStreams/materializeBlock.h>
#include <DataStreams/MaterializingBlockInputStream.h>
#include <DataStreams/FilterBlockInputStream.h>
#include <Storages/StorageMerge.h>
#include <Storages/StorageFactory.h>
#include <Storages/VirtualColumnUtils.h>
#include <Storages/AlterCommands.h>
#include <Interpreters/InterpreterAlterQuery.h>
#include <Interpreters/SyntaxAnalyzer.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Interpreters/InterpreterSelectQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTExpressionList.h>
#include <DataTypes/DataTypeString.h>
#include <Columns/ColumnString.h>
#include <Common/typeid_cast.h>
@ -27,7 +24,6 @@
#include <Databases/IDatabase.h>
#include <ext/range.h>
#include <algorithm>
#include <Parsers/ASTFunction.h>
#include <Parsers/queryToString.h>

@ -37,7 +33,6 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_PREWHERE;
extern const int INCOMPATIBLE_SOURCE_TABLES;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int NO_SUCH_COLUMN_IN_TABLE;
extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE;
@ -46,15 +41,12 @@

StorageMerge::StorageMerge(
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & source_database_,
const String & table_name_regexp_,
const Context & context_)
: IStorage(ColumnsDescription({{"_table", std::make_shared<DataTypeString>()}}, true))
, table_name(table_name_)
, database_name(database_name_)
: IStorage(table_id_, ColumnsDescription({{"_table", std::make_shared<DataTypeString>()}}, true))
, source_database(source_database_)
, table_name_regexp(table_name_regexp_)
, global_context(context_)
@ -125,7 +117,7 @@ bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, cons
size_t i = 0;
for (const auto & table : selected_tables)
{
if (table.first->mayBenefitFromIndexForIn(left_in_operand, query_context))
if (std::get<0>(table)->mayBenefitFromIndexForIn(left_in_operand, query_context))
return true;

++i;
@ -197,12 +189,12 @@ BlockInputStreams StorageMerge::read(
* This is necessary to correctly pass the recommended number of threads to each table.
*/
StorageListWithLocks selected_tables = getSelectedTables(
query_info.query, has_table_virtual_column, true, context.getCurrentQueryId());
query_info.query, has_table_virtual_column, context.getCurrentQueryId());

if (selected_tables.empty())
/// FIXME: do we support sampling in this case?
return createSourceStreams(
query_info, processed_stage, max_block_size, header, {}, {}, real_column_names, modified_context, 0, has_table_virtual_column);
query_info, processed_stage, max_block_size, header, {}, real_column_names, modified_context, 0, has_table_virtual_column);

size_t tables_count = selected_tables.size();
Float64 num_streams_multiplier = std::min(unsigned(tables_count), std::max(1U, unsigned(context.getSettingsRef().max_streams_multiplier_for_merge_tables)));
@ -214,7 +206,7 @@ BlockInputStreams StorageMerge::read(
{
for (auto it = selected_tables.begin(); it != selected_tables.end(); ++it)
{
auto current_info = query_info.order_by_optimizer->getInputOrder(it->first);
auto current_info = query_info.order_by_optimizer->getInputOrder(std::get<0>(*it));
if (it == selected_tables.begin())
input_sorting_info = current_info;
else if (!current_info || (input_sorting_info && *current_info != *input_sorting_info))
@ -234,8 +226,7 @@ BlockInputStreams StorageMerge::read(
remaining_streams -= current_streams;
current_streams = std::max(size_t(1), current_streams);

StoragePtr storage = it->first;
TableStructureReadLockHolder struct_lock = it->second;
auto & storage = std::get<0>(*it);

/// If sampling requested, then check that table supports it.
if (query_info.query->as<ASTSelectQuery>()->sample_size() && !storage->supportsSampling())
@ -246,8 +237,8 @@ BlockInputStreams StorageMerge::read(
if (current_streams)
{
source_streams = createSourceStreams(
query_info, processed_stage, max_block_size, header, storage,
struct_lock, real_column_names, modified_context, current_streams, has_table_virtual_column);
query_info, processed_stage, max_block_size, header, *it, real_column_names, modified_context,
current_streams, has_table_virtual_column);
}
else
{
@ -255,7 +246,7 @@ BlockInputStreams StorageMerge::read(
header, [=, this]() mutable -> BlockInputStreamPtr
{
BlockInputStreams streams = createSourceStreams(query_info, processed_stage, max_block_size,
header, storage, struct_lock, real_column_names,
header, *it, real_column_names,
modified_context, current_streams, has_table_virtual_column, true);

if (!streams.empty() && streams.size() != 1)
@ -276,15 +267,16 @@ BlockInputStreams StorageMerge::read(
}

BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
const UInt64 max_block_size, const Block & header, const StoragePtr & storage,
const TableStructureReadLockHolder & struct_lock, Names & real_column_names,
const UInt64 max_block_size, const Block & header, const StorageWithLockAndName & storage_with_lock,
Names & real_column_names,
Context & modified_context, size_t streams_num, bool has_table_virtual_column,
bool concat_streams)
{
auto & [storage, struct_lock, table_name] = storage_with_lock;
SelectQueryInfo modified_query_info = query_info;
modified_query_info.query = query_info.query->clone();

VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", storage ? storage->getTableName() : "");
VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", table_name);

if (!storage)
return BlockInputStreams{
@ -296,7 +288,7 @@ BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & quer
if (processed_stage <= storage->getQueryProcessingStage(modified_context))
{
/// If there are only virtual columns in query, you must request at least one other column.
if (real_column_names.size() ==0)
if (real_column_names.empty())
real_column_names.push_back(ExpressionActions::getSmallestColumn(storage->getColumns().getAllPhysical()));

source_streams = storage->read(real_column_names, modified_query_info, modified_context, processed_stage, max_block_size,
@ -304,7 +296,7 @@
}
else if (processed_stage > storage->getQueryProcessingStage(modified_context))
{
modified_query_info.query->as<ASTSelectQuery>()->replaceDatabaseAndTable(source_database, storage->getTableName());
modified_query_info.query->as<ASTSelectQuery>()->replaceDatabaseAndTable(source_database, table_name);

/// Maximum permissible parallelism is streams_num
modified_context.getSettingsRef().max_threads = UInt64(streams_num);
@ -335,7 +327,7 @@
{
if (has_table_virtual_column)
source_stream = std::make_shared<AddingConstColumnBlockInputStream<String>>(
source_stream, std::make_shared<DataTypeString>(), storage->getTableName(), "_table");
source_stream, std::make_shared<DataTypeString>(), table_name, "_table");

/// Subordinary tables could have different but convertible types, like numeric types of different width.
/// We must return streams with structure equals to structure of Merge table.
@ -358,7 +350,7 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String
{
auto & table = iterator->table();
if (table.get() != this)
selected_tables.emplace_back(table, table->lockStructureForShare(false, query_id));
selected_tables.emplace_back(table, table->lockStructureForShare(false, query_id), iterator->name());

iterator->next();
}
@ -367,7 +359,7 @@
}

StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, bool get_lock, const String & query_id) const
StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const
{
StorageListWithLocks selected_tables;
DatabaseTablesIteratorPtr iterator = getDatabaseIterator(global_context);
@ -383,8 +375,8 @@

if (storage.get() != this)
{
selected_tables.emplace_back(storage, get_lock ? storage->lockStructureForShare(false, query_id) : TableStructureReadLockHolder{});
virtual_column->insert(storage->getTableName());
selected_tables.emplace_back(storage, storage->lockStructureForShare(false, query_id), iterator->name());
virtual_column->insert(iterator->name());
}

iterator->next();
@ -397,7 +389,7 @@
auto values = VirtualColumnUtils::extractSingleValueFromBlock<String>(virtual_columns_block, "_table");

/// Remove unused tables from the list
selected_tables.remove_if([&] (const auto & elem) { return values.find(elem.first->getTableName()) == values.end(); });
selected_tables.remove_if([&] (const auto & elem) { return values.find(std::get<2>(elem)) == values.end(); });
}

return selected_tables;
@ -429,10 +421,11 @@ void StorageMerge::alter(
const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder)
{
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());
auto table_id = getStorageID();

StorageInMemoryMetadata storage_metadata = getInMemoryMetadata();
params.apply(storage_metadata);
context.getDatabase(database_name)->alterTable(context, table_name, storage_metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, storage_metadata);
setColumns(storage_metadata.columns);
}

@ -523,7 +516,7 @@ void registerStorageMerge(StorageFactory & factory)
String table_name_regexp = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();

return StorageMerge::create(
args.database_name, args.table_name, args.columns,
args.table_id, args.columns,
source_database, table_name_regexp, args.context);
});
}

@ -17,8 +17,6 @@ class StorageMerge : public ext::shared_ptr_helper<StorageMerge>, public IStorag
friend struct ext::shared_ptr_helper<StorageMerge>;
public:
std::string getName() const override { return "Merge"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

bool isRemote() const override;

@ -42,13 +40,6 @@ public:
size_t max_block_size,
unsigned num_streams) override;

void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override
{
table_name = new_table_name;
database_name = new_database_name;
}

void checkAlterIsPossible(const AlterCommands & commands, const Settings & /* settings */) override;

/// you need to add and remove columns in the sub-tables manually
@ -58,17 +49,16 @@ public:
bool mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, const Context & query_context) const override;

private:
String table_name;
String database_name;
String source_database;
OptimizedRegularExpression table_name_regexp;
Context global_context;

using StorageListWithLocks = std::list<std::pair<StoragePtr, TableStructureReadLockHolder>>;
using StorageWithLockAndName = std::tuple<StoragePtr, TableStructureReadLockHolder, String>;
using StorageListWithLocks = std::list<StorageWithLockAndName>;

StorageListWithLocks getSelectedTables(const String & query_id) const;

StorageMerge::StorageListWithLocks getSelectedTables(const ASTPtr & query, bool has_virtual_column, bool get_lock, const String & query_id) const;
StorageMerge::StorageListWithLocks getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const;

template <typename F>
StoragePtr getFirstTable(F && predicate) const;
@ -77,8 +67,7 @@ private:

protected:
StorageMerge(
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const String & source_database_,
const String & table_name_regexp_,
@ -88,8 +77,8 @@ protected:
const Context & context, QueryProcessingStage::Enum processed_stage);

BlockInputStreams createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
const UInt64 max_block_size, const Block & header, const StoragePtr & storage,
const TableStructureReadLockHolder & struct_lock, Names & real_column_names,
const UInt64 max_block_size, const Block & header, const StorageWithLockAndName & storage_with_lock,
Names & real_column_names,
Context & modified_context, size_t streams_num, bool has_table_virtual_column,
bool concat_streams = false);
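
The StorageWithLockAndName tuple above is why the .cpp hunks switched from it->first / it->second to std::get<0>(*it) and the structured binding auto & [storage, struct_lock, table_name]. A small stand-alone sketch of iterating the new list shape; the storage and lock types here are stand-ins, not the real ones:

#include <iostream>
#include <list>
#include <memory>
#include <string>
#include <tuple>

struct IStorageStub { };                          /// stand-in for StoragePtr's pointee
using StoragePtr = std::shared_ptr<IStorageStub>;
struct TableStructureReadLockHolder { };          /// stand-in for the real lock holder

using StorageWithLockAndName = std::tuple<StoragePtr, TableStructureReadLockHolder, std::string>;
using StorageListWithLocks = std::list<StorageWithLockAndName>;

int main()
{
    StorageListWithLocks selected_tables;
    selected_tables.emplace_back(std::make_shared<IStorageStub>(), TableStructureReadLockHolder{}, "t1");

    /// The table name now travels with the storage and its lock, so "_table"
    /// handling no longer needs storage->getTableName().
    for (const auto & [storage, struct_lock, table_name] : selected_tables)
    {
        (void)struct_lock;
        if (storage)
            std::cout << table_name << '\n';
    }
}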

@ -53,8 +53,7 @@ namespace ActionLocks

StorageMergeTree::StorageMergeTree(
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,
@ -64,15 +63,14 @@ StorageMergeTree::StorageMergeTree(
std::unique_ptr<MergeTreeSettings> storage_settings_,
bool has_force_restore_data_flag)
: MergeTreeData(
database_name_,
table_name_,
table_id_,
relative_data_path_,
metadata,
context_,
date_column_name,
merging_params_,
std::move(storage_settings_),
false,
false, /// require_part_metadata
attach)
, reader(*this)
, writer(*this)
@ -150,12 +148,14 @@ BlockOutputStreamPtr StorageMergeTree::write(const ASTPtr & /*query*/, const Con

void StorageMergeTree::checkTableCanBeDropped() const
{
auto table_id = getStorageID();
const_cast<StorageMergeTree &>(*this).recalculateColumnSizes();
global_context.checkTableCanBeDropped(database_name, table_name, getTotalActiveSizeInBytes());
global_context.checkTableCanBeDropped(table_id.database_name, table_id.table_name, getTotalActiveSizeInBytes());
}

void StorageMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition)
{
auto table_id = getStorageID();
const_cast<StorageMergeTree &>(*this).recalculateColumnSizes();

const String partition_id = getPartitionIDFromQuery(partition, global_context);
@ -167,7 +167,7 @@
{
partition_size += part->bytes_on_disk;
}
global_context.checkPartitionCanBeDropped(database_name, table_name, partition_size);
global_context.checkPartitionCanBeDropped(table_id.database_name, table_id.table_name, partition_size);
}

void StorageMergeTree::drop(TableStructureWriteLockHolder &)
@ -248,8 +248,7 @@ void StorageMergeTree::alter(
const Context & context,
TableStructureWriteLockHolder & table_lock_holder)
{
const String current_database_name = getDatabaseName();
const String current_table_name = getTableName();
auto table_id = getStorageID();

lockNewDataStructureExclusively(table_lock_holder, context.getCurrentQueryId());

@ -273,7 +272,7 @@ void StorageMergeTree::alter(
{
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());

context.getDatabase(current_database_name)->alterTable(context, current_table_name, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);

update_metadata();
}
@ -290,7 +289,7 @@ void StorageMergeTree::alter(

lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());

context.getDatabase(current_database_name)->alterTable(context, current_table_name, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);

update_metadata();

@ -630,7 +629,8 @@ bool StorageMergeTree::merge(
merging_tagger.emplace(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part.parts), *this, false);
}

MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(database_name, table_name, future_part);
auto table_id = getStorageID();
MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(table_id.database_name, table_id.table_name, future_part);

/// Logging
Stopwatch stopwatch;
@ -754,7 +754,8 @@ bool StorageMergeTree::tryMutatePart()
if (!tagger)
return false;

MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(database_name, table_name, future_part);
auto table_id = getStorageID();
MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(table_id.database_name, table_id.table_name, future_part);

Stopwatch stopwatch;
MutableDataPartPtr new_part;
@ -1219,12 +1220,13 @@

auto dest_table_storage = std::dynamic_pointer_cast<StorageMergeTree>(dest_table);
if (!dest_table_storage)
throw Exception("Table " + this->getTableName() + " supports attachPartitionFrom only for MergeTree family of table engines."
throw Exception("Table " + getStorageID().getNameForLogs() + " supports attachPartitionFrom only for MergeTree family of table engines."
" Got " + dest_table->getName(), ErrorCodes::NOT_IMPLEMENTED);
if (dest_table_storage->getStoragePolicy() != this->getStoragePolicy())
throw Exception("Destination table " + dest_table_storage->getTableName() + " should have the same storage policy of source table " + this->getTableName() + ". " +
this->getTableName() + ": " + this->getStoragePolicy()->getName() +
", " + dest_table_storage->getTableName() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);
throw Exception("Destination table " + dest_table_storage->getStorageID().getNameForLogs() +
" should have the same storage policy of source table " + getStorageID().getNameForLogs() + ". " +
getStorageID().getNameForLogs() + ": " + this->getStoragePolicy()->getName() + ", " +
dest_table_storage->getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);
Stopwatch watch;

MergeTreeData & src_data = dest_table_storage->checkStructureAndGetMergeTreeData(this);
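
A pattern that repeats through this file and the replicated variant below: each operation snapshots the id once with auto table_id = getStorageID(); and then reads table_id.database_name / table_id.table_name, instead of touching member strings that a concurrent RENAME could change mid-operation. Schematically, with stand-in types (the names below are illustrative, not PR API):

#include <string>

struct StorageIDSnap { std::string database_name, table_name; };

struct StorageStub
{
    std::string db = "default", table = "events";
    StorageIDSnap getStorageID() const { return {db, table}; }  /// one consistent snapshot
};

/// One snapshot per operation keeps the {database, table} pair internally
/// consistent even if the storage is renamed while the operation runs.
void checkDropSketch(const StorageStub & storage)
{
    auto table_id = storage.getStorageID();
    /// ... use table_id.database_name and table_id.table_name together ...
    (void)table_id;
}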

@ -32,8 +32,6 @@ public:
~StorageMergeTree() override;

std::string getName() const override { return merging_params.getModeName() + "MergeTree"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

bool supportsIndexForIn() const override { return true; }

@ -159,8 +157,7 @@ protected:
* See MergeTreeData constructor for comments on parameters.
*/
StorageMergeTree(
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
bool attach,

@ -37,8 +37,7 @@ static String backQuoteMySQL(const String & x)
}

StorageMySQL::StorageMySQL(
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
mysqlxx::Pool && pool_,
const std::string & remote_database_name_,
const std::string & remote_table_name_,
@ -47,8 +46,7 @@ StorageMySQL::StorageMySQL(
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const Context & context_)
: table_name(table_name_)
, database_name(database_name_)
: IStorage(table_id_)
, remote_database_name(remote_database_name_)
, remote_table_name(remote_table_name_)
, replace_query{replace_query_}
@ -235,8 +233,7 @@ void registerStorageMySQL(StorageFactory & factory)
ErrorCodes::BAD_ARGUMENTS);

return StorageMySQL::create(
args.database_name,
args.table_name,
args.table_id,
std::move(pool),
remote_database,
remote_table,

@ -21,8 +21,7 @@ class StorageMySQL : public ext::shared_ptr_helper<StorageMySQL>, public IStorag
friend struct ext::shared_ptr_helper<StorageMySQL>;
public:
StorageMySQL(
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
mysqlxx::Pool && pool_,
const std::string & remote_database_name_,
const std::string & remote_table_name_,
@ -33,8 +32,6 @@ public:
const Context & context_);

std::string getName() const override { return "MySQL"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,
@ -48,8 +45,6 @@ public:

private:
friend class StorageMySQLBlockOutputStream;
std::string table_name;
std::string database_name;

std::string remote_database_name;
std::string remote_table_name;

@ -26,7 +26,7 @@ void registerStorageNull(StorageFactory & factory)
"Engine " + args.engine_name + " doesn't support any arguments (" + toString(args.engine_args.size()) + " given)",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageNull::create(args.database_name, args.table_name, args.columns, args.constraints);
return StorageNull::create(args.table_id, args.columns, args.constraints);
});
}

@ -47,13 +47,11 @@ void StorageNull::alter(
const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder)
{
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());

const String current_database_name = getDatabaseName();
const String current_table_name = getTableName();
auto table_id = getStorageID();

StorageInMemoryMetadata metadata = getInMemoryMetadata();
params.apply(metadata);
context.getDatabase(current_database_name)->alterTable(context, current_table_name, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
setColumns(std::move(metadata.columns));
}

@ -19,8 +19,6 @@ class StorageNull : public ext::shared_ptr_helper<StorageNull>, public IStorage
friend struct ext::shared_ptr_helper<StorageNull>;
public:
std::string getName() const override { return "Null"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,
@ -38,23 +36,15 @@ public:
return std::make_shared<NullBlockOutputStream>(getSampleBlock());
}

void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override
{
table_name = new_table_name;
database_name = new_database_name;
}

void checkAlterIsPossible(const AlterCommands & commands, const Settings & /* settings */) override;

void alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) override;

private:
String table_name;
String database_name;

protected:
StorageNull(String database_name_, String table_name_, ColumnsDescription columns_description_, ConstraintsDescription constraints_)
: table_name(std::move(table_name_)), database_name(std::move(database_name_))
StorageNull(const StorageID & table_id_, ColumnsDescription columns_description_, ConstraintsDescription constraints_)
: IStorage(table_id_)
{
setColumns(std::move(columns_description_));
setConstraints(std::move(constraints_));

@ -192,8 +192,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
const String & zookeeper_path_,
const String & replica_name_,
bool attach,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,
@ -201,15 +200,27 @@
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag)
: MergeTreeData(database_name_, table_name_, relative_data_path_, metadata,
context_, date_column_name, merging_params_, std::move(settings_), true, attach,
[this] (const std::string & name) { enqueuePartForCheck(name); }),

zookeeper_path(global_context.getMacros()->expand(zookeeper_path_, database_name_, table_name_)),
replica_name(global_context.getMacros()->expand(replica_name_, database_name_, table_name_)),
reader(*this), writer(*this), merger_mutator(*this, global_context.getBackgroundPool().getNumberOfThreads()),
queue(*this), fetcher(*this), cleanup_thread(*this), alter_thread(*this),
part_check_thread(*this), restarting_thread(*this)
: MergeTreeData(table_id_,
relative_data_path_,
metadata,
context_,
date_column_name,
merging_params_,
std::move(settings_),
true, /// require_part_metadata
attach,
[this] (const std::string & name) { enqueuePartForCheck(name); })
, zookeeper_path(global_context.getMacros()->expand(zookeeper_path_, table_id_.database_name, table_id_.table_name))
, replica_name(global_context.getMacros()->expand(replica_name_, table_id_.database_name, table_id_.table_name))
, reader(*this)
, writer(*this)
, merger_mutator(*this, global_context.getBackgroundPool().getNumberOfThreads())
, queue(*this)
, fetcher(*this)
, cleanup_thread(*this)
, alter_thread(*this)
, part_check_thread(*this)
, restarting_thread(*this)
{
if (!zookeeper_path.empty() && zookeeper_path.back() == '/')
zookeeper_path.resize(zookeeper_path.size() - 1);
@ -218,15 +229,15 @@
zookeeper_path = "/" + zookeeper_path;
replica_path = zookeeper_path + "/replicas/" + replica_name;

queue_updating_task = global_context.getSchedulePool().createTask(database_name + "." + table_name + " (StorageReplicatedMergeTree::queueUpdatingTask)", [this]{ queueUpdatingTask(); });
queue_updating_task = global_context.getSchedulePool().createTask(getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::queueUpdatingTask)", [this]{ queueUpdatingTask(); });

mutations_updating_task = global_context.getSchedulePool().createTask(database_name + "." + table_name + " (StorageReplicatedMergeTree::mutationsUpdatingTask)", [this]{ mutationsUpdatingTask(); });
mutations_updating_task = global_context.getSchedulePool().createTask(getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsUpdatingTask)", [this]{ mutationsUpdatingTask(); });

merge_selecting_task = global_context.getSchedulePool().createTask(database_name + "." + table_name + " (StorageReplicatedMergeTree::mergeSelectingTask)", [this] { mergeSelectingTask(); });
merge_selecting_task = global_context.getSchedulePool().createTask(getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mergeSelectingTask)", [this] { mergeSelectingTask(); });
/// Will be activated if we win leader election.
merge_selecting_task->deactivate();

mutations_finalizing_task = global_context.getSchedulePool().createTask(database_name + "." + table_name + " (StorageReplicatedMergeTree::mutationsFinalizingTask)", [this] { mutationsFinalizingTask(); });
mutations_finalizing_task = global_context.getSchedulePool().createTask(getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsFinalizingTask)", [this] { mutationsFinalizingTask(); });

if (global_context.hasZooKeeper())
current_zookeeper = global_context.getZooKeeper();
@ -529,7 +540,8 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column
}
}

global_context.getDatabase(database_name)->alterTable(global_context, table_name, metadata);
auto table_id = getStorageID();
global_context.getDatabase(table_id.database_name)->alterTable(global_context, table_id.table_name, metadata);

/// Even if the primary/sorting keys didn't change we must reinitialize it
/// because primary key column types might have changed.
@ -687,7 +699,7 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks)
if (insane && !skip_sanity_checks)
{
std::stringstream why;
why << "The local set of parts of table " << database_name << "." << table_name << " doesn't look like the set of parts "
why << "The local set of parts of table " << getStorageID().getNameForLogs() << " doesn't look like the set of parts "
<< "in ZooKeeper: "
<< formatReadableQuantity(unexpected_parts_rows) << " rows of " << formatReadableQuantity(total_rows_on_filesystem)
<< " total rows in filesystem are suspicious.";
@ -1072,7 +1084,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)
}
future_merged_part.updatePath(*this, reserved_space);

MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(database_name, table_name, future_merged_part);
auto table_id = getStorageID();
MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(table_id.database_name, table_id.table_name, future_merged_part);

Transaction transaction(*this);
MutableDataPartPtr part;
@ -1208,8 +1221,9 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM
future_mutated_part.name = entry.new_part_name;
future_mutated_part.updatePath(*this, reserved_space);

auto table_id = getStorageID();
MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(
database_name, table_name, future_mutated_part);
table_id.database_name, table_id.table_name, future_mutated_part);

Stopwatch stopwatch;

@ -2914,7 +2928,7 @@ void StorageReplicatedMergeTree::startup()

queue.initialize(
zookeeper_path, replica_path,
database_name + "." + table_name + " (ReplicatedMergeTreeQueue)",
getStorageID().getFullTableName() + " (ReplicatedMergeTreeQueue)",
getDataParts());

StoragePtr ptr = shared_from_this();
@ -3198,8 +3212,7 @@ void StorageReplicatedMergeTree::alter(

LOG_DEBUG(log, "Doing ALTER");

const String current_database_name = getDatabaseName();
const String current_table_name = getTableName();
auto table_id = getStorageID();

/// We cannot check this alter commands with method isModifyingData()
/// because ReplicatedMergeTree stores both columns and metadata for
@ -3215,7 +3228,7 @@ void StorageReplicatedMergeTree::alter(

changeSettings(metadata.settings_ast, table_lock_holder);

global_context.getDatabase(current_database_name)->alterTable(query_context, current_table_name, metadata);
global_context.getDatabase(table_id.database_name)->alterTable(query_context, table_id.table_name, metadata);
return;
}

@ -3293,7 +3306,7 @@ void StorageReplicatedMergeTree::alter(
auto old_metadata = getInMemoryMetadata();
old_metadata.settings_ast = metadata.settings_ast;
changeSettings(metadata.settings_ast, table_lock_holder);
global_context.getDatabase(current_database_name)->alterTable(query_context, current_table_name, old_metadata);
global_context.getDatabase(table_id.database_name)->alterTable(query_context, table_id.table_name, old_metadata);

/// Modify shared metadata nodes in ZooKeeper.
Coordination::Requests ops;
@ -3761,7 +3774,8 @@ void StorageReplicatedMergeTree::checkTableCanBeDropped() const
{
/// Consider only synchronized data
const_cast<StorageReplicatedMergeTree &>(*this).recalculateColumnSizes();
global_context.checkTableCanBeDropped(database_name, table_name, getTotalActiveSizeInBytes());
auto table_id = getStorageID();
global_context.checkTableCanBeDropped(table_id.database_name, table_id.table_name, getTotalActiveSizeInBytes());
}

@ -3777,7 +3791,8 @@ void StorageReplicatedMergeTree::checkPartitionCanBeDropped(const ASTPtr & parti
for (const auto & part : parts_to_remove)
partition_size += part->bytes_on_disk;

global_context.checkPartitionCanBeDropped(database_name, table_name, partition_size);
auto table_id = getStorageID();
global_context.checkPartitionCanBeDropped(table_id.database_name, table_id.table_name, partition_size);
}

@ -4991,6 +5006,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_

ReplicatedMergeTreeLogEntryData entry;
{
auto src_table_id = src_data.getStorageID();
entry.type = ReplicatedMergeTreeLogEntryData::REPLACE_RANGE;
entry.source_replica = replica_name;
entry.create_time = time(nullptr);
@ -4998,8 +5014,8 @@

auto & entry_replace = *entry.replace_range_entry;
entry_replace.drop_range_part_name = drop_range_fake_part_name;
entry_replace.from_database = src_data.database_name;
entry_replace.from_table = src_data.table_name;
entry_replace.from_database = src_table_id.database_name;
entry_replace.from_table = src_table_id.table_name;
for (const auto & part : src_parts)
entry_replace.src_part_names.emplace_back(part->name);
for (const auto & part : dst_parts)
@ -5092,15 +5108,17 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta

auto dest_table_storage = std::dynamic_pointer_cast<StorageReplicatedMergeTree>(dest_table);
if (!dest_table_storage)
throw Exception("Table " + this->getTableName() + " supports attachPartitionFrom only for ReplicatedMergeTree family of table engines."
throw Exception("Table " + getStorageID().getNameForLogs() + " supports attachPartitionFrom only for ReplicatedMergeTree family of table engines."
" Got " + dest_table->getName(), ErrorCodes::NOT_IMPLEMENTED);
if (dest_table_storage->getStoragePolicy() != this->getStoragePolicy())
throw Exception("Destination table " + dest_table_storage->getTableName() + " should have the same storage policy of source table " + this->getTableName() + ". " +
this->getTableName() + ": " + this->getStoragePolicy()->getName() +
", " + dest_table_storage->getTableName() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);
throw Exception("Destination table " + dest_table_storage->getStorageID().getNameForLogs() +
" should have the same storage policy of source table " + getStorageID().getNameForLogs() + ". " +
getStorageID().getNameForLogs() + ": " + this->getStoragePolicy()->getName() + ", " +
getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);

Stopwatch watch;
MergeTreeData & src_data = dest_table_storage->checkStructureAndGetMergeTreeData(this);
auto src_data_id = src_data.getStorageID();
String partition_id = getPartitionIDFromQuery(partition, context);

DataPartsVector src_all_parts = src_data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
@ -5183,8 +5201,8 @@

auto & entry_replace = *entry.replace_range_entry;
entry_replace.drop_range_part_name = getPartNamePossiblyFake(format_version, drop_range_dest);
entry_replace.from_database = src_data.database_name;
entry_replace.from_table = src_data.table_name;
entry_replace.from_database = src_data_id.database_name;
entry_replace.from_table = src_data_id.table_name;
for (const auto & part : src_parts)
entry_replace.src_part_names.emplace_back(part->name);
for (const auto & part : dst_parts)
@ -5381,13 +5399,14 @@ void StorageReplicatedMergeTree::updatePartHeaderInZooKeeperAndCommit(
ReplicatedMergeTreeAddress StorageReplicatedMergeTree::getReplicatedMergeTreeAddress() const
{
auto host_port = global_context.getInterserverIOAddress();
auto table_id = getStorageID();
ReplicatedMergeTreeAddress res;
|
||||
res.host = host_port.first;
|
||||
res.replication_port = host_port.second;
|
||||
res.queries_port = global_context.getTCPPort();
|
||||
res.database = database_name;
|
||||
res.table = table_name;
|
||||
res.database = table_id.database_name;
|
||||
res.table = table_id.table_name;
|
||||
res.scheme = global_context.getInterserverScheme();
|
||||
return res;
|
||||
}
|
||||
|
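Note on the read side of this change: call sites that previously read the class's own database_name / table_name members (or getDatabaseName() / getTableName()) now take one getStorageID() snapshot and read both names from it. A minimal C++ sketch of a StorageID consistent with the usages above — field and method names are taken from the calls in this diff, while the uuid member is assumed from the PR title ("optional UUID") rather than shown in these hunks:

#include <string>

struct StorageID
{
    std::string database_name;
    std::string table_name;
    std::string uuid;   /// assumption: empty when no UUID is assigned

    StorageID(const std::string & database, const std::string & table)
        : database_name(database), table_name(table) {}

    /// Used above for the ReplicatedMergeTreeQueue log name.
    std::string getFullTableName() const { return database_name + "." + table_name; }

    /// Used above in exception messages; the UUID suffix is an assumption.
    std::string getNameForLogs() const
    {
        return getFullTableName() + (uuid.empty() ? "" : " (" + uuid + ")");
    }
};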
@ -83,8 +83,6 @@ public:
~StorageReplicatedMergeTree() override;

std::string getName() const override { return "Replicated" + merging_params.getModeName() + "MergeTree"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

bool supportsReplication() const override { return true; }
bool supportsDeduplication() const override { return true; }

@ -544,7 +542,7 @@ protected:
const String & zookeeper_path_,
const String & replica_name_,
bool attach,
const String & database_name_, const String & name_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,
@ -133,23 +133,21 @@ namespace
}

StorageS3::StorageS3(const S3::URI & uri_,
StorageS3::StorageS3(
const S3::URI & uri_,
const String & access_key_id_,
const String & secret_access_key_,
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const String & format_name_,
UInt64 min_upload_part_size_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
Context & context_,
const String & compression_method_ = "")
: IStorage(columns_)
: IStorage(table_id_, columns_)
, uri(uri_)
, context_global(context_)
, format_name(format_name_)
, database_name(database_name_)
, table_name(table_name_)
, min_upload_part_size(min_upload_part_size_)
, compression_method(compression_method_)
, client(S3::ClientFactory::instance().create(uri_.endpoint, access_key_id_, secret_access_key_))

@ -185,12 +183,6 @@ BlockInputStreams StorageS3::read(
return {std::make_shared<AddingDefaultsBlockInputStream>(block_input, column_defaults, context)};
}

void StorageS3::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
table_name = new_table_name;
database_name = new_database_name;
}

BlockOutputStreamPtr StorageS3::write(const ASTPtr & /*query*/, const Context & /*context*/)
{
return std::make_shared<StorageS3BlockOutputStream>(

@ -234,7 +226,7 @@ void registerStorageS3(StorageFactory & factory)
else
compression_method = "auto";

return StorageS3::create(s3_uri, access_key_id, secret_access_key, args.database_name, args.table_name, format_name, min_upload_part_size, args.columns, args.constraints, args.context);
return StorageS3::create(s3_uri, access_key_id, secret_access_key, args.table_id, format_name, min_upload_part_size, args.columns, args.constraints, args.context);
});
}
@ -28,8 +28,7 @@ public:
StorageS3(const S3::URI & uri,
const String & access_key_id,
const String & secret_access_key,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const String & format_name_,
UInt64 min_upload_part_size_,
const ColumnsDescription & columns_,

@ -47,11 +46,6 @@ public:
return getSampleBlock();
}

String getTableName() const override
{
return table_name;
}

BlockInputStreams read(
const Names & column_names,
const SelectQueryInfo & query_info,

@ -62,15 +56,11 @@ public:

BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override;

void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;

private:
S3::URI uri;
const Context & context_global;

String format_name;
String database_name;
String table_name;
UInt64 min_upload_part_size;
String compression_method;
std::shared_ptr<Aws::S3::S3Client> client;
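The constructor side of the refactoring, as the StorageS3 hunks show it: the database_name_ / table_name_ parameter pair and the matching members disappear, a single const StorageID & table_id_ is threaded through, and the name is handed to the IStorage base (which is also what lets the boilerplate rename() override be deleted). A self-contained before/after sketch — the class names StorageBefore/StorageAfter are illustrative, not from the diff:

#include <string>

struct StorageID { std::string database_name, table_name; };
struct ColumnsDescription {};

/// Before (sketch): each storage carried its own copies of the names.
class StorageBefore
{
    std::string database_name;
    std::string table_name;
public:
    StorageBefore(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription &)
        : database_name(database_name_), table_name(table_name_) {}
};

/// After (sketch): the name travels as one StorageID owned by the base class,
/// so the derived storage keeps no name members at all.
class IStorage
{
    StorageID storage_id;
public:
    explicit IStorage(const StorageID & id) : storage_id(id) {}
    StorageID getStorageID() const { return storage_id; }
};

class StorageAfter : public IStorage
{
public:
    StorageAfter(const StorageID & table_id_, const ColumnsDescription &) : IStorage(table_id_) {}
};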
@ -89,12 +89,11 @@ BlockOutputStreamPtr StorageSetOrJoinBase::write(const ASTPtr & /*query*/, const

StorageSetOrJoinBase::StorageSetOrJoinBase(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const Context & context_)
: table_name(table_name_), database_name(database_name_)
: IStorage(table_id_)
{
setColumns(columns_);
setConstraints(constraints_);

@ -110,12 +109,11 @@ StorageSetOrJoinBase::StorageSetOrJoinBase(

StorageSet::StorageSet(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const Context & context_)
: StorageSetOrJoinBase{relative_path_, database_name_, table_name_, columns_, constraints_, context_},
: StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_},
set(std::make_shared<Set>(SizeLimits(), false))
{
Block header = getSampleBlock();

@ -209,8 +207,7 @@ void StorageSetOrJoinBase::rename(
Poco::File(path).renameTo(new_path);

path = new_path;
table_name = new_table_name;
database_name = new_database_name;
renameInMemory(new_database_name, new_table_name);
}

@ -223,7 +220,7 @@ void registerStorageSet(StorageFactory & factory)
"Engine " + args.engine_name + " doesn't support any arguments (" + toString(args.engine_args.size()) + " given)",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageSet::create(args.relative_data_path, args.database_name, args.table_name, args.columns, args.constraints, args.context);
return StorageSet::create(args.relative_data_path, args.table_id, args.columns, args.constraints, args.context);
});
}
@ -19,9 +19,6 @@ class StorageSetOrJoinBase : public IStorage
friend class SetOrJoinBlockOutputStream;

public:
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

void rename(const String & new_path_to_table_data, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;

BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override;

@ -31,16 +28,13 @@ public:
protected:
StorageSetOrJoinBase(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const Context & context_);

String base_path;
String path;
String table_name;
String database_name;

std::atomic<UInt64> increment = 0; /// For the backup file names.

@ -85,8 +79,7 @@ private:
protected:
StorageSet(
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const Context & context_);
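rename() overrides for disk-backed storages (StorageSetOrJoinBase above, StorageStripeLog and StorageTinyLog below) now do only the on-disk move and then call renameInMemory(), which is what keeps the base-class StorageID current. The body of renameInMemory() is not part of this diff; a sketch under that assumption:

#include <string>

struct StorageID { std::string database_name, table_name; };

/// Sketch under assumptions: the base class owns the StorageID and
/// renameInMemory() only rewrites its name fields; the real implementation
/// may additionally take locks or notify dependent objects.
class IStorageSketch
{
    StorageID storage_id;
public:
    void renameInMemory(const std::string & new_database_name, const std::string & new_table_name)
    {
        storage_id.database_name = new_database_name;
        storage_id.table_name = new_table_name;
    }
};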
@ -197,16 +197,17 @@ private:
StorageStripeLog::StorageStripeLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,
size_t max_compress_block_size_)
: disk(std::move(disk_)), table_path(relative_path_), database_name(database_name_), table_name(table_name_),
max_compress_block_size(max_compress_block_size_),
file_checker(disk, table_path + "sizes.json"),
log(&Logger::get("StorageStripeLog"))
: IStorage(table_id_)
, disk(std::move(disk_))
, table_path(relative_path_)
, max_compress_block_size(max_compress_block_size_)
, file_checker(disk, table_path + "sizes.json")
, log(&Logger::get("StorageStripeLog"))
{
setColumns(columns_);
setConstraints(constraints_);

@ -229,9 +230,8 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Strin
disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
database_name = new_database_name;
table_name = new_table_name;
file_checker.setPath(table_path + "sizes.json");
renameInMemory(new_database_name, new_table_name);
}

@ -295,9 +295,6 @@ CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Conte

void StorageStripeLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &)
{
if (table_name.empty())
throw Exception("Logical error: table name is empty", ErrorCodes::LOGICAL_ERROR);

std::shared_lock<std::shared_mutex> lock(rwlock);

disk->clearDirectory(table_path);

@ -316,7 +313,7 @@ void registerStorageStripeLog(StorageFactory & factory)
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageStripeLog::create(
args.context.getDefaultDisk(), args.relative_data_path, args.database_name, args.table_name, args.columns, args.constraints,
args.context.getDefaultDisk(), args.relative_data_path, args.table_id, args.columns, args.constraints,
args.attach, args.context.getSettings().max_compress_block_size);
});
}
@ -24,8 +24,6 @@ class StorageStripeLog : public ext::shared_ptr_helper<StorageStripeLog>, public

public:
String getName() const override { return "StripeLog"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,

@ -53,8 +51,7 @@ protected:
StorageStripeLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,

@ -69,8 +66,6 @@ private:

DiskPtr disk;
String table_path;
String database_name;
String table_name;

size_t max_compress_block_size;
@ -317,16 +317,17 @@ void TinyLogBlockOutputStream::write(const Block & block)
StorageTinyLog::StorageTinyLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,
size_t max_compress_block_size_)
: disk(std::move(disk_)), table_path(relative_path_), database_name(database_name_), table_name(table_name_),
max_compress_block_size(max_compress_block_size_),
file_checker(disk, table_path + "sizes.json"),
log(&Logger::get("StorageTinyLog"))
: IStorage(table_id_)
, disk(std::move(disk_))
, table_path(relative_path_)
, max_compress_block_size(max_compress_block_size_)
, file_checker(disk, table_path + "sizes.json")
, log(&Logger::get("StorageTinyLog"))
{
setColumns(columns_);
setConstraints(constraints_);

@ -374,12 +375,11 @@ void StorageTinyLog::rename(const String & new_path_to_table_data, const String
disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
database_name = new_database_name;
table_name = new_table_name;
file_checker.setPath(table_path + "sizes.json");

for (auto & file : files)
file.second.data_file_path = table_path + fileName(file.second.data_file_path);
renameInMemory(new_database_name, new_table_name);
}

@ -414,9 +414,6 @@ CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context

void StorageTinyLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &)
{
if (table_name.empty())
throw Exception("Logical error: table name is empty", ErrorCodes::LOGICAL_ERROR);

std::unique_lock<std::shared_mutex> lock(rwlock);

disk->clearDirectory(table_path);

@ -439,7 +436,7 @@ void registerStorageTinyLog(StorageFactory & factory)
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

return StorageTinyLog::create(
args.context.getDefaultDisk(), args.relative_data_path, args.database_name, args.table_name, args.columns, args.constraints,
args.context.getDefaultDisk(), args.relative_data_path, args.table_id, args.columns, args.constraints,
args.attach, args.context.getSettings().max_compress_block_size);
});
}
@ -23,8 +23,6 @@ class StorageTinyLog : public ext::shared_ptr_helper<StorageTinyLog>, public ISt

public:
String getName() const override { return "TinyLog"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,

@ -52,8 +50,7 @@ protected:
StorageTinyLog(
DiskPtr disk_,
const String & relative_path_,
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,

@ -68,8 +65,6 @@ private:

DiskPtr disk;
String table_path;
String database_name;
String table_name;

size_t max_compress_block_size;

@ -80,7 +75,6 @@ private:

Logger * log;

void addFile(const String & column_name, const IDataType & type, size_t level = 0);
void addFiles(const String & column_name, const IDataType & type);
};
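On the factory side, every registerStorageXxx() lambda now forwards args.table_id instead of the old args.database_name / args.table_name pair, which suggests StorageFactory's argument bundle gained a StorageID field. A sketch with a hypothetical stand-in for that struct, showing only the fields the registration lambdas in this diff actually touch:

#include <string>

struct StorageID { std::string database_name, table_name; };

/// Hypothetical stand-in for StorageFactory's argument bundle; the real struct
/// carries more fields (columns, constraints, context, engine args, ...).
struct FactoryArguments
{
    StorageID table_id;             /// replaces the database_name + table_name pair
    std::string relative_data_path;
    bool attach = false;
};

A registration lambda then reads args.table_id exactly as the StorageTinyLog hunk above does.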
@ -30,13 +30,16 @@ namespace ErrorCodes
IStorageURLBase::IStorageURLBase(
const Poco::URI & uri_,
const Context & context_,
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & compression_method_)
: uri(uri_), context_global(context_), compression_method(compression_method_), format_name(format_name_), table_name(table_name_), database_name(database_name_)
: IStorage(table_id_)
, uri(uri_)
, context_global(context_)
, compression_method(compression_method_)
, format_name(format_name_)
{
context_global.getRemoteHostFilter().checkURL(uri);
setColumns(columns_);

@ -207,12 +210,6 @@ BlockInputStreams IStorageURLBase::read(const Names & column_names,
return {std::make_shared<AddingDefaultsBlockInputStream>(block_input, column_defaults, context)};
}

void IStorageURLBase::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
table_name = new_table_name;
database_name = new_database_name;
}

BlockOutputStreamPtr IStorageURLBase::write(const ASTPtr & /*query*/, const Context & /*context*/)
{
return std::make_shared<StorageURLBlockOutputStream>(

@ -249,7 +246,7 @@ void registerStorageURL(StorageFactory & factory)

return StorageURL::create(
uri,
args.database_name, args.table_name,
args.table_id,
format_name,
args.columns, args.constraints, args.context,
compression_method);
@ -16,9 +16,6 @@ namespace DB
class IStorageURLBase : public IStorage
{
public:
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,
const SelectQueryInfo & query_info,

@ -29,14 +26,11 @@ public:

BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override;

void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;

protected:
IStorageURLBase(
const Poco::URI & uri_,
const Context & context_,
const std::string & database_name_,
const std::string & table_name_,
const StorageID & id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,

@ -48,8 +42,6 @@ protected:

private:
String format_name;
String table_name;
String database_name;

virtual std::string getReadMethod() const;

@ -77,14 +69,13 @@ class StorageURL : public ext::shared_ptr_helper<StorageURL>, public IStorageURL
public:
StorageURL(
const Poco::URI & uri_,
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const String & format_name_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
Context & context_,
const String & compression_method_)
: IStorageURLBase(uri_, context_, database_name_, table_name_, format_name_, columns_, constraints_, compression_method_)
: IStorageURLBase(uri_, context_, table_id_, format_name_, columns_, constraints_, compression_method_)
{
}
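For storages whose rename() did nothing but reassign the two name strings (StorageS3, IStorageURLBase, StorageView), the override is deleted outright, so the IStorage base presumably supplies a default that just updates the stored ID. That default is not visible in this diff; a sketch of the assumed shape, reusing the renameInMemory() idea from the earlier note:

#include <string>

struct StorageID { std::string database_name, table_name; };
struct TableStructureWriteLockHolder {};   /// stand-in for the real lock holder

class IStorageSketch
{
    StorageID storage_id;
public:
    virtual ~IStorageSketch() = default;

    void renameInMemory(const std::string & db, const std::string & table)
    {
        storage_id = {db, table};
    }

    /// Assumed default: name-only storages inherit this and need no override.
    virtual void rename(const std::string & /*new_path_to_table_data*/,
                        const std::string & new_database_name,
                        const std::string & new_table_name,
                        TableStructureWriteLockHolder &)
    {
        renameInMemory(new_database_name, new_table_name);
    }
};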
@ -7,8 +7,8 @@
namespace DB
{

StorageValues::StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_)
: database_name(database_name_), table_name(table_name_), res_block(res_block_)
StorageValues::StorageValues(const StorageID & table_id_, const ColumnsDescription & columns_, const Block & res_block_)
: IStorage(table_id_), res_block(res_block_)
{
setColumns(columns_);
}
@ -14,8 +14,6 @@ class StorageValues : public ext::shared_ptr_helper<StorageValues>, public IStor
friend struct ext::shared_ptr_helper<StorageValues>;
public:
std::string getName() const override { return "Values"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

BlockInputStreams read(
const Names & column_names,

@ -26,12 +24,10 @@ public:
unsigned num_streams) override;

private:
std::string database_name;
std::string table_name;
Block res_block;

protected:
StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_);
StorageValues(const StorageID & table_id_, const ColumnsDescription & columns_, const Block & res_block_);
};

}
@ -29,11 +29,10 @@ namespace ErrorCodes

StorageView::StorageView(
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ASTCreateQuery & query,
const ColumnsDescription & columns_)
: table_name(table_name_), database_name(database_name_)
: IStorage(table_id_)
{
setColumns(columns_);

@ -123,7 +122,7 @@ void registerStorageView(StorageFactory & factory)
if (args.query.storage)
throw Exception("Specifying ENGINE is not allowed for a View", ErrorCodes::INCORRECT_QUERY);

return StorageView::create(args.database_name, args.table_name, args.query, args.columns);
return StorageView::create(args.table_id, args.query, args.columns);
});
}
@ -15,8 +15,6 @@ class StorageView : public ext::shared_ptr_helper<StorageView>, public IStorage
friend struct ext::shared_ptr_helper<StorageView>;
public:
std::string getName() const override { return "View"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }

/// It is passed inside the query and solved at its level.
bool supportsSampling() const override { return true; }

@ -30,23 +28,14 @@ public:
size_t max_block_size,
unsigned num_streams) override;

void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override
{
table_name = new_table_name;
database_name = new_database_name;
}

private:
String table_name;
String database_name;
ASTPtr inner_query;

void replaceTableNameWithSubquery(ASTSelectQuery * select_query, ASTPtr & subquery);

protected:
StorageView(
const String & database_name_,
const String & table_name_,
const StorageID & table_id_,
const ASTCreateQuery & query,
const ColumnsDescription & columns_);
};
@ -24,15 +24,20 @@ namespace ErrorCodes

StorageXDBC::StorageXDBC(
const std::string & database_name_,
const std::string & table_name_,
const StorageID & table_id_,
const std::string & remote_database_name_,
const std::string & remote_table_name_,
const ColumnsDescription & columns_,
const Context & context_,
const BridgeHelperPtr bridge_helper_)
/// Please add support for constraints as soon as StorageODBC or JDBC will support insertion.
: IStorageURLBase(Poco::URI(), context_, database_name_, table_name_, IXDBCBridgeHelper::DEFAULT_FORMAT, columns_, ConstraintsDescription{}, "" /* CompressionMethod */)
: IStorageURLBase(Poco::URI(),
context_,
table_id_,
IXDBCBridgeHelper::DEFAULT_FORMAT,
columns_,
ConstraintsDescription{},
"" /* CompressionMethod */)
, bridge_helper(bridge_helper_)
, remote_database_name(remote_database_name_)
, remote_table_name(remote_table_name_)

@ -120,7 +125,7 @@ namespace
BridgeHelperPtr bridge_helper = std::make_shared<XDBCBridgeHelper<BridgeHelperMixin>>(args.context,
args.context.getSettingsRef().http_receive_timeout.value,
engine_args[0]->as<ASTLiteral &>().value.safeGet<String>());
return std::make_shared<StorageXDBC>(args.database_name, args.table_name,
return std::make_shared<StorageXDBC>(args.table_id,
engine_args[1]->as<ASTLiteral &>().value.safeGet<String>(),
engine_args[2]->as<ASTLiteral &>().value.safeGet<String>(),
args.columns,
@ -24,8 +24,7 @@ public:
unsigned num_streams) override;

StorageXDBC(const std::string & database_name_,
const std::string & table_name_,
StorageXDBC(const StorageID & table_id_,
const std::string & remote_database_name,
const std::string & remote_table_name,
const ColumnsDescription & columns_,
@ -20,14 +20,11 @@ protected:
virtual void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const = 0;

public:
IStorageSystemOneBlock(const String & name_) : name(name_)
IStorageSystemOneBlock(const String & name_) : IStorage({"system", name_})
{
setColumns(ColumnsDescription(Self::getNamesAndTypes()));
}

std::string getTableName() const override { return name; }
std::string getDatabaseName() const override { return "system"; }

BlockInputStreams read(const Names & column_names,
const SelectQueryInfo & query_info,
const Context & context,

@ -43,9 +40,6 @@ public:

return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(sample_block.cloneWithColumns(std::move(res_columns))));
}

private:
const String name;
};

}
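System tables get the shortest form of the change: instead of a name member plus getTableName()/getDatabaseName() overrides, the constructor brace-initializes the base with IStorage({"system", name_}), pinning every such table to the system database. A tiny usage sketch against a StorageID with the two fields this diff shows:

#include <cassert>
#include <string>

struct StorageID
{
    std::string database_name;
    std::string table_name;
    std::string getFullTableName() const { return database_name + "." + table_name; }
};

int main()
{
    /// The {"system", name_} initializer seen in the system-table constructors:
    StorageID id{"system", "columns"};
    assert(id.getFullTableName() == "system.columns");
}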
@ -22,7 +22,7 @@ namespace ErrorCodes
}

StorageSystemColumns::StorageSystemColumns(const std::string & name_)
: name(name_)
: IStorage({"system", name_})
{
setColumns(ColumnsDescription(
{

@ -16,8 +16,6 @@ class StorageSystemColumns : public ext::shared_ptr_helper<StorageSystemColumns>
friend struct ext::shared_ptr_helper<StorageSystemColumns>;
public:
std::string getName() const override { return "SystemColumns"; }
std::string getTableName() const override { return name; }
std::string getDatabaseName() const override { return "system"; }

BlockInputStreams read(
const Names & column_names,

@ -27,9 +25,6 @@ public:
size_t max_block_size,
unsigned num_streams) override;

private:
const std::string name;

protected:
StorageSystemColumns(const std::string & name_);
};
@ -24,11 +24,10 @@ class StorageSystemDetachedParts :
friend struct ext::shared_ptr_helper<StorageSystemDetachedParts>;
public:
std::string getName() const override { return "SystemDetachedParts"; }
std::string getTableName() const override { return "detached_parts"; }
std::string getDatabaseName() const override { return "system"; }

protected:
explicit StorageSystemDetachedParts()
: IStorage({"system", "detached_parts"})
{
setColumns(ColumnsDescription{{
{"database", std::make_shared<DataTypeString>()},
@ -10,7 +10,7 @@ namespace ErrorCodes

StorageSystemDisks::StorageSystemDisks(const std::string & name_)
: name(name_)
: IStorage({"system", name_})
{
setColumns(ColumnsDescription(
{
@ -19,8 +19,6 @@ class StorageSystemDisks : public ext::shared_ptr_helper<StorageSystemDisks>, pu
friend struct ext::shared_ptr_helper<StorageSystemDisks>;
public:
std::string getName() const override { return "SystemDisks"; }
std::string getTableName() const override { return name; }
std::string getDatabaseName() const override { return "system"; }

BlockInputStreams read(
const Names & column_names,

@ -30,9 +28,6 @@ public:
size_t max_block_size,
unsigned num_streams) override;

private:
const std::string name;

protected:
StorageSystemDisks(const std::string & name_);
};
@ -48,20 +48,21 @@ StorageSystemGraphite::Configs StorageSystemGraphite::getConfigs(const Context &
{
const String & config_name = table_data->merging_params.graphite_params.config_name;

auto table_id = table_data->getStorageID();
if (!graphite_configs.count(config_name))
{
Config new_config =
{
table_data->merging_params.graphite_params,
{ table_data->getDatabaseName() },
{ table_data->getTableName() },
{ table_id.database_name },
{ table_id.table_name },
};
graphite_configs.emplace(config_name, new_config);
}
else
{
graphite_configs[config_name].databases.emplace_back(table_data->getDatabaseName());
graphite_configs[config_name].tables.emplace_back(table_data->getTableName());
graphite_configs[config_name].databases.emplace_back(table_id.database_name);
graphite_configs[config_name].tables.emplace_back(table_id.table_name);
}
}
}
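The Graphite hunk also shows why bundling the names into one value pays off: getStorageID() is called once and the returned table_id feeds both the databases and tables vectors, so the pair comes from a single snapshot even if the table is renamed concurrently. An illustration (not ClickHouse code) of the torn-read hazard that one locked copy avoids:

#include <mutex>
#include <string>

/// Illustration only: two separate name getters could observe a concurrent
/// rename in between and pair an old database with a new table name;
/// returning one StorageID copy under the lock cannot tear.
struct StorageID { std::string database_name, table_name; };

class TableNameHolder
{
    mutable std::mutex mutex;
    StorageID id{"db_old", "t_old"};
public:
    StorageID getStorageID() const      /// one locked copy, fields always match
    {
        std::lock_guard lock(mutex);
        return id;
    }

    void renameInMemory(const std::string & db, const std::string & table)
    {
        std::lock_guard lock(mutex);
        id = {db, table};
    }
};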
@ -104,7 +104,7 @@ private:

StorageSystemNumbers::StorageSystemNumbers(const std::string & name_, bool multithreaded_, std::optional<UInt64> limit_, UInt64 offset_, bool even_distribution_)
: name(name_), multithreaded(multithreaded_), even_distribution(even_distribution_), limit(limit_), offset(offset_)
: IStorage({"system", name_}), multithreaded(multithreaded_), even_distribution(even_distribution_), limit(limit_), offset(offset_)
{
setColumns(ColumnsDescription({{"number", std::make_shared<DataTypeUInt64>()}}));
}
@ -28,8 +28,6 @@ class StorageSystemNumbers : public ext::shared_ptr_helper<StorageSystemNumbers>
friend struct ext::shared_ptr_helper<StorageSystemNumbers>;
public:
std::string getName() const override { return "SystemNumbers"; }
std::string getTableName() const override { return name; }
std::string getDatabaseName() const override { return "system"; }

BlockInputStreams read(
const Names & column_names,

@ -40,7 +38,6 @@ public:
unsigned num_streams) override;

private:
const std::string name;
bool multithreaded;
bool even_distribution;
std::optional<UInt64> limit;
@ -11,7 +11,7 @@ namespace DB

StorageSystemOne::StorageSystemOne(const std::string & name_)
: name(name_)
: IStorage({"system", name_})
{
setColumns(ColumnsDescription({{"dummy", std::make_shared<DataTypeUInt8>()}}));
}
@ -20,8 +20,6 @@ class StorageSystemOne : public ext::shared_ptr_helper<StorageSystemOne>, public
friend struct ext::shared_ptr_helper<StorageSystemOne>;
public:
std::string getName() const override { return "SystemOne"; }
std::string getTableName() const override { return name; }
std::string getDatabaseName() const override { return "system"; }

BlockInputStreams read(
const Names & column_names,

@ -31,9 +29,6 @@ public:
size_t max_block_size,
unsigned num_streams) override;

private:
const std::string name;

protected:
StorageSystemOne(const std::string & name_);
};
@ -257,7 +257,7 @@ bool StorageSystemPartsBase::hasColumn(const String & column_name) const
}

StorageSystemPartsBase::StorageSystemPartsBase(std::string name_, NamesAndTypesList && columns_)
: name(std::move(name_))
: IStorage({"system", name_})
{
ColumnsDescription tmp_columns(std::move(columns_));