#include <Storages/ColumnsDescription.h>
#include <Storages/System/StorageSystemPartsBase.h>
#include <Common/escapeForFileName.h>
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDate.h>
#include <DataStreams/OneBlockInputStream.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/VirtualColumnUtils.h>
#include <Access/ContextAccess.h>
#include <Databases/IDatabase.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTIdentifier.h>
#include <Processors/Sources/SourceFromSingleChunk.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int TABLE_IS_DROPPED;
}

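/// Detect whether the virtual '_state' column was requested, and verify that
/// every other requested name is a real column of this system table.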
bool StorageSystemPartsBase::hasStateColumn(const Names & column_names) const
{
    bool has_state_column = false;
    Names real_column_names;

    for (const String & column_name : column_names)
    {
        if (column_name == "_state")
            has_state_column = true;
        else
            real_column_names.emplace_back(column_name);
    }

    /// Do not check if only _state column is requested
    if (!(has_state_column && real_column_names.empty()))
        check(real_column_names);

    return has_state_column;
}

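/// Return the data parts of one storage together with their states. Inactive
/// (Outdated) parts are included only when the query asked for them.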
MergeTreeData::DataPartsVector
StoragesInfo::getParts(MergeTreeData::DataPartStateVector & state, bool has_state_column) const
{
    using State = MergeTreeData::DataPartState;
    if (need_inactive_parts)
    {
        /// If has_state_column is requested, return all states.
        if (!has_state_column)
            return data->getDataPartsVector({State::Committed, State::Outdated}, &state);

        return data->getAllDataPartsVector(&state);
    }

    return data->getDataPartsVector({State::Committed}, &state);
}

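/// Enumerate the (database, table) pairs that survive the query's WHERE clause,
/// so that only the relevant MergeTree tables are inspected later.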
StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, const Context & context)
    : query_id(context.getCurrentQueryId()), settings(context.getSettings())
{
    /// Will apply WHERE to a subset of columns and then add more columns.
    /// This is kind of complicated, but we use WHERE to do less work.
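    /// For example, for a query such as
    ///     SELECT * FROM system.parts WHERE database = 'default'
    /// the 'database' predicate can already be applied here, before a single
    /// table has been enumerated.
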
    Block block_to_filter;

    MutableColumnPtr table_column_mut = ColumnString::create();
    MutableColumnPtr engine_column_mut = ColumnString::create();
    MutableColumnPtr active_column_mut = ColumnUInt8::create();

    const auto access = context.getAccess();
    const bool check_access_for_tables = !access->isGranted(AccessType::SHOW_TABLES);

    {
        Databases databases = DatabaseCatalog::instance().getDatabases();

        /// Add column 'database'.
        MutableColumnPtr database_column_mut = ColumnString::create();
        for (const auto & database : databases)
        {
            /// A Lazy database cannot contain MergeTree tables, and it is
            /// unnecessary to load all of its tables just to filter them all out.
            if (database.second->getEngineName() != "Lazy")
                database_column_mut->insert(database.first);
        }
        block_to_filter.insert(ColumnWithTypeAndName(
            std::move(database_column_mut), std::make_shared<DataTypeString>(), "database"));

        /// Filter block_to_filter with column 'database'.
        VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);
        rows = block_to_filter.rows();

        /// The block now contains additional columns, so re-read the 'database' column.
        ColumnPtr database_column_for_filter = block_to_filter.getByName("database").column;

        if (rows)
        {
            /// Add columns 'table', 'engine', 'active'.

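            /// 'offsets' records, for each database row, the running total of
            /// (table, active) rows produced so far; it is used below to
            /// replicate the per-database columns to the same height.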
            IColumn::Offsets offsets(rows);

            for (size_t i = 0; i < rows; ++i)
            {
                String database_name = (*database_column_for_filter)[i].get<String>();
                const DatabasePtr database = databases.at(database_name);

                offsets[i] = i ? offsets[i - 1] : 0;
                for (auto iterator = database->getTablesIterator(); iterator->isValid(); iterator->next())
                {
                    String table_name = iterator->name();
                    StoragePtr storage = iterator->table();
                    String engine_name = storage->getName();

                    if (!dynamic_cast<MergeTreeData *>(storage.get()))
                        continue;

                    if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name))
                        continue;

                    storages[std::make_pair(database_name, iterator->name())] = storage;

                    /// Add all combinations of flag 'active'.
                    for (UInt64 active : {0, 1})
                    {
                        table_column_mut->insert(table_name);
                        engine_column_mut->insert(engine_name);
                        active_column_mut->insert(active);
                    }

                    offsets[i] += 2;
                }
            }

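            /// Stretch the 'database' column so it lines up row-for-row with
            /// the (table, engine, active) columns appended below.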
            for (size_t i = 0; i < block_to_filter.columns(); ++i)
            {
                ColumnPtr & column = block_to_filter.safeGetByPosition(i).column;
                column = column->replicate(offsets);
            }
        }
    }

    block_to_filter.insert(ColumnWithTypeAndName(std::move(table_column_mut), std::make_shared<DataTypeString>(), "table"));
    block_to_filter.insert(ColumnWithTypeAndName(std::move(engine_column_mut), std::make_shared<DataTypeString>(), "engine"));
    block_to_filter.insert(ColumnWithTypeAndName(std::move(active_column_mut), std::make_shared<DataTypeUInt8>(), "active"));

    if (rows)
    {
        /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'.
        VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);
        rows = block_to_filter.rows();
    }

    database_column = block_to_filter.getByName("database").column;
    table_column = block_to_filter.getByName("table").column;
    active_column = block_to_filter.getByName("active").column;

    next_row = 0;
}

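/// Return the next storage that matched the filter, skipping tables that were
/// dropped between enumeration and locking.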
StoragesInfo StoragesInfoStream::next()
{
    while (next_row < rows)
    {
        StoragesInfo info;

        info.database = (*database_column)[next_row].get<String>();
        info.table = (*table_column)[next_row].get<String>();

        auto is_same_table = [&info, this] (size_t row) -> bool
        {
            return (*database_column)[row].get<String>() == info.database &&
                   (*table_column)[row].get<String>() == info.table;
        };

        /// We may have two rows per table which differ in 'active' value.
        /// If rows with 'active = 0' were not filtered out, this means we
        /// must collect the inactive parts. Remember this fact in StoragesInfo.
        for (; next_row < rows && is_same_table(next_row); ++next_row)
        {
            const auto active = (*active_column)[next_row].get<UInt64>();
            if (active == 0)
                info.need_inactive_parts = true;
        }

        info.storage = storages.at(std::make_pair(info.database, info.table));

        try
        {
            /// Lock the table so that it is not dropped and its set of columns remains constant.
            info.table_lock = info.storage->lockStructureForShare(false, query_id, settings.lock_acquire_timeout);
        }
        catch (const Exception & e)
        {
            /** There are cases when IStorage::drop was called,
              * but we still own the object.
              * Then the table will throw an exception at the attempt to lock it.
              * Just skip the table.
              */
            if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
                continue;

            throw;
        }

        info.engine = info.storage->getName();

        info.data = dynamic_cast<MergeTreeData *>(info.storage.get());
        if (!info.data)
            throw Exception("Unknown engine " + info.engine, ErrorCodes::LOGICAL_ERROR);

        return info;
    }

    return {};
}

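/// Shared read path for the system tables derived from this base (e.g. system.parts):
/// enumerate the matching storages and let the derived class fill in the columns.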
Pipes StorageSystemPartsBase::read(
    const Names & column_names,
    const SelectQueryInfo & query_info,
    const Context & context,
    QueryProcessingStage::Enum /*processed_stage*/,
    const size_t /*max_block_size*/,
    const unsigned /*num_streams*/)
{
    bool has_state_column = hasStateColumn(column_names);

    StoragesInfoStream stream(query_info, context);

    /// Create the result.

    MutableColumns res_columns = getSampleBlock().cloneEmptyColumns();
    if (has_state_column)
        res_columns.push_back(ColumnString::create());

    while (StoragesInfo info = stream.next())
    {
        processNextStorage(res_columns, info, has_state_column);
    }

    Block header = getSampleBlock();
    if (has_state_column)
        header.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "_state"));

    UInt64 num_rows = res_columns.at(0)->size();
    Chunk chunk(std::move(res_columns), num_rows);

    Pipes pipes;
    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(std::move(header), std::move(chunk)));

    return pipes;
}

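/// Register the concrete table's columns, plus alias columns kept for
/// backwards compatibility with old column names.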
StorageSystemPartsBase::StorageSystemPartsBase(std::string name_, NamesAndTypesList && columns_)
    : IStorage(StorageID{"system", name_})
{
    ColumnsDescription tmp_columns(std::move(columns_));

    auto add_alias = [&](const String & alias_name, const String & column_name)
    {
        ColumnDescription column(alias_name, tmp_columns.get(column_name).type);
        column.default_desc.kind = ColumnDefaultKind::Alias;
        column.default_desc.expression = std::make_shared<ASTIdentifier>(column_name);
        tmp_columns.add(column);
    };

    /// Add aliases for old column names for backwards compatibility.
    add_alias("bytes", "bytes_on_disk");
    add_alias("marks_size", "marks_bytes");

    setColumns(tmp_columns);
}

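/// '_state' is exposed as a virtual column: it is not part of the regular
/// sample block and is materialized only when explicitly requested.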
NamesAndTypesList StorageSystemPartsBase::getVirtuals() const
{
    return NamesAndTypesList{
        NameAndTypePair("_state", std::make_shared<DataTypeString>())
    };
}

}