ClickHouse/dbms/src/Storages/StorageMergeTree.cpp

234 lines
7.4 KiB
C++
Raw Normal View History

2012-07-17 20:04:39 +00:00
#include <DB/Storages/StorageMergeTree.h>
2014-03-13 12:48:07 +00:00
#include <DB/Storages/MergeTree/MergeTreeBlockOutputStream.h>
#include <DB/Storages/MergeTree/DiskSpaceMonitor.h>
#include <DB/Common/escapeForFileName.h>
2014-07-11 12:47:45 +00:00
#include <DB/Interpreters/InterpreterAlterQuery.h>
2012-07-19 20:32:10 +00:00
2012-07-17 20:04:39 +00:00
namespace DB
{
/** Constructor for the local (non-replicated) MergeTree storage.
  * Wires together the shared MergeTreeData core and its reader/writer/merger
  * components, then restores on-disk state: fixes the part-index counter,
  * loads existing data parts and removes obsolete ones.
  */
StorageMergeTree::StorageMergeTree(
	const String & path_,
	const String & database_name_,
	const String & table_name_,
	NamesAndTypesListPtr columns_,
	const NamesAndTypesList & materialized_columns_,
	const NamesAndTypesList & alias_columns_,
	const ColumnDefaults & column_defaults_,
	Context & context_,
	ASTPtr & primary_expr_ast_,
	const String & date_column_name_,
	const ASTPtr & sampling_expression_, /// nullptr if sampling is not supported.
	size_t index_granularity_,
	MergeTreeData::Mode mode_,
	const String & sign_column_,
	const MergeTreeSettings & settings_)
	: IStorage{materialized_columns_, alias_columns_, column_defaults_},
	path(path_), database_name(database_name_), table_name(table_name_),
	full_path(path + escapeForFileName(table_name) + '/'),
	increment(full_path + "increment.txt"),
	context(context_), background_pool(context_.getBackgroundPool()),
	data(full_path, columns_,
		materialized_columns_, alias_columns_, column_defaults_,
		context_, primary_expr_ast_, date_column_name_,
		sampling_expression_, index_granularity_, mode_, sign_column_,
		settings_, database_name_ + "." + table_name, false),
	reader(data), writer(data), merger(data),
	log(&Logger::get(database_name_ + "." + table_name + " (StorageMergeTree)")),
	shutdown_called(false)
{
	/// Make sure the increment is not behind the largest part index already on disk.
	increment.fixIfBroken(data.getMaxDataPartIndex());

	data.loadDataParts(false);
	data.clearOldParts();
}
2012-07-17 20:04:39 +00:00
/** Factory method: constructs a StorageMergeTree and schedules its periodic
  * merge task in the background processing pool.
  * Returns the owning StoragePtr; the raw pointer captured by the task stays
  * valid because shutdown() removes the task before destruction.
  */
StoragePtr StorageMergeTree::create(
	const String & path_, const String & database_name_, const String & table_name_,
	NamesAndTypesListPtr columns_,
	const NamesAndTypesList & materialized_columns_,
	const NamesAndTypesList & alias_columns_,
	const ColumnDefaults & column_defaults_,
	Context & context_,
	ASTPtr & primary_expr_ast_,
	const String & date_column_name_,
	const ASTPtr & sampling_expression_,
	size_t index_granularity_,
	MergeTreeData::Mode mode_,
	const String & sign_column_,
	const MergeTreeSettings & settings_)
{
	auto storage = new StorageMergeTree{
		path_, database_name_, table_name_,
		columns_, materialized_columns_, alias_columns_, column_defaults_,
		context_, primary_expr_ast_, date_column_name_,
		sampling_expression_, index_granularity_, mode_, sign_column_, settings_};

	/// Take the owning pointer before scheduling any background work.
	StoragePtr owned_ptr = storage->thisPtr();

	storage->merge_task_handle = storage->background_pool.addTask(
		std::bind(&StorageMergeTree::mergeTask, storage, std::placeholders::_1));

	return owned_ptr;
}
2013-09-30 01:29:19 +00:00
void StorageMergeTree::shutdown()
2012-07-30 20:32:36 +00:00
{
2014-03-13 12:48:07 +00:00
if (shutdown_called)
return;
shutdown_called = true;
merger.cancelAll();
2014-07-02 12:30:38 +00:00
background_pool.removeTask(merge_task_handle);
2012-07-18 19:44:04 +00:00
}
2014-03-13 12:48:07 +00:00
/// Ensure background merges are stopped before members are destroyed.
StorageMergeTree::~StorageMergeTree()
{
	shutdown();
}
2012-07-18 19:44:04 +00:00
2012-07-21 05:07:14 +00:00
/** Execute a SELECT over the table.
  * All work (part selection, stream construction, choice of processing stage)
  * is delegated to the reader component.
  */
BlockInputStreams StorageMergeTree::read(
	const Names & column_names,
	ASTPtr query,
	const Settings & settings,
	QueryProcessingStage::Enum & processed_stage,
	size_t max_block_size,
	unsigned threads)
{
	return reader.read(column_names, query, settings, processed_stage, max_block_size, threads);
}
2014-03-09 17:36:01 +00:00
/// Execute an INSERT: hand out a MergeTreeBlockOutputStream bound to this table.
/// The returned smart pointer owns the stream.
BlockOutputStreamPtr StorageMergeTree::write(ASTPtr query)
{
	return new MergeTreeBlockOutputStream(*this);
}
2014-03-20 13:28:49 +00:00
void StorageMergeTree::drop()
2012-08-16 18:17:01 +00:00
{
2014-04-11 15:53:32 +00:00
shutdown();
2014-03-13 12:48:07 +00:00
data.dropAllData();
2013-08-07 13:07:42 +00:00
}
void StorageMergeTree::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
{
std::string new_full_path = new_path_to_db + escapeForFileName(new_table_name) + '/';
2014-03-13 19:14:25 +00:00
2014-07-28 14:33:30 +00:00
data.setPath(new_full_path, true);
2014-03-13 12:48:07 +00:00
path = new_path_to_db;
2014-09-10 11:34:26 +00:00
table_name = new_table_name;
2014-03-13 12:48:07 +00:00
full_path = new_full_path;
increment.setPath(full_path + "increment.txt");
2014-05-08 07:12:01 +00:00
/// TODO: Можно обновить названия логгеров у this, data, reader, writer, merger.
}
2014-07-11 12:47:45 +00:00
/** ALTER TABLE: apply column changes.
  *
  * Sequence: under lockDataForAlter() the new column list is validated and a
  * conversion transaction is prepared for every data part; then, under
  * lockStructureForAlter(), the table metadata is updated and all part
  * transactions are committed.
  *
  * NOTE(review): the parameters shadow the identically named members — the
  * caller presumably passes the current values; confirm at call sites.
  */
void StorageMergeTree::alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context)
{
	/// NOTE: Here, as in ReplicatedMergeTree, ALTER could be implemented without blocking data writes for long.

	auto table_soft_lock = lockDataForAlter();

	data.checkAlter(params);

	NamesAndTypesList new_columns = data.getColumnsList();
	params.apply(new_columns);

	MergeTreeData::DataParts parts = data.getDataParts();
	std::vector<MergeTreeData::AlterDataPartTransactionPtr> transactions;
	for (const MergeTreeData::DataPartPtr & part : parts)
	{
		/// An empty transaction apparently means the part needs no conversion — see MergeTreeData::alterDataPart.
		auto transaction = data.alterDataPart(part, new_columns);
		if (transaction)
			transactions.push_back(std::move(transaction));
	}

	auto table_hard_lock = lockStructureForAlter();

	InterpreterAlterQuery::updateMetadata(database_name, table_name, new_columns, context);
	data.setColumnsList(new_columns);

	for (auto & transaction : transactions)
	{
		transaction->commit();
	}
}
2014-04-11 13:05:17 +00:00
/** Perform a single merge iteration.
  *
  * @param aggressive   passed through to the merger's part selection.
  * @param pool_context non-null when invoked from the background pool task;
  *                     enables the "big merges" thread-count throttling below.
  * @return true if a merge was performed, false if no suitable parts were found.
  */
bool StorageMergeTree::merge(bool aggressive, BackgroundProcessingPool::Context * pool_context)
{
	auto structure_lock = lockStructure(true);

	/// Remove obsolete parts.
	data.clearOldParts();

	size_t disk_space = DiskSpaceMonitor::getUnreservedFreeSpace(full_path);

	/// The tagger's destructor must run with currently_merging_mutex unlocked,
	/// hence it is declared outside the locked scope below.
	CurrentlyMergingPartsTaggerPtr merging_tagger;
	String merged_name;

	{
		Poco::ScopedLock<Poco::FastMutex> lock(currently_merging_mutex);

		MergeTreeData::DataPartsVector parts;
		auto can_merge = std::bind(&StorageMergeTree::canMergeParts, this, std::placeholders::_1, std::placeholders::_2);
		/// If the merge was started from the thread pool and at least half of the pool's
		/// threads are merging big parts, do not merge big parts here.
		size_t big_merges = background_pool.getCounter("big merges");
		bool only_small = pool_context && big_merges * 2 >= background_pool.getNumberOfThreads();

		/// Two selection attempts; the third argument differs — TODO confirm its meaning
		/// against MergeTreeDataMerger::selectPartsToMerge.
		if (!merger.selectPartsToMerge(parts, merged_name, disk_space, false, aggressive, only_small, can_merge) &&
			!merger.selectPartsToMerge(parts, merged_name, disk_space, true, aggressive, only_small, can_merge))
		{
			LOG_INFO(log, "No parts to merge");
			return false;
		}

		/// Marks the chosen parts as currently merging and reserves disk space for the result.
		merging_tagger = new CurrentlyMergingPartsTagger(parts, merger.estimateDiskSpaceForMerge(parts), *this);

		/// If we are going to merge big parts, bump the counter of threads doing big merges.
		if (pool_context)
		{
			for (const auto & part : parts)
			{
				if (part->size_in_bytes > data.settings.max_bytes_to_merge_parts_small)
				{
					pool_context->incrementCounter("big merges");
					break;
				}
			}
		}
	}

	const auto & merge_entry = context.getMergeList().insert(database_name, table_name, merged_name);
	merger.mergeParts(merging_tagger->parts, merged_name, *merge_entry, nullptr, &*merging_tagger->reserved_space);

	return true;
}
/** Entry point invoked by the background processing pool.
  * @return whether a merge was actually performed (false also on shutdown or cancellation).
  */
bool StorageMergeTree::mergeTask(BackgroundProcessingPool::Context & context)
{
	if (shutdown_called)
		return false;

	try
	{
		return merge(false, &context);
	}
	catch (Exception & e)
	{
		/// A merge aborted via cancelAll() during shutdown is expected, not an error.
		if (e.code() != ErrorCodes::ABORTED)
			throw;

		LOG_INFO(log, "Merge cancelled");
		return false;
	}
}
2014-04-11 13:05:17 +00:00
2014-03-13 17:44:00 +00:00
/// Two parts may be merged only if neither already participates in a merge.
/// Used as the predicate for part selection in merge(), where it is invoked
/// under currently_merging_mutex.
bool StorageMergeTree::canMergeParts(const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right)
{
	return currently_merging.count(left) == 0 && currently_merging.count(right) == 0;
}
2012-07-17 20:04:39 +00:00
}