mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)

commit 07d952b728 (parent d04dc03fa4): use snapshots for semistructured data, durability fixes

@@ -1096,7 +1096,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
     sendDataFromPipe(
         storage->read(
             sample.getNames(),
-            storage->getStorageSnapshot(metadata),
+            storage->getStorageSnapshot(metadata, global_context),
             query_info,
             global_context,
             {},

@@ -89,7 +89,7 @@ BlockIO InterpreterDescribeQuery::execute()
         auto table_lock = table->lockForShare(getContext()->getInitialQueryId(), settings.lock_acquire_timeout);

         auto metadata_snapshot = table->getInMemoryMetadataPtr();
-        storage_snapshot = table->getStorageSnapshot(metadata_snapshot);
+        storage_snapshot = table->getStorageSnapshot(metadata_snapshot, getContext());
         columns = metadata_snapshot->getColumns();
     }

@@ -33,7 +33,7 @@ BlockIO InterpreterOptimizeQuery::execute()
     StoragePtr table = DatabaseCatalog::instance().getTable(table_id, getContext());
     checkStorageSupportsTransactionsIfNeeded(table, getContext());
     auto metadata_snapshot = table->getInMemoryMetadataPtr();
-    auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot);
+    auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot, getContext());

     // Empty list of names means we deduplicate by all columns, but user can explicitly state which columns to use.
     Names column_names;

@@ -329,7 +329,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         if (!metadata_snapshot)
             metadata_snapshot = storage->getInMemoryMetadataPtr();

-        storage_snapshot = storage->getStorageSnapshotForQuery(metadata_snapshot, query_ptr);
+        storage_snapshot = storage->getStorageSnapshotForQuery(metadata_snapshot, query_ptr, context);
     }

     if (has_input || !joined_tables.resolveTables())

@@ -65,8 +65,10 @@ void MergeTreeTransaction::removeOldPart(const StoragePtr & storage, const DataP
     else
     {
         /// Lock part for removal with special TID, so transactions will not try to remove it concurrently.
-        /// We lock it only in memory.
+        /// We lock it only in memory if part was not involved in any transactions.
         part_to_remove->version.lockRemovalTID(Tx::PrehistoricTID, transaction_context);
+        if (part_to_remove->wasInvolvedInTransaction())
+            part_to_remove->appendRemovalTIDToVersionMetadata();
     }
 }

@@ -189,6 +191,9 @@ void MergeTreeTransaction::afterCommit(CSN assigned_csn) noexcept
         part->version.removal_csn.store(csn);
         part->appendCSNToVersionMetadata(VersionMetadata::WhichCSN::REMOVAL);
     }
+
+    for (const auto & storage_and_mutation : mutations)
+        storage_and_mutation.first->setMutationCSN(storage_and_mutation.second, csn);
 }

 bool MergeTreeTransaction::rollback() noexcept

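Note: the loop added above assumes the transaction keeps track of the mutations it started. A minimal sketch of the bookkeeping this implies (the exact container type is an assumption, not taken from this diff; only the pairing of a storage with a mutation id is visible above):

    // Inside MergeTreeTransaction (sketch): each entry pairs the storage that owns a
    // mutation with that mutation's id, so afterCommit() can persist the commit CSN
    // next to the mutation entry via IStorage::setMutationCSN().
    std::vector<std::pair<StoragePtr, String>> mutations;
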
@@ -220,7 +225,11 @@ bool MergeTreeTransaction::rollback() noexcept

     /// Kind of optimization: cleanup thread can remove these parts immediately
     for (const auto & part : parts_to_remove)
+    {
         part->version.creation_csn.store(Tx::RolledBackCSN);
+        /// Write special RolledBackCSN, so we will be able to cleanup transaction log
+        part->appendCSNToVersionMetadata(VersionMetadata::CREATION);
+    }

     for (const auto & part : parts_to_activate)
     {

@@ -802,7 +802,7 @@ ASTPtr MutationsInterpreter::prepareInterpreterSelectQuery(std::vector<Stage> &
     /// e.g. ALTER referencing the same table in scalar subquery
     bool execute_scalar_subqueries = !dry_run;
     auto syntax_result = TreeRewriter(context).analyze(
-        all_asts, all_columns, storage, storage->getStorageSnapshot(metadata_snapshot),
+        all_asts, all_columns, storage, storage->getStorageSnapshot(metadata_snapshot, context),
         false, true, execute_scalar_subqueries);

     if (execute_scalar_subqueries && context->hasQueryContext())

@@ -383,7 +383,8 @@ void VersionMetadata::read(ReadBuffer & buf)
     {
         /// NOTE Metadata file may actually contain multiple creation TIDs, we need the last one.
         removal_tid = TransactionID::read(buf);
-        removal_tid_lock = removal_tid.getHash();
+        if (!removal_tid.isEmpty())
+            removal_tid_lock = removal_tid.getHash();
     }
     else if (name == REMOVAL_CSN_STR)
     {

@@ -563,7 +563,7 @@ void RemoteQueryExecutor::sendExternalTables()
         {
             SelectQueryInfo query_info;
             auto metadata_snapshot = cur->getInMemoryMetadataPtr();
-            auto storage_snapshot = cur->getStorageSnapshot(metadata_snapshot);
+            auto storage_snapshot = cur->getStorageSnapshot(metadata_snapshot, context);
             QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage(
                 context, QueryProcessingStage::Complete, storage_snapshot, query_info);

@@ -679,7 +679,7 @@ bool StorageFileLog::streamToViews()
         throw Exception("Engine table " + table_id.getNameForLogs() + " doesn't exist", ErrorCodes::LOGICAL_ERROR);

     auto metadata_snapshot = getInMemoryMetadataPtr();
-    auto storage_snapshot = getStorageSnapshot(metadata_snapshot);
+    auto storage_snapshot = getStorageSnapshot(metadata_snapshot, getContext());

     auto max_streams_number = std::min<UInt64>(filelog_settings->max_threads.value, file_infos.file_names.size());
     /// No files to parse

@@ -488,6 +488,11 @@ public:
         throw Exception("Mutations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
     }

+    virtual void setMutationCSN(const String & /*mutation_id*/, UInt64 /*csn*/)
+    {
+        throw Exception("Mutations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
+    }
+
     /// Cancel a part move to shard.
     virtual CancellationCode killPartMoveToShard(const UUID & /*task_uuid*/)
     {

@@ -610,15 +615,15 @@ public:
     virtual std::optional<UInt64> lifetimeBytes() const { return {}; }

     /// Creates a storage snapshot from given metadata.
-    virtual StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const
+    virtual StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const
     {
         return std::make_shared<StorageSnapshot>(*this, metadata_snapshot);
     }

     /// Creates a storage snapshot from given metadata and columns, which are used in query.
-    virtual StorageSnapshotPtr getStorageSnapshotForQuery(const StorageMetadataPtr & metadata_snapshot, const ASTPtr & /*query*/) const
+    virtual StorageSnapshotPtr getStorageSnapshotForQuery(const StorageMetadataPtr & metadata_snapshot, const ASTPtr & /*query*/, ContextPtr query_context) const
     {
-        return getStorageSnapshot(metadata_snapshot);
+        return getStorageSnapshot(metadata_snapshot, query_context);
     }

 private:

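With the interface change above, a snapshot is always taken for a specific query context, and most of the remaining hunks mechanically thread that context through the call sites. A minimal caller sketch, assuming a StoragePtr `table` and a ContextPtr `query_context` are in scope (illustrative, not a line from this commit):

    // Take the snapshot with the query context, then read through it: for MergeTree
    // storages this lets the snapshot contain only parts visible to the current
    // transaction instead of all Active parts.
    auto metadata_snapshot = table->getInMemoryMetadataPtr();
    auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot, query_context);
    auto pipe = table->read(column_names, storage_snapshot, query_info, query_context,
                            QueryProcessingStage::FetchColumns, max_block_size, num_streams);
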
@@ -615,7 +615,7 @@ bool StorageKafka::streamToViews()
     if (!table)
         throw Exception("Engine table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::LOGICAL_ERROR);

-    auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr());
+    auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext());

     // Create an INSERT query for streaming data
     auto insert = std::make_shared<ASTInsertQuery>();

@@ -1396,7 +1396,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
             }

             /// Sanity checks
-            bool csn_order = !version.removal_csn || version.creation_csn <= version.removal_csn;
+            bool csn_order = !version.removal_csn || version.creation_csn <= version.removal_csn || version.removal_csn == Tx::PrehistoricCSN;
             bool min_start_csn_order = version.creation_tid.start_csn <= version.creation_csn;
             bool max_start_csn_order = version.removal_tid.start_csn <= version.removal_csn;
             bool creation_csn_known = version.creation_csn;

@@ -4039,6 +4039,21 @@ DataPartsVector MergeTreeData::getVisibleDataPartsVector(ContextPtr local_contex
     return res;
 }

+DataPartsVector MergeTreeData::getVisibleDataPartsVectorUnlocked(ContextPtr local_context, const DataPartsLock & lock) const
+{
+    DataPartsVector res;
+    if (const auto * txn = local_context->getCurrentTransaction().get())
+    {
+        res = getDataPartsVectorForInternalUsage({DataPartState::Active, DataPartState::Outdated}, lock);
+        filterVisibleDataParts(res, txn->getSnapshot(), txn->tid);
+    }
+    else
+    {
+        res = getDataPartsVectorForInternalUsage({DataPartState::Active}, lock);
+    }
+    return res;
+}
+
 MergeTreeData::DataPartsVector MergeTreeData::getVisibleDataPartsVector(const MergeTreeTransactionPtr & txn) const
 {
     DataPartsVector res;

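The new Unlocked variant expects the caller to already hold the parts lock. A rough sketch of the intended pairing, assuming the locked wrapper simply takes the lock itself (this wrapper is not shown in the diff, so treat it as an illustration rather than the actual implementation):

    // Sketch: the existing locked overload could be expressed via the new helper.
    DataPartsVector MergeTreeData::getVisibleDataPartsVector(ContextPtr local_context) const
    {
        auto lock = lockParts();                                        // acquire the parts lock
        return getVisibleDataPartsVectorUnlocked(local_context, lock);  // filter by the txn snapshot
    }
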
@@ -6420,12 +6435,12 @@ void MergeTreeData::updateObjectColumns(const DataPartPtr & part, const DataPart
     DB::updateObjectColumns(object_columns, part->getColumns());
 }

-StorageSnapshotPtr MergeTreeData::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const
+StorageSnapshotPtr MergeTreeData::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const
 {
     auto snapshot_data = std::make_unique<SnapshotData>();

     auto lock = lockParts();
-    snapshot_data->parts = getDataPartsVectorForInternalUsage({DataPartState::Active}, lock);
+    snapshot_data->parts = getVisibleDataPartsVectorUnlocked(query_context, lock);
     return std::make_shared<StorageSnapshot>(*this, metadata_snapshot, object_columns, std::move(snapshot_data));
 }

@@ -435,7 +435,7 @@ public:
         DataPartsVector parts;
     };

-    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const override;
+    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const override;

     /// Load the set of data parts from disk. Call once - immediately after the object is created.
     void loadDataParts(bool skip_sanity_checks);

@@ -473,6 +473,7 @@ public:

     /// Returns parts that visible with current snapshot
     DataPartsVector getVisibleDataPartsVector(ContextPtr local_context) const;
+    DataPartsVector getVisibleDataPartsVectorUnlocked(ContextPtr local_context, const DataPartsLock & lock) const;
     DataPartsVector getVisibleDataPartsVector(const MergeTreeTransactionPtr & txn) const;
     DataPartsVector getVisibleDataPartsVector(CSN snapshot_version, TransactionID current_tid) const;

@@ -131,14 +131,11 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(

     const auto & settings = context->getSettingsRef();

-    auto parts_in_txn_snapshot = data.getVisibleDataPartsVector(context);
-
     const auto & metadata_for_reading = storage_snapshot->getMetadataForQuery();

     const auto & snapshot_data = assert_cast<const MergeTreeData::SnapshotData &>(*storage_snapshot->data);

-    /// FIXME: use one snapshot
-    const auto & parts = context->getCurrentTransaction() ? parts_in_txn_snapshot : snapshot_data.parts;
+    const auto & parts = snapshot_data.parts;

     if (!query_info.projection)
     {

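After this hunk the select path no longer takes a second, separate look at the part set: it consumes whatever the storage snapshot captured. A condensed sketch of the resulting flow (lines adapted from the hunks above, simplified):

    // One snapshot per query: taken under the parts lock, already filtered by the
    // transaction's visibility, and then used unchanged for the whole read.
    auto storage_snapshot = data.getStorageSnapshot(metadata_snapshot, context);
    const auto & snapshot_data = assert_cast<const MergeTreeData::SnapshotData &>(*storage_snapshot->data);
    const auto & parts = snapshot_data.parts;
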
@@ -284,7 +284,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart(
     TemporaryPart temp_part;
     Block & block = block_with_partition.block;
     auto columns = metadata_snapshot->getColumns().getAllPhysical().filter(block.getNames());
-    auto storage_snapshot = data.getStorageSnapshot(metadata_snapshot);
+    auto storage_snapshot = data.getStorageSnapshot(metadata_snapshot, context);

     if (!storage_snapshot->object_columns.empty())
     {

@@ -62,7 +62,11 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskP
     *out << "commands: ";
     commands.writeText(*out);
     *out << "\n";
-    if (!tid.isPrehistoric())
+    if (tid.isPrehistoric())
+    {
+        csn = Tx::PrehistoricCSN;
+    }
+    else
     {
         *out << "tid: ";
         TransactionID::write(tid, *out);

@@ -99,6 +103,14 @@ void MergeTreeMutationEntry::removeFile()
     }
 }

+void MergeTreeMutationEntry::writeCSN(CSN csn_)
+{
+    csn = csn_;
+    auto out = disk->writeFile(path_prefix + file_name, 256, WriteMode::Append);
+    *out << "csn: " << csn << "\n";
+    out->finalize();
+}
+
 MergeTreeMutationEntry::MergeTreeMutationEntry(DiskPtr disk_, const String & path_prefix_, const String & file_name_)
     : disk(std::move(disk_))
     , path_prefix(path_prefix_)

@@ -120,11 +132,21 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(DiskPtr disk_, const String & pat
     commands.readText(*buf);
     *buf >> "\n";

-    if (!buf->eof())
+    if (buf->eof())
+    {
+        tid = Tx::PrehistoricTID;
+        csn = Tx::PrehistoricCSN;
+    }
+    else
     {
         *buf >> "tid: ";
         tid = TransactionID::read(*buf);
         *buf >> "\n";
+
+        if (!buf->eof())
+        {
+            *buf >> "csn: " >> csn >> "\n";
+        }
     }

     assertEOF(*buf);

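Taken together, the writer and reader above imply a mutation entry file whose transactional tail looks roughly like this (contents illustrative; the fields written before "commands:" are unchanged by this commit and omitted here):

    commands: <serialized mutation commands>
    tid: <transaction id that created the mutation>
    csn: <commit sequence number>    <- appended later by writeCSN() once the transaction commits
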
@@ -31,7 +31,7 @@ struct MergeTreeMutationEntry

     /// ID of transaction which has created mutation.
     TransactionID tid = Tx::PrehistoricTID;
-    CSN csn;
+    CSN csn = Tx::UnknownCSN;

     /// Create a new entry and write it to a temporary file.
     MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number,

@@ -45,6 +45,8 @@ struct MergeTreeMutationEntry

     void removeFile();

+    void writeCSN(CSN csn_);
+
     static String versionToFileName(UInt64 block_number_);
     static UInt64 tryParseFileName(const String & file_name_);
     static UInt64 parseFileName(const String & file_name_);

@@ -1024,7 +1024,7 @@ bool StorageRabbitMQ::streamToViews()
     InterpreterInsertQuery interpreter(insert, rabbitmq_context, false, true, true);
     auto block_io = interpreter.execute();

-    auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr());
+    auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext());
     auto column_names = block_io.pipeline.getHeader().getNames();
     auto sample_block = storage_snapshot->getSampleBlockForColumns(column_names);

@@ -54,7 +54,7 @@ Pipe readFinalFromNestedStorage(
         filter_column_name = expressions->children.back()->getColumnName();
     }

-    auto nested_snapshot = nested_storage->getStorageSnapshot(nested_metadata);
+    auto nested_snapshot = nested_storage->getStorageSnapshot(nested_metadata, context);
     Pipe pipe = nested_storage->read(require_columns_name, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
     pipe.addTableLock(lock);
     pipe.addStorageHolder(nested_storage);

@@ -203,7 +203,7 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(
         /// TODO: Find a way to support projections for StorageBuffer
         query_info.ignore_projections = true;
         const auto & destination_metadata = destination->getInMemoryMetadataPtr();
-        return destination->getQueryProcessingStage(local_context, to_stage, destination->getStorageSnapshot(destination_metadata), query_info);
+        return destination->getQueryProcessingStage(local_context, to_stage, destination->getStorageSnapshot(destination_metadata, local_context), query_info);
     }

     return QueryProcessingStage::FetchColumns;

@@ -248,7 +248,7 @@ void StorageBuffer::read(
         auto destination_lock = destination->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);

         auto destination_metadata_snapshot = destination->getInMemoryMetadataPtr();
-        auto destination_snapshot = destination->getStorageSnapshot(destination_metadata_snapshot);
+        auto destination_snapshot = destination->getStorageSnapshot(destination_metadata_snapshot, local_context);

         const bool dst_has_same_structure = std::all_of(column_names.begin(), column_names.end(), [metadata_snapshot, destination_metadata_snapshot](const String& column_name)
         {

@@ -617,13 +617,13 @@ static bool requiresObjectColumns(const ColumnsDescription & all_columns, ASTPtr
     return false;
 }

-StorageSnapshotPtr StorageDistributed::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const
+StorageSnapshotPtr StorageDistributed::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const
 {
-    return getStorageSnapshotForQuery(metadata_snapshot, nullptr);
+    return getStorageSnapshotForQuery(metadata_snapshot, nullptr, query_context);
 }

 StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery(
-    const StorageMetadataPtr & metadata_snapshot, const ASTPtr & query) const
+    const StorageMetadataPtr & metadata_snapshot, const ASTPtr & query, ContextPtr /*query_context*/) const
 {
     /// If query doesn't use columns of type Object, don't deduce
     /// concrete types for them, because it required extra round trip.

@@ -69,9 +69,9 @@ public:
         ColumnsDescriptionByShardNum objects_by_shard;
     };

-    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const override;
+    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const override;
     StorageSnapshotPtr getStorageSnapshotForQuery(
-        const StorageMetadataPtr & metadata_snapshot, const ASTPtr & query) const override;
+        const StorageMetadataPtr & metadata_snapshot, const ASTPtr & query, ContextPtr query_context) const override;

     QueryProcessingStage::Enum
     getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override;

@@ -140,7 +140,7 @@ QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(
         /// converting and use it just like a normal view.
         query_info.ignore_projections = true;
         const auto & target_metadata = getTargetTable()->getInMemoryMetadataPtr();
-        return getTargetTable()->getQueryProcessingStage(local_context, to_stage, getTargetTable()->getStorageSnapshot(target_metadata), query_info);
+        return getTargetTable()->getQueryProcessingStage(local_context, to_stage, getTargetTable()->getStorageSnapshot(target_metadata, local_context), query_info);
     }

 Pipe StorageMaterializedView::read(

@@ -172,7 +172,7 @@ void StorageMaterializedView::read(
     auto storage = getTargetTable();
     auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);
     auto target_metadata_snapshot = storage->getInMemoryMetadataPtr();
-    auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot);
+    auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, local_context);

     if (query_info.order_optimizer)
         query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, local_context);

@@ -110,10 +110,11 @@ class MemorySink : public SinkToStorage
 public:
     MemorySink(
         StorageMemory & storage_,
-        const StorageMetadataPtr & metadata_snapshot_)
+        const StorageMetadataPtr & metadata_snapshot_,
+        ContextPtr context)
         : SinkToStorage(metadata_snapshot_->getSampleBlock())
         , storage(storage_)
-        , storage_snapshot(storage_.getStorageSnapshot(metadata_snapshot_))
+        , storage_snapshot(storage_.getStorageSnapshot(metadata_snapshot_, context))
     {
     }

@@ -190,7 +191,7 @@ StorageMemory::StorageMemory(
     setInMemoryMetadata(storage_metadata);
 }

-StorageSnapshotPtr StorageMemory::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const
+StorageSnapshotPtr StorageMemory::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const
 {
     auto snapshot_data = std::make_unique<SnapshotData>();
     snapshot_data->blocks = data.get();

@@ -260,9 +261,9 @@ Pipe StorageMemory::read(
 }


-SinkToStoragePtr StorageMemory::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /*context*/)
+SinkToStoragePtr StorageMemory::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr context)
 {
-    return std::make_shared<MemorySink>(*this, metadata_snapshot);
+    return std::make_shared<MemorySink>(*this, metadata_snapshot, context);
 }


@@ -36,7 +36,7 @@ public:
         std::shared_ptr<const Blocks> blocks;
     };

-    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot) const override;
+    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const override;

     Pipe read(
         const Names & column_names,

@@ -201,7 +201,7 @@ QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(
             stage_in_source_tables = std::max(
                 stage_in_source_tables,
                 table->getQueryProcessingStage(local_context, to_stage,
-                    table->getStorageSnapshot(table->getInMemoryMetadataPtr()), query_info));
+                    table->getStorageSnapshot(table->getInMemoryMetadataPtr(), local_context), query_info));
         }

         iterator->next();

@@ -338,7 +338,7 @@ Pipe StorageMerge::read(
         Aliases aliases;
         auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr();
         auto storage_columns = storage_metadata_snapshot->getColumns();
-        auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot);
+        auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, local_context);

         auto modified_query_info = getModifiedQueryInfo(query_info, modified_context, storage->getStorageID(), storage->as<StorageMerge>());
         auto syntax_result = TreeRewriter(local_context).analyzeSelect(

@@ -377,7 +377,7 @@ Pipe StorageMerge::read(
             }

             syntax_result = TreeRewriter(local_context).analyze(
-                required_columns_expr_list, storage_columns.getAllPhysical(), storage, storage->getStorageSnapshot(storage_metadata_snapshot));
+                required_columns_expr_list, storage_columns.getAllPhysical(), storage, storage->getStorageSnapshot(storage_metadata_snapshot, local_context));

             auto alias_actions = ExpressionAnalyzer(required_columns_expr_list, syntax_result, local_context).getActionsDAG(true);

@@ -539,6 +539,18 @@ void StorageMergeTree::waitForMutation(const String & mutation_id)
     LOG_INFO(log, "Mutation {} done", mutation_id);
 }

+void StorageMergeTree::setMutationCSN(const String & mutation_id, CSN csn)
+{
+    LOG_INFO(log, "Writing CSN {} for mutation {}", csn, mutation_id);
+    UInt64 version = MergeTreeMutationEntry::parseFileName(mutation_id);
+
+    std::lock_guard lock(currently_processing_in_background_mutex);
+    auto it = current_mutations_by_version.find(version);
+    if (it == current_mutations_by_version.end())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find mutation {}", mutation_id);
+    it->second.writeCSN(csn);
+}
+
 void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context)
 {
     /// Validate partition IDs (if any) before starting mutation

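For orientation, the durability path these hunks add fits together roughly like this (a comment-only sketch pieced together from this diff, not copied from the source tree):

    // MergeTreeTransaction::afterCommit(csn)            (hunk near the top of this diff)
    //   -> IStorage::setMutationCSN(mutation_id, csn)   (new virtual; default throws NOT_IMPLEMENTED)
    //     -> StorageMergeTree::setMutationCSN(...)      (this hunk: looks up the entry by version)
    //       -> MergeTreeMutationEntry::writeCSN(csn)    (appends "csn: <n>" to the mutation file on disk)
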
@@ -718,9 +730,13 @@ void StorageMergeTree::loadMutations()
             UInt64 block_number = entry.block_number;
             LOG_DEBUG(log, "Loading mutation: {} entry, commands size: {}", it->name(), entry.commands.size());

-            if (!entry.tid.isPrehistoric())
+            if (!entry.tid.isPrehistoric() && !entry.csn)
             {
-                if (!TransactionLog::getCSN(entry.tid))
+                if (auto csn = TransactionLog::getCSN(entry.tid))
+                {
+                    entry.writeCSN(csn);
+                }
+                else
                 {
                     TransactionLog::assertTIDIsNotOutdated(entry.tid);
                     LOG_DEBUG(log, "Mutation entry {} was created by transaction {}, but it was not committed. Removing mutation entry",

@@ -175,6 +175,8 @@ private:
     /// Wait until mutation with version will finish mutation for all parts
     void waitForMutation(Int64 version);
     void waitForMutation(const String & mutation_id) override;
+    void setMutationCSN(const String & mutation_id, CSN csn) override;


     friend struct CurrentlyMergingPartsTagger;

@@ -41,7 +41,7 @@ public:
         /// TODO: Find a way to support projections for StorageProxy
         info.ignore_projections = true;
         const auto & nested_metadata = getNested()->getInMemoryMetadataPtr();
-        return getNested()->getQueryProcessingStage(context, to_stage, getNested()->getStorageSnapshot(nested_metadata), info);
+        return getNested()->getQueryProcessingStage(context, to_stage, getNested()->getStorageSnapshot(nested_metadata, context), info);
     }

     Pipe watch(

@@ -104,7 +104,7 @@ public:
         for (const auto & c : column_names)
             cnames += c + " ";
         auto storage = getNested();
-        auto nested_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr());
+        auto nested_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context);
         auto pipe = storage->read(column_names, nested_snapshot, query_info, context,
                 processed_stage, max_block_size, num_streams);
         if (!pipe.empty() && add_conversion)

@@ -117,7 +117,7 @@ std::string readData(DB::StoragePtr & table, const DB::ContextPtr context)
 {
     using namespace DB;
     auto metadata_snapshot = table->getInMemoryMetadataPtr();
-    auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot);
+    auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot, context);

     Names column_names;
     column_names.push_back("a");