Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)
Slightly better
commit 97e0a0310a (parent 32686e11cb)
@@ -51,7 +51,7 @@ void DiskWebServer::Metadata::initialize(const String & uri_with_path, const Str
         assertChar('\t', metadata_buf);
         readIntText(file_size, metadata_buf);
         assertChar('\n', metadata_buf);
-        LOG_DEBUG(&Poco::Logger::get("DiskWeb"), "Read file: {}, size: {}", remote_file_name, file_size);
+        LOG_TRACE(&Poco::Logger::get("DiskWeb"), "Read file: {}, size: {}", remote_file_name, file_size);

         /*
          * URI/ {uri}/{uuid}/all_x_x_x/{file}
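For orientation, each record in the metadata file being parsed in this hunk is a remote file name, a tab, an integer size, and a newline. The standalone sketch below (std::istream instead of ClickHouse's ReadBuffer, assertChar and readIntText helpers, plus an invented sample record) only illustrates that record format; it is not code from the commit.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Parse one "<remote_file_name>\t<file_size>\n" record, mirroring the
// assertChar('\t') / readIntText / assertChar('\n') sequence in the hunk above.
static bool parseMetadataRecord(std::istream & in, std::string & remote_file_name, std::uint64_t & file_size)
{
    if (!std::getline(in, remote_file_name, '\t'))   // file name up to the tab separator
        return false;
    if (!(in >> file_size))                          // decimal size
        return false;
    return in.get() == '\n';                         // each record ends with a newline
}

int main()
{
    std::istringstream index("all_1_1_0/checksums.txt\t234\n");  // invented sample record
    std::string name;
    std::uint64_t size = 0;
    if (parseMetadataRecord(index, name, size))
        std::cout << name << " " << size << "\n";    // prints: all_1_1_0/checksums.txt 234
}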
@@ -227,7 +227,7 @@ bool DiskWebServer::findFileInMetadata(const String & path, File & file_info) co

 bool DiskWebServer::exists(const String & path) const
 {
-    LOG_DEBUG(log, "Checking existence of file: {}", path);
+    LOG_TRACE(log, "Checking existence of file: {}", path);

     File file;
     return findFileInMetadata(path, file);
@@ -243,7 +243,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskWebServer::readFile(const String & p

     auto file_name = escapeForFileName(fs::path(path).stem()) + fs::path(path).extension().string();
     auto remote_path = fs::path(path).parent_path() / file_name;
-    LOG_DEBUG(log, "Read from file by path: {}", remote_path.string());
+    LOG_TRACE(log, "Read from file by path: {}", remote_path.string());

     RemoteMetadata meta(uri, remote_path);
     meta.remote_fs_objects.emplace_back(std::make_pair(getFileName(remote_path), file.size));
@@ -255,7 +255,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskWebServer::readFile(const String & p

 DiskDirectoryIteratorPtr DiskWebServer::iterateDirectory(const String & path)
 {
-    LOG_DEBUG(log, "Iterate directory: {}", path);
+    LOG_TRACE(log, "Iterate directory: {}", path);
     String uuid;

     if (RE2::FullMatch(path, ".*/store/"))
@@ -63,7 +63,7 @@ BlockIO InterpreterAlterQuery::execute()
     }

     StoragePtr table = DatabaseCatalog::instance().getTable(table_id, getContext());
-    if (table->isReadOnly())
+    if (table->isStaticStorage())
         throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");
     auto alter_lock = table->lockForAlter(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout);
     auto metadata_snapshot = table->getInMemoryMetadataPtr();
@@ -163,7 +163,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
     if (query.kind == ASTDropQuery::Kind::Detach)
     {
         getContext()->checkAccess(drop_storage, table_id);
-        if (table->isReadOnly())
+        if (table->isStaticStorage())
             throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");

         if (table->isDictionary())
@@ -198,7 +198,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
             throw Exception("Cannot TRUNCATE dictionary", ErrorCodes::SYNTAX_ERROR);

         getContext()->checkAccess(AccessType::TRUNCATE, table_id);
-        if (table->isReadOnly())
+        if (table->isStaticStorage())
             throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");

         table->checkTableCanBeDropped();
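The three interpreter hunks above repeat the same two-line guard. A hypothetical helper (not part of this commit) that factors it out could look like the sketch below; it assumes only the StoragePtr, Exception and ErrorCodes::TABLE_IS_READ_ONLY names already visible in the hunks and would live alongside the interpreters.

/// Hypothetical refactoring sketch, not code from this commit: the guard that
/// InterpreterAlterQuery and InterpreterDropQuery now repeat, factored into one place.
static void throwIfStaticStorage(const StoragePtr & table)
{
    if (table->isStaticStorage())
        throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");
}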
@@ -201,7 +201,7 @@ NameDependencies IStorage::getDependentViewsByColumn(ContextPtr context) const
     return name_deps;
 }

-bool IStorage::isReadOnly() const
+bool IStorage::isStaticStorage() const
 {
     auto storage_policy = getStoragePolicy();
     if (storage_policy)
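The hunk is cut off before the body of the renamed function ends. Based on the visible context and on the declaration comment in the next hunk ("Returns true if all disks of storage are read-only"), the check presumably amounts to every disk of the storage policy being read-only. The sketch below is that inference, not the verbatim body, and it assumes IStoragePolicy::getDisks() and IDisk::isReadOnly().

/// Sketch inferred from the truncated hunk above; not necessarily the exact body.
bool IStorage::isStaticStorage() const
{
    auto storage_policy = getStoragePolicy();
    if (storage_policy)
    {
        /// The storage is "static" only if every disk it could write to is read-only.
        for (const auto & disk : storage_policy->getDisks())
            if (!disk->isReadOnly())
                return false;
        return true;
    }
    return false;
}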
@@ -540,7 +540,7 @@ public:
     virtual StoragePolicyPtr getStoragePolicy() const { return {}; }

     /// Returns true if all disks of storage are read-only.
-    virtual bool isReadOnly() const;
+    virtual bool isStaticStorage() const;

     /// If it is possible to quickly determine exact number of rows in the table at this moment of time, then return it.
     /// Used for:
@@ -113,6 +113,10 @@ void StorageMergeTree::startup()
     time_after_previous_cleanup_parts.restart();
     time_after_previous_cleanup_temporary_directories.restart();

+    /// Do not schedule any background jobs if current storage has static data files.
+    if (isStaticStorage())
+        return;
+
     try
     {
         background_executor.start();
@@ -243,7 +247,7 @@ void StorageMergeTree::drop()
 {
     shutdown();
     /// In case there is read-only disk we cannot allow to call dropAllData(), but dropping tables is allowed.
-    if (isReadOnly())
+    if (isStaticStorage())
         return;
     dropAllData();
 }
@@ -1049,6 +1053,8 @@ bool StorageMergeTree::scheduleDataProcessingJob(IBackgroundJobExecutor & execut
     if (shutdown_called)
         return false;

+    assert(!isStaticStorage());
+
     auto metadata_snapshot = getInMemoryMetadataPtr();
     std::shared_ptr<MergeMutateSelectedEntry> merge_entry, mutate_entry;

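Taken together, the two StorageMergeTree hunks encode one invariant: startup() returns before background_executor.start() when the storage is static, so scheduleDataProcessingJob() is never reached for such a table, which is what the new assert checks. The toy classes below (not the ClickHouse ones) are a self-contained sketch of that invariant.

#include <cassert>
#include <functional>

// Toy stand-ins, not the ClickHouse classes: a background executor that only
// runs its job after start(), and a storage that skips start() when static.
struct ToyBackgroundExecutor
{
    std::function<bool()> job;
    bool started = false;

    void start() { started = true; }
    void tick() { if (started) job(); }   // the scheduler callback runs only after start()
};

struct ToyStorage
{
    bool is_static = false;
    ToyBackgroundExecutor executor;

    void startup()
    {
        executor.job = [this] { return scheduleDataProcessingJob(); };
        if (is_static)
            return;            // mirrors the early return added in the startup() hunk
        executor.start();
    }

    bool scheduleDataProcessingJob()
    {
        assert(!is_static);    // mirrors the assert added in the scheduleDataProcessingJob() hunk
        return true;
    }
};

int main()
{
    ToyStorage static_storage;
    static_storage.is_static = true;
    static_storage.startup();
    static_storage.executor.tick();   // no-op: the executor was never started

    ToyStorage writable_storage;
    writable_storage.startup();
    writable_storage.executor.tick(); // runs the job; the assert holds
}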