Mirror of https://github.com/ClickHouse/ClickHouse.git
Allow dropping tables from the s3_plain disk (as is already allowed for the web disk)
Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
parent e2726e03cc
commit c029549859
@@ -75,6 +75,7 @@ public:
     DataSourceDescription getDataSourceDescription() const override { return delegate->getDataSourceDescription(); }
     bool isRemote() const override { return delegate->isRemote(); }
     bool isReadOnly() const override { return delegate->isReadOnly(); }
+    bool isWriteOnce() const override { return delegate->isWriteOnce(); }
     bool supportZeroCopyReplication() const override { return delegate->supportZeroCopyReplication(); }
     bool supportParallelWrite() const override { return delegate->supportParallelWrite(); }
     void onFreeze(const String & path) override;
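
This first hunk is the delegating disk wrapper: it forwards every capability query to the wrapped disk, so the new isWriteOnce() flag has to be forwarded as well, or layered disks would silently report the base-class default. A minimal sketch of the pattern, using simplified stand-in types rather than the real ClickHouse classes:

    #include <memory>
    #include <utility>

    // Simplified stand-ins for the disk interface and its decorator;
    // illustrative types only, not the real ClickHouse classes.
    struct IDiskLike
    {
        virtual ~IDiskLike() = default;
        virtual bool isReadOnly() const { return false; }
        virtual bool isWriteOnce() const { return false; }
    };

    struct DiskWrapper : IDiskLike
    {
        explicit DiskWrapper(std::shared_ptr<IDiskLike> delegate_) : delegate(std::move(delegate_)) {}

        // Every capability flag must be forwarded explicitly; a missing
        // override here would make a wrapped write-once disk report false.
        bool isReadOnly() const override { return delegate->isReadOnly(); }
        bool isWriteOnce() const override { return delegate->isWriteOnce(); }

    private:
        std::shared_ptr<IDiskLike> delegate;
    };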
@@ -308,6 +308,8 @@ public:
     virtual bool isReadOnly() const { return false; }

+    virtual bool isWriteOnce() const { return false; }
+
     /// Check if disk is broken. Broken disks will have 0 space and cannot be used.
     virtual bool isBroken() const { return false; }
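
The base interface defaults to false, so only disks that explicitly opt in are treated as write-once. The flag is deliberately distinct from isReadOnly(): a read-only disk (such as the web disk) rejects all writes, while a write-once disk (such as s3_plain) accepts new files but never rewrites existing ones. A sketch of how a caller could branch on the two flags; the helper below is hypothetical, not part of the ClickHouse API:

    // Hypothetical helper for illustration only: which operations each
    // combination of capability flags permits.
    enum class DiskOp { Read, CreateFile, ModifyFile };

    bool isOperationAllowed(DiskOp op, bool is_read_only, bool is_write_once)
    {
        switch (op)
        {
            case DiskOp::Read:
                return true;                            // reads are always allowed
            case DiskOp::CreateFile:
                return !is_read_only;                   // write-once disks still accept new files
            case DiskOp::ModifyFile:
                return !is_read_only && !is_write_once; // rewriting needs a fully writable disk
        }
        return false;
    }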
@@ -101,6 +101,8 @@ public:
     bool isReadOnly() const override { return object_storage->isReadOnly(); }

+    bool isWriteOnce() const override { return object_storage->isWriteOnce(); }
+
     const std::string & getCacheConfigName() const { return cache_config_name; }

     ObjectStoragePtr getWrappedObjectStorage() { return object_storage; }
@@ -499,6 +499,11 @@ bool DiskObjectStorage::isReadOnly() const
     return object_storage->isReadOnly();
 }

+bool DiskObjectStorage::isWriteOnce() const
+{
+    return object_storage->isWriteOnce();
+}
+
 DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
 {
     return std::make_shared<DiskObjectStorage>(
@@ -177,6 +177,12 @@ public:
     /// with static files, so only read-only operations are allowed for this storage.
     bool isReadOnly() const override;

+    /// Is object write-once?
+    /// For example: S3PlainObjectStorage is write once, this means that it
+    /// does support BACKUP to this disk, but does not support INSERT into
+    /// MergeTree table on this disk.
+    bool isWriteOnce() const override;
+
     /// Add a cache layer.
     /// Example: DiskObjectStorage(S3ObjectStorage) -> DiskObjectStorage(CachedObjectStorage(S3ObjectStorage))
     /// There can be any number of cache layers:
@@ -199,6 +199,7 @@ public:
     virtual bool supportsCache() const { return false; }

     virtual bool isReadOnly() const { return false; }
+    virtual bool isWriteOnce() const { return false; }

     virtual bool supportParallelWrite() const { return false; }
@@ -216,6 +216,11 @@
     {
         data_source_description.type = DataSourceType::S3_Plain;
     }
+
+    /// Notes:
+    /// - supports BACKUP to this disk
+    /// - does not support INSERT into MergeTree table on this disk
+    bool isWriteOnce() const override { return true; }
 };

 }
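
S3PlainObjectStorage maps each file to a single object with no extra metadata layer, so existing objects are never rewritten in place: BACKUP to such a disk works, but INSERT into a MergeTree table on it does not. A sketch of a concrete storage opting into the flag, again with simplified stand-in types:

    // Simplified stand-in for the object storage interface; illustrative only.
    struct IObjectStorageLike
    {
        virtual ~IObjectStorageLike() = default;
        virtual bool isWriteOnce() const { return false; }  // default: fully writable
    };

    struct PlainObjectStorage final : IObjectStorageLike
    {
        // A "plain" layout stores files one-to-one as immutable objects,
        // so the storage declares itself write-once.
        bool isWriteOnce() const override { return true; }
    };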
@@ -253,7 +253,7 @@ bool IStorage::isStaticStorage() const
     if (storage_policy)
     {
         for (const auto & disk : storage_policy->getDisks())
-            if (!disk->isReadOnly())
+            if (!(disk->isReadOnly() || disk->isWriteOnce()))
                 return false;
         return true;
     }
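
Previously a storage counted as static only when every disk in its policy was read-only; the relaxed check also accepts write-once disks, since neither kind can serve INSERTs or merges. A self-contained sketch of the predicate with stub types (the real code iterates the storage policy's IDisk objects):

    #include <memory>
    #include <vector>

    // Stub disk exposing just the two capability flags; illustrative only.
    struct DiskStub
    {
        bool read_only = false;
        bool write_once = false;
    };

    // Static storage = no disk left that could accept INSERTs or merges.
    bool isStaticStorage(const std::vector<std::shared_ptr<DiskStub>> & disks)
    {
        for (const auto & disk : disks)
            if (!(disk->read_only || disk->write_once))
                return false;  // found a fully writable disk
        return true;
    }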
@@ -583,7 +583,8 @@ public:
     /// Returns storage policy if storage supports it.
     virtual StoragePolicyPtr getStoragePolicy() const { return {}; }

-    /// Returns true if all disks of storage are read-only.
+    /// Returns true if all disks of storage are read-only or write-once.
+    /// NOTE: write-once also does not support INSERTs/merges/... for MergeTree
     virtual bool isStaticStorage() const;

     virtual bool supportsSubsetOfColumns() const { return false; }
@@ -30,9 +30,7 @@ def start_cluster():
         pytest.param("wide", "backup_wide", "s3_backup_wide", int(0), id="wide"),
     ],
 )
-def test_attach_compact_part(
-    table_name, backup_name, storage_policy, min_bytes_for_wide_part
-):
+def test_attach_part(table_name, backup_name, storage_policy, min_bytes_for_wide_part):
     node.query(
         f"""
         -- Catch any errors (NOTE: warnings are ok)
@@ -61,9 +59,6 @@ def test_attach_compact_part(

     node.query(
         f"""
-        -- NOTE: be aware not to DROP the table, but DETACH first to keep it in S3.
-        detach table ordinary_db.{table_name};
-
         -- NOTE: DROP DATABASE cannot be done w/o this due to metadata leftovers
         set force_remove_data_recursively_on_drop=1;
         drop database ordinary_db sync;