Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 15:42:02 +00:00
Merge pull request #37450 from Avogar/check-format-on-storage-creation
Check format name on storage creation
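With this change, the File, URL, S3 and HDFS storages validate the format name in their constructors, so a CREATE TABLE with a misspelled or unregistered format fails immediately with an UNKNOWN_FORMAT error rather than the problem only surfacing later, when the table is used. A minimal sketch of the new behavior (table and format names below are illustrative, not taken from the diff):

-- The format name passed to the engine is now checked against FormatFactory at storage creation time.
CREATE TABLE bad_format_example (x UInt32) ENGINE = File(SomeUnknownFormat);
-- Expected to fail right away with an UNKNOWN_FORMAT exception ("Unknown format SomeUnknownFormat", per the code below).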
commit 0615866aea
@@ -584,6 +584,13 @@ bool FormatFactory::checkIfFormatHasAnySchemaReader(const String & name) const
     return checkIfFormatHasSchemaReader(name) || checkIfFormatHasExternalSchemaReader(name);
 }
 
+void FormatFactory::checkFormatName(const String & name) const
+{
+    auto it = dict.find(name);
+    if (it == dict.end())
+        throw Exception("Unknown format " + name, ErrorCodes::UNKNOWN_FORMAT);
+}
+
 FormatFactory & FormatFactory::instance()
 {
     static FormatFactory ret;
@@ -210,6 +210,9 @@ public:
     bool isInputFormat(const String & name) const;
     bool isOutputFormat(const String & name) const;
 
+    /// Check that format with specified name exists and throw an exception otherwise.
+    void checkFormatName(const String & name) const;
+
 private:
     FormatsDictionary dict;
     FileExtensionFormats file_extension_formats;
@@ -146,6 +146,7 @@ StorageHDFS::StorageHDFS(
     , distributed_processing(distributed_processing_)
     , partition_by(partition_by_)
 {
+    FormatFactory::instance().checkFormatName(format_name);
     context_->getRemoteHostFilter().checkURL(Poco::URI(uri_));
     checkHDFSURL(uri_);
 
@@ -382,6 +382,8 @@ StorageFile::StorageFile(CommonArguments args)
     , compression_method(args.compression_method)
     , base_path(args.getContext()->getPath())
 {
+    if (format_name != "Distributed")
+        FormatFactory::instance().checkFormatName(format_name);
 }
 
 void StorageFile::setStorageMetadata(CommonArguments args)
@@ -740,6 +740,7 @@ StorageS3::StorageS3(
     , partition_by(partition_by_)
     , is_key_with_globs(uri_.key.find_first_of("*?{") != std::string::npos)
 {
+    FormatFactory::instance().checkFormatName(format_name);
     context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri_.uri);
     StorageInMemoryMetadata storage_metadata;
 
@@ -74,6 +74,7 @@ IStorageURLBase::IStorageURLBase(
     , http_method(http_method_)
     , partition_by(partition_by_)
 {
+    FormatFactory::instance().checkFormatName(format_name);
     StorageInMemoryMetadata storage_metadata;
 
     if (columns_.empty())
@@ -33,7 +33,7 @@ def start_cluster():
 def test_config_with_hosts(start_cluster):
     assert (
         node1.query(
-            "CREATE TABLE table_test_1_1 (word String) Engine=URL('http://host:80', HDFS)"
+            "CREATE TABLE table_test_1_1 (word String) Engine=URL('http://host:80', CSV)"
         )
         == ""
     )
@@ -44,7 +44,7 @@ def test_config_with_hosts(start_cluster):
         == ""
     )
     assert "not allowed" in node1.query_and_get_error(
-        "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://host:123', S3)"
+        "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://host:123', CSV)"
     )
     assert "not allowed" in node1.query_and_get_error(
         "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://yandex2.ru', CSV)"
@@ -60,7 +60,7 @@ def test_config_with_only_primary_hosts(start_cluster):
     )
     assert (
         node2.query(
-            "CREATE TABLE table_test_2_2 (word String) Engine=URL('https://host:123', S3)"
+            "CREATE TABLE table_test_2_2 (word String) Engine=URL('https://host:123', CSV)"
         )
         == ""
     )
@@ -72,25 +72,25 @@ def test_config_with_only_primary_hosts(start_cluster):
     )
     assert (
         node2.query(
-            "CREATE TABLE table_test_2_4 (word String) Engine=URL('https://yandex.ru:87', HDFS)"
+            "CREATE TABLE table_test_2_4 (word String) Engine=URL('https://yandex.ru:87', CSV)"
         )
         == ""
     )
     assert "not allowed" in node2.query_and_get_error(
-        "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host', HDFS)"
+        "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host', CSV)"
     )
     assert "not allowed" in node2.query_and_get_error(
         "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host:234', CSV)"
     )
     assert "not allowed" in node2.query_and_get_error(
-        "CREATE TABLE table_test_2_6 (word String) Engine=URL('https://yandex2.ru', S3)"
+        "CREATE TABLE table_test_2_6 (word String) Engine=URL('https://yandex2.ru', CSV)"
     )
 
 
 def test_config_with_only_regexp_hosts(start_cluster):
     assert (
         node3.query(
-            "CREATE TABLE table_test_3_1 (word String) Engine=URL('https://host:80', HDFS)"
+            "CREATE TABLE table_test_3_1 (word String) Engine=URL('https://host:80', CSV)"
         )
         == ""
     )
@@ -104,7 +104,7 @@ def test_config_with_only_regexp_hosts(start_cluster):
         "CREATE TABLE table_test_3_3 (word String) Engine=URL('https://host', CSV)"
     )
     assert "not allowed" in node3.query_and_get_error(
-        "CREATE TABLE table_test_3_4 (word String) Engine=URL('https://yandex2.ru', S3)"
+        "CREATE TABLE table_test_3_4 (word String) Engine=URL('https://yandex2.ru', CSV)"
     )
 
 
@@ -123,7 +123,7 @@ def test_config_without_allowed_hosts_section(start_cluster):
     )
     assert (
         node4.query(
-            "CREATE TABLE table_test_4_3 (word String) Engine=URL('https://host', HDFS)"
+            "CREATE TABLE table_test_4_3 (word String) Engine=URL('https://host', CSV)"
        )
        == ""
    )
@@ -135,7 +135,7 @@ def test_config_without_allowed_hosts_section(start_cluster):
     )
     assert (
         node4.query(
-            "CREATE TABLE table_test_4_5 (word String) Engine=URL('ftp://something.com', S3)"
+            "CREATE TABLE table_test_4_5 (word String) Engine=URL('ftp://something.com', CSV)"
         )
         == ""
     )
@@ -149,13 +149,13 @@ def test_config_without_allowed_hosts(start_cluster):
         "CREATE TABLE table_test_5_2 (word String) Engine=S3('https://host:80/bucket/key', CSV)"
     )
     assert "not allowed" in node5.query_and_get_error(
-        "CREATE TABLE table_test_5_3 (word String) Engine=URL('https://host', HDFS)"
+        "CREATE TABLE table_test_5_3 (word String) Engine=URL('https://host', CSV)"
     )
     assert "not allowed" in node5.query_and_get_error(
         "CREATE TABLE table_test_5_4 (word String) Engine=URL('https://yandex.ru', CSV)"
     )
     assert "not allowed" in node5.query_and_get_error(
-        "CREATE TABLE table_test_5_5 (word String) Engine=URL('ftp://something.com', S3)"
+        "CREATE TABLE table_test_5_5 (word String) Engine=URL('ftp://something.com', CSV)"
     )
 
 
@@ -0,0 +1,6 @@
+-- Tags: no-fasttest, use-hdfs, no-backward-compatibility-check:22.5
+
+create table test_02311 (x UInt32) engine=File(UnknownFormat); -- {serverError UNKNOWN_FORMAT}
+create table test_02311 (x UInt32) engine=URL('http://some/url', UnknownFormat); -- {serverError UNKNOWN_FORMAT}
+create table test_02311 (x UInt32) engine=S3('http://host:2020/test/data', UnknownFormat); -- {serverError UNKNOWN_FORMAT}
+create table test_02311 (x UInt32) engine=HDFS('http://hdfs:9000/data', UnknownFormat); -- {serverError UNKNOWN_FORMAT}
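For contrast (not part of the diff): checkFormatName only rejects names that are absent from the FormatFactory dictionary, so creating the same kind of table with a registered format is still expected to succeed. A hypothetical example, with an illustrative table name:

create table test_known_format (x UInt32) engine=File(CSV); -- expected to succeed as before
drop table test_known_format;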