Merge pull request #13153 from ClickHouse/merge-tree-settings-sanity-check

Sanity checks for MergeTreeSettings
alexey-milovidov 2020-08-04 01:27:19 +03:00 committed by GitHub
commit c43a27782e
12 changed files with 91 additions and 14 deletions

View File

@@ -644,6 +644,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->setFormatSchemaPath(format_schema_path.path());
format_schema_path.createDirectories();
/// Check sanity of MergeTreeSettings on server startup
global_context->getMergeTreeSettings().sanityCheck(settings);
/// Limit on total memory usage
size_t max_server_memory_usage = config().getUInt64("max_server_memory_usage", 0);
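For orientation, the values this startup check compares can be inspected on a running server. A minimal sketch using the standard system tables (actual values depend on config.xml and the default user profile):

-- Per-table / <merge_tree> defaults that the new check validates:
SELECT name, value FROM system.merge_tree_settings
WHERE name IN ('number_of_free_entries_in_pool_to_execute_mutation',
               'number_of_free_entries_in_pool_to_lower_max_size_of_merge');
-- The pool size from the default profile they are compared against:
SELECT name, value FROM system.settings WHERE name = 'background_pool_size';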

View File

@@ -146,6 +146,10 @@ MergeTreeData::MergeTreeData(
if (relative_data_path.empty())
throw Exception("MergeTree storages require data path", ErrorCodes::INCORRECT_FILE_NAME);
/// Check sanity of MergeTreeSettings. Only when table is created.
if (!attach)
settings->sanityCheck(global_context.getSettingsRef());
MergeTreeDataFormatVersion min_format_version(0);
if (!date_column_name.empty())
{
@@ -1608,6 +1612,7 @@ void MergeTreeData::changeSettings(
const auto & new_changes = new_settings->as<const ASTSetQuery &>().changes;
for (const auto & change : new_changes)
{
if (change.name == "storage_policy")
{
StoragePolicyPtr new_storage_policy = global_context.getStoragePolicy(change.value.safeGet<String>());
@@ -1642,9 +1647,13 @@ void MergeTreeData::changeSettings(
has_storage_policy_changed = true;
}
}
}
MergeTreeSettings copy = *getSettings();
copy.applyChanges(new_changes);
copy.sanityCheck(global_context.getSettingsRef());
storage_settings.set(std::make_unique<const MergeTreeSettings>(copy));
StorageInMemoryMetadata new_metadata = getInMemoryMetadata();
new_metadata.setSettingsChanges(new_settings);
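A hedged sketch of the two paths above (hypothetical table name, assuming the default background_pool_size of 16): ALTER ... MODIFY SETTING now goes through the same sanity check, while ATTACH and table loading on startup deliberately skip it, so tables created before this change still load after an upgrade.

-- Hypothetical sketch: ALTER re-validates the merged settings.
CREATE TABLE sketch_local (x UInt8) ENGINE = MergeTree ORDER BY x;
ALTER TABLE sketch_local MODIFY SETTING number_of_free_entries_in_pool_to_execute_mutation = 100; -- { serverError 36 }
-- ATTACH does not re-run the check, so pre-existing tables keep working.
DROP TABLE sketch_local;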

View File

@@ -75,4 +75,31 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def)
#undef ADD_IF_ABSENT
}
void MergeTreeSettings::sanityCheck(const Settings & query_settings) const
{
if (number_of_free_entries_in_pool_to_execute_mutation >= query_settings.background_pool_size)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of 'number_of_free_entries_in_pool_to_execute_mutation' setting"
" ({}) (default values are defined in <merge_tree> section of config.xml"
" or the value can be specified per table in SETTINGS section of CREATE TABLE query)"
" is greater or equals to the value of 'background_pool_size'"
" ({}) (the value is defined in users.xml for default profile)."
" This indicates incorrect configuration because mutations cannot work with these settings.",
number_of_free_entries_in_pool_to_execute_mutation,
query_settings.background_pool_size);
}
if (number_of_free_entries_in_pool_to_lower_max_size_of_merge >= query_settings.background_pool_size)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of 'number_of_free_entries_in_pool_to_lower_max_size_of_merge' setting"
" ({}) (default values are defined in <merge_tree> section of config.xml"
" or the value can be specified per table in SETTINGS section of CREATE TABLE query)"
" is greater or equals to the value of 'background_pool_size'"
" ({}) (the value is defined in users.xml for default profile)."
" This indicates incorrect configuration because the maximum size of merge will be always lowered.",
number_of_free_entries_in_pool_to_lower_max_size_of_merge,
query_settings.background_pool_size);
}
}
}
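For illustration only (hypothetical table name, assuming the default background_pool_size of 16), the check surfaces to the user like this: values at or above the pool size are rejected at CREATE time with BAD_ARGUMENTS, smaller values pass.

CREATE TABLE sanity_sketch (x UInt8) ENGINE = MergeTree ORDER BY x
SETTINGS number_of_free_entries_in_pool_to_lower_max_size_of_merge = 100; -- { serverError 36 }
CREATE TABLE sanity_sketch (x UInt8) ENGINE = MergeTree ORDER BY x
SETTINGS number_of_free_entries_in_pool_to_lower_max_size_of_merge = 8;
DROP TABLE sanity_sketch;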

View File

@@ -13,6 +13,7 @@ namespace Poco::Util
namespace DB
{
class ASTStorage;
struct Settings;
#define LIST_OF_MERGE_TREE_SETTINGS(M) \
@@ -123,6 +124,9 @@ struct MergeTreeSettings : public BaseSettings<MergeTreeSettingsTraits>
return name == "min_bytes_for_wide_part" || name == "min_rows_for_wide_part"
|| name == "min_bytes_for_compact_part" || name == "min_rows_for_compact_part";
}
/// Check that the values are sane, taking query-level settings into account as well.
void sanityCheck(const Settings & query_settings) const;
};
using MergeTreeSettingsPtr = std::shared_ptr<const MergeTreeSettings>;

View File

@@ -1,7 +1,6 @@
<yandex>
<profiles>
<default>
<background_pool_size>0</background_pool_size>
</default>
</profiles>
<merge_tree>
<max_bytes_to_merge_at_min_space_in_pool>1</max_bytes_to_merge_at_min_space_in_pool>
<max_bytes_to_merge_at_max_space_in_pool>2</max_bytes_to_merge_at_max_space_in_pool>
</merge_tree>
</yandex>

View File

@@ -12,7 +12,7 @@ instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_c
instance_test_inserts_batching = cluster.add_instance(
'instance_test_inserts_batching',
main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml'])
remote = cluster.add_instance('remote', user_configs=['configs/forbid_background_merges.xml'])
remote = cluster.add_instance('remote', main_configs=['configs/forbid_background_merges.xml'])
instance_test_inserts_local_cluster = cluster.add_instance(
'instance_test_inserts_local_cluster',

View File

@@ -1,7 +1,6 @@
<yandex>
<profiles>
<default>
<background_pool_size>0</background_pool_size>
</default>
</profiles>
<merge_tree>
<max_bytes_to_merge_at_min_space_in_pool>1</max_bytes_to_merge_at_min_space_in_pool>
<max_bytes_to_merge_at_max_space_in_pool>2</max_bytes_to_merge_at_max_space_in_pool>
</merge_tree>
</yandex>

View File

@@ -12,7 +12,7 @@ instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_c
instance_test_inserts_batching = cluster.add_instance(
'instance_test_inserts_batching',
main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml'])
remote = cluster.add_instance('remote', user_configs=['configs/forbid_background_merges.xml'])
remote = cluster.add_instance('remote', main_configs=['configs/forbid_background_merges.xml'])
instance_test_inserts_local_cluster = cluster.add_instance(
'instance_test_inserts_local_cluster',

View File

@@ -2,7 +2,6 @@
<merge_tree>
<max_bytes_to_merge_at_min_space_in_pool>1</max_bytes_to_merge_at_min_space_in_pool>
<max_bytes_to_merge_at_max_space_in_pool>2</max_bytes_to_merge_at_max_space_in_pool>
<number_of_free_entries_in_pool_to_lower_max_size_of_merge>100</number_of_free_entries_in_pool_to_lower_max_size_of_merge>
<max_replicated_merges_in_queue>0</max_replicated_merges_in_queue>
</merge_tree>
</yandex>

View File

@@ -1,7 +1,7 @@
DROP TABLE IF EXISTS t;
SET mutations_sync = 1;
CREATE TABLE t (x UInt8, s String) ENGINE = MergeTree ORDER BY x SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 1000;
CREATE TABLE t (x UInt8, s String) ENGINE = MergeTree ORDER BY x SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 15;
INSERT INTO t VALUES (1, 'hello');
SELECT * FROM t;

View File

@@ -0,0 +1,37 @@
DROP TABLE IF EXISTS mytable_local;
CREATE TABLE mytable_local
(
created DateTime,
eventday Date,
user_id UInt32
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(eventday)
ORDER BY (eventday, user_id)
SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 100; -- { serverError 36 }
CREATE TABLE mytable_local
(
created DateTime,
eventday Date,
user_id UInt32
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(eventday)
ORDER BY (eventday, user_id)
SETTINGS number_of_free_entries_in_pool_to_lower_max_size_of_merge = 100; -- { serverError 36 }
CREATE TABLE mytable_local
(
created DateTime,
eventday Date,
user_id UInt32
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(eventday)
ORDER BY (eventday, user_id);
ALTER TABLE mytable_local MODIFY SETTING number_of_free_entries_in_pool_to_execute_mutation = 100; -- { serverError 36 }
DROP TABLE mytable_local;