Merge pull request #35921 from ClickHouse/enable-memory-overcommit

Enable memory overcommit
Dmitry Novik 2022-05-10 02:09:18 +02:00 committed by GitHub
commit adcb792f0d
7 changed files with 28 additions and 29 deletions


@@ -1088,11 +1088,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
     total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
     auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
-    if (config->has("global_memory_usage_overcommit_max_wait_microseconds"))
-    {
-        UInt64 max_overcommit_wait_time = config->getUInt64("global_memory_usage_overcommit_max_wait_microseconds", 0);
-        global_overcommit_tracker->setMaxWaitTime(max_overcommit_wait_time);
-    }
+    UInt64 max_overcommit_wait_time = config->getUInt64("global_memory_usage_overcommit_max_wait_microseconds", 200);
+    global_overcommit_tracker->setMaxWaitTime(max_overcommit_wait_time);
     total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
 // FIXME logging-related things need synchronization -- see the 'Logger * log' saved
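The change above turns an opt-in setting into a default: before, the overcommit wait time was configured only when global_memory_usage_overcommit_max_wait_microseconds appeared in the server config; after, it is always configured, falling back to 200 microseconds. A minimal sketch of the two lookup paths; the Config type below is a toy stand-in for the real Poco-based configuration object, not ClickHouse code:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using UInt64 = std::uint64_t;

// Toy stand-in for the Poco-based server configuration: getUInt64(key, fallback)
// returns the stored value if the key exists, otherwise the fallback.
struct Config
{
    std::map<std::string, UInt64> values;

    bool has(const std::string & key) const { return values.count(key) > 0; }

    UInt64 getUInt64(const std::string & key, UInt64 fallback) const
    {
        auto it = values.find(key);
        return it != values.end() ? it->second : fallback;
    }
};

int main()
{
    Config config; // key absent from the server config

    // Old path: nothing happens when the key is absent, so no wait time is ever set.
    if (config.has("global_memory_usage_overcommit_max_wait_microseconds"))
        std::cout << "old path: wait time configured\n";

    // New path: the wait time is always set, defaulting to 200 microseconds.
    UInt64 wait_us = config.getUInt64("global_memory_usage_overcommit_max_wait_microseconds", 200);
    std::cout << "new path: wait time = " << wait_us << " us\n";
}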


@@ -6,9 +6,6 @@
     <profiles>
         <!-- Default settings. -->
         <default>
-            <!-- Maximum memory usage for processing single query, in bytes. -->
-            <max_memory_usage>10000000000</max_memory_usage>
-
             <!-- How to choose between replicas during distributed query processing.
                  random - choose random replica from set of replicas with minimum number of errors
                  nearest_hostname - from set of replicas with minimum number of errors, choose replica


@@ -22,6 +22,10 @@ namespace DB
 {
 class IColumn;
+static constexpr UInt64 operator""_Gb(unsigned long long value)
+{
+    return value * 1024 * 1024 * 1024;
+}
 /** List of settings: type, name, default value, description, flags
  *
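The new _Gb user-defined literal converts a count of gibibytes to bytes at compile time, so the defaults below can be written as 10_Gb. A self-contained check of the definition, mirroring the hunk above (the static_assert values are plain arithmetic, not taken from the source):

#include <cstdint>

using UInt64 = std::uint64_t;

// Same definition as in the hunk above: the literal multiplies by 1024^3,
// so N_Gb is N GiB expressed in bytes.
static constexpr UInt64 operator""_Gb(unsigned long long value)
{
    return value * 1024 * 1024 * 1024;
}

static_assert(1_Gb == 1073741824ULL, "1_Gb is one GiB in bytes");
static_assert(10_Gb == 10737418240ULL, "10_Gb matches the new setting defaults");

int main()
{
    return 0;
}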
@@ -356,9 +360,9 @@ class IColumn;
     M(OverflowMode, distinct_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
     \
     M(UInt64, max_memory_usage, 0, "Maximum memory usage for processing of single query. Zero means unlimited.", 0) \
-    M(UInt64, max_guaranteed_memory_usage, 0, "Maximum guaranteed memory usage for processing of single query. It represents soft limit. Zero means unlimited.", 0) \
+    M(UInt64, max_guaranteed_memory_usage, 10_Gb, "Maximum guaranteed memory usage for processing of single query. It represents soft limit. Zero means unlimited.", 0) \
     M(UInt64, max_memory_usage_for_user, 0, "Maximum memory usage for processing all concurrently running queries for the user. Zero means unlimited.", 0) \
-    M(UInt64, max_guaranteed_memory_usage_for_user, 0, "Maximum guaranteed memory usage for processing all concurrently running queries for the user. It represents soft limit. Zero means unlimited.", 0) \
+    M(UInt64, max_guaranteed_memory_usage_for_user, 10_Gb, "Maximum guaranteed memory usage for processing all concurrently running queries for the user. It represents soft limit. Zero means unlimited.", 0) \
     M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \
     M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \
     M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
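Giving max_guaranteed_memory_usage and max_guaranteed_memory_usage_for_user non-zero defaults is what enables overcommit out of the box: each setting is a soft limit that a query may exceed while memory is still available. The sketch below illustrates one plausible selection rule when memory runs out, stopping the query that exceeds its guaranteed amount by the largest factor; it is a simplified assumption for illustration, not the actual OvercommitTracker implementation:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

using UInt64 = std::uint64_t;

// Per-query memory bookkeeping for the sketch. "guaranteed" plays the role of
// max_guaranteed_memory_usage; exceeding it is allowed while memory lasts.
struct QueryMemory
{
    std::string id;
    UInt64 used;        // bytes currently tracked for the query
    UInt64 guaranteed;  // soft limit for the query
};

// Pick the query whose usage exceeds its soft limit by the largest factor.
const QueryMemory * pickOvercommitVictim(const std::vector<QueryMemory> & queries)
{
    const QueryMemory * victim = nullptr;
    double worst_ratio = 0.0;
    for (const auto & q : queries)
    {
        // Ratio of used memory to the soft limit; > 1.0 means overcommitted.
        double ratio = static_cast<double>(q.used) / static_cast<double>(q.guaranteed);
        if (ratio > worst_ratio)
        {
            worst_ratio = ratio;
            victim = &q;
        }
    }
    return victim;
}

int main()
{
    std::vector<QueryMemory> queries = {
        {"q1", 12ULL << 30, 10ULL << 30},  // 12 GiB used vs 10 GiB guaranteed
        {"q2", 30ULL << 30, 10ULL << 30},  // 30 GiB used vs 10 GiB guaranteed
    };
    if (const QueryMemory * victim = pickOvercommitVictim(queries))
        std::cout << "stop query " << victim->id << '\n';  // prints q2
}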


@@ -100,29 +100,29 @@ def test_select_clamps_settings():
     # Check that shards clamp passed settings.
     query = "SELECT hostName() as host, name, value FROM shard_settings WHERE name = 'max_memory_usage' OR name = 'readonly' ORDER BY host, name, value"
     assert (
-        distributed.query(query) == "node1\tmax_memory_usage\t99999999\n"
+        distributed.query(query) == "node1\tmax_memory_usage\t50000000\n"
         "node1\treadonly\t0\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
     assert (
         distributed.query(query, user="normal") == "node1\tmax_memory_usage\t80000000\n"
         "node1\treadonly\t0\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
     assert (
         distributed.query(query, user="wasteful")
         == "node1\tmax_memory_usage\t99999999\n"
         "node1\treadonly\t0\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
     assert (
         distributed.query(query, user="readonly")
-        == "node1\tmax_memory_usage\t99999999\n"
+        == "node1\tmax_memory_usage\t50000000\n"
         "node1\treadonly\t1\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
@@ -130,14 +130,14 @@ def test_select_clamps_settings():
         distributed.query(query, settings={"max_memory_usage": 1})
         == "node1\tmax_memory_usage\t11111111\n"
         "node1\treadonly\t0\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
     assert (
         distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2})
         == "node1\tmax_memory_usage\t40000000\n"
         "node1\treadonly\t2\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
     assert (
@@ -146,7 +146,7 @@ def test_select_clamps_settings():
         )
         == "node1\tmax_memory_usage\t99999999\n"
         "node1\treadonly\t2\n"
-        "node2\tmax_memory_usage\t10000000000\n"
+        "node2\tmax_memory_usage\t0\n"
         "node2\treadonly\t1\n"
     )
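These assertions exercise settings clamping: whatever max_memory_usage the query passes, node1 reports a value forced into its constraint range, which the expected outputs suggest is roughly [11111111, 99999999], while node2 now reports the new unlimited default of 0. A toy illustration of the clamping rule, with the bounds inferred from the test data rather than read from the cluster config, and not the ClickHouse SettingsConstraints implementation:

#include <algorithm>
#include <cstdint>
#include <iostream>

using UInt64 = std::uint64_t;

// Clamp a requested setting value into an allowed [min_value, max_value] range.
UInt64 clampSetting(UInt64 requested, UInt64 min_value, UInt64 max_value)
{
    return std::clamp(requested, min_value, max_value);
}

int main()
{
    // Bounds inferred from the assertions above (hypothetical constraint config).
    std::cout << clampSetting(1, 11111111, 99999999) << '\n';           // -> 11111111
    std::cout << clampSetting(40000000, 11111111, 99999999) << '\n';    // -> 40000000
    std::cout << clampSetting(3000000000, 11111111, 99999999) << '\n';  // -> 99999999
}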


@@ -115,7 +115,7 @@ def test_smoke():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 80000000", user="robin")
     instance.query("SET max_memory_usage = 120000000", user="robin")
@@ -158,7 +158,7 @@ def test_smoke():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 80000000", user="robin")
     instance.query("SET max_memory_usage = 120000000", user="robin")
@@ -228,7 +228,7 @@ def test_settings_from_granted_role():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 120000000", user="robin")
@@ -240,7 +240,7 @@ def test_settings_from_granted_role():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 120000000", user="robin")
     assert system_settings_profile_elements(role_name="worker") == []
@@ -278,7 +278,7 @@ def test_settings_from_granted_role():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 120000000", user="robin")
     assert system_settings_profile("xyz") == [
@@ -360,7 +360,7 @@ def test_alter_and_drop():
             "SELECT value FROM system.settings WHERE name = 'max_memory_usage'",
             user="robin",
         )
-        == "10000000000\n"
+        == "0\n"
     )
     instance.query("SET max_memory_usage = 80000000", user="robin")
     instance.query("SET max_memory_usage = 120000000", user="robin")
@@ -374,17 +374,17 @@ def test_show_profiles():
     assert instance.query("SHOW CREATE PROFILE xyz") == "CREATE SETTINGS PROFILE xyz\n"
     assert (
         instance.query("SHOW CREATE SETTINGS PROFILE default")
-        == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n"
+        == "CREATE SETTINGS PROFILE default SETTINGS load_balancing = \\'random\\'\n"
     )
     assert (
         instance.query("SHOW CREATE PROFILES")
-        == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n"
+        == "CREATE SETTINGS PROFILE default SETTINGS load_balancing = \\'random\\'\n"
         "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n"
         "CREATE SETTINGS PROFILE xyz\n"
     )
     expected_access = (
-        "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n"
+        "CREATE SETTINGS PROFILE default SETTINGS load_balancing = \\'random\\'\n"
         "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n"
         "CREATE SETTINGS PROFILE xyz\n"
     )


@@ -3,6 +3,5 @@ connect_timeout Seconds 10
 connect_timeout_with_failover_ms Milliseconds 2000
 connect_timeout_with_failover_secure_ms Milliseconds 3000
 external_storage_connect_timeout_sec UInt64 10
-max_memory_usage UInt64 10000000000
 max_untracked_memory UInt64 1048576
 memory_profiler_step UInt64 1048576


@@ -1,3 +1,5 @@
+SET max_memory_usage = 10000000000;
+
 -- Unneeded column is removed from subquery.
 SELECT count() FROM (SELECT number, groupArray(repeat(toString(number), 1000000)) FROM numbers(1000000) GROUP BY number);
 -- Unneeded column cannot be removed from subquery and the query runs out of memory