#include "MemoryTracker.h"

#include <IO/WriteHelpers.h>
#include "Common/TraceCollector.h"
#include <Common/CurrentThread.h>
#include <Common/Exception.h>
#include <Common/formatReadable.h>
#include <common/logger_useful.h>

#include <atomic>
#include <cmath>
#include <random>
#include <cstdlib>
#include <sstream>   /// for std::stringstream used in error messages below


namespace DB
{
namespace ErrorCodes
{
    extern const int MEMORY_LIMIT_EXCEEDED;
}
}

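
/// updatePeak() below logs memory usage each time the tracked amount crosses another
/// multiple of this step: 1ULL << 30 bytes = 1 GiB, so at most one message per GiB of growth.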
static constexpr size_t log_peak_memory_usage_every = 1ULL << 30;

MemoryTracker total_memory_tracker(nullptr, VariableContext::Global);


MemoryTracker::MemoryTracker(VariableContext level_) : parent(&total_memory_tracker), level(level_) {}

MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_) : parent(parent_), level(level_) {}


MemoryTracker::~MemoryTracker()
{
    if ((level == VariableContext::Process || level == VariableContext::User) && peak)
    {
        try
        {
            logPeakMemoryUsage();
        }
        catch (...)
        {
            /// Exception in Logger, intentionally swallow.
        }
    }
}


void MemoryTracker::logPeakMemoryUsage() const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak));
}

void MemoryTracker::logMemoryUsage(Int64 current) const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(current));
}


void MemoryTracker::alloc(Int64 size)
{
    if (blocker.isCancelled())
        return;

    /** Using memory_order_relaxed means that if allocations are done simultaneously,
      * the exception about exceeding the memory limit may be thrown only on a subsequent allocation.
      * So, we allow over-allocations.
      */
    Int64 will_be = size + amount.fetch_add(size, std::memory_order_relaxed);
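
    /// Worked example of the relaxed race (hypothetical numbers): with a limit of 10
    /// and amount at 9, two threads each adding 1 can both complete fetch_add before
    /// either checks the limit, so the tracker briefly holds 11; the overshoot is
    /// reported by whichever allocation performs the limit check next, while the
    /// first may succeed at exactly the limit.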

    if (metric != CurrentMetrics::end())
        CurrentMetrics::add(metric, size);

    Int64 current_hard_limit = hard_limit.load(std::memory_order_relaxed);
    Int64 current_profiler_limit = profiler_limit.load(std::memory_order_relaxed);

    /// Cap the limit to the total_memory_tracker, since it may include some drift.
    ///
    /// And since total_memory_tracker is reset to the process resident
    /// memory periodically (in AsynchronousMetrics::update()), any limit can be
    /// capped to it, to avoid possible drift.
    if (unlikely(current_hard_limit && will_be > current_hard_limit))
    {
        set(total_memory_tracker.amount);
        will_be = size + amount.fetch_add(size, std::memory_order_relaxed);
    }

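    /// The fault below is randomly injected (assumption: fault_probability is non-zero
    /// only in tests) to exercise the exception safety of allocation call sites.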
    std::bernoulli_distribution fault(fault_probability);
    if (unlikely(fault_probability && fault(thread_local_rng)))
    {
        free(size);

        /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
        auto untrack_lock = blocker.cancel(); // NOLINT

        std::stringstream message;
        message << "Memory tracker";
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
        message << ": fault injected. Would use " << formatReadableSizeWithBinarySuffix(will_be)
            << " (attempt to allocate chunk of " << size << " bytes)"
            << ", maximum: " << formatReadableSizeWithBinarySuffix(current_hard_limit);

        throw DB::Exception(message.str(), DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED);
    }

    if (unlikely(current_profiler_limit && will_be > current_profiler_limit))
    {
        auto no_track = blocker.cancel();
        DB::TraceCollector::collect(DB::TraceType::Memory, StackTrace(), size);
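        /// Round will_be up to the next multiple of profiler_step so the next trace
        /// fires at the following step boundary. Worked example (hypothetical numbers):
        /// with profiler_step = 4 and will_be = 9, (9 + 4 - 1) / 4 * 4 = 12, i.e.
        /// the next report happens once usage crosses 12.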
        setOrRaiseProfilerLimit((will_be + profiler_step - 1) / profiler_step * profiler_step);
    }

    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability && sample(thread_local_rng)))
    {
        auto no_track = blocker.cancel();
        DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), size);
    }

    if (unlikely(current_hard_limit && will_be > current_hard_limit))
    {
        free(size);

        /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
        auto no_track = blocker.cancel(); // NOLINT

        std::stringstream message;
        message << "Memory limit";
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
        message << " exceeded: would use " << formatReadableSizeWithBinarySuffix(will_be)
            << " (attempt to allocate chunk of " << size << " bytes)"
            << ", maximum: " << formatReadableSizeWithBinarySuffix(current_hard_limit);

        throw DB::Exception(message.str(), DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED);
    }

    updatePeak(will_be);

    if (auto * loaded_next = parent.load(std::memory_order_relaxed))
        loaded_next->alloc(size);
}
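
/// Note on the hierarchy (an observation about the code above, not from the original
/// comments): each tracker forwards alloc()/free() to its parent, so a thread-level
/// tracker typically chains up through query/user levels to total_memory_tracker,
/// and every level enforces its own limit on the same allocation.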

void MemoryTracker::updatePeak(Int64 will_be)
{
    auto peak_old = peak.load(std::memory_order_relaxed);
    if (will_be > peak_old)        /// Races don't matter. Could rewrite with CAS, but it's not worth it.
    {
        peak.store(will_be, std::memory_order_relaxed);

        if ((level == VariableContext::Process || level == VariableContext::Global)
            && will_be / log_peak_memory_usage_every > peak_old / log_peak_memory_usage_every)
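            /// The integer division compares 1 GiB "buckets": e.g. peak_old = 2.5 GiB
            /// (bucket 2) vs. will_be = 3.1 GiB (bucket 3) crosses a boundary, so we log once.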
            logMemoryUsage(will_be);
    }
}


void MemoryTracker::free(Int64 size)
{
    if (blocker.isCancelled())
        return;

    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability && sample(thread_local_rng)))
    {
        auto no_track = blocker.cancel();
        DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), -size);
    }

    if (level == VariableContext::Thread)
    {
        /// Could become negative if memory allocated in this thread is freed in another one
        amount.fetch_sub(size, std::memory_order_relaxed);
    }
    else
    {
        Int64 new_amount = amount.fetch_sub(size, std::memory_order_relaxed) - size;

        /** Sometimes, a query could free some data that was allocated outside of the query context.
          * Example: cache eviction.
          * To avoid negative memory usage, we "saturate" the amount.
          * Memory usage will be calculated with some error.
          * NOTE: The code is not atomic. Not worth fixing.
          */
        if (unlikely(new_amount < 0))
        {
            amount.fetch_sub(new_amount);
            size += new_amount;
        }
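
        /// Worked example (hypothetical numbers): amount = 100, then free(150):
        /// new_amount = -50, so fetch_sub(-50) adds 50 back (amount saturates at 0)
        /// and size shrinks to 100, telling the parent only about memory this
        /// tracker actually accounted for.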
    }

    if (auto * loaded_next = parent.load(std::memory_order_relaxed))
        loaded_next->free(size);

    if (metric != CurrentMetrics::end())
        CurrentMetrics::sub(metric, size);
}


void MemoryTracker::resetCounters()
{
    amount.store(0, std::memory_order_relaxed);
    peak.store(0, std::memory_order_relaxed);
    hard_limit.store(0, std::memory_order_relaxed);
    profiler_limit.store(0, std::memory_order_relaxed);
}


void MemoryTracker::reset()
{
    if (metric != CurrentMetrics::end())
        CurrentMetrics::sub(metric, amount.load(std::memory_order_relaxed));

    resetCounters();
}


void MemoryTracker::set(Int64 to)
{
    amount.store(to, std::memory_order_relaxed);
    updatePeak(to);
}


void MemoryTracker::setOrRaiseHardLimit(Int64 value)
{
    /// This is just an atomic "set to maximum" operation.
    Int64 old_value = hard_limit.load(std::memory_order_relaxed);
    while (old_value < value && !hard_limit.compare_exchange_weak(old_value, value))
        ;
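
    /// Note: compare_exchange_weak reloads old_value on failure, so this loop exits
    /// either when our store succeeds or when another thread has already set an
    /// equal or higher limit.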
}


void MemoryTracker::setOrRaiseProfilerLimit(Int64 value)
{
    Int64 old_value = profiler_limit.load(std::memory_order_relaxed);
    while (old_value < value && !profiler_limit.compare_exchange_weak(old_value, value))
        ;
}


namespace CurrentMemoryTracker
{

using DB::current_thread;

void alloc(Int64 size)
{
    if (auto * memory_tracker = DB::CurrentThread::getMemoryTracker())
    {
        current_thread->untracked_memory += size;
        if (current_thread->untracked_memory > current_thread->untracked_memory_limit)
        {
            /// Zero the untracked counter before tracking: if the tracker throws an
            /// out-of-limit exception, we can still allocate up to untracked_memory_limit
            /// bytes more, e.g. to enlarge the Exception message in rethrow logic.
            Int64 tmp = current_thread->untracked_memory;
            current_thread->untracked_memory = 0;
            memory_tracker->alloc(tmp);
        }
    }
}
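
/// Design note (an observation about the code above): small allocations accumulate
/// in the thread-local untracked_memory counter and are flushed to the tracker in
/// batches once they exceed untracked_memory_limit, so tiny allocations don't touch
/// the atomic counters on every call.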

void realloc(Int64 old_size, Int64 new_size)
{
    Int64 addition = new_size - old_size;
    addition > 0 ? alloc(addition) : free(-addition);
}

void free(Int64 size)
{
    if (auto * memory_tracker = DB::CurrentThread::getMemoryTracker())
    {
        current_thread->untracked_memory -= size;
        if (current_thread->untracked_memory < -current_thread->untracked_memory_limit)
        {
            memory_tracker->free(-current_thread->untracked_memory);
            current_thread->untracked_memory = 0;
        }
    }
}

}


DB::SimpleActionLock getCurrentMemoryTrackerActionLock()
{
    auto * memory_tracker = DB::CurrentThread::getMemoryTracker();
    if (!memory_tracker)
        return {};
    return memory_tracker->blocker.cancel();
}
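
/// Minimal usage sketch (assuming SimpleActionLock releases on destruction, as its
/// use with blocker.cancel() above suggests): while the returned lock is alive,
/// blocker.isCancelled() is true, so alloc()/free() on this tracker become no-ops.
///
///     {
///         auto lock = getCurrentMemoryTrackerActionLock();
///         doUntrackedWork();   /// hypothetical call whose allocations won't be tracked
///     }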