/// ClickHouse — src/Interpreters/ThreadStatusExt.cpp
#include <Common/ThreadStatus.h>
#include <DataStreams/PushingToViewsBlockOutputStream.h>
#include <Interpreters/Context.h>
#include <Interpreters/OpenTelemetrySpanLog.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/QueryThreadLog.h>
#include <Interpreters/QueryViewsLog.h>
#include <Parsers/formatAST.h>
#include <Common/CurrentThread.h>
#include <Common/Exception.h>
#include <Common/ProfileEvents.h>
#include <Common/QueryProfiler.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/ThreadProfileEvents.h>
#include <Common/TraceCollector.h>
#include <common/errnoToString.h>
#if defined(OS_LINUX)
#    include <Common/hasLinuxCapability.h>

#    include <sys/time.h>
#    include <sys/resource.h>
#endif
2021-06-21 15:58:15 +00:00
namespace ProfileEvents
{
extern const Event SelectedRows;
extern const Event SelectedBytes;
extern const Event InsertedRows;
extern const Event InsertedBytes;
}
/// Implement some methods of ThreadStatus and CurrentThread here to avoid extra linking dependencies in clickhouse_common_io
/// TODO It doesn't make sense.
namespace DB
{
namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int CANNOT_SET_THREAD_PRIORITY;
}
void ThreadStatus::applyQuerySettings()
{
auto query_context_ptr = query_context.lock();
assert(query_context_ptr);
const Settings & settings = query_context_ptr->getSettingsRef();
query_id = query_context_ptr->getCurrentQueryId();
initQueryProfiler();
untracked_memory_limit = settings.max_untracked_memory;
if (settings.memory_profiler_step && settings.memory_profiler_step < UInt64(untracked_memory_limit))
untracked_memory_limit = settings.memory_profiler_step;
#if defined(OS_LINUX)
/// Set "nice" value if required.
Int32 new_os_thread_priority = settings.os_thread_priority;
if (new_os_thread_priority && hasLinuxCapability(CAP_SYS_NICE))
{
LOG_TRACE(log, "Setting nice to {}", new_os_thread_priority);
if (0 != setpriority(PRIO_PROCESS, thread_id, new_os_thread_priority))
throwFromErrno("Cannot 'setpriority'", ErrorCodes::CANNOT_SET_THREAD_PRIORITY);
os_thread_priority = new_os_thread_priority;
}
#endif
}
/// Attach an already-running thread to a query context: propagate the context
/// into the thread group, open an OpenTelemetry span for this thread and apply
/// the per-query settings.
void ThreadStatus::attachQueryContext(ContextPtr query_context_)
{
    query_context = query_context_;

    if (global_context.expired())
        global_context = query_context_->getGlobalContext();

    if (thread_group)
    {
        std::lock_guard lock(thread_group->mutex);

        thread_group->query_context = query_context;
        if (thread_group->global_context.expired())
            thread_group->global_context = global_context;
    }

    // Generate new span for thread manually here, because we can't depend
    // on OpenTelemetrySpanHolder due to link order issues.
    // FIXME why and how is this different from setupState()?
    thread_trace_context = query_context_->query_trace_context;
    if (thread_trace_context.trace_id != UUID())
    {
        thread_trace_context.span_id = thread_local_rng();
    }

    applyQuerySettings();
}
void CurrentThread::defaultThreadDeleter()
{
    /// Detach the thread from its query on thread exit; no-op for threads
    /// without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->detachQuery(/* exit_if_already_detached = */ true, /* thread_exits = */ true);
}
void ThreadStatus::setupState(const ThreadGroupStatusPtr & thread_group_)
{
assertState({ThreadState::DetachedFromQuery}, __PRETTY_FUNCTION__);
/// Attach or init current thread to thread group and copy useful information from it
thread_group = thread_group_;
performance_counters.setParent(&thread_group->performance_counters);
memory_tracker.setParent(&thread_group->memory_tracker);
{
std::lock_guard lock(thread_group->mutex);
/// NOTE: thread may be attached multiple times if it is reused from a thread pool.
2020-02-02 02:27:15 +00:00
thread_group->thread_ids.emplace_back(thread_id);
logs_queue_ptr = thread_group->logs_queue_ptr;
2020-07-09 04:15:45 +00:00
fatal_error_callback = thread_group->fatal_error_callback;
query_context = thread_group->query_context;
if (global_context.expired())
global_context = thread_group->global_context;
}
if (auto query_context_ptr = query_context.lock())
{
applyQuerySettings();
2019-07-06 18:02:28 +00:00
2020-11-18 17:43:18 +00:00
// Generate new span for thread manually here, because we can't depend
// on OpenTelemetrySpanHolder due to link order issues.
thread_trace_context = query_context_ptr->query_trace_context;
2021-05-04 22:42:14 +00:00
if (thread_trace_context.trace_id != UUID())
2020-11-10 05:50:32 +00:00
{
2020-11-18 17:43:18 +00:00
thread_trace_context.span_id = thread_local_rng();
}
}
else
{
2020-11-18 17:43:18 +00:00
thread_trace_context.trace_id = 0;
}
initPerformanceCounters();
thread_state = ThreadState::AttachedToQuery;
}
void ThreadStatus::initializeQuery()
{
setupState(std::make_shared<ThreadGroupStatus>());
/// No need to lock on mutex here
thread_group->memory_tracker.setDescription("(for query)");
2020-02-02 02:27:15 +00:00
thread_group->master_thread_id = thread_id;
}
void ThreadStatus::attachQuery(const ThreadGroupStatusPtr & thread_group_, bool check_detached)
{
    /// Re-attaching an already attached thread is a no-op, unless the caller
    /// asked (via check_detached) to treat it as a logical error.
    const bool already_attached = (thread_state == ThreadState::AttachedToQuery);
    if (already_attached)
    {
        if (!check_detached)
            return;
        throw Exception("Can't attach query to the thread, it is already attached", ErrorCodes::LOGICAL_ERROR);
    }

    if (!thread_group_)
        throw Exception("Attempt to attach to nullptr thread group", ErrorCodes::LOGICAL_ERROR);

    setupState(thread_group_);
}
/// Nanoseconds elapsed since the Unix epoch for the given time point.
inline UInt64 time_in_nanoseconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    const auto since_epoch = timepoint.time_since_epoch();
    return std::chrono::duration_cast<std::chrono::nanoseconds>(since_epoch).count();
}
/// Microseconds elapsed since the Unix epoch for the given time point.
inline UInt64 time_in_microseconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    const auto since_epoch = timepoint.time_since_epoch();
    return std::chrono::duration_cast<std::chrono::microseconds>(since_epoch).count();
}
/// Whole seconds elapsed since the Unix epoch for the given time point.
inline UInt64 time_in_seconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    const auto since_epoch = timepoint.time_since_epoch();
    return std::chrono::duration_cast<std::chrono::seconds>(since_epoch).count();
}
void ThreadStatus::initPerformanceCounters()
{
performance_counters_finalized = false;
/// Clear stats from previous query if a new query is started
/// TODO: make separate query_thread_performance_counters and thread_performance_counters
performance_counters.resetCounters();
memory_tracker.resetCounters();
memory_tracker.setDescription("(for thread)");
// query_start_time_{microseconds, nanoseconds} are all constructed from the same time point
2020-10-27 11:04:03 +00:00
// to ensure that they are all equal up to the precision of a second.
const auto now = std::chrono::system_clock::now();
query_start_time_nanoseconds = time_in_nanoseconds(now);
query_start_time = time_in_seconds(now);
query_start_time_microseconds = time_in_microseconds(now);
++queries_started;
// query_start_time_nanoseconds cannot be used here since RUsageCounters expect CLOCK_MONOTONIC
*last_rusage = RUsageCounters::current();
if (auto query_context_ptr = query_context.lock())
{
const Settings & settings = query_context_ptr->getSettingsRef();
if (settings.metrics_perf_events_enabled)
{
try
{
2020-05-29 13:04:32 +00:00
current_thread_counters.initializeProfileEvents(
settings.metrics_perf_events_list);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
if (!taskstats)
{
try
{
taskstats = TasksStatsCounters::create(thread_id);
}
catch (...)
{
tryLogCurrentException(log);
}
}
if (taskstats)
taskstats->reset();
}
void ThreadStatus::finalizePerformanceCounters()
{
if (performance_counters_finalized)
return;
performance_counters_finalized = true;
updatePerformanceCounters();
2020-06-29 12:48:18 +00:00
// We want to close perf file descriptors if the perf events were enabled for
// one query. What this code does in practice is less clear -- e.g., if I run
// 'select 1 settings metrics_perf_events_enabled = 1', I still get
// query_context->getSettingsRef().metrics_perf_events_enabled == 0 *shrug*.
bool close_perf_descriptors = true;
if (auto query_context_ptr = query_context.lock())
close_perf_descriptors = !query_context_ptr->getSettingsRef().metrics_perf_events_enabled;
2020-02-19 16:35:01 +00:00
try
{
2020-05-29 13:04:32 +00:00
current_thread_counters.finalizeProfileEvents(performance_counters);
if (close_perf_descriptors)
2020-05-29 13:04:32 +00:00
current_thread_counters.closeEventDescriptors();
2020-02-19 16:35:01 +00:00
}
catch (...)
{
tryLogCurrentException(log);
}
try
{
auto global_context_ptr = global_context.lock();
auto query_context_ptr = query_context.lock();
if (global_context_ptr && query_context_ptr)
{
const auto & settings = query_context_ptr->getSettingsRef();
if (settings.log_queries && settings.log_query_threads)
{
const auto now = std::chrono::system_clock::now();
Int64 query_duration_ms = (time_in_microseconds(now) - query_start_time_microseconds) / 1000;
if (query_duration_ms >= settings.log_queries_min_query_duration_ms.totalMilliseconds())
{
if (auto thread_log = global_context_ptr->getQueryThreadLog())
logToQueryThreadLog(*thread_log, query_context_ptr->getCurrentDatabase(), now);
}
}
}
}
catch (...)
{
tryLogCurrentException(log);
}
}
2019-02-09 22:40:47 +00:00
void ThreadStatus::initQueryProfiler()
{
/// query profilers are useless without trace collector
auto global_context_ptr = global_context.lock();
if (!global_context_ptr || !global_context_ptr->hasTraceCollector())
return;
auto query_context_ptr = query_context.lock();
assert(query_context_ptr);
const auto & settings = query_context_ptr->getSettingsRef();
try
{
if (settings.query_profiler_real_time_period_ns > 0)
2020-02-02 02:27:15 +00:00
query_profiler_real = std::make_unique<QueryProfilerReal>(thread_id,
/* period */ static_cast<UInt32>(settings.query_profiler_real_time_period_ns));
if (settings.query_profiler_cpu_time_period_ns > 0)
2020-02-02 02:27:15 +00:00
query_profiler_cpu = std::make_unique<QueryProfilerCpu>(thread_id,
/* period */ static_cast<UInt32>(settings.query_profiler_cpu_time_period_ns));
}
catch (...)
{
/// QueryProfiler is optional.
tryLogCurrentException("ThreadStatus", "Cannot initialize QueryProfiler");
}
}
2019-02-09 22:40:47 +00:00
void ThreadStatus::finalizeQueryProfiler()
{
query_profiler_real.reset();
query_profiler_cpu.reset();
}
void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
{
MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
Lock MEMORY_LIMIT_EXCEEDED in ThreadStatus::detachQuery() Found with fuzzer [1]: <Fatal> BaseDaemon: (version 21.5.1.6440, build id: 3B097C902DDAA35688D90750552ED499DC5D10A0) (from thread 8012) Terminate called for uncaught exception: Code: 241, e.displayText() = DB::Exception: Memory limit (for user) exceeded: would use 153.51 MiB (attempt to allocate chunk of 4194368 bytes), maximum: 150.00 MiB, Stack trace (when copying this message, always include the lines below): 0. ./obj-x86_64-linux-gnu/../contrib/libcxx/include/exception:133: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x26fdcdd9 in /usr/bin/clickhouse 1. ./obj-x86_64-linux-gnu/../src/Common/Exception.cpp:57: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0xad0df02 in /usr/bin/clickhouse 2. ./obj-x86_64-linux-gnu/../contrib/libcxx/include/string:1444: DB::Exception::Exception<char const*, char const*, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, long&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char const*&&, char const*&&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&&, long&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&&) @ 0xacc7bef in /usr/bin/clickhouse 3. ./obj-x86_64-linux-gnu/../src/Common/MemoryTracker.cpp:219: MemoryTracker::alloc(long) @ 0xacc65eb in /usr/bin/clickhouse 4. ./obj-x86_64-linux-gnu/../src/Common/MemoryTracker.cpp:0: MemoryTracker::alloc(long) @ 0xacc5dad in /usr/bin/clickhouse 5. ./obj-x86_64-linux-gnu/../src/Common/MemoryTracker.cpp:0: MemoryTracker::alloc(long) @ 0xacc5dad in /usr/bin/clickhouse 6. 
./obj-x86_64-linux-gnu/../src/Common/AllocatorWithMemoryTracking.h:35: AllocatorWithMemoryTracking<DB::Field>::allocate(unsigned long) @ 0xad0a2fe in /usr/bin/clickhouse 7. void std::__1::vector<DB::Field, AllocatorWithMemoryTracking<DB::Field> >::__push_back_slow_path<DB::Field>(DB::Field&&) @ 0x11712a51 in /usr/bin/clickhouse 8. ./obj-x86_64-linux-gnu/../src/Interpreters/ThreadStatusExt.cpp:356: DB::ThreadStatus::detachQuery(bool, bool) @ 0x1f5d5237 in /usr/bin/clickhouse 9. ./obj-x86_64-linux-gnu/../src/Processors/Executors/PipelineExecutor.cpp:0: void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<ThreadFromGlobalPool::ThreadFromGlobalPool<DB::PipelineExecutor::executeImpl(unsigned long)::$_4>(DB::PipelineExecutor::executeImpl(unsigned long)::$_4&&)::'lambda'(), void ()> >(std::__1::__function::__policy_storage const*) @ 0x20c488e6 in /usr/bin/clickhouse 10. ./obj-x86_64-linux-gnu/../contrib/libcxx/include/functional:0: ThreadPoolImpl<std::__1::thread>::worker(std::__1::__list_iterator<std::__1::thread, void*>) @ 0xad9f6cc in /usr/bin/clickhouse 11. ./obj-x86_64-linux-gnu/../contrib/libcxx/include/memory:1655: void* std::__1::__thread_proxy<std::__1::tuple<std::__1::unique_ptr<std::__1::__thread_struct, std::__1::default_delete<std::__1::__thread_struct> >, void ThreadPoolImpl<std::__1::thread>::scheduleImpl<void>(std::__1::function<void ()>, int, std::__1::optional<unsigned long>)::'lambda1'()> >(void*) @ 0xada8264 in /usr/bin/clickhouse 12. start_thread @ 0x9609 in /usr/lib/x86_64-linux-gnu/libpthread-2.31.so 13. __clone @ 0x122293 in /usr/lib/x86_64-linux-gnu/libc-2.31.so (version 21.5.1.6440) [1]: https://clickhouse-test-reports.s3.yandex.net/22583/69296876005c0fa171c755f8b224e4d58192c402/stress_test_(address).html#fail1
2021-04-04 09:23:04 +00:00
if (exit_if_already_detached && thread_state == ThreadState::DetachedFromQuery)
{
thread_state = thread_exits ? ThreadState::Died : ThreadState::DetachedFromQuery;
return;
}
assertState({ThreadState::AttachedToQuery}, __PRETTY_FUNCTION__);
2020-11-10 05:50:32 +00:00
std::shared_ptr<OpenTelemetrySpanLog> opentelemetry_span_log;
auto query_context_ptr = query_context.lock();
2021-05-04 22:42:14 +00:00
if (thread_trace_context.trace_id != UUID() && query_context_ptr)
2020-11-10 05:50:32 +00:00
{
opentelemetry_span_log = query_context_ptr->getOpenTelemetrySpanLog();
2020-11-10 05:50:32 +00:00
}
if (opentelemetry_span_log)
{
2020-11-10 05:50:32 +00:00
// Log the current thread span.
// We do this manually, because we can't use OpenTelemetrySpanHolder as a
// ThreadStatus member, because of linking issues. This file is linked
// separately, so we can reference OpenTelemetrySpanLog here, but if we had
// the span holder as a field, we would have to reference it in the
// destructor, which is in another library.
OpenTelemetrySpanLogElement span;
2020-11-18 17:43:18 +00:00
span.trace_id = thread_trace_context.trace_id;
2020-11-19 15:52:11 +00:00
// All child span holders should be finished by the time we detach this
// thread, so the current span id should be the thread span id. If not,
// an assertion for a proper parent span in ~OpenTelemetrySpanHolder()
// is going to fail, because we're going to reset it to zero later in
// this function.
2020-11-18 17:43:18 +00:00
span.span_id = thread_trace_context.span_id;
assert(query_context_ptr);
span.parent_span_id = query_context_ptr->query_trace_context.span_id;
2020-11-10 05:50:32 +00:00
span.operation_name = getThreadName();
span.start_time_us = query_start_time_microseconds;
span.finish_time_us =
std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
span.attribute_names.push_back("clickhouse.thread_id");
span.attribute_values.push_back(thread_id);
opentelemetry_span_log->add(span);
}
finalizeQueryProfiler();
finalizePerformanceCounters();
/// Detach from thread group
performance_counters.setParent(&ProfileEvents::global_counters);
memory_tracker.reset();
2018-10-05 19:29:49 +00:00
2020-10-24 18:34:54 +00:00
/// Must reset pointer to thread_group's memory_tracker, because it will be destroyed two lines below (will reset to its parent).
memory_tracker.setParent(thread_group->memory_tracker.getParent());
2018-10-05 19:29:49 +00:00
2019-12-22 20:50:33 +00:00
query_id.clear();
query_context.reset();
2020-11-18 17:43:18 +00:00
thread_trace_context.trace_id = 0;
thread_trace_context.span_id = 0;
thread_group.reset();
thread_state = thread_exits ? ThreadState::Died : ThreadState::DetachedFromQuery;
2019-07-06 18:02:28 +00:00
#if defined(__linux__)
if (os_thread_priority)
{
LOG_TRACE(log, "Resetting nice");
2020-02-02 02:27:15 +00:00
if (0 != setpriority(PRIO_PROCESS, thread_id, 0))
2020-05-23 22:24:01 +00:00
LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno));
2019-07-06 18:02:28 +00:00
os_thread_priority = 0;
}
#endif
}
/// Fill and append one row describing this thread's work to system.query_thread_log.
/// `now` is passed in so the event time matches the caller's duration calculation.
void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database, std::chrono::time_point<std::chrono::system_clock> now)
{
    QueryThreadLogElement elem;

    // construct current_time and current_time_microseconds using the same time point
    // so that the two times will always be equal up to a precision of a second.
    auto current_time = time_in_seconds(now);
    auto current_time_microseconds = time_in_microseconds(now);

    elem.event_time = current_time;
    elem.event_time_microseconds = current_time_microseconds;
    elem.query_start_time = query_start_time;
    elem.query_start_time_microseconds = query_start_time_microseconds;
    elem.query_duration_ms = (time_in_nanoseconds(now) - query_start_time_nanoseconds) / 1000000U;

    elem.read_rows = progress_in.read_rows.load(std::memory_order_relaxed);
    elem.read_bytes = progress_in.read_bytes.load(std::memory_order_relaxed);

    /// TODO: Use written_rows and written_bytes when run time progress is implemented
    elem.written_rows = progress_out.read_rows.load(std::memory_order_relaxed);
    elem.written_bytes = progress_out.read_bytes.load(std::memory_order_relaxed);
    elem.memory_usage = memory_tracker.get();
    elem.peak_memory_usage = memory_tracker.getPeak();

    elem.thread_name = getThreadName();
    elem.thread_id = thread_id;

    elem.current_database = current_database;
    if (thread_group)
    {
        std::lock_guard lock(thread_group->mutex);

        elem.master_thread_id = thread_group->master_thread_id;
        elem.query = thread_group->query;
        elem.normalized_query_hash = thread_group->normalized_query_hash;
    }

    auto query_context_ptr = query_context.lock();
    if (query_context_ptr)
    {
        elem.client_info = query_context_ptr->getClientInfo();

        if (query_context_ptr->getSettingsRef().log_profile_events != 0)
        {
            /// NOTE: Here we are in the same thread, so we can make memcpy()
            elem.profile_counters = std::make_shared<ProfileEvents::Counters>(performance_counters.getPartiallyAtomicSnapshot());
        }
    }

    thread_log.add(elem);
}
2021-06-21 15:58:15 +00:00
static String getCleanQueryAst(const ASTPtr q, ContextPtr context)
2021-06-18 13:44:08 +00:00
{
2021-06-21 15:58:15 +00:00
String res = serializeAST(*q, true);
if (auto * masker = SensitiveDataMasker::getInstance())
masker->wipeSensitiveData(res);
res = res.substr(0, context->getSettingsRef().log_queries_cut_to_length);
return res;
}
void ThreadStatus::logToQueryViewsLog(const ViewInfo & vinfo)
{
auto query_context_ptr = query_context.lock();
if (!query_context_ptr)
return;
auto views_log = query_context_ptr->getQueryViewsLog();
if (!views_log)
return;
2021-06-18 13:44:08 +00:00
QueryViewsLogElement element;
2021-06-18 16:25:19 +00:00
element.event_time = time_in_seconds(vinfo.runtime_stats.event_time);
element.event_time_microseconds = time_in_microseconds(vinfo.runtime_stats.event_time);
2021-06-18 13:44:08 +00:00
element.view_duration_ms = vinfo.runtime_stats.elapsed_ms;
element.initial_query_id = query_id;
element.view_name = vinfo.table_id.getFullTableName();
2021-06-18 13:44:08 +00:00
element.view_uuid = vinfo.table_id.uuid;
element.view_type = vinfo.runtime_stats.type;
if (vinfo.query)
2021-06-21 15:58:15 +00:00
element.view_query = getCleanQueryAst(vinfo.query, query_context_ptr);
2021-06-18 13:44:08 +00:00
element.view_target = vinfo.runtime_stats.target_name;
2021-06-21 15:58:15 +00:00
auto events = std::make_shared<ProfileEvents::Counters>(performance_counters.getPartiallyAtomicSnapshot());
element.read_rows = (*events)[ProfileEvents::SelectedRows];
element.read_bytes = (*events)[ProfileEvents::SelectedBytes];
element.written_rows = (*events)[ProfileEvents::InsertedRows];
element.written_bytes = (*events)[ProfileEvents::InsertedBytes];
element.peak_memory_usage = memory_tracker.getPeak() > 0 ? memory_tracker.getPeak() : 0;
if (query_context_ptr->getSettingsRef().log_profile_events != 0)
2021-06-18 13:44:08 +00:00
{
2021-06-21 15:58:15 +00:00
element.profile_counters = events;
2021-06-18 13:44:08 +00:00
}
element.status = vinfo.runtime_stats.event_status;
2021-06-18 13:44:08 +00:00
element.exception_code = 0;
if (vinfo.exception)
{
2021-06-18 16:25:19 +00:00
element.exception_code = getExceptionErrorCode(vinfo.exception);
element.exception = getExceptionMessage(vinfo.exception, false);
2021-06-21 15:58:15 +00:00
if (query_context_ptr->getSettingsRef().calculate_text_stack_trace)
element.stack_trace = getExceptionStackTraceString(vinfo.exception);
2021-06-18 13:44:08 +00:00
}
2021-06-21 15:58:15 +00:00
views_log->add(element);
2021-06-18 13:44:08 +00:00
}
void CurrentThread::initializeQuery()
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->initializeQuery();
    current_thread->deleter = CurrentThread::defaultThreadDeleter;
}
void CurrentThread::attachTo(const ThreadGroupStatusPtr & thread_group)
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->attachQuery(thread_group, /* check_detached = */ true);
    current_thread->deleter = CurrentThread::defaultThreadDeleter;
}
void CurrentThread::attachToIfDetached(const ThreadGroupStatusPtr & thread_group)
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->attachQuery(thread_group, /* check_detached = */ false);
    current_thread->deleter = CurrentThread::defaultThreadDeleter;
}
/// Forward to ThreadStatus::attachQueryContext; no-op for threads without a ThreadStatus.
void CurrentThread::attachQueryContext(ContextPtr query_context)
{
    if (unlikely(!current_thread))
        return;
    current_thread->attachQueryContext(query_context);
}
void CurrentThread::finalizePerformanceCounters()
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->finalizePerformanceCounters();
}
void CurrentThread::detachQuery()
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->detachQuery(/* exit_if_already_detached = */ false);
}
void CurrentThread::detachQueryIfNotDetached()
{
    /// No-op for threads without a ThreadStatus.
    if (unlikely(current_thread == nullptr))
        return;

    current_thread->detachQuery(/* exit_if_already_detached = */ true);
}
2021-05-31 14:49:02 +00:00
CurrentThread::QueryScope::QueryScope(ContextMutablePtr query_context)
{
CurrentThread::initializeQuery();
CurrentThread::attachQueryContext(query_context);
if (!query_context->hasQueryContext())
query_context->makeQueryContext();
}
void CurrentThread::QueryScope::logPeakMemoryUsage()
{
    /// Log the group's peak memory usage once: clearing the flag prevents the
    /// destructor from logging it a second time.
    if (auto group = CurrentThread::getGroup())
    {
        log_peak_memory_usage_in_destructor = false;
        group->memory_tracker.logPeakMemoryUsage();
    }
}
CurrentThread::QueryScope::~QueryScope()
{
    /// Destructors must not throw: any error from logging or detaching is
    /// caught and logged instead.
    try
    {
        if (log_peak_memory_usage_in_destructor)
            logPeakMemoryUsage();
        CurrentThread::detachQueryIfNotDetached();
    }
    catch (...)
    {
        tryLogCurrentException("CurrentThread", __PRETTY_FUNCTION__);
    }
}
}