ClickHouse/src/Interpreters/TraceCollector.cpp

#include "TraceCollector.h"
#include <Core/Field.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/TraceLog.h>
#include <Poco/Logger.h>
#include <Common/setThreadName.h>
#include <Common/logger_useful.h>

namespace DB
{

TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_)
    : trace_log(std::move(trace_log_))
{
    TraceSender::pipe.open();

    /** Turn write end of pipe to non-blocking mode to avoid deadlocks
      * when QueryProfiler is invoked under locks and TraceCollector cannot pull data from pipe.
      */
    TraceSender::pipe.setNonBlockingWrite();
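    /// Best effort: enlarge the pipe buffer to 1 MiB so that non-blocking writes from
    /// profiled threads are less likely to be dropped while this thread catches up.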
    TraceSender::pipe.tryIncreaseSize(1 << 20);

    thread = ThreadFromGlobalPool(&TraceCollector::run, this);
}

TraceCollector::~TraceCollector()
{
    if (!thread.joinable())
        LOG_ERROR(&Poco::Logger::get("TraceCollector"), "TraceCollector thread is malformed and cannot be joined");
    else
        stop();
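
    /// Close the pipe only after the collector thread has been joined (or if it never started),
    /// so that run() is not left reading from a closed descriptor.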
    TraceSender::pipe.close();
}

/** Sends a stop message to the TraceCollector thread.
  *
  * Each sequence of data for the TraceCollector thread starts with a boolean flag.
  * If this flag is true, TraceCollector must stop reading from the pipe and exit.
  * This function sends the flag with a true value to stop TraceCollector gracefully.
  */
void TraceCollector::stop()
{
    WriteBufferFromFileDescriptor out(TraceSender::pipe.fds_rw[1]);
    writeChar(true, out);
    out.next();

    thread.join();
}
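
/** Reads trace messages from the pipe and stores them in system.trace_log (if trace_log is set).
  *
  * Each message read below consists of:
  *   - a one-byte stop flag (a true value terminates the thread);
  *   - the query_id as a length-prefixed string (UInt8 length followed by the bytes);
  *   - the stack trace as a UInt8 frame count followed by that many pointer-sized addresses;
  *   - the TraceType, the thread id and an Int64 size.
  */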
void TraceCollector::run()
{
    setThreadName("TraceCollector");
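
    /// Read end of the pipe; the write end is used by TraceSender and by stop() for the termination flag.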
    ReadBufferFromFileDescriptor in(TraceSender::pipe.fds_rw[0]);

    while (true)
    {
        char is_last;
        readChar(is_last, in);
        if (is_last)
            break;

        std::string query_id;
        UInt8 query_id_size = 0;
        readBinary(query_id_size, in);
        query_id.resize(query_id_size);
        in.read(query_id.data(), query_id_size);

        UInt8 trace_size = 0;
        readIntBinary(trace_size, in);
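
        /// Frame addresses are accumulated as UInt64 values; they end up in the Array(UInt64) `trace` column of system.trace_log.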
        Array trace;
        trace.reserve(trace_size);

        for (size_t i = 0; i < trace_size; ++i)
        {
            uintptr_t addr = 0;
            readPODBinary(addr, in);
            trace.emplace_back(static_cast<UInt64>(addr));
        }

        TraceType trace_type;
        readPODBinary(trace_type, in);

        UInt64 thread_id;
        readPODBinary(thread_id, in);
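
        /// For memory profiling trace types this should be the allocation size; it is not meaningful for sampling trace types.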
        Int64 size;
        readPODBinary(size, in);

        if (trace_log)
        {
            // time and time_in_microseconds are both constructed from the same timespec,
            // so the two values agree up to the precision of a second.
            struct timespec ts;
            clock_gettime(CLOCK_REALTIME, &ts);

            UInt64 time = static_cast<UInt64>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
            UInt64 time_in_microseconds = static_cast<UInt64>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
            TraceLogElement element{time_t(time / 1000000000), time_in_microseconds, time, trace_type, thread_id, query_id, trace, size};
            trace_log->add(element);
        }
    }
}
}