#pragma once

#include <Core/Types.h>
#include <Common/ProfileEvents.h>

#include <atomic>
#include <ctime>
#include <functional>
#include <memory>
#include <optional>

#include <sys/resource.h>
#include <pthread.h>

#include <common/logger_useful.h>


#if defined(__linux__)
#include <linux/taskstats.h>
#else
struct taskstats {};
#endif

/** Implement ProfileEvents with statistics about resource consumption of the current thread.
  */

namespace ProfileEvents
{
    extern const Event RealTimeMicroseconds;
    extern const Event UserTimeMicroseconds;
    extern const Event SystemTimeMicroseconds;
    extern const Event SoftPageFaults;
    extern const Event HardPageFaults;
    extern const Event VoluntaryContextSwitches;
    extern const Event InvoluntaryContextSwitches;

#if defined(__linux__)
    extern const Event OSIOWaitMicroseconds;
    extern const Event OSCPUWaitMicroseconds;
    extern const Event OSCPUVirtualTimeMicroseconds;
    extern const Event OSReadChars;
    extern const Event OSWriteChars;
    extern const Event OSReadBytes;
    extern const Event OSWriteBytes;

    extern const Event PerfCpuCycles;
    extern const Event PerfCpuCyclesRunning;
    extern const Event PerfCpuCyclesEnabled;
    extern const Event PerfInstructions;
    extern const Event PerfInstructionsRunning;
    extern const Event PerfInstructionsEnabled;
    extern const Event PerfCacheReferences;
    extern const Event PerfCacheReferencesRunning;
    extern const Event PerfCacheReferencesEnabled;
    extern const Event PerfCacheMisses;
    extern const Event PerfCacheMissesRunning;
    extern const Event PerfCacheMissesEnabled;
    extern const Event PerfBranchInstructions;
    extern const Event PerfBranchInstructionsRunning;
    extern const Event PerfBranchInstructionsEnabled;
    extern const Event PerfBranchMisses;
    extern const Event PerfBranchMissesRunning;
    extern const Event PerfBranchMissesEnabled;
    extern const Event PerfBusCycles;
    extern const Event PerfBusCyclesRunning;
    extern const Event PerfBusCyclesEnabled;
    extern const Event PerfStalledCyclesFrontend;
    extern const Event PerfStalledCyclesFrontendRunning;
    extern const Event PerfStalledCyclesFrontendEnabled;
    extern const Event PerfStalledCyclesBackend;
    extern const Event PerfStalledCyclesBackendRunning;
    extern const Event PerfStalledCyclesBackendEnabled;
    extern const Event PerfRefCpuCycles;
    extern const Event PerfRefCpuCyclesRunning;
    extern const Event PerfRefCpuCyclesEnabled;

    // extern const Event PerfCpuClock;
    extern const Event PerfTaskClock;
    extern const Event PerfPageFaults;
    extern const Event PerfContextSwitches;
    extern const Event PerfCpuMigrations;
    extern const Event PerfPageFaultsMin;
    extern const Event PerfPageFaultsMaj;
    extern const Event PerfAlignmentFaults;
    extern const Event PerfEmulationFaults;
#endif
}

namespace DB
{

/// Handles overflow: returns curr - prev, or 0 if the counter went backwards (e.g. after a reset).
template <typename TUInt>
inline TUInt safeDiff(TUInt prev, TUInt curr)
{
    return curr >= prev ? curr - prev : 0;
}
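
/// For example (argument order is prev, then curr): safeDiff<UInt64>(5, 3) == 0, while safeDiff<UInt64>(3, 5) == 2.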

inline UInt64 getCurrentTimeNanoseconds(clockid_t clock_type = CLOCK_MONOTONIC)
{
    struct timespec ts;
    clock_gettime(clock_type, &ts);
    return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
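
/// For example, getCurrentTimeNanoseconds(CLOCK_THREAD_CPUTIME_ID) returns the calling thread's CPU time,
/// while the CLOCK_MONOTONIC default measures wall-clock time unaffected by system clock adjustments.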

struct RUsageCounters
{
    /// Time fields are in nanoseconds.
    UInt64 real_time = 0;
    UInt64 user_time = 0;
    UInt64 sys_time = 0;

    UInt64 soft_page_faults = 0;
    UInt64 hard_page_faults = 0;

    RUsageCounters() = default;
    RUsageCounters(const ::rusage & rusage_, UInt64 real_time_)
    {
        set(rusage_, real_time_);
    }

    void set(const ::rusage & rusage, UInt64 real_time_)
    {
        real_time = real_time_;

        /// rusage reports seconds + microseconds; convert both parts to nanoseconds.
        user_time = rusage.ru_utime.tv_sec * 1000000000UL + rusage.ru_utime.tv_usec * 1000UL;
        sys_time = rusage.ru_stime.tv_sec * 1000000000UL + rusage.ru_stime.tv_usec * 1000UL;

        soft_page_faults = static_cast<UInt64>(rusage.ru_minflt);
        hard_page_faults = static_cast<UInt64>(rusage.ru_majflt);
    }

    static RUsageCounters zeros(UInt64 real_time_ = getCurrentTimeNanoseconds())
    {
        RUsageCounters res;
        res.real_time = real_time_;
        return res;
    }

    static RUsageCounters current(UInt64 real_time_ = getCurrentTimeNanoseconds())
    {
        ::rusage rusage {};
#if !defined(__APPLE__)
        /// RUSAGE_THREAD is not available on macOS; the counters stay zeroed there.
        ::getrusage(RUSAGE_THREAD, &rusage);
#endif
        return RUsageCounters(rusage, real_time_);
    }

    static void incrementProfileEvents(const RUsageCounters & prev, const RUsageCounters & curr, ProfileEvents::Counters & profile_events)
    {
        profile_events.increment(ProfileEvents::RealTimeMicroseconds, (curr.real_time - prev.real_time) / 1000U);
        profile_events.increment(ProfileEvents::UserTimeMicroseconds, (curr.user_time - prev.user_time) / 1000U);
        profile_events.increment(ProfileEvents::SystemTimeMicroseconds, (curr.sys_time - prev.sys_time) / 1000U);

        profile_events.increment(ProfileEvents::SoftPageFaults, curr.soft_page_faults - prev.soft_page_faults);
        profile_events.increment(ProfileEvents::HardPageFaults, curr.hard_page_faults - prev.hard_page_faults);
    }

    static void updateProfileEvents(RUsageCounters & last_counters, ProfileEvents::Counters & profile_events)
    {
        auto current_counters = current();
        incrementProfileEvents(last_counters, current_counters, profile_events);
        last_counters = current_counters;
    }
};
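
/** A minimal usage sketch (a hypothetical caller; the exact call sites live elsewhere in ClickHouse):
  *
  *     RUsageCounters last = RUsageCounters::current();           /// take a baseline at thread start
  *     /// ... the thread does some work ...
  *     RUsageCounters::updateProfileEvents(last, profile_events); /// add deltas to a ProfileEvents::Counters, advance the baseline
  */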

#if defined(__linux__)

struct PerfEventInfo
{
    // See the perf_type_id enum in perf_event.h.
    int event_type;
    // See the per-type configs (e.g. perf_hw_id) in perf_event.h.
    int event_config;

    ProfileEvents::Event profile_event;
    std::optional<ProfileEvents::Event> profile_event_running;
    std::optional<ProfileEvents::Event> profile_event_enabled;
};

struct PerfEventValue
{
    UInt64 value;
    // Time the event was enabled, and time it was actually running on a PMU:
    // time_running < time_enabled when the kernel multiplexes counters.
    UInt64 time_enabled;
    UInt64 time_running;
};
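
/// With multiplexed counters, an estimate of the full-period count can be obtained by scaling
/// (a standard perf technique, shown here only as an illustration, assuming a PerfEventValue v):
///
///     UInt64 scaled = v.time_running ? UInt64(v.value * (double(v.time_enabled) / v.time_running)) : v.value;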

struct PerfDescriptorsHolder;

struct PerfEventsCounters
{
    // cat /proc/sys/kernel/perf_event_paranoid
    // If perf_event_paranoid is set to 3, all calls to `perf_event_open` are rejected (even for the current process).
    // https://lwn.net/Articles/696234/
    // -1: Allow use of (almost) all events by all users.
    // >=0: Disallow raw tracepoint access by users without CAP_SYS_ADMIN.
    // >=1: Disallow CPU event access by users without CAP_SYS_ADMIN.
    // >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN.
    // >=3: Disallow all event access by users without CAP_SYS_ADMIN.
    // https://lwn.net/Articles/696216/
    // The patch above adds another value for the sysctl parameter (i.e. kernel.perf_event_paranoid=3)
    // that restricts perf_event_open() to processes with the CAP_SYS_ADMIN capability.
    // TODO: check whether perf_event_open() is available with CAP_SYS_ADMIN.
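
    // A possible way to probe the level before opening events (a sketch only, assuming
    // ClickHouse's ReadBufferFromFile and readIntText helpers; not necessarily how this class does it):
    //
    //     ReadBufferFromFile in("/proc/sys/kernel/perf_event_paranoid");
    //     Int32 level = 0;
    //     readIntText(level, in);
    //     bool can_open_events = level < 3;  // unprivileged perf_event_open is fully blocked at >= 3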

    static constexpr size_t NUMBER_OF_RAW_EVENTS = 18;

    static const PerfEventInfo raw_events_info[PerfEventsCounters::NUMBER_OF_RAW_EVENTS];

    static void initializeProfileEvents(PerfEventsCounters & counters);
    static void finalizeProfileEvents(PerfEventsCounters & counters, ProfileEvents::Counters & profile_events);

private:
    // Used to log the general unavailability of perf only once for all threads.
    static std::atomic<bool> perf_unavailability_logged;
    // Used to log the unavailability of particular perf events only once for all threads.
    static std::atomic<bool> particular_events_unavailability_logged;

    static thread_local PerfDescriptorsHolder thread_events_descriptors_holder;
    static thread_local bool thread_events_descriptors_opened;
    static thread_local PerfEventsCounters * current_thread_counters;

    // Scratch array, kept as a member so that it is not recreated every time event processing finishes.
    PerfEventValue raw_event_values[NUMBER_OF_RAW_EVENTS] {};

    static Logger * getLogger();

    static bool initializeThreadLocalEvents(PerfEventsCounters & counters);
};
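
/** A minimal usage sketch (a hypothetical caller; the exact call sites live elsewhere in ClickHouse):
  *
  *     PerfEventsCounters perf_counters;
  *     PerfEventsCounters::initializeProfileEvents(perf_counters);               /// open and enable the raw events for this thread
  *     /// ... the thread does some work ...
  *     PerfEventsCounters::finalizeProfileEvents(perf_counters, profile_events); /// read the deltas into ProfileEvents
  */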

struct PerfDescriptorsHolder
{
    int descriptors[PerfEventsCounters::NUMBER_OF_RAW_EVENTS] {};

    PerfDescriptorsHolder();
    ~PerfDescriptorsHolder();

    static Logger * getLogger();
};

#else

struct PerfEventsCounters
{
    static void initializeProfileEvents(PerfEventsCounters & counters);
    static void finalizeProfileEvents(PerfEventsCounters & counters, ProfileEvents::Counters & profile_events);
};

#endif

#if defined(__linux__)

class TasksStatsCounters
{
public:
    static bool checkIfAvailable();
    static std::unique_ptr<TasksStatsCounters> create(const UInt64 tid);

    void reset();
    void updateCounters(ProfileEvents::Counters & profile_events);

private:
    ::taskstats stats;  //-V730_NOINIT
    std::function<::taskstats()> stats_getter;

    enum class MetricsProvider
    {
        None,
        Procfs,
        Netlink
    };

private:
    explicit TasksStatsCounters(const UInt64 tid, const MetricsProvider provider);

    static MetricsProvider findBestAvailableProvider();
    static void incrementProfileEvents(const ::taskstats & prev, const ::taskstats & curr, ProfileEvents::Counters & profile_events);
};
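
/** A minimal usage sketch (a hypothetical caller):
  *
  *     if (TasksStatsCounters::checkIfAvailable())
  *     {
  *         auto counters = TasksStatsCounters::create(tid);   /// tid of the thread to be monitored
  *         counters->reset();                                 /// presumably refreshes the baseline snapshot
  *         /// ... the thread does some work ...
  *         counters->updateCounters(profile_events);          /// add the deltas to ProfileEvents
  *     }
  */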

#else

class TasksStatsCounters
{
public:
    static bool checkIfAvailable() { return false; }
    static std::unique_ptr<TasksStatsCounters> create(const UInt64 /*tid*/) { return {}; }

    void reset() {}
    void updateCounters(ProfileEvents::Counters &) {}

private:
    TasksStatsCounters(const UInt64 /*tid*/) {}
};

#endif

}