2015-10-05 00:44:40 +00:00
|
|
|
#pragma once
|
|
|
|
|
2020-02-17 14:27:09 +00:00
|
|
|
#include <common/time.h>
|
2020-03-19 10:38:34 +00:00
|
|
|
#include <common/types.h>
|
2020-02-17 14:27:09 +00:00
|
|
|
|
|
|
|
#include <atomic>
|
2015-10-05 00:44:40 +00:00
|
|
|
|
2018-02-25 02:43:27 +00:00
|
|
|
|
2020-03-17 02:15:05 +00:00
|
|
|
/// Read the given POSIX clock and return its value as a single nanosecond count.
/// CLOCK_MONOTONIC (the default) is the right choice for measuring elapsed time.
inline UInt64 clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
{
    struct timespec ts {};
    clock_gettime(clock_type, &ts);

    /// Fold seconds and nanoseconds into one 64-bit nanosecond count.
    const long long total_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
    return static_cast<UInt64>(total_ns);
}
|
|
|
|
|
2017-02-07 19:21:59 +00:00
|
|
|
/** Differs from Poco::Stopwatch only by using 'clock_gettime' instead of 'gettimeofday',
|
|
|
|
* returns nanoseconds instead of microseconds, and also by other minor differencies.
|
2015-10-05 00:44:40 +00:00
|
|
|
*/
|
|
|
|
class Stopwatch
|
|
|
|
{
|
|
|
|
public:
|
2017-04-01 07:20:54 +00:00
|
|
|
/** CLOCK_MONOTONIC works relatively efficient (~15 million calls/sec) and doesn't lead to syscall.
|
|
|
|
* Pass CLOCK_MONOTONIC_COARSE, if you need better performance with acceptable cost of several milliseconds of inaccuracy.
|
|
|
|
*/
|
2018-02-25 01:30:03 +00:00
|
|
|
Stopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { start(); }
|
2015-10-05 00:44:40 +00:00
|
|
|
|
2018-11-26 00:56:50 +00:00
|
|
|
void start() { start_ns = nanoseconds(); is_running = true; }
|
|
|
|
void stop() { stop_ns = nanoseconds(); is_running = false; }
|
|
|
|
void reset() { start_ns = 0; stop_ns = 0; is_running = false; }
|
|
|
|
void restart() { start(); }
|
|
|
|
UInt64 elapsed() const { return elapsedNanoseconds(); }
|
|
|
|
UInt64 elapsedNanoseconds() const { return is_running ? nanoseconds() - start_ns : stop_ns - start_ns; }
|
|
|
|
UInt64 elapsedMicroseconds() const { return elapsedNanoseconds() / 1000U; }
|
|
|
|
UInt64 elapsedMilliseconds() const { return elapsedNanoseconds() / 1000000UL; }
|
|
|
|
double elapsedSeconds() const { return static_cast<double>(elapsedNanoseconds()) / 1000000000ULL; }
|
2015-10-05 00:44:40 +00:00
|
|
|
|
|
|
|
private:
|
2018-02-26 19:38:06 +00:00
|
|
|
UInt64 start_ns = 0;
|
|
|
|
UInt64 stop_ns = 0;
|
2017-04-01 07:20:54 +00:00
|
|
|
clockid_t clock_type;
|
2018-02-26 19:38:06 +00:00
|
|
|
bool is_running = false;
|
2017-04-01 07:20:54 +00:00
|
|
|
|
2020-03-17 02:15:05 +00:00
|
|
|
UInt64 nanoseconds() const { return clock_gettime_ns(clock_type); }
|
2015-10-05 00:44:40 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2018-02-25 02:43:27 +00:00
|
|
|
class AtomicStopwatch
|
2015-10-05 00:44:40 +00:00
|
|
|
{
|
|
|
|
public:
|
2018-02-25 02:43:27 +00:00
|
|
|
AtomicStopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { restart(); }
|
|
|
|
|
2018-11-26 00:56:50 +00:00
|
|
|
void restart() { start_ns = nanoseconds(); }
|
|
|
|
UInt64 elapsed() const { return nanoseconds() - start_ns; }
|
|
|
|
UInt64 elapsedMilliseconds() const { return elapsed() / 1000000UL; }
|
|
|
|
double elapsedSeconds() const { return static_cast<double>(elapsed()) / 1000000000ULL; }
|
2018-02-25 02:43:27 +00:00
|
|
|
|
2018-02-25 02:45:36 +00:00
|
|
|
/** If specified amount of time has passed, then restarts timer and returns true.
|
2017-04-01 07:20:54 +00:00
|
|
|
* Otherwise returns false.
|
|
|
|
* This is done atomically.
|
|
|
|
*/
|
2018-02-25 02:43:27 +00:00
|
|
|
bool compareAndRestart(double seconds)
|
2017-04-01 07:20:54 +00:00
|
|
|
{
|
2018-06-03 16:51:31 +00:00
|
|
|
UInt64 threshold = static_cast<UInt64>(seconds * 1000000000.0);
|
2018-02-25 02:43:27 +00:00
|
|
|
UInt64 current_ns = nanoseconds();
|
|
|
|
UInt64 current_start_ns = start_ns;
|
2017-04-01 07:20:54 +00:00
|
|
|
|
2018-02-25 02:43:27 +00:00
|
|
|
while (true)
|
2017-04-01 07:20:54 +00:00
|
|
|
{
|
2018-02-25 02:43:27 +00:00
|
|
|
if (current_ns < current_start_ns + threshold)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (start_ns.compare_exchange_weak(current_start_ns, current_ns))
|
|
|
|
return true;
|
2017-04-01 07:20:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct Lock
|
|
|
|
{
|
2018-02-25 02:43:27 +00:00
|
|
|
AtomicStopwatch * parent = nullptr;
|
2017-04-01 07:20:54 +00:00
|
|
|
|
|
|
|
Lock() {}
|
|
|
|
|
|
|
|
operator bool() const { return parent != nullptr; }
|
|
|
|
|
2019-08-03 11:02:40 +00:00
|
|
|
Lock(AtomicStopwatch * parent_) : parent(parent_) {}
|
2017-04-01 07:20:54 +00:00
|
|
|
|
|
|
|
Lock(Lock &&) = default;
|
|
|
|
|
|
|
|
~Lock()
|
|
|
|
{
|
|
|
|
if (parent)
|
|
|
|
parent->restart();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/** If specified amount of time has passed and timer is not locked right now, then returns Lock object,
|
|
|
|
* which locks timer and, on destruction, restarts timer and releases the lock.
|
|
|
|
* Otherwise returns object, that is implicitly casting to false.
|
|
|
|
* This is done atomically.
|
|
|
|
*
|
|
|
|
* Usage:
|
2018-02-25 02:43:27 +00:00
|
|
|
* if (auto lock = timer.compareAndRestartDeferred(1))
|
2017-04-01 07:20:54 +00:00
|
|
|
* /// do some work, that must be done in one thread and not more frequently than each second.
|
|
|
|
*/
|
2018-02-25 02:43:27 +00:00
|
|
|
Lock compareAndRestartDeferred(double seconds)
|
2017-04-01 07:20:54 +00:00
|
|
|
{
|
2018-06-03 16:51:31 +00:00
|
|
|
UInt64 threshold = UInt64(seconds * 1000000000.0);
|
2018-02-25 02:43:27 +00:00
|
|
|
UInt64 current_ns = nanoseconds();
|
|
|
|
UInt64 current_start_ns = start_ns;
|
|
|
|
|
|
|
|
while (true)
|
|
|
|
{
|
|
|
|
if ((current_start_ns & 0x8000000000000000ULL))
|
|
|
|
return {};
|
2017-04-01 07:20:54 +00:00
|
|
|
|
2018-02-25 02:43:27 +00:00
|
|
|
if (current_ns < current_start_ns + threshold)
|
|
|
|
return {};
|
2017-04-01 07:20:54 +00:00
|
|
|
|
2018-02-25 02:43:27 +00:00
|
|
|
if (start_ns.compare_exchange_weak(current_start_ns, current_ns | 0x8000000000000000ULL))
|
|
|
|
return Lock(this);
|
|
|
|
}
|
2017-04-01 07:20:54 +00:00
|
|
|
}
|
2016-11-18 02:34:34 +00:00
|
|
|
|
2015-10-05 00:44:40 +00:00
|
|
|
private:
|
2018-02-25 02:43:27 +00:00
|
|
|
std::atomic<UInt64> start_ns;
|
|
|
|
std::atomic<bool> lock {false};
|
|
|
|
clockid_t clock_type;
|
|
|
|
|
|
|
|
/// Most significant bit is a lock. When it is set, compareAndRestartDeferred method will return false.
|
2020-03-17 02:15:05 +00:00
|
|
|
UInt64 nanoseconds() const { return clock_gettime_ns(clock_type) & 0x7FFFFFFFFFFFFFFFULL; }
|
2015-10-05 00:44:40 +00:00
|
|
|
};
|
2018-05-28 19:53:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
/// Like ordinary StopWatch, but uses getrusage() system call
|
2018-08-19 04:25:53 +00:00
|
|
|
struct StopwatchRUsage
|
2018-05-28 19:53:03 +00:00
|
|
|
{
|
2018-08-19 04:25:53 +00:00
|
|
|
StopwatchRUsage() = default;
|
2018-05-28 19:53:03 +00:00
|
|
|
|
2018-11-26 00:56:50 +00:00
|
|
|
void start() { start_ts = Timestamp::current(); is_running = true; }
|
|
|
|
void stop() { stop_ts = Timestamp::current(); is_running = false; }
|
|
|
|
void reset() { start_ts = Timestamp(); stop_ts = Timestamp(); is_running = false; }
|
|
|
|
void restart() { start(); }
|
2018-05-28 19:53:03 +00:00
|
|
|
|
|
|
|
UInt64 elapsed(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return elapsedNanoseconds(count_user, count_sys);
|
|
|
|
}
|
|
|
|
|
|
|
|
UInt64 elapsedNanoseconds(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return (is_running ? Timestamp::current() : stop_ts).nanoseconds(count_user, count_sys) - start_ts.nanoseconds(count_user, count_sys);
|
|
|
|
}
|
|
|
|
|
|
|
|
UInt64 elapsedMicroseconds(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return elapsedNanoseconds(count_user, count_sys) / 1000UL;
|
|
|
|
}
|
|
|
|
|
|
|
|
UInt64 elapsedMilliseconds(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return elapsedNanoseconds(count_user, count_sys) / 1000000UL;
|
|
|
|
}
|
|
|
|
|
|
|
|
double elapsedSeconds(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return static_cast<double>(elapsedNanoseconds(count_user, count_sys)) / 1000000000.0;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
|
|
|
struct Timestamp
|
|
|
|
{
|
|
|
|
UInt64 user_ns = 0;
|
|
|
|
UInt64 sys_ns = 0;
|
|
|
|
|
|
|
|
static Timestamp current();
|
|
|
|
|
|
|
|
UInt64 nanoseconds(bool count_user = true, bool count_sys = true) const
|
|
|
|
{
|
|
|
|
return (count_user ? user_ns : 0) + (count_sys ? sys_ns : 0);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
Timestamp start_ts;
|
|
|
|
Timestamp stop_ts;
|
|
|
|
bool is_running = false;
|
|
|
|
};
|
2019-06-17 16:41:08 +00:00
|
|
|
|
|
|
|
|
|
|
|
template <typename TStopwatch>
|
|
|
|
class StopwatchGuard : public TStopwatch
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
explicit StopwatchGuard(UInt64 & elapsed_ns_) : elapsed_ns(elapsed_ns_) {}
|
|
|
|
|
|
|
|
~StopwatchGuard() { elapsed_ns += TStopwatch::elapsedNanoseconds(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
UInt64 & elapsed_ns;
|
|
|
|
};
|