/// ClickHouse — src/Common/Stopwatch.h
/// Stopwatch utilities built on clock_gettime: a plain Stopwatch and an AtomicStopwatch
/// that supports concurrent restart / compare-and-restart.
#pragma once

#include <base/time.h>
#include <base/types.h>
#include <base/defines.h>

#include <atomic>
#include <cassert>
#include <memory>

/// From clock_getres(2):
///
/// Similar to CLOCK_MONOTONIC, but provides access to a raw hardware-based
/// time that is not subject to NTP adjustments or the incremental
/// adjustments performed by adjtime(3).
#ifdef CLOCK_MONOTONIC_RAW
static constexpr clockid_t STOPWATCH_DEFAULT_CLOCK = CLOCK_MONOTONIC_RAW;
#else
/// Fallback for platforms where CLOCK_MONOTONIC_RAW is not available:
/// CLOCK_MONOTONIC never steps backwards but may be slewed by NTP/adjtime.
static constexpr clockid_t STOPWATCH_DEFAULT_CLOCK = CLOCK_MONOTONIC;
#endif
/// Read the given clock and return its value as a single nanosecond counter.
/// The return value of clock_gettime is deliberately ignored (matching historical behavior);
/// with a valid clockid it cannot fail.
inline UInt64 clock_gettime_ns(clockid_t clock_type = STOPWATCH_DEFAULT_CLOCK)
{
    struct timespec ts;
    clock_gettime(clock_type, &ts);

    /// Fold seconds and the sub-second remainder into one 64-bit nanosecond count.
    const long long total_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
    return UInt64(total_ns);
}
2021-10-06 15:09:13 +00:00
/// Takes previously returned value and returns it again if time stepped back for some reason.
///
/// You should use this if OS does not support CLOCK_MONOTONIC_RAW
inline UInt64 clock_gettime_ns_adjusted(UInt64 prev_time, clockid_t clock_type = STOPWATCH_DEFAULT_CLOCK)
2021-10-06 15:09:13 +00:00
{
#ifdef CLOCK_MONOTONIC_RAW
if (likely(clock_type == CLOCK_MONOTONIC_RAW))
return clock_gettime_ns(clock_type);
#endif
2021-10-06 15:09:13 +00:00
UInt64 current_time = clock_gettime_ns(clock_type);
if (likely(prev_time <= current_time))
return current_time;
/// Something probably went completely wrong if time stepped back for more than 1 second.
assert(prev_time - current_time <= 1000000000ULL);
return prev_time;
}
2017-02-07 19:21:59 +00:00
/** Differs from Poco::Stopwatch only by using 'clock_gettime' instead of 'gettimeofday',
2022-09-02 08:54:48 +00:00
* returns nanoseconds instead of microseconds, and also by other minor differences.
2015-10-05 00:44:40 +00:00
*/
class Stopwatch
{
public:
/** CLOCK_MONOTONIC/CLOCK_MONOTONIC_RAW works relatively efficient (~40-50 million calls/sec) and doesn't lead to syscall.
2017-02-07 19:21:59 +00:00
* Pass CLOCK_MONOTONIC_COARSE, if you need better performance with acceptable cost of several milliseconds of inaccuracy.
2015-10-05 00:44:40 +00:00
*/
explicit Stopwatch(clockid_t clock_type_ = STOPWATCH_DEFAULT_CLOCK) : clock_type(clock_type_) { start(); }
explicit Stopwatch(clockid_t clock_type_, UInt64 start_nanoseconds, bool is_running_)
: start_ns(start_nanoseconds), clock_type(clock_type_), is_running(is_running_)
{
}
2015-10-05 00:44:40 +00:00
2018-11-26 00:56:50 +00:00
void start() { start_ns = nanoseconds(); is_running = true; }
void stop() { stop_ns = nanoseconds(); is_running = false; }
void reset() { start_ns = 0; stop_ns = 0; is_running = false; }
void restart() { start(); }
UInt64 elapsed() const { return elapsedNanoseconds(); }
UInt64 elapsedNanoseconds() const { return is_running ? nanoseconds() - start_ns : stop_ns - start_ns; }
UInt64 elapsedMicroseconds() const { return elapsedNanoseconds() / 1000U; }
UInt64 elapsedMilliseconds() const { return elapsedNanoseconds() / 1000000UL; }
double elapsedSeconds() const { return static_cast<double>(elapsedNanoseconds()) / 1000000000ULL; }
2015-10-05 00:44:40 +00:00
2023-02-07 17:50:31 +00:00
UInt64 getStart() const { return start_ns; }
UInt64 getEnd() const { return stop_ns; }
2015-10-05 00:44:40 +00:00
private:
UInt64 start_ns = 0;
UInt64 stop_ns = 0;
2015-10-05 00:44:40 +00:00
clockid_t clock_type;
bool is_running = false;
2021-10-06 15:09:13 +00:00
UInt64 nanoseconds() const { return clock_gettime_ns_adjusted(start_ns, clock_type); }
2015-10-05 00:44:40 +00:00
};
using StopwatchUniquePtr = std::unique_ptr<Stopwatch>;
2015-10-05 00:44:40 +00:00
2022-11-14 05:57:53 +00:00
/// Allows to obtain the elapsed time concurrently with restarting the stopwatch.
2022-11-14 05:54:58 +00:00
/// Allows to atomically compare the elapsed time with a threshold and restart the watch if the elapsed time is not less.
2018-02-25 02:43:27 +00:00
class AtomicStopwatch
2015-10-05 00:44:40 +00:00
{
public:
explicit AtomicStopwatch(clockid_t clock_type_ = STOPWATCH_DEFAULT_CLOCK) : clock_type(clock_type_) { restart(); }
2018-02-25 02:43:27 +00:00
2021-10-06 15:09:13 +00:00
void restart() { start_ns = nanoseconds(0); }
UInt64 elapsed() const
{
UInt64 current_start_ns = start_ns;
return nanoseconds(current_start_ns) - current_start_ns;
}
2018-11-26 00:56:50 +00:00
UInt64 elapsedMilliseconds() const { return elapsed() / 1000000UL; }
double elapsedSeconds() const { return static_cast<double>(elapsed()) / 1000000000ULL; }
2018-02-25 02:43:27 +00:00
2018-02-25 02:45:36 +00:00
/** If specified amount of time has passed, then restarts timer and returns true.
* Otherwise returns false.
* This is done atomically.
2015-10-05 00:44:40 +00:00
*/
2018-02-25 02:43:27 +00:00
bool compareAndRestart(double seconds)
2015-10-05 00:44:40 +00:00
{
UInt64 threshold = static_cast<UInt64>(seconds * 1000000000.0);
2018-02-25 02:43:27 +00:00
UInt64 current_start_ns = start_ns;
2021-10-06 15:09:13 +00:00
UInt64 current_ns = nanoseconds(current_start_ns);
2018-02-25 02:43:27 +00:00
while (true)
2015-10-05 00:44:40 +00:00
{
2018-02-25 02:43:27 +00:00
if (current_ns < current_start_ns + threshold)
return false;
if (start_ns.compare_exchange_weak(current_start_ns, current_ns))
return true;
2015-10-05 00:44:40 +00:00
}
}
struct Lock
{
2018-02-25 02:43:27 +00:00
AtomicStopwatch * parent = nullptr;
Lock() = default;
explicit operator bool() const { return parent != nullptr; }
explicit Lock(AtomicStopwatch * parent_) : parent(parent_) {}
Lock(Lock &&) = default;
~Lock()
{
if (parent)
parent->restart();
}
};
/** If specified amount of time has passed and timer is not locked right now, then returns Lock object,
* which locks timer and, on destruction, restarts timer and releases the lock.
* Otherwise returns object, that is implicitly casting to false.
* This is done atomically.
*
* Usage:
2018-02-25 02:43:27 +00:00
* if (auto lock = timer.compareAndRestartDeferred(1))
* /// do some work, that must be done in one thread and not more frequently than each second.
*/
2018-02-25 02:43:27 +00:00
Lock compareAndRestartDeferred(double seconds)
{
UInt64 threshold = UInt64(seconds * 1000000000.0);
2018-02-25 02:43:27 +00:00
UInt64 current_start_ns = start_ns;
2021-10-06 15:09:13 +00:00
UInt64 current_ns = nanoseconds(current_start_ns);
2018-02-25 02:43:27 +00:00
while (true)
{
if ((current_start_ns & 0x8000000000000000ULL))
return {};
2018-02-25 02:43:27 +00:00
if (current_ns < current_start_ns + threshold)
return {};
2018-02-25 02:43:27 +00:00
if (start_ns.compare_exchange_weak(current_start_ns, current_ns | 0x8000000000000000ULL))
return Lock(this);
}
}
2015-10-05 00:44:40 +00:00
private:
2018-02-25 02:43:27 +00:00
std::atomic<UInt64> start_ns;
std::atomic<bool> lock {false};
clockid_t clock_type;
/// Most significant bit is a lock. When it is set, compareAndRestartDeferred method will return false.
2021-10-06 15:09:13 +00:00
UInt64 nanoseconds(UInt64 prev_time) const { return clock_gettime_ns_adjusted(prev_time, clock_type) & 0x7FFFFFFFFFFFFFFFULL; }
2015-10-05 00:44:40 +00:00
};