ClickHouse/dbms/src/Common/ThreadFuzzer.cpp

#include <signal.h>
#include <time.h>
#include <sys/time.h>
#if defined(OS_LINUX)
#include <sys/sysinfo.h>
#endif
#include <sched.h>
#include <random>
#include <common/sleep.h>
#include <common/getThreadId.h>
#include <IO/ReadHelpers.h>
#include <Common/Exception.h>
#include <Common/thread_local_rng.h>
#include <Common/ThreadFuzzer.h>
/// We will also wrap some thread synchronization functions to inject sleep/migration before or after.
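///
/// The wrappers are controlled by per-function environment variables following the naming pattern
/// generated in initConfiguration() below. A hypothetical example (values chosen only for illustration):
///
///     THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
///     THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
///     THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=0.01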
#if defined(OS_LINUX)
#define FOR_EACH_WRAPPED_FUNCTION(M) \
    M(int, pthread_mutex_lock, pthread_mutex_t * arg) \
    M(int, pthread_mutex_unlock, pthread_mutex_t * arg)
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int CANNOT_MANIPULATE_SIGSET;
    extern const int CANNOT_SET_SIGNAL_HANDLER;
    extern const int CANNOT_CREATE_TIMER;
}

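/// The fuzzer is configured entirely through environment variables, read in initConfiguration().
/// A hypothetical invocation (values chosen only for illustration) that injects random sleeps from
/// the timer signal handler:
///
///     THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 \
///     THREAD_FUZZER_SLEEP_PROBABILITY=0.1 \
///     THREAD_FUZZER_SLEEP_TIME_US=100000 \
///     clickhouse-server
///
/// If no variables are set, isEffective() returns false and setup() is never called.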
ThreadFuzzer::ThreadFuzzer()
{
    initConfiguration();
    if (!isEffective())
        return;
    setup();
}

template <typename T>
static void initFromEnv(T & what, const char * name)
{
    const char * env = getenv(name);
    if (!env)
        return;
    what = parse<T>(env);
}

template <typename T>
static void initFromEnv(std::atomic<T> & what, const char * name)
{
    const char * env = getenv(name);
    if (!env)
        return;
    what.store(parse<T>(env), std::memory_order_relaxed);
}

static std::atomic<int> num_cpus = 0;

#if defined(OS_LINUX)
#define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \
    static std::atomic<double> NAME ## _before_yield_probability = 0; \
    static std::atomic<double> NAME ## _before_migrate_probability = 0; \
    static std::atomic<double> NAME ## _before_sleep_probability = 0; \
    static std::atomic<double> NAME ## _before_sleep_time_us = 0; \
    \
    static std::atomic<double> NAME ## _after_yield_probability = 0; \
    static std::atomic<double> NAME ## _after_migrate_probability = 0; \
    static std::atomic<double> NAME ## _after_sleep_probability = 0; \
    static std::atomic<double> NAME ## _after_sleep_time_us = 0;

FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS)
#undef DEFINE_WRAPPER_PARAMS
#endif

void ThreadFuzzer::initConfiguration()
{
#if defined(OS_LINUX)
    num_cpus.store(get_nprocs(), std::memory_order_relaxed);
#else
    (void)num_cpus;
#endif

    initFromEnv(cpu_time_period_us, "THREAD_FUZZER_CPU_TIME_PERIOD_US");
    initFromEnv(yield_probability, "THREAD_FUZZER_YIELD_PROBABILITY");
    initFromEnv(migrate_probability, "THREAD_FUZZER_MIGRATE_PROBABILITY");
    initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY");
    initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US");

#if defined(OS_LINUX)
#define INIT_WRAPPER_PARAMS(RET, NAME, ...) \
    initFromEnv(NAME ## _before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \
    initFromEnv(NAME ## _before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \
    initFromEnv(NAME ## _before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \
    initFromEnv(NAME ## _before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \
    \
    initFromEnv(NAME ## _after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \
    initFromEnv(NAME ## _after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \
    initFromEnv(NAME ## _after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \
    initFromEnv(NAME ## _after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US");

    FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS)
#undef INIT_WRAPPER_PARAMS
#endif
}

bool ThreadFuzzer::isEffective() const
{
#if defined(OS_LINUX)
#define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \
    if (NAME ## _before_yield_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _before_migrate_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _before_sleep_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)) return true; \
    \
    if (NAME ## _after_yield_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _after_migrate_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _after_sleep_probability.load(std::memory_order_relaxed)) return true; \
    if (NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)) return true;

    FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS)
#undef CHECK_WRAPPER_PARAMS
#endif

    return cpu_time_period_us != 0
        && (yield_probability > 0
            || migrate_probability > 0
            || (sleep_probability > 0 && sleep_time_us > 0));
}

static void injection(
    double yield_probability,
    double migrate_probability,
    double sleep_probability,
    double sleep_time_us [[maybe_unused]])
{
    if (yield_probability > 0
        && std::bernoulli_distribution(yield_probability)(thread_local_rng))
    {
        sched_yield();
    }

#if defined(OS_LINUX)
    int num_cpus_loaded = num_cpus.load(std::memory_order_relaxed);
    if (num_cpus_loaded > 0
        && migrate_probability > 0
        && std::bernoulli_distribution(migrate_probability)(thread_local_rng))
    {
        int migrate_to = std::uniform_int_distribution<>(0, num_cpus_loaded - 1)(thread_local_rng);

        cpu_set_t set;
        CPU_ZERO(&set);
        CPU_SET(migrate_to, &set);
        (void)sched_setaffinity(0, sizeof(set), &set);
    }
#else
    UNUSED(migrate_probability);
#endif

    if (sleep_probability > 0
        && sleep_time_us > 0
        && std::bernoulli_distribution(sleep_probability)(thread_local_rng))
    {
        sleepForNanoseconds(sleep_time_us * 1000);
    }
}

void ThreadFuzzer::signalHandler(int)
{
    auto saved_errno = errno;

    auto & fuzzer = ThreadFuzzer::instance();
    injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us);

    errno = saved_errno;
}

void ThreadFuzzer::setup()
{
    struct sigaction sa{};
    sa.sa_handler = signalHandler;
    sa.sa_flags = SA_RESTART;

#if defined(OS_LINUX)
    if (sigemptyset(&sa.sa_mask))
        throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);

    if (sigaddset(&sa.sa_mask, SIGPROF))
        throwFromErrno("Failed to add signal to mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
#else
    /// The following two functions always return 0 on macOS, so there is no need to check for errors.
    sigemptyset(&sa.sa_mask);
    sigaddset(&sa.sa_mask, SIGPROF);
#endif

    if (sigaction(SIGPROF, &sa, nullptr))
        throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);

    static constexpr UInt32 TIMER_PRECISION = 1000000;

    struct timeval interval;
    interval.tv_sec = cpu_time_period_us / TIMER_PRECISION;
    interval.tv_usec = cpu_time_period_us % TIMER_PRECISION;

    struct itimerval timer = {.it_interval = interval, .it_value = interval};

    if (0 != setitimer(ITIMER_PROF, &timer, nullptr))
        throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER);
}

/// We expect that for every function like pthread_mutex_lock there is the same function with a
/// two-underscore prefix (__pthread_mutex_lock).
/// NOTE: We cannot use dlsym(..., RTLD_NEXT), because dlsym itself calls pthread_mutex_lock,
/// which would lead to infinite recursion.
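///
/// For illustration, a rough sketch (not verbatim) of what MAKE_WRAPPER expands to for the
/// pthread_mutex_lock entry of FOR_EACH_WRAPPED_FUNCTION:
///
///     extern "C" int __pthread_mutex_lock(pthread_mutex_t * arg);
///     extern "C" int pthread_mutex_lock(pthread_mutex_t * arg)
///     {
///         injection(/* the four "before" parameters loaded from the atomics defined above */);
///         auto && ret{__pthread_mutex_lock(arg)};
///         injection(/* the four "after" parameters */);
///         return ret;
///     }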
#if defined(OS_LINUX)
#define MAKE_WRAPPER(RET, NAME, ...) \
    extern "C" RET __ ## NAME(__VA_ARGS__); /* NOLINT */ \
    extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \
    { \
        injection( \
            NAME ## _before_yield_probability.load(std::memory_order_relaxed), \
            NAME ## _before_migrate_probability.load(std::memory_order_relaxed), \
            NAME ## _before_sleep_probability.load(std::memory_order_relaxed), \
            NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)); \
        \
        auto && ret{__ ## NAME(arg)}; \
        \
        injection( \
            NAME ## _after_yield_probability.load(std::memory_order_relaxed), \
            NAME ## _after_migrate_probability.load(std::memory_order_relaxed), \
            NAME ## _after_sleep_probability.load(std::memory_order_relaxed), \
            NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)); \
        \
        return ret; \
    }

FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER)
#undef MAKE_WRAPPER
#endif

}