2014-03-10 09:33:18 +00:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <signal.h>
|
2016-01-12 02:55:39 +00:00
|
|
|
#include <time.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
|
|
|
#include <iostream>
|
|
|
|
#include <fstream>
|
|
|
|
#include <iomanip>
|
2016-01-12 02:55:39 +00:00
|
|
|
#include <random>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
|
|
|
#include <Poco/File.h>
|
|
|
|
#include <Poco/Util/Application.h>
|
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Common/Stopwatch.h>
|
|
|
|
#include <Common/ThreadPool.h>
|
|
|
|
#include <AggregateFunctions/ReservoirSampler.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
|
|
|
#include <boost/program_options.hpp>
|
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Common/ConcurrentBoundedQueue.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Common/Exception.h>
|
|
|
|
#include <Common/randomSeed.h>
|
|
|
|
#include <Core/Types.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <IO/ReadBufferFromFileDescriptor.h>
|
|
|
|
#include <IO/WriteBufferFromFileDescriptor.h>
|
|
|
|
#include <IO/WriteBufferFromFile.h>
|
|
|
|
#include <IO/ReadHelpers.h>
|
|
|
|
#include <IO/WriteHelpers.h>
|
|
|
|
#include <IO/Operators.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <DataStreams/RemoteBlockInputStream.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Interpreters/Context.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Client/Connection.h>
|
2014-03-10 09:33:18 +00:00
|
|
|
|
|
|
|
#include "InterruptListener.h"
|
|
|
|
|
|
|
|
|
2016-09-28 16:49:59 +00:00
|
|
|
/** A tool for evaluating ClickHouse performance.
|
|
|
|
* The tool emulates a case with fixed amount of simultaneously executing queries.
|
2014-03-10 09:33:18 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-01-12 02:55:39 +00:00
|
|
|
namespace ErrorCodes
{
    /// Error codes referenced in this translation unit.
    /// These are extern declarations only; the definitions live in the central ErrorCodes list.
    extern const int POCO_EXCEPTION;
    extern const int STD_EXCEPTION;
    extern const int UNKNOWN_EXCEPTION;
    extern const int BAD_ARGUMENTS;
    /// Was missing although used by Benchmark::thread() when pthread_sigmask fails.
    extern const int CANNOT_BLOCK_SIGNAL;
}
|
|
|
|
|
2014-03-10 09:33:18 +00:00
|
|
|
class Benchmark
|
|
|
|
{
|
|
|
|
public:
|
2017-04-01 07:20:54 +00:00
|
|
|
Benchmark(unsigned concurrency_, double delay_,
|
|
|
|
const String & host_, UInt16 port_, const String & default_database_,
|
|
|
|
const String & user_, const String & password_, const String & stage,
|
|
|
|
bool randomize_, size_t max_iterations_, double max_time_,
|
|
|
|
const String & json_path_, const Settings & settings_)
|
|
|
|
:
|
|
|
|
concurrency(concurrency_), delay(delay_), queue(concurrency),
|
|
|
|
connections(concurrency, host_, port_, default_database_, user_, password_),
|
|
|
|
randomize(randomize_), max_iterations(max_iterations_), max_time(max_time_),
|
|
|
|
json_path(json_path_), settings(settings_), pool(concurrency)
|
|
|
|
{
|
|
|
|
std::cerr << std::fixed << std::setprecision(3);
|
|
|
|
|
|
|
|
if (stage == "complete")
|
|
|
|
query_processing_stage = QueryProcessingStage::Complete;
|
|
|
|
else if (stage == "fetch_columns")
|
|
|
|
query_processing_stage = QueryProcessingStage::FetchColumns;
|
|
|
|
else if (stage == "with_mergeable_state")
|
|
|
|
query_processing_stage = QueryProcessingStage::WithMergeableState;
|
|
|
|
else
|
|
|
|
throw Exception("Unknown query processing stage: " + stage, ErrorCodes::BAD_ARGUMENTS);
|
|
|
|
|
|
|
|
if (!json_path.empty() && Poco::File(json_path).exists()) /// Clear file with previous results
|
|
|
|
{
|
|
|
|
Poco::File(json_path).remove();
|
|
|
|
}
|
|
|
|
|
|
|
|
readQueries();
|
|
|
|
run();
|
|
|
|
}
|
2014-03-10 09:33:18 +00:00
|
|
|
|
|
|
|
private:
|
2017-04-01 07:20:54 +00:00
|
|
|
using Query = std::string;
|
|
|
|
|
|
|
|
unsigned concurrency;
|
|
|
|
double delay;
|
|
|
|
|
|
|
|
using Queries = std::vector<Query>;
|
|
|
|
Queries queries;
|
|
|
|
|
|
|
|
using Queue = ConcurrentBoundedQueue<Query>;
|
|
|
|
Queue queue;
|
|
|
|
|
|
|
|
ConnectionPool connections;
|
|
|
|
bool randomize;
|
|
|
|
size_t max_iterations;
|
|
|
|
double max_time;
|
|
|
|
String json_path;
|
|
|
|
Settings settings;
|
|
|
|
QueryProcessingStage::Enum query_processing_stage;
|
|
|
|
|
|
|
|
/// Don't execute new queries after timelimit or SIGINT or exception
|
|
|
|
std::atomic<bool> shutdown{false};
|
|
|
|
|
|
|
|
struct Stats
|
|
|
|
{
|
|
|
|
Stopwatch watch;
|
|
|
|
std::atomic<size_t> queries{0};
|
|
|
|
size_t read_rows = 0;
|
|
|
|
size_t read_bytes = 0;
|
|
|
|
size_t result_rows = 0;
|
|
|
|
size_t result_bytes = 0;
|
|
|
|
|
|
|
|
using Sampler = ReservoirSampler<double>;
|
|
|
|
Sampler sampler {1 << 16};
|
|
|
|
|
|
|
|
void add(double seconds, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
|
|
|
|
{
|
|
|
|
++queries;
|
|
|
|
read_rows += read_rows_inc;
|
|
|
|
read_bytes += read_bytes_inc;
|
|
|
|
result_rows += result_rows_inc;
|
|
|
|
result_bytes += result_bytes_inc;
|
|
|
|
sampler.insert(seconds);
|
|
|
|
}
|
|
|
|
|
|
|
|
void clear()
|
|
|
|
{
|
|
|
|
watch.restart();
|
|
|
|
queries = 0;
|
|
|
|
read_rows = 0;
|
|
|
|
read_bytes = 0;
|
|
|
|
result_rows = 0;
|
|
|
|
result_bytes = 0;
|
|
|
|
sampler.clear();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
Stats info_per_interval;
|
|
|
|
Stats info_total;
|
|
|
|
Stopwatch delay_watch;
|
|
|
|
|
|
|
|
std::mutex mutex;
|
|
|
|
|
|
|
|
ThreadPool pool;
|
|
|
|
|
|
|
|
|
|
|
|
void readQueries()
|
|
|
|
{
|
|
|
|
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
|
|
|
|
|
|
|
while (!in.eof())
|
|
|
|
{
|
|
|
|
std::string query;
|
|
|
|
readText(query, in);
|
|
|
|
assertChar('\n', in);
|
|
|
|
|
|
|
|
if (!query.empty())
|
|
|
|
queries.emplace_back(query);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (queries.empty())
|
|
|
|
throw Exception("Empty list of queries.");
|
|
|
|
|
|
|
|
std::cerr << "Loaded " << queries.size() << " queries.\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void printNumberOfQueriesExecuted(size_t num)
|
|
|
|
{
|
|
|
|
std::cerr << "\nQueries executed: " << num;
|
|
|
|
if (queries.size() > 1)
|
|
|
|
std::cerr << " (" << (num * 100.0 / queries.size()) << "%)";
|
|
|
|
std::cerr << ".\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Try push new query and check cancellation conditions
|
|
|
|
bool tryPushQueryInteractively(const String & query, InterruptListener & interrupt_listener)
|
|
|
|
{
|
|
|
|
bool inserted = false;
|
|
|
|
|
|
|
|
while (!inserted)
|
|
|
|
{
|
|
|
|
inserted = queue.tryPush(query, 100);
|
|
|
|
|
|
|
|
if (shutdown)
|
|
|
|
{
|
|
|
|
/// An exception occurred in a worker
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (max_time > 0 && info_total.watch.elapsedSeconds() >= max_time)
|
|
|
|
{
|
|
|
|
std::cout << "Stopping launch of queries. Requested time limit is exhausted.\n";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (interrupt_listener.check())
|
|
|
|
{
|
|
|
|
std::cout << "Stopping launch of queries. SIGINT recieved.\n";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (delay > 0 && delay_watch.elapsedSeconds() > delay)
|
|
|
|
{
|
|
|
|
printNumberOfQueriesExecuted(info_total.queries);
|
|
|
|
report(info_per_interval);
|
|
|
|
delay_watch.restart();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void run()
|
|
|
|
{
|
|
|
|
std::mt19937 generator(randomSeed());
|
|
|
|
std::uniform_int_distribution<size_t> distribution(0, queries.size() - 1);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < concurrency; ++i)
|
2017-04-19 17:40:55 +00:00
|
|
|
pool.schedule(std::bind(&Benchmark::thread, this, connections.get()));
|
2017-04-01 07:20:54 +00:00
|
|
|
|
|
|
|
InterruptListener interrupt_listener;
|
|
|
|
info_per_interval.watch.restart();
|
|
|
|
delay_watch.restart();
|
|
|
|
|
|
|
|
/// Push queries into queue
|
|
|
|
for (size_t i = 0; !max_iterations || i < max_iterations; ++i)
|
|
|
|
{
|
|
|
|
size_t query_index = randomize ? distribution(generator) : i % queries.size();
|
|
|
|
|
|
|
|
if (!tryPushQueryInteractively(queries[query_index], interrupt_listener))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
shutdown = true;
|
|
|
|
pool.wait();
|
|
|
|
info_total.watch.stop();
|
|
|
|
|
|
|
|
if (!json_path.empty())
|
|
|
|
reportJSON(info_total, json_path);
|
|
|
|
|
|
|
|
printNumberOfQueriesExecuted(info_total.queries);
|
|
|
|
report(info_total);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void thread(ConnectionPool::Entry connection)
|
|
|
|
{
|
|
|
|
Query query;
|
|
|
|
|
|
|
|
try
|
|
|
|
{
|
|
|
|
/// In these threads we do not accept INT signal.
|
|
|
|
sigset_t sig_set;
|
|
|
|
if (sigemptyset(&sig_set)
|
|
|
|
|| sigaddset(&sig_set, SIGINT)
|
|
|
|
|| pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
|
|
|
|
throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL);
|
|
|
|
|
|
|
|
while (true)
|
|
|
|
{
|
|
|
|
bool extracted = false;
|
|
|
|
|
|
|
|
while (!extracted)
|
|
|
|
{
|
|
|
|
extracted = queue.tryPop(query, 100);
|
|
|
|
|
|
|
|
if (shutdown)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
execute(connection, query);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
catch (...)
|
|
|
|
{
|
|
|
|
shutdown = true;
|
|
|
|
std::cerr << "An error occurred while processing query:\n" << query << "\n";
|
|
|
|
throw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void execute(ConnectionPool::Entry & connection, Query & query)
|
|
|
|
{
|
|
|
|
Stopwatch watch;
|
2017-04-17 16:16:04 +00:00
|
|
|
RemoteBlockInputStream stream(*connection, query, &settings, nullptr, Tables(), query_processing_stage);
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
Progress progress;
|
|
|
|
stream.setProgressCallback([&progress](const Progress & value) { progress.incrementPiecewiseAtomically(value); });
|
|
|
|
|
|
|
|
stream.readPrefix();
|
|
|
|
while (Block block = stream.read())
|
|
|
|
;
|
|
|
|
stream.readSuffix();
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
const BlockStreamProfileInfo & info = stream.getProfileInfo();
|
2014-05-06 17:34:22 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
double seconds = watch.elapsedSeconds();
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
info_per_interval.add(seconds, progress.rows, progress.bytes, info.rows, info.bytes);
|
|
|
|
info_total.add(seconds, progress.rows, progress.bytes, info.rows, info.bytes);
|
|
|
|
}
|
2014-05-06 17:08:51 +00:00
|
|
|
|
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
void report(Stats & info)
|
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
/// Avoid zeros, nans or exceptions
|
|
|
|
if (0 == info.queries)
|
|
|
|
return;
|
2017-02-01 20:30:46 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
double seconds = info.watch.elapsedSeconds();
|
2014-05-06 17:34:22 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
std::cerr
|
|
|
|
<< "\n"
|
|
|
|
<< "QPS: " << (info.queries / seconds) << ", "
|
|
|
|
<< "RPS: " << (info.read_rows / seconds) << ", "
|
|
|
|
<< "MiB/s: " << (info.read_bytes / seconds / 1048576) << ", "
|
|
|
|
<< "result RPS: " << (info.result_rows / seconds) << ", "
|
|
|
|
<< "result MiB/s: " << (info.result_bytes / seconds / 1048576) << "."
|
|
|
|
<< "\n";
|
2014-03-10 09:33:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
auto print_percentile = [&](double percent)
|
|
|
|
{
|
|
|
|
std::cerr << percent << "%\t" << info.sampler.quantileInterpolated(percent / 100.0) << " sec." << std::endl;
|
|
|
|
};
|
2014-04-06 23:18:07 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
for (int percent = 0; percent <= 90; percent += 10)
|
|
|
|
print_percentile(percent);
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
print_percentile(95);
|
|
|
|
print_percentile(99);
|
|
|
|
print_percentile(99.9);
|
|
|
|
print_percentile(99.99);
|
2014-05-06 18:02:57 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
info.clear();
|
|
|
|
}
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
void reportJSON(Stats & info, const std::string & filename)
|
|
|
|
{
|
|
|
|
WriteBufferFromFile json_out(filename);
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
auto print_key_value = [&](auto key, auto value, bool with_comma = true)
|
|
|
|
{
|
|
|
|
json_out << double_quote << key << ": " << value << (with_comma ? ",\n" : "\n");
|
|
|
|
};
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
auto print_percentile = [&json_out, &info](auto percent, bool with_comma = true)
|
|
|
|
{
|
|
|
|
json_out << "\"" << percent << "\"" << ": " << info.sampler.quantileInterpolated(percent / 100.0) << (with_comma ? ",\n" : "\n");
|
|
|
|
};
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << "{\n";
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << double_quote << "statistics" << ": {\n";
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
double seconds = info.watch.elapsedSeconds();
|
|
|
|
print_key_value("QPS", info.queries / seconds);
|
|
|
|
print_key_value("RPS", info.read_rows / seconds);
|
|
|
|
print_key_value("MiBPS", info.read_bytes / seconds);
|
|
|
|
print_key_value("RPS_result", info.result_rows / seconds);
|
|
|
|
print_key_value("MiBPS_result", info.result_bytes / seconds);
|
|
|
|
print_key_value("num_queries", info.queries.load(), false);
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << "},\n";
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << double_quote << "query_time_percentiles" << ": {\n";
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
for (int percent = 0; percent <= 90; percent += 10)
|
|
|
|
print_percentile(percent);
|
2016-09-30 12:39:18 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
print_percentile(95);
|
|
|
|
print_percentile(99);
|
|
|
|
print_percentile(99.9);
|
|
|
|
print_percentile(99.99, false);
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << "}\n";
|
2016-09-28 16:49:59 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
json_out << "}\n";
|
|
|
|
}
|
2017-02-01 20:30:46 +00:00
|
|
|
|
|
|
|
public:
|
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
~Benchmark()
|
|
|
|
{
|
|
|
|
shutdown = true;
|
|
|
|
}
|
2014-03-10 09:33:18 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-03-24 15:05:54 +00:00
|
|
|
/// Entry point of the clickhouse-benchmark tool: parses command line options,
/// builds the query Settings, and runs the benchmark (queries are read from stdin
/// by the Benchmark constructor). Returns 0 on success, an exception code on failure.
int mainEntryClickHouseBenchmark(int argc, char ** argv)
{
    using namespace DB;
    /// Print stack traces by default so that parse-time exceptions are verbose;
    /// overwritten from the --stacktrace option once parsing succeeds.
    bool print_stacktrace = true;

    try
    {
        using boost::program_options::value;

        boost::program_options::options_description desc("Allowed options");
        desc.add_options()
            ("help", "produce help message")
            ("concurrency,c", value<unsigned>()->default_value(1), "number of parallel queries")
            ("delay,d", value<double>()->default_value(1), "delay between intermediate reports in seconds (set 0 to disable reports)")
            ("stage", value<std::string>()->default_value("complete"), "request query processing up to specified stage")
            ("iterations,i", value<size_t>()->default_value(0), "amount of queries to be executed")
            ("timelimit,t", value<double>()->default_value(0.), "stop launch of queries after specified time limit")
            ("randomize,r", value<bool>()->default_value(false), "randomize order of execution")
            ("json", value<std::string>()->default_value(""), "write final report to specified file in JSON format")
            ("host,h", value<std::string>()->default_value("localhost"), "")
            ("port", value<UInt16>()->default_value(9000), "")
            ("user", value<std::string>()->default_value("default"), "")
            ("password", value<std::string>()->default_value(""), "")
            ("database", value<std::string>()->default_value("default"), "")
            ("stacktrace", "print stack traces of exceptions")

        /// Additionally accept every known server setting and limit as a string-valued option.
        #define DECLARE_SETTING(TYPE, NAME, DEFAULT) (#NAME, boost::program_options::value<std::string> (), "Settings.h")
        #define DECLARE_LIMIT(TYPE, NAME, DEFAULT) (#NAME, boost::program_options::value<std::string> (), "Limits.h")
            APPLY_FOR_SETTINGS(DECLARE_SETTING)
            APPLY_FOR_LIMITS(DECLARE_LIMIT)
        #undef DECLARE_SETTING
        #undef DECLARE_LIMIT
        ;

        boost::program_options::variables_map options;
        boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);

        if (options.count("help"))
        {
            std::cout << "Usage: " << argv[0] << " [options] < queries.txt\n";
            std::cout << desc << "\n";
            return 1;
        }

        print_stacktrace = options.count("stacktrace");

        /// Extract `settings` and `limits` from received `options`
        Settings settings;

        #define EXTRACT_SETTING(TYPE, NAME, DEFAULT) \
        if (options.count(#NAME)) \
            settings.set(#NAME, options[#NAME].as<std::string>());
        APPLY_FOR_SETTINGS(EXTRACT_SETTING)
        APPLY_FOR_LIMITS(EXTRACT_SETTING)
        #undef EXTRACT_SETTING

        /// The Benchmark constructor runs the whole benchmark; nothing further to do here.
        Benchmark benchmark(
            options["concurrency"].as<unsigned>(),
            options["delay"].as<double>(),
            options["host"].as<std::string>(),
            options["port"].as<UInt16>(),
            options["database"].as<std::string>(),
            options["user"].as<std::string>(),
            options["password"].as<std::string>(),
            options["stage"].as<std::string>(),
            options["randomize"].as<bool>(),
            options["iterations"].as<size_t>(),
            options["timelimit"].as<double>(),
            options["json"].as<std::string>(),
            settings);
    }
    catch (...)
    {
        std::cerr << getCurrentExceptionMessage(print_stacktrace, true) << std::endl;
        return getCurrentExceptionCode();
    }

    return 0;
}
|