From ef2cf677369ab1320caa2a1744a52fdf6c4bec09 Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 17:16:19 +0300 Subject: [PATCH 01/11] performance-test: allow several queries in xml scenarios [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index 9e4f4798953..95e721e1bb4 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -868,7 +868,17 @@ private: } if (test_config->has("query")) + { queries.push_back(test_config->getString("query")); + for (size_t i = 1; ; ++i) + { + std::string key = "query[" + std::to_string(i) + "]"; + if (!test_config->has(key)) + break; + + queries.push_back(test_config->getString(key)); + } + } if (test_config->has("query_file")) { From 678d1ef605ac817f372816b77233292540c0282a Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 17:19:05 +0300 Subject: [PATCH 02/11] performance-test: fix incorrect results reporting [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 88 +++++++++++++++++------------ 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index 95e721e1bb4..74ae0303036 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -587,7 +587,7 @@ private: }; size_t times_to_run = 1; - std::vector statistics; + std::vector statistics_by_run; /// Removes configurations that has a given value. If leave is true, the logic is reversed. void removeConfigurationsIf(std::vector & configs, FilterType filter_type, const Strings & values, bool leave = false) @@ -760,7 +760,7 @@ private: String output = runTest(test_config); if (lite_output) - std::cout << output << std::endl; + std::cout << output; else outputs.push_back(output); } @@ -982,7 +982,7 @@ private: if (metrics.size() > 0) checkMetricsInput(metrics); - statistics.resize(times_to_run * queries.size()); + statistics_by_run.resize(times_to_run * queries.size()); for (size_t number_of_launch = 0; number_of_launch < times_to_run; ++number_of_launch) { QueriesWithIndexes queries_with_indexes; @@ -1050,7 +1050,7 @@ private: size_t max_iterations = stop_criterions[statistic_index].iterations.value; size_t iteration = 0; - statistics[statistic_index].clear(); + statistics_by_run[statistic_index].clear(); execute(query, statistic_index); if (exec_type == ExecutionType::Loop) @@ -1085,14 +1085,14 @@ private: if (!gotSIGINT) { - statistics[statistic_index].ready = true; + statistics_by_run[statistic_index].ready = true; } } } void execute(const Query & query, const size_t statistic_index) { - statistics[statistic_index].watch_per_query.restart(); + statistics_by_run[statistic_index].watch_per_query.restart(); RemoteBlockInputStream stream(connection, query, &settings, global_context, nullptr, Tables() /*, query_processing_stage*/); @@ -1108,24 +1108,24 @@ private: ; stream.readSuffix(); - statistics[statistic_index].updateQueryInfo(); - statistics[statistic_index].setTotalTime(); + statistics_by_run[statistic_index].updateQueryInfo(); + statistics_by_run[statistic_index].setTotalTime(); } void checkFulfilledCriterionsAndUpdate(const Progress & progress, RemoteBlockInputStream & stream, const size_t statistic_index) { - statistics[statistic_index].add(progress.rows, progress.bytes); + statistics_by_run[statistic_index].add(progress.rows, progress.bytes); size_t 
max_rows_to_read = stop_criterions[statistic_index].rows_read.value; - if (max_rows_to_read && statistics[statistic_index].rows_read >= max_rows_to_read) + if (max_rows_to_read && statistics_by_run[statistic_index].rows_read >= max_rows_to_read) { incFulfilledCriterions(statistic_index, rows_read); } size_t max_bytes_to_read = stop_criterions[statistic_index].bytes_read_uncompressed.value; - if (max_bytes_to_read && statistics[statistic_index].bytes_read >= max_bytes_to_read) + if (max_bytes_to_read && statistics_by_run[statistic_index].bytes_read >= max_bytes_to_read) { incFulfilledCriterions(statistic_index, bytes_read_uncompressed); } @@ -1133,7 +1133,7 @@ private: if (UInt64 max_timeout_ms = stop_criterions[statistic_index].timeout_ms.value) { /// cast nanoseconds to ms - if ((statistics[statistic_index].watch.elapsed() / (1000 * 1000)) > max_timeout_ms) + if ((statistics_by_run[statistic_index].watch.elapsed() / (1000 * 1000)) > max_timeout_ms) { incFulfilledCriterions(statistic_index, timeout_ms); } @@ -1142,7 +1142,7 @@ private: size_t min_time_not_changing_for_ms = stop_criterions[statistic_index].min_time_not_changing_for_ms.value; if (min_time_not_changing_for_ms) { - size_t min_time_did_not_change_for = statistics[statistic_index].min_time_watch.elapsed() / (1000 * 1000); + size_t min_time_did_not_change_for = statistics_by_run[statistic_index].min_time_watch.elapsed() / (1000 * 1000); if (min_time_did_not_change_for >= min_time_not_changing_for_ms) { @@ -1153,7 +1153,7 @@ private: size_t max_speed_not_changing_for_ms = stop_criterions[statistic_index].max_speed_not_changing_for_ms.value; if (max_speed_not_changing_for_ms) { - UInt64 speed_not_changing_time = statistics[statistic_index].max_rows_speed_watch.elapsed() / (1000 * 1000); + UInt64 speed_not_changing_time = statistics_by_run[statistic_index].max_rows_speed_watch.elapsed() / (1000 * 1000); if (speed_not_changing_time >= max_speed_not_changing_for_ms) { incFulfilledCriterions(statistic_index, max_speed_not_changing_for_ms); @@ -1163,7 +1163,7 @@ private: size_t average_speed_not_changing_for_ms = stop_criterions[statistic_index].average_speed_not_changing_for_ms.value; if (average_speed_not_changing_for_ms) { - UInt64 speed_not_changing_time = statistics[statistic_index].avg_rows_speed_watch.elapsed() / (1000 * 1000); + UInt64 speed_not_changing_time = statistics_by_run[statistic_index].avg_rows_speed_watch.elapsed() / (1000 * 1000); if (speed_not_changing_time >= average_speed_not_changing_for_ms) { incFulfilledCriterions(statistic_index, average_speed_not_changing_for_ms); @@ -1320,13 +1320,17 @@ public: std::vector run_infos; for (size_t query_index = 0; query_index < queries.size(); ++query_index) { - for (size_t number_of_launch = 0; number_of_launch < statistics.size(); ++number_of_launch) + for (size_t number_of_launch = 0; number_of_launch < times_to_run; ++number_of_launch) { - if (!statistics[number_of_launch].ready) + Stats & statistics = statistics_by_run[number_of_launch * queries.size() + query_index]; + + if (!statistics.ready) continue; JSONString runJSON; + runJSON.set("query", queries[query_index]); + if (substitutions_maps.size()) { JSONString parameters(4); @@ -1339,11 +1343,13 @@ public: runJSON.set("parameters", parameters.asString()); } + + if (exec_type == ExecutionType::Loop) { /// in seconds if (std::find(metrics.begin(), metrics.end(), "min_time") != metrics.end()) - runJSON.set("min_time", statistics[number_of_launch].min_time / double(1000)); + runJSON.set("min_time", statistics.min_time / 
double(1000)); if (std::find(metrics.begin(), metrics.end(), "quantiles") != metrics.end()) { @@ -1354,44 +1360,44 @@ public: while (quantile_key.back() == '0') quantile_key.pop_back(); - quantiles.set(quantile_key, statistics[number_of_launch].sampler.quantileInterpolated(percent / 100.0)); + quantiles.set(quantile_key, statistics.sampler.quantileInterpolated(percent / 100.0)); } - quantiles.set("0.95", statistics[number_of_launch].sampler.quantileInterpolated(95 / 100.0)); - quantiles.set("0.99", statistics[number_of_launch].sampler.quantileInterpolated(99 / 100.0)); - quantiles.set("0.999", statistics[number_of_launch].sampler.quantileInterpolated(99.9 / 100.0)); - quantiles.set("0.9999", statistics[number_of_launch].sampler.quantileInterpolated(99.99 / 100.0)); + quantiles.set("0.95", statistics.sampler.quantileInterpolated(95 / 100.0)); + quantiles.set("0.99", statistics.sampler.quantileInterpolated(99 / 100.0)); + quantiles.set("0.999", statistics.sampler.quantileInterpolated(99.9 / 100.0)); + quantiles.set("0.9999", statistics.sampler.quantileInterpolated(99.99 / 100.0)); runJSON.set("quantiles", quantiles.asString()); } if (std::find(metrics.begin(), metrics.end(), "total_time") != metrics.end()) - runJSON.set("total_time", statistics[number_of_launch].total_time); + runJSON.set("total_time", statistics.total_time); if (std::find(metrics.begin(), metrics.end(), "queries_per_second") != metrics.end()) - runJSON.set("queries_per_second", double(statistics[number_of_launch].queries) / - statistics[number_of_launch].total_time); + runJSON.set("queries_per_second", double(statistics.queries) / + statistics.total_time); if (std::find(metrics.begin(), metrics.end(), "rows_per_second") != metrics.end()) - runJSON.set("rows_per_second", double(statistics[number_of_launch].rows_read) / - statistics[number_of_launch].total_time); + runJSON.set("rows_per_second", double(statistics.rows_read) / + statistics.total_time); if (std::find(metrics.begin(), metrics.end(), "bytes_per_second") != metrics.end()) - runJSON.set("bytes_per_second", double(statistics[number_of_launch].bytes_read) / - statistics[number_of_launch].total_time); + runJSON.set("bytes_per_second", double(statistics.bytes_read) / + statistics.total_time); } else { if (std::find(metrics.begin(), metrics.end(), "max_rows_per_second") != metrics.end()) - runJSON.set("max_rows_per_second", statistics[number_of_launch].max_rows_speed); + runJSON.set("max_rows_per_second", statistics.max_rows_speed); if (std::find(metrics.begin(), metrics.end(), "max_bytes_per_second") != metrics.end()) - runJSON.set("max_bytes_per_second", statistics[number_of_launch].max_bytes_speed); + runJSON.set("max_bytes_per_second", statistics.max_bytes_speed); if (std::find(metrics.begin(), metrics.end(), "avg_rows_per_second") != metrics.end()) - runJSON.set("avg_rows_per_second", statistics[number_of_launch].avg_rows_speed_value); + runJSON.set("avg_rows_per_second", statistics.avg_rows_speed_value); if (std::find(metrics.begin(), metrics.end(), "avg_bytes_per_second") != metrics.end()) - runJSON.set("avg_bytes_per_second", statistics[number_of_launch].avg_bytes_speed_value); + runJSON.set("avg_bytes_per_second", statistics.avg_bytes_speed_value); } run_infos.push_back(runJSON); @@ -1411,7 +1417,10 @@ public: { for (size_t number_of_launch = 0; number_of_launch < times_to_run; ++number_of_launch) { - output += test_name + ", "; + if (queries.size() > 1) + { + output += "query \"" + queries[query_index] + "\", "; + } if (substitutions_maps.size()) { @@ -1423,7 
+1432,7 @@ public: output += "run " + std::to_string(number_of_launch + 1) + ": "; output += main_metric + " = "; - output += statistics[number_of_launch * queries.size() + query_index].getStatisticByName(main_metric); + output += statistics_by_run[number_of_launch * queries.size() + query_index].getStatisticByName(main_metric); output += "\n"; } } @@ -1500,13 +1509,18 @@ int mainEntryClickhousePerformanceTest(int argc, char ** argv) if (!options.count("input-files")) { - std::cerr << "Trying to find tests in current folder" << std::endl; + std::cerr << "Trying to find test scenario files in the current folder..."; FS::path curr_dir("."); getFilesFromDir(curr_dir, input_files); if (input_files.empty()) + { + std::cerr << std::endl; throw DB::Exception("Did not find any xml files", 1); + } + else + std::cerr << " found " << input_files.size() << " files." << std::endl; } else { From 53aa599dc7bc765ab7ffaf15143e837e46b3096a Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 17:55:09 +0300 Subject: [PATCH 03/11] performance-test: rename params for clarity [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 106 ++++++++++++++-------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index 74ae0303036..ee24d846b18 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -118,8 +118,8 @@ public: enum class PriorityType { - Min, - Max + AllOf, + AnyOf }; struct CriterionWithPriority @@ -135,9 +135,9 @@ struct CriterionWithPriority }; /// Termination criterions. The running test will be terminated in either of two conditions: -/// 1. All criterions marked 'min' are fulfilled +/// 1. All criterions marked 'all_of' are fulfilled /// or -/// 2. Any criterion marked 'max' is fulfilled +/// 2. Any criterion marked 'any_of' is fulfilled class StopCriterions { private: @@ -149,14 +149,14 @@ private: Keys keys; stop_criterions_view->keys(priority, keys); - PriorityType priority_type = (priority == "min" ? PriorityType::Min : PriorityType::Max); + PriorityType priority_type = (priority == "all_of" ? 
PriorityType::AllOf : PriorityType::AnyOf); for (const String & key : keys) { - if (key == "timeout_ms") + if (key == "total_time_ms") { - timeout_ms.value = stop_criterions_view->getUInt64(priority + ".timeout_ms"); - timeout_ms.priority = priority_type; + total_time_ms.value = stop_criterions_view->getUInt64(priority + ".total_time_ms"); + total_time_ms.priority = priority_type; } else if (key == "rows_read") { @@ -193,24 +193,24 @@ private: throw DB::Exception("Met unkown stop criterion: " + key, 1); } - if (priority == "min") + if (priority == "all_of") { - ++number_of_initialized_min; + ++number_of_initialized_all_of; }; - if (priority == "max") + if (priority == "any_of") { - ++number_of_initialized_max; + ++number_of_initialized_any_of; }; } } public: - StopCriterions() : number_of_initialized_min(0), number_of_initialized_max(0), fulfilled_criterions_min(0), fulfilled_criterions_max(0) + StopCriterions() : number_of_initialized_all_of(0), number_of_initialized_any_of(0), fulfilled_criterions_all_of(0), fulfilled_criterions_any_of(0) { } StopCriterions(const StopCriterions & another_criterions) - : timeout_ms(another_criterions.timeout_ms), + : total_time_ms(another_criterions.total_time_ms), rows_read(another_criterions.rows_read), bytes_read_uncompressed(another_criterions.bytes_read_uncompressed), iterations(another_criterions.iterations), @@ -218,29 +218,29 @@ public: max_speed_not_changing_for_ms(another_criterions.max_speed_not_changing_for_ms), average_speed_not_changing_for_ms(another_criterions.average_speed_not_changing_for_ms), - number_of_initialized_min(another_criterions.number_of_initialized_min), - number_of_initialized_max(another_criterions.number_of_initialized_max), - fulfilled_criterions_min(another_criterions.fulfilled_criterions_min), - fulfilled_criterions_max(another_criterions.fulfilled_criterions_max) + number_of_initialized_all_of(another_criterions.number_of_initialized_all_of), + number_of_initialized_any_of(another_criterions.number_of_initialized_any_of), + fulfilled_criterions_all_of(another_criterions.fulfilled_criterions_all_of), + fulfilled_criterions_any_of(another_criterions.fulfilled_criterions_any_of) { } void loadFromConfig(const AbstractConfiguration & stop_criterions_view) { - if (stop_criterions_view->has("min")) + if (stop_criterions_view->has("all_of")) { - initializeStruct("min", stop_criterions_view); + initializeStruct("all_of", stop_criterions_view); } - if (stop_criterions_view->has("max")) + if (stop_criterions_view->has("any_of")) { - initializeStruct("max", stop_criterions_view); + initializeStruct("any_of", stop_criterions_view); } } void reset() { - timeout_ms.fulfilled = false; + total_time_ms.fulfilled = false; rows_read.fulfilled = false; bytes_read_uncompressed.fulfilled = false; iterations.fulfilled = false; @@ -248,11 +248,11 @@ public: max_speed_not_changing_for_ms.fulfilled = false; average_speed_not_changing_for_ms.fulfilled = false; - fulfilled_criterions_min = 0; - fulfilled_criterions_max = 0; + fulfilled_criterions_all_of = 0; + fulfilled_criterions_any_of = 0; } - CriterionWithPriority timeout_ms; + CriterionWithPriority total_time_ms; CriterionWithPriority rows_read; CriterionWithPriority bytes_read_uncompressed; CriterionWithPriority iterations; @@ -260,13 +260,13 @@ public: CriterionWithPriority max_speed_not_changing_for_ms; CriterionWithPriority average_speed_not_changing_for_ms; - /// Hereafter 'min' and 'max', in context of critetions, mean a level of importance + /// Hereafter 'all_of' and 'any_of', in 
context of critetions, mean a level of importance /// Number of initialized properties met in configuration - size_t number_of_initialized_min; - size_t number_of_initialized_max; + size_t number_of_initialized_all_of; + size_t number_of_initialized_any_of; - size_t fulfilled_criterions_min; - size_t fulfilled_criterions_max; + size_t fulfilled_criterions_all_of; + size_t fulfilled_criterions_any_of; }; struct Stats @@ -567,8 +567,8 @@ private: #define incFulfilledCriterions(index, CRITERION) \ if (!stop_criterions[index].CRITERION.fulfilled) \ { \ - stop_criterions[index].CRITERION.priority == PriorityType::Min ? ++stop_criterions[index].fulfilled_criterions_min \ - : ++stop_criterions[index].fulfilled_criterions_max; \ + stop_criterions[index].CRITERION.priority == PriorityType::AllOf ? ++stop_criterions[index].fulfilled_criterions_all_of \ + : ++stop_criterions[index].fulfilled_criterions_any_of; \ stop_criterions[index].CRITERION.fulfilled = true; \ } @@ -666,9 +666,9 @@ private: for (const String & precondition : preconditions) { - if (precondition == "reset_cpu_cache") - if (system("(>&2 echo 'Flushing cache...') && (sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches') && (>&2 echo 'Flushed.')")) { - std::cerr << "Failed to flush cache" << std::endl; + if (precondition == "flush_disk_cache") + if (system("(>&2 echo 'Flushing disk cache...') && (sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches') && (>&2 echo 'Flushed.')")) { + std::cerr << "Failed to flush disk cache" << std::endl; return false; } @@ -693,7 +693,7 @@ private: } } #else - throw DB::Exception("Not implemented", ErrorCodes::NOT_IMPLEMENTED); + throw DB::Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED); #endif } @@ -943,9 +943,9 @@ private: stop_criterions.resize(times_to_run * queries.size()); - if (test_config->has("stop")) + if (test_config->has("stop_conditions")) { - AbstractConfig stop_criterions_view(test_config->createView("stop")); + AbstractConfig stop_criterions_view(test_config->createView("stop_conditions")); for (StopCriterions & stop_criterion : stop_criterions) { stop_criterion.loadFromConfig(stop_criterions_view); @@ -1065,17 +1065,17 @@ private: incFulfilledCriterions(statistic_index, iterations); } - if (stop_criterions[statistic_index].number_of_initialized_min - && (stop_criterions[statistic_index].fulfilled_criterions_min - >= stop_criterions[statistic_index].number_of_initialized_min)) + if (stop_criterions[statistic_index].number_of_initialized_all_of + && (stop_criterions[statistic_index].fulfilled_criterions_all_of + >= stop_criterions[statistic_index].number_of_initialized_all_of)) { - /// All 'min' criterions are fulfilled + /// All 'all_of' criterions are fulfilled break; } - if (stop_criterions[statistic_index].number_of_initialized_max && stop_criterions[statistic_index].fulfilled_criterions_max) + if (stop_criterions[statistic_index].number_of_initialized_any_of && stop_criterions[statistic_index].fulfilled_criterions_any_of) { - /// Some 'max' criterions are fulfilled + /// Some 'any_of' criterions are fulfilled break; } @@ -1130,12 +1130,12 @@ private: incFulfilledCriterions(statistic_index, bytes_read_uncompressed); } - if (UInt64 max_timeout_ms = stop_criterions[statistic_index].timeout_ms.value) + if (UInt64 max_total_time_ms = stop_criterions[statistic_index].total_time_ms.value) { /// cast nanoseconds to ms - if ((statistics_by_run[statistic_index].watch.elapsed() / (1000 * 1000)) > max_timeout_ms) + if 
((statistics_by_run[statistic_index].watch.elapsed() / (1000 * 1000)) > max_total_time_ms) { - incFulfilledCriterions(statistic_index, timeout_ms); + incFulfilledCriterions(statistic_index, total_time_ms); } } @@ -1170,16 +1170,16 @@ private: } } - if (stop_criterions[statistic_index].number_of_initialized_min - && (stop_criterions[statistic_index].fulfilled_criterions_min >= stop_criterions[statistic_index].number_of_initialized_min)) + if (stop_criterions[statistic_index].number_of_initialized_all_of + && (stop_criterions[statistic_index].fulfilled_criterions_all_of >= stop_criterions[statistic_index].number_of_initialized_all_of)) { - /// All 'min' criterions are fulfilled + /// All 'all_of' criterions are fulfilled stream.cancel(); } - if (stop_criterions[statistic_index].number_of_initialized_max && stop_criterions[statistic_index].fulfilled_criterions_max) + if (stop_criterions[statistic_index].number_of_initialized_any_of && stop_criterions[statistic_index].fulfilled_criterions_any_of) { - /// Some 'max' criterions are fulfilled + /// Some 'any_of' criterions are fulfilled stream.cancel(); } From 0bc0150682d0cb9b31d399f97b760744a4294258 Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 18:51:12 +0300 Subject: [PATCH 04/11] performance-test: add server version to json output [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index ee24d846b18..02c696d5bd6 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -515,8 +516,16 @@ public: throw DB::Exception("No tests were specified", 0); } - std::cerr << std::fixed << std::setprecision(3); - std::cout << std::fixed << std::setprecision(3); + std::string name; + UInt64 version_major; + UInt64 version_minor; + UInt64 version_revision; + connection.getServerVersion(name, version_major, version_minor, version_revision); + + std::stringstream ss; + ss << name << " v" << version_major << "." << version_minor << "." 
<< version_revision; + server_version = ss.str(); + processTestsConfigurations(input_files); } @@ -529,6 +538,7 @@ private: Queries queries; Connection connection; + std::string server_version; using Keys = std::vector; @@ -1278,16 +1288,10 @@ public: String constructTotalInfo(Strings metrics) { JSONString json_output; - String hostname; - char hostname_buffer[256]; - if (gethostname(hostname_buffer, 256) == 0) - { - hostname = String(hostname_buffer); - } - - json_output.set("hostname", hostname); + json_output.set("hostname", getFQDNOrHostName()); json_output.set("cpu_num", sysconf(_SC_NPROCESSORS_ONLN)); + json_output.set("server_version", server_version); json_output.set("test_name", test_name); json_output.set("main_metric", main_metric); From 1783d233e2fc4bc15bad24ce839b29800ffc6ce8 Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 19:47:16 +0300 Subject: [PATCH 05/11] performance-test: do not take cancelled query times into account [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 128 ++++++++++++++++------------ 1 file changed, 73 insertions(+), 55 deletions(-) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index 02c696d5bd6..09182787b47 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -279,6 +279,9 @@ struct Stats Stopwatch max_bytes_speed_watch; Stopwatch avg_rows_speed_watch; Stopwatch avg_bytes_speed_watch; + + bool last_query_was_cancelled = false; + size_t queries; size_t rows_read; size_t bytes_read; @@ -454,6 +457,8 @@ struct Stats avg_rows_speed_watch.restart(); avg_bytes_speed_watch.restart(); + last_query_was_cancelled = false; + sampler.clear(); queries = 0; @@ -559,7 +564,7 @@ private: std::vector substitutions_maps; bool gotSIGINT; - std::vector stop_criterions; + std::vector stop_criterions_by_run; String main_metric; bool lite_output; String profiles_file; @@ -574,12 +579,12 @@ private: Strings tests_names_regexp; Strings skip_names_regexp; - #define incFulfilledCriterions(index, CRITERION) \ - if (!stop_criterions[index].CRITERION.fulfilled) \ + #define INC_FULFILLED_CRITERIONS(stop_criterions, CRITERION) \ + if (!stop_criterions.CRITERION.fulfilled) \ { \ - stop_criterions[index].CRITERION.priority == PriorityType::AllOf ? ++stop_criterions[index].fulfilled_criterions_all_of \ - : ++stop_criterions[index].fulfilled_criterions_any_of; \ - stop_criterions[index].CRITERION.fulfilled = true; \ + stop_criterions.CRITERION.priority == PriorityType::AllOf ? 
++stop_criterions.fulfilled_criterions_all_of \ + : ++stop_criterions.fulfilled_criterions_any_of; \ + stop_criterions.CRITERION.fulfilled = true; \ } enum class ExecutionType @@ -951,14 +956,14 @@ private: times_to_run = test_config->getUInt("times_to_run"); } - stop_criterions.resize(times_to_run * queries.size()); + stop_criterions_by_run.resize(times_to_run * queries.size()); if (test_config->has("stop_conditions")) { AbstractConfig stop_criterions_view(test_config->createView("stop_conditions")); - for (StopCriterions & stop_criterion : stop_criterions) + for (StopCriterions & stop_criterions : stop_criterions_by_run) { - stop_criterion.loadFromConfig(stop_criterions_view); + stop_criterions.loadFromConfig(stop_criterions_view); } } else @@ -1000,7 +1005,7 @@ private: for (size_t query_index = 0; query_index < queries.size(); ++query_index) { size_t statistic_index = number_of_launch * queries.size() + query_index; - stop_criterions[statistic_index].reset(); + stop_criterions_by_run[statistic_index].reset(); queries_with_indexes.push_back({queries[query_index], statistic_index}); } @@ -1055,13 +1060,17 @@ private: for (const std::pair & query_and_index : queries_with_indexes) { Query query = query_and_index.first; - const size_t statistic_index = query_and_index.second; + const size_t run_index = query_and_index.second; - size_t max_iterations = stop_criterions[statistic_index].iterations.value; + StopCriterions & stop_criterions = stop_criterions_by_run[run_index]; + + size_t max_iterations = stop_criterions.iterations.value; size_t iteration = 0; - statistics_by_run[statistic_index].clear(); - execute(query, statistic_index); + Stats & statistics = statistics_by_run[run_index]; + + statistics.clear(); + execute(query, statistics, stop_criterions); if (exec_type == ExecutionType::Loop) { @@ -1072,130 +1081,139 @@ private: /// check stop criterions if (max_iterations && iteration >= max_iterations) { - incFulfilledCriterions(statistic_index, iterations); + INC_FULFILLED_CRITERIONS(stop_criterions, iterations); } - if (stop_criterions[statistic_index].number_of_initialized_all_of - && (stop_criterions[statistic_index].fulfilled_criterions_all_of - >= stop_criterions[statistic_index].number_of_initialized_all_of)) + if (stop_criterions.number_of_initialized_all_of + && (stop_criterions.fulfilled_criterions_all_of + >= stop_criterions.number_of_initialized_all_of)) { /// All 'all_of' criterions are fulfilled break; } - if (stop_criterions[statistic_index].number_of_initialized_any_of && stop_criterions[statistic_index].fulfilled_criterions_any_of) + if (stop_criterions.number_of_initialized_any_of && stop_criterions.fulfilled_criterions_any_of) { /// Some 'any_of' criterions are fulfilled break; } - execute(query, statistic_index); + execute(query, statistics, stop_criterions); } } if (!gotSIGINT) { - statistics_by_run[statistic_index].ready = true; + statistics.ready = true; } } } - void execute(const Query & query, const size_t statistic_index) + void execute(const Query & query, Stats & statistics, StopCriterions & stop_criterions) { - statistics_by_run[statistic_index].watch_per_query.restart(); + statistics.watch_per_query.restart(); + statistics.last_query_was_cancelled = false; RemoteBlockInputStream stream(connection, query, &settings, global_context, nullptr, Tables() /*, query_processing_stage*/); Progress progress; - stream.setProgressCallback([&progress, &stream, statistic_index, this](const Progress & value) { - progress.incrementPiecewiseAtomically(value); - - 
this->checkFulfilledCriterionsAndUpdate(progress, stream, statistic_index); - }); + stream.setProgressCallback( + [&progress, &stream, &statistics, &stop_criterions, this](const Progress & value) + { + progress.incrementPiecewiseAtomically(value); + this->checkFulfilledCriterionsAndUpdate(progress, stream, statistics, stop_criterions); + }); stream.readPrefix(); while (Block block = stream.read()) ; stream.readSuffix(); - statistics_by_run[statistic_index].updateQueryInfo(); - statistics_by_run[statistic_index].setTotalTime(); + if (!statistics.last_query_was_cancelled) + statistics.updateQueryInfo(); + + statistics.setTotalTime(); } - void checkFulfilledCriterionsAndUpdate(const Progress & progress, - RemoteBlockInputStream & stream, - const size_t statistic_index) + void checkFulfilledCriterionsAndUpdate( + const Progress & progress, + RemoteBlockInputStream & stream, + Stats & statistics, + StopCriterions & stop_criterions) { - statistics_by_run[statistic_index].add(progress.rows, progress.bytes); + statistics.add(progress.rows, progress.bytes); - size_t max_rows_to_read = stop_criterions[statistic_index].rows_read.value; - if (max_rows_to_read && statistics_by_run[statistic_index].rows_read >= max_rows_to_read) + size_t max_rows_to_read = stop_criterions.rows_read.value; + if (max_rows_to_read && statistics.rows_read >= max_rows_to_read) { - incFulfilledCriterions(statistic_index, rows_read); + INC_FULFILLED_CRITERIONS(stop_criterions, rows_read); } - size_t max_bytes_to_read = stop_criterions[statistic_index].bytes_read_uncompressed.value; - if (max_bytes_to_read && statistics_by_run[statistic_index].bytes_read >= max_bytes_to_read) + size_t max_bytes_to_read = stop_criterions.bytes_read_uncompressed.value; + if (max_bytes_to_read && statistics.bytes_read >= max_bytes_to_read) { - incFulfilledCriterions(statistic_index, bytes_read_uncompressed); + INC_FULFILLED_CRITERIONS(stop_criterions, bytes_read_uncompressed); } - if (UInt64 max_total_time_ms = stop_criterions[statistic_index].total_time_ms.value) + if (UInt64 max_total_time_ms = stop_criterions.total_time_ms.value) { /// cast nanoseconds to ms - if ((statistics_by_run[statistic_index].watch.elapsed() / (1000 * 1000)) > max_total_time_ms) + if ((statistics.watch.elapsed() / (1000 * 1000)) > max_total_time_ms) { - incFulfilledCriterions(statistic_index, total_time_ms); + INC_FULFILLED_CRITERIONS(stop_criterions, total_time_ms); } } - size_t min_time_not_changing_for_ms = stop_criterions[statistic_index].min_time_not_changing_for_ms.value; + size_t min_time_not_changing_for_ms = stop_criterions.min_time_not_changing_for_ms.value; if (min_time_not_changing_for_ms) { - size_t min_time_did_not_change_for = statistics_by_run[statistic_index].min_time_watch.elapsed() / (1000 * 1000); + size_t min_time_did_not_change_for = statistics.min_time_watch.elapsed() / (1000 * 1000); if (min_time_did_not_change_for >= min_time_not_changing_for_ms) { - incFulfilledCriterions(statistic_index, min_time_not_changing_for_ms); + INC_FULFILLED_CRITERIONS(stop_criterions, min_time_not_changing_for_ms); } } - size_t max_speed_not_changing_for_ms = stop_criterions[statistic_index].max_speed_not_changing_for_ms.value; + size_t max_speed_not_changing_for_ms = stop_criterions.max_speed_not_changing_for_ms.value; if (max_speed_not_changing_for_ms) { - UInt64 speed_not_changing_time = statistics_by_run[statistic_index].max_rows_speed_watch.elapsed() / (1000 * 1000); + UInt64 speed_not_changing_time = statistics.max_rows_speed_watch.elapsed() / (1000 * 1000); if 
(speed_not_changing_time >= max_speed_not_changing_for_ms) { - incFulfilledCriterions(statistic_index, max_speed_not_changing_for_ms); + INC_FULFILLED_CRITERIONS(stop_criterions, max_speed_not_changing_for_ms); } } - size_t average_speed_not_changing_for_ms = stop_criterions[statistic_index].average_speed_not_changing_for_ms.value; + size_t average_speed_not_changing_for_ms = stop_criterions.average_speed_not_changing_for_ms.value; if (average_speed_not_changing_for_ms) { - UInt64 speed_not_changing_time = statistics_by_run[statistic_index].avg_rows_speed_watch.elapsed() / (1000 * 1000); + UInt64 speed_not_changing_time = statistics.avg_rows_speed_watch.elapsed() / (1000 * 1000); if (speed_not_changing_time >= average_speed_not_changing_for_ms) { - incFulfilledCriterions(statistic_index, average_speed_not_changing_for_ms); + INC_FULFILLED_CRITERIONS(stop_criterions, average_speed_not_changing_for_ms); } } - if (stop_criterions[statistic_index].number_of_initialized_all_of - && (stop_criterions[statistic_index].fulfilled_criterions_all_of >= stop_criterions[statistic_index].number_of_initialized_all_of)) + if (stop_criterions.number_of_initialized_all_of + && (stop_criterions.fulfilled_criterions_all_of >= stop_criterions.number_of_initialized_all_of)) { /// All 'all_of' criterions are fulfilled + statistics.last_query_was_cancelled = true; stream.cancel(); } - if (stop_criterions[statistic_index].number_of_initialized_any_of && stop_criterions[statistic_index].fulfilled_criterions_any_of) + if (stop_criterions.number_of_initialized_any_of && stop_criterions.fulfilled_criterions_any_of) { /// Some 'any_of' criterions are fulfilled + statistics.last_query_was_cancelled = true; stream.cancel(); } if (interrupt_listener.check()) { gotSIGINT = true; + statistics.last_query_was_cancelled = true; stream.cancel(); } } From 374e92f76eb701cdf8fdb3a801fd2ada3f27e6a3 Mon Sep 17 00:00:00 2001 From: Alexey Zatelepin Date: Wed, 21 Jun 2017 21:41:50 +0300 Subject: [PATCH 06/11] performance-test: allow same stop condition to be present in all_of and any_of sections [#CLICKHOUSE-3086] --- dbms/src/Server/PerformanceTest.cpp | 405 +++++++++++----------------- 1 file changed, 157 insertions(+), 248 deletions(-) diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index 09182787b47..af2d2e2adcb 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -117,125 +117,38 @@ public: } }; -enum class PriorityType -{ - AllOf, - AnyOf -}; -struct CriterionWithPriority -{ - PriorityType priority; - size_t value; - bool fulfilled; +using ConfigurationPtr = Poco::AutoPtr; - CriterionWithPriority() : value(0), fulfilled(false) - { - } - CriterionWithPriority(const CriterionWithPriority &) = default; -}; - -/// Termination criterions. The running test will be terminated in either of two conditions: -/// 1. All criterions marked 'all_of' are fulfilled -/// or -/// 2. Any criterion marked 'any_of' is fulfilled -class StopCriterions -{ -private: - using AbstractConfiguration = Poco::AutoPtr; - using Keys = std::vector; - - void initializeStruct(const String & priority, const AbstractConfiguration & stop_criterions_view) +/// A set of supported stop conditions. +struct StopConditionsSet +{ + void loadFromConfig(const ConfigurationPtr & stop_conditions_view) { + using Keys = std::vector; Keys keys; - stop_criterions_view->keys(priority, keys); - - PriorityType priority_type = (priority == "all_of" ? 
PriorityType::AllOf : PriorityType::AnyOf); + stop_conditions_view->keys(keys); for (const String & key : keys) { if (key == "total_time_ms") - { - total_time_ms.value = stop_criterions_view->getUInt64(priority + ".total_time_ms"); - total_time_ms.priority = priority_type; - } + total_time_ms.value = stop_conditions_view->getUInt64(key); else if (key == "rows_read") - { - rows_read.value = stop_criterions_view->getUInt64(priority + ".rows_read"); - rows_read.priority = priority_type; - } + rows_read.value = stop_conditions_view->getUInt64(key); else if (key == "bytes_read_uncompressed") - { - bytes_read_uncompressed.value = stop_criterions_view->getUInt64(priority + ".bytes_read_uncompressed"); - bytes_read_uncompressed.priority = priority_type; - } + bytes_read_uncompressed.value = stop_conditions_view->getUInt64(key); else if (key == "iterations") - { - iterations.value = stop_criterions_view->getUInt64(priority + ".iterations"); - iterations.priority = priority_type; - } + iterations.value = stop_conditions_view->getUInt64(key); else if (key == "min_time_not_changing_for_ms") - { - min_time_not_changing_for_ms.value = stop_criterions_view->getUInt64(priority + ".min_time_not_changing_for_ms"); - min_time_not_changing_for_ms.priority = priority_type; - } + min_time_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); else if (key == "max_speed_not_changing_for_ms") - { - max_speed_not_changing_for_ms.value = stop_criterions_view->getUInt64(priority + ".max_speed_not_changing_for_ms"); - max_speed_not_changing_for_ms.priority = priority_type; - } + max_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); else if (key == "average_speed_not_changing_for_ms") - { - average_speed_not_changing_for_ms.value = stop_criterions_view->getUInt64(priority + ".average_speed_not_changing_for_ms"); - average_speed_not_changing_for_ms.priority = priority_type; - } + average_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); else - { - throw DB::Exception("Met unkown stop criterion: " + key, 1); - } + throw DB::Exception("Met unkown stop condition: " + key, 1); - if (priority == "all_of") - { - ++number_of_initialized_all_of; - }; - if (priority == "any_of") - { - ++number_of_initialized_any_of; - }; - } - } - -public: - StopCriterions() : number_of_initialized_all_of(0), number_of_initialized_any_of(0), fulfilled_criterions_all_of(0), fulfilled_criterions_any_of(0) - { - } - - StopCriterions(const StopCriterions & another_criterions) - : total_time_ms(another_criterions.total_time_ms), - rows_read(another_criterions.rows_read), - bytes_read_uncompressed(another_criterions.bytes_read_uncompressed), - iterations(another_criterions.iterations), - min_time_not_changing_for_ms(another_criterions.min_time_not_changing_for_ms), - max_speed_not_changing_for_ms(another_criterions.max_speed_not_changing_for_ms), - average_speed_not_changing_for_ms(another_criterions.average_speed_not_changing_for_ms), - - number_of_initialized_all_of(another_criterions.number_of_initialized_all_of), - number_of_initialized_any_of(another_criterions.number_of_initialized_any_of), - fulfilled_criterions_all_of(another_criterions.fulfilled_criterions_all_of), - fulfilled_criterions_any_of(another_criterions.fulfilled_criterions_any_of) - { - } - - void loadFromConfig(const AbstractConfiguration & stop_criterions_view) - { - if (stop_criterions_view->has("all_of")) - { - initializeStruct("all_of", stop_criterions_view); - } - - if (stop_criterions_view->has("any_of")) - { - 
initializeStruct("any_of", stop_criterions_view); + ++initialized_count; } } @@ -249,25 +162,98 @@ public: max_speed_not_changing_for_ms.fulfilled = false; average_speed_not_changing_for_ms.fulfilled = false; - fulfilled_criterions_all_of = 0; - fulfilled_criterions_any_of = 0; + fulfilled_count = 0; } - CriterionWithPriority total_time_ms; - CriterionWithPriority rows_read; - CriterionWithPriority bytes_read_uncompressed; - CriterionWithPriority iterations; - CriterionWithPriority min_time_not_changing_for_ms; - CriterionWithPriority max_speed_not_changing_for_ms; - CriterionWithPriority average_speed_not_changing_for_ms; + /// Note: only conditions with UInt64 minimal thresholds are supported. + /// I.e. condition is fulfilled when value is exceeded. + struct StopCondition + { + UInt64 value = 0; + bool fulfilled = false; + }; - /// Hereafter 'all_of' and 'any_of', in context of critetions, mean a level of importance - /// Number of initialized properties met in configuration - size_t number_of_initialized_all_of; - size_t number_of_initialized_any_of; + void report(UInt64 value, StopCondition & condition) + { + if (condition.value && !condition.fulfilled && value >= condition.value) + { + condition.fulfilled = true; + ++fulfilled_count; + } + } - size_t fulfilled_criterions_all_of; - size_t fulfilled_criterions_any_of; + StopCondition total_time_ms; + StopCondition rows_read; + StopCondition bytes_read_uncompressed; + StopCondition iterations; + StopCondition min_time_not_changing_for_ms; + StopCondition max_speed_not_changing_for_ms; + StopCondition average_speed_not_changing_for_ms; + + size_t initialized_count = 0; + size_t fulfilled_count = 0; +}; + +/// Stop conditions for a test run. The running test will be terminated in either of two conditions: +/// 1. All conditions marked 'all_of' are fulfilled +/// or +/// 2. 
Any condition marked 'any_of' is fulfilled +class TestStopConditions +{ +public: + void loadFromConfig(ConfigurationPtr & stop_conditions_config) + { + if (stop_conditions_config->has("all_of")) + { + ConfigurationPtr config_all_of(stop_conditions_config->createView("all_of")); + conditions_all_of.loadFromConfig(config_all_of); + } + if (stop_conditions_config->has("any_of")) + { + ConfigurationPtr config_any_of(stop_conditions_config->createView("any_of")); + conditions_any_of.loadFromConfig(config_any_of); + } + } + + bool empty() const + { + return !conditions_all_of.initialized_count && !conditions_any_of.initialized_count; + } + +#define DEFINE_REPORT_FUNC(FUNC_NAME, CONDITION) \ + void FUNC_NAME(UInt64 value) \ + { \ + conditions_all_of.report(value, conditions_all_of.CONDITION); \ + conditions_any_of.report(value, conditions_any_of.CONDITION); \ + } \ + + DEFINE_REPORT_FUNC(reportTotalTime, total_time_ms); + DEFINE_REPORT_FUNC(reportRowsRead, rows_read); + DEFINE_REPORT_FUNC(reportBytesReadUncompressed, bytes_read_uncompressed); + DEFINE_REPORT_FUNC(reportIterations, iterations); + DEFINE_REPORT_FUNC(reportMinTimeNotChangingFor, min_time_not_changing_for_ms); + DEFINE_REPORT_FUNC(reportMaxSpeedNotChangingFor, max_speed_not_changing_for_ms); + DEFINE_REPORT_FUNC(reportAverageSpeedNotChangingFor, average_speed_not_changing_for_ms); + +#undef REPORT + + bool areFulfilled() const + { + return + (conditions_all_of.initialized_count + && conditions_all_of.fulfilled_count >= conditions_all_of.initialized_count) + || (conditions_any_of.initialized_count && conditions_any_of.fulfilled_count); + } + + void reset() + { + conditions_all_of.reset(); + conditions_any_of.reset(); + } + +private: + StopConditionsSet conditions_all_of; + StopConditionsSet conditions_any_of; }; struct Stats @@ -553,8 +539,7 @@ private: InterruptListener interrupt_listener; using XMLConfiguration = Poco::Util::XMLConfiguration; - using AbstractConfig = Poco::AutoPtr; - using Config = Poco::AutoPtr; + using XMLConfigurationPtr = Poco::AutoPtr; using Paths = std::vector; using StringToVector = std::map>; @@ -564,13 +549,13 @@ private: std::vector substitutions_maps; bool gotSIGINT; - std::vector stop_criterions_by_run; + std::vector stop_conditions_by_run; String main_metric; bool lite_output; String profiles_file; Strings input_files; - std::vector tests_configurations; + std::vector tests_configurations; Strings tests_tags; Strings skip_tags; @@ -579,14 +564,6 @@ private: Strings tests_names_regexp; Strings skip_names_regexp; - #define INC_FULFILLED_CRITERIONS(stop_criterions, CRITERION) \ - if (!stop_criterions.CRITERION.fulfilled) \ - { \ - stop_criterions.CRITERION.priority == PriorityType::AllOf ? ++stop_criterions.fulfilled_criterions_all_of \ - : ++stop_criterions.fulfilled_criterions_any_of; \ - stop_criterions.CRITERION.fulfilled = true; \ - } - enum class ExecutionType { Loop, @@ -605,9 +582,12 @@ private: std::vector statistics_by_run; /// Removes configurations that has a given value. If leave is true, the logic is reversed. 
- void removeConfigurationsIf(std::vector & configs, FilterType filter_type, const Strings & values, bool leave = false) + void removeConfigurationsIf( + std::vector & configs, + FilterType filter_type, const Strings & values, bool leave = false) { - auto checker = [&filter_type, &values, &leave](Config & config) { + auto checker = [&filter_type, &values, &leave](XMLConfigurationPtr & config) + { if (values.size() == 0) return false; @@ -650,7 +630,7 @@ private: return remove_or_not; }; - std::vector::iterator new_end = std::remove_if(configs.begin(), configs.end(), checker); + auto new_end = std::remove_if(configs.begin(), configs.end(), checker); configs.erase(new_end, configs.end()); } @@ -670,7 +650,7 @@ private: } /// Checks specified preconditions per test (process cache, table existence, etc.) - bool checkPreconditions(const Config & config) + bool checkPreconditions(const XMLConfigurationPtr & config) { if (!config->has("preconditions")) return true; @@ -756,7 +736,7 @@ private: for (size_t i = 0; i != input_files.size(); ++i) { const String path = input_files[i]; - tests_configurations[i] = Config(new XMLConfiguration(path)); + tests_configurations[i] = XMLConfigurationPtr(new XMLConfiguration(path)); } filterConfigurations(); @@ -798,9 +778,9 @@ private: } } - void extractSettings(const Config & config, const String & key, - const Strings & settings_list, - std::map settings_to_apply) + void extractSettings( + const XMLConfigurationPtr & config, const String & key, + const Strings & settings_list, std::map & settings_to_apply) { for (const String & setup : settings_list) { @@ -815,7 +795,7 @@ private: } } - String runTest(Config & test_config) + String runTest(XMLConfigurationPtr & test_config) { queries.clear(); @@ -834,7 +814,7 @@ private: if (!profiles_file.empty()) { String profile_name = test_config->getString("settings.profile"); - Config profiles_config(new XMLConfiguration(profiles_file)); + XMLConfigurationPtr profiles_config(new XMLConfiguration(profiles_file)); Keys profile_settings; profiles_config->keys("profiles." 
+ profile_name, profile_settings); @@ -932,7 +912,7 @@ private: throw DB::Exception("Only one query is allowed when using substitutions", 1); /// Make "subconfig" of inner xml block - AbstractConfig substitutions_view(test_config->createView("substitutions")); + ConfigurationPtr substitutions_view(test_config->createView("substitutions")); constructSubstitutions(substitutions_view, substitutions); queries = formatQueries(queries[0], substitutions); @@ -956,22 +936,21 @@ private: times_to_run = test_config->getUInt("times_to_run"); } - stop_criterions_by_run.resize(times_to_run * queries.size()); - + TestStopConditions stop_conditions_template; if (test_config->has("stop_conditions")) { - AbstractConfig stop_criterions_view(test_config->createView("stop_conditions")); - for (StopCriterions & stop_criterions : stop_criterions_by_run) - { - stop_criterions.loadFromConfig(stop_criterions_view); - } - } - else - { - throw DB::Exception("No termination conditions were found in config", 1); + ConfigurationPtr stop_conditions_config(test_config->createView("stop_conditions")); + stop_conditions_template.loadFromConfig(stop_conditions_config); } - AbstractConfig metrics_view(test_config->createView("metrics")); + if (stop_conditions_template.empty()) + throw DB::Exception("No termination conditions were found in config", 1); + + for (size_t i = 0; i < times_to_run * queries.size(); ++i) + stop_conditions_by_run.push_back(stop_conditions_template); + + + ConfigurationPtr metrics_view(test_config->createView("metrics")); Keys metrics; metrics_view->keys(metrics); @@ -1005,7 +984,7 @@ private: for (size_t query_index = 0; query_index < queries.size(); ++query_index) { size_t statistic_index = number_of_launch * queries.size() + query_index; - stop_criterions_by_run[statistic_index].reset(); + stop_conditions_by_run[statistic_index].reset(); queries_with_indexes.push_back({queries[query_index], statistic_index}); } @@ -1062,43 +1041,24 @@ private: Query query = query_and_index.first; const size_t run_index = query_and_index.second; - StopCriterions & stop_criterions = stop_criterions_by_run[run_index]; - - size_t max_iterations = stop_criterions.iterations.value; - size_t iteration = 0; - + TestStopConditions & stop_conditions = stop_conditions_by_run[run_index]; Stats & statistics = statistics_by_run[run_index]; + size_t iteration = 0; + statistics.clear(); - execute(query, statistics, stop_criterions); + execute(query, statistics, stop_conditions); if (exec_type == ExecutionType::Loop) { while (!gotSIGINT) { ++iteration; - - /// check stop criterions - if (max_iterations && iteration >= max_iterations) - { - INC_FULFILLED_CRITERIONS(stop_criterions, iterations); - } - - if (stop_criterions.number_of_initialized_all_of - && (stop_criterions.fulfilled_criterions_all_of - >= stop_criterions.number_of_initialized_all_of)) - { - /// All 'all_of' criterions are fulfilled + stop_conditions.reportIterations(iteration); + if (stop_conditions.areFulfilled()) break; - } - if (stop_criterions.number_of_initialized_any_of && stop_criterions.fulfilled_criterions_any_of) - { - /// Some 'any_of' criterions are fulfilled - break; - } - - execute(query, statistics, stop_criterions); + execute(query, statistics, stop_conditions); } } @@ -1109,7 +1069,7 @@ private: } } - void execute(const Query & query, Stats & statistics, StopCriterions & stop_criterions) + void execute(const Query & query, Stats & statistics, TestStopConditions & stop_conditions) { statistics.watch_per_query.restart(); 
statistics.last_query_was_cancelled = false; @@ -1118,10 +1078,10 @@ private: Progress progress; stream.setProgressCallback( - [&progress, &stream, &statistics, &stop_criterions, this](const Progress & value) + [&progress, &stream, &statistics, &stop_conditions, this](const Progress & value) { progress.incrementPiecewiseAtomically(value); - this->checkFulfilledCriterionsAndUpdate(progress, stream, statistics, stop_criterions); + this->checkFulfilledConditionsAndUpdate(progress, stream, statistics, stop_conditions); }); stream.readPrefix(); @@ -1135,77 +1095,25 @@ private: statistics.setTotalTime(); } - void checkFulfilledCriterionsAndUpdate( + void checkFulfilledConditionsAndUpdate( const Progress & progress, RemoteBlockInputStream & stream, Stats & statistics, - StopCriterions & stop_criterions) + TestStopConditions & stop_conditions) { statistics.add(progress.rows, progress.bytes); - size_t max_rows_to_read = stop_criterions.rows_read.value; - if (max_rows_to_read && statistics.rows_read >= max_rows_to_read) - { - INC_FULFILLED_CRITERIONS(stop_criterions, rows_read); - } + stop_conditions.reportRowsRead(statistics.rows_read); + stop_conditions.reportBytesReadUncompressed(statistics.bytes_read); + stop_conditions.reportTotalTime(statistics.watch.elapsed() / (1000 * 1000)); + stop_conditions.reportMinTimeNotChangingFor(statistics.min_time_watch.elapsed() / (1000 * 1000)); + stop_conditions.reportMaxSpeedNotChangingFor( + statistics.max_rows_speed_watch.elapsed() / (1000 * 1000)); + stop_conditions.reportAverageSpeedNotChangingFor( + statistics.avg_rows_speed_watch.elapsed() / (1000 * 1000)); - size_t max_bytes_to_read = stop_criterions.bytes_read_uncompressed.value; - if (max_bytes_to_read && statistics.bytes_read >= max_bytes_to_read) + if (stop_conditions.areFulfilled()) { - INC_FULFILLED_CRITERIONS(stop_criterions, bytes_read_uncompressed); - } - - if (UInt64 max_total_time_ms = stop_criterions.total_time_ms.value) - { - /// cast nanoseconds to ms - if ((statistics.watch.elapsed() / (1000 * 1000)) > max_total_time_ms) - { - INC_FULFILLED_CRITERIONS(stop_criterions, total_time_ms); - } - } - - size_t min_time_not_changing_for_ms = stop_criterions.min_time_not_changing_for_ms.value; - if (min_time_not_changing_for_ms) - { - size_t min_time_did_not_change_for = statistics.min_time_watch.elapsed() / (1000 * 1000); - - if (min_time_did_not_change_for >= min_time_not_changing_for_ms) - { - INC_FULFILLED_CRITERIONS(stop_criterions, min_time_not_changing_for_ms); - } - } - - size_t max_speed_not_changing_for_ms = stop_criterions.max_speed_not_changing_for_ms.value; - if (max_speed_not_changing_for_ms) - { - UInt64 speed_not_changing_time = statistics.max_rows_speed_watch.elapsed() / (1000 * 1000); - if (speed_not_changing_time >= max_speed_not_changing_for_ms) - { - INC_FULFILLED_CRITERIONS(stop_criterions, max_speed_not_changing_for_ms); - } - } - - size_t average_speed_not_changing_for_ms = stop_criterions.average_speed_not_changing_for_ms.value; - if (average_speed_not_changing_for_ms) - { - UInt64 speed_not_changing_time = statistics.avg_rows_speed_watch.elapsed() / (1000 * 1000); - if (speed_not_changing_time >= average_speed_not_changing_for_ms) - { - INC_FULFILLED_CRITERIONS(stop_criterions, average_speed_not_changing_for_ms); - } - } - - if (stop_criterions.number_of_initialized_all_of - && (stop_criterions.fulfilled_criterions_all_of >= stop_criterions.number_of_initialized_all_of)) - { - /// All 'all_of' criterions are fulfilled - statistics.last_query_was_cancelled = true; - 
stream.cancel(); - } - - if (stop_criterions.number_of_initialized_any_of && stop_criterions.fulfilled_criterions_any_of) - { - /// Some 'any_of' criterions are fulfilled statistics.last_query_was_cancelled = true; stream.cancel(); } @@ -1218,14 +1126,15 @@ private: } } - void constructSubstitutions(AbstractConfig & substitutions_view, StringToVector & substitutions) + void constructSubstitutions(ConfigurationPtr & substitutions_view, StringToVector & substitutions) { Keys xml_substitutions; substitutions_view->keys(xml_substitutions); for (size_t i = 0; i != xml_substitutions.size(); ++i) { - const AbstractConfig xml_substitution(substitutions_view->createView("substitution[" + std::to_string(i) + "]")); + const ConfigurationPtr xml_substitution( + substitutions_view->createView("substitution[" + std::to_string(i) + "]")); /// Property values for substitution will be stored in a vector /// accessible by property name From db546dc82737a4516cf77b78a20b3e5ba9e1bb45 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 22 Jun 2017 03:28:44 +0300 Subject: [PATCH 07/11] Caching parts that cannot be merged for some time [#CLICKHOUSE-3091]. --- .../Storages/StorageReplicatedMergeTree.cpp | 103 +++++++++++++++--- .../src/Storages/StorageReplicatedMergeTree.h | 4 +- 2 files changed, 89 insertions(+), 18 deletions(-) diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index a28f0fd36ad..273b6898405 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -1616,8 +1616,7 @@ bool StorageReplicatedMergeTree::queueTask() bool StorageReplicatedMergeTree::canMergeParts( const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - MemoizedPartsThatCouldBeMerged * memo) + const MergeTreeData::DataPartPtr & right) { /** It can take a long time to determine whether it is possible to merge two adjacent parts. * Two adjacent parts can be merged if all block numbers between their numbers are not used (abandoned). @@ -1638,10 +1637,6 @@ bool StorageReplicatedMergeTree::canMergeParts( || (left.get() != right.get() && queue.partWillBeMergedOrMergesDisabled(right->name))) return false; - auto key = std::make_pair(left->name, right->name); - if (memo && memo->count(key)) - return true; - String month_name = left->name.substr(0, 6); auto zookeeper = getZooKeeper(); @@ -1705,13 +1700,76 @@ bool StorageReplicatedMergeTree::canMergeParts( } } - if (memo) - memo->insert(key); - return true; } +/** Cache for function, that returns bool. + * If function returned true, cache it forever. + * If function returned false, cache it for exponentially growing time. + * Not thread safe. 
+ */ +template +struct CachedMergingPredicate +{ + using clock = std::chrono::steady_clock; + + struct Expiration + { + static constexpr clock::duration min_delay = std::chrono::seconds(1); + static constexpr clock::duration max_delay = std::chrono::seconds(600); + static constexpr double exponent_base = 2; + + clock::time_point expire_time; + clock::duration delay = clock::duration::zero(); + + void next(clock::time_point now) + { + if (delay == clock::duration::zero()) + delay = min_delay; + else + { + delay *= exponent_base; + if (delay > max_delay) + delay = max_delay; + } + + expire_time = now + delay; + } + + bool expired(clock::time_point now) const + { + return now > expire_time; + } + }; + + std::set true_keys; + std::map false_keys; + + template + bool get(clock::time_point now, Function && function, ArgsToKey && args_to_key, Args &&... args) + { + Key key{args_to_key(std::forward(args)...)}; + + if (true_keys.count(key)) + return true; + + auto it = false_keys.find(key); + if (false_keys.end() != it && !it->second.expired(now)) + return false; + + bool value = function(std::forward(args)...); + + if (value) + true_keys.insert(key); + else + false_keys[key].next(now); + + return value; + } +}; + + void StorageReplicatedMergeTree::mergeSelectingThread() { setThreadName("ReplMTMergeSel"); @@ -1720,12 +1778,25 @@ void StorageReplicatedMergeTree::mergeSelectingThread() bool deduplicate = false; /// TODO: read deduplicate option from table config bool need_pull = true; - MemoizedPartsThatCouldBeMerged memoized_parts_that_could_be_merged; - - auto can_merge = [&memoized_parts_that_could_be_merged, this] - (const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right) -> bool + auto uncached_merging_predicate = [this](const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right) { - return canMergeParts(left, right, &memoized_parts_that_could_be_merged); + return canMergeParts(left, right); + }; + + auto merging_predicate_args_to_key = [](const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right) + { + return std::make_pair(left->name, right->name); + }; + + CachedMergingPredicate> cached_merging_predicate; + + /// Will be updated below. 
+ std::chrono::steady_clock::time_point now; + + auto can_merge = [&] + (const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right) + { + return cached_merging_predicate.get(now, uncached_merging_predicate, merging_predicate_args_to_key, left, right); }; while (!shutdown_called && is_leader_node) @@ -1763,6 +1834,8 @@ void StorageReplicatedMergeTree::mergeSelectingThread() size_t max_parts_size_for_merge = merger.getMaxPartsSizeForMerge(data.settings.max_replicated_merges_in_queue, merges_queued); + now = std::chrono::steady_clock::now(); + if (max_parts_size_for_merge > 0 && merger.selectPartsToMerge( parts, merged_name, false, @@ -2347,7 +2420,7 @@ bool StorageReplicatedMergeTree::optimize(const String & partition, bool final, auto can_merge = [this] (const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right) { - return canMergeParts(left, right, nullptr); + return canMergeParts(left, right); }; pullLogsToQueue(); diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/src/Storages/StorageReplicatedMergeTree.h index 9b4cf2f5a86..570e2441078 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/dbms/src/Storages/StorageReplicatedMergeTree.h @@ -418,12 +418,10 @@ private: */ void mergeSelectingThread(); - using MemoizedPartsThatCouldBeMerged = std::set>; /// Is it possible to merge parts in the specified range? `memo` is an optional parameter. bool canMergeParts( const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - MemoizedPartsThatCouldBeMerged * memo); + const MergeTreeData::DataPartPtr & right); /** Write the selected parts to merge into the log, * Call when merge_selecting_mutex is locked. From 946c275dfbe901cfec87deecc845f72215350b9d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 22 Jun 2017 03:56:17 +0300 Subject: [PATCH 08/11] Added missing case for broken files in StorageDistributed [#CLICKHOUSE-3091]. 
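A concrete illustration of the exponential back-off cache introduced in PATCH 07 above. This is a standalone sketch, not part of any patch: it assumes the CachedMergingPredicate template from that patch is in scope, and the names cache, predicate and to_key are purely illustrative. A false answer is recomputed only after its cached entry expires (after 1 s, then 2 s, 4 s, ... up to the 600 s cap), while a true answer is cached forever.

#include <cassert>
#include <chrono>
#include <string>
#include <utility>

/// Sketch only: assumes CachedMergingPredicate from PATCH 07 is visible here.
int main()
{
    using namespace std::chrono;

    /// Key type matches what merging_predicate_args_to_key produces: a pair of part names.
    CachedMergingPredicate<std::pair<std::string, std::string>> cache;

    size_t calls = 0;
    auto predicate = [&](const std::string &, const std::string &) { ++calls; return false; };
    auto to_key = [](const std::string & left, const std::string & right) { return std::make_pair(left, right); };

    auto t0 = steady_clock::now();

    cache.get(t0, predicate, to_key, std::string("a"), std::string("b"));                      /// real call; "false" cached for 1 s
    cache.get(t0 + milliseconds(500), predicate, to_key, std::string("a"), std::string("b"));  /// within 1 s: cached "false", predicate not called
    cache.get(t0 + seconds(2), predicate, to_key, std::string("a"), std::string("b"));         /// entry expired: real call again, now cached for 2 s

    assert(calls == 2);
    return 0;
}

Caching positive answers forever is presumably safe because once all block numbers between two adjacent parts are abandoned that fact cannot change, whereas a negative answer can become positive later, so it is only cached temporarily.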
---
 dbms/src/Storages/Distributed/DirectoryMonitor.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp
index 988ffc6f190..53f57b61c70 100644
--- a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp
+++ b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp
@@ -26,6 +26,7 @@ namespace ErrorCodes
     extern const int INCORRECT_FILE_NAME;
     extern const int CHECKSUM_DOESNT_MATCH;
     extern const int TOO_LARGE_SIZE_COMPRESSED;
+    extern const int ATTEMPT_TO_READ_AFTER_EOF;
 }
 
@@ -210,9 +211,10 @@ void StorageDistributedDirectoryMonitor::processFile(const std::string & file_pa
         const auto code = e.code();
 
         /// mark file as broken if necessary
-        if (code == ErrorCodes::CHECKSUM_DOESNT_MATCH ||
-            code == ErrorCodes::TOO_LARGE_SIZE_COMPRESSED ||
-            code == ErrorCodes::CANNOT_READ_ALL_DATA)
+        if (code == ErrorCodes::CHECKSUM_DOESNT_MATCH
+            || code == ErrorCodes::TOO_LARGE_SIZE_COMPRESSED
+            || code == ErrorCodes::CANNOT_READ_ALL_DATA
+            || code == ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF)
         {
             const auto last_path_separator_pos = file_path.rfind('/');
             const auto & path = file_path.substr(0, last_path_separator_pos + 1);

From 51b41aa0e3decfdb4314508069909349cd8f89e5 Mon Sep 17 00:00:00 2001
From: proller
Date: Thu, 22 Jun 2017 17:09:23 +0300
Subject: [PATCH 09/11] Fix build

---
 dbms/src/Storages/StorageReplicatedMergeTree.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
index 273b6898405..7a9238485b3 100644
--- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
@@ -1769,6 +1769,10 @@ struct CachedMergingPredicate
     }
 };
 
+template <typename Key> constexpr CachedMergingPredicate<Key>::clock::duration CachedMergingPredicate<Key>::Expiration::min_delay;
+template <typename Key> constexpr CachedMergingPredicate<Key>::clock::duration CachedMergingPredicate<Key>::Expiration::max_delay;
+template <typename Key> constexpr double CachedMergingPredicate<Key>::Expiration::exponent_base;
+
 
 void StorageReplicatedMergeTree::mergeSelectingThread()
 {

From 57d999eb352b77f805e8840913a3b04eaba10e32 Mon Sep 17 00:00:00 2001
From: Liu Cong
Date: Thu, 22 Jun 2017 13:14:49 +0800
Subject: [PATCH 10/11] Fix build caused by libs/libcommon/include/ext/bit_cast.h

---
 libs/libcommon/include/ext/bit_cast.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/libs/libcommon/include/ext/bit_cast.h b/libs/libcommon/include/ext/bit_cast.h
index 1a76e3a27cf..49c48493d68 100644
--- a/libs/libcommon/include/ext/bit_cast.h
+++ b/libs/libcommon/include/ext/bit_cast.h
@@ -1,6 +1,7 @@
 #pragma once
 
-#include
+#include
+#include
 
 #include
 

From 8ef51bf19b1954e8e6591c2e301297b24a4d37ba Mon Sep 17 00:00:00 2001
From: proller
Date: Thu, 22 Jun 2017 18:57:37 +0300
Subject: [PATCH 11/11] Fixing PerformanceTest: table_exists condition, move getMemoryAmount function to file. Add more debug helpers (#905)

* Fix missing includes

* ComplexKeyCacheDictionary: Move includes to .cpp

* Fixing PerformanceTest: table_exists condition, move getMemoryAmount function to file.
Add more debug helpers * Multiplatform memory size getter * Requested changes * Requested changes * Requested changes * fix * Requested changes --- dbms/src/Common/iostream_debug_helpers.cpp | 14 ++- dbms/src/Common/iostream_debug_helpers.h | 3 + .../ComplexKeyCacheDictionary.cpp | 3 + .../Dictionaries/ComplexKeyCacheDictionary.h | 3 - dbms/src/Server/PerformanceTest.cpp | 47 ++++----- libs/libcommon/CMakeLists.txt | 2 + .../include/common/getMemoryAmount.h | 9 ++ .../include/common/iostream_debug_helpers.h | 9 ++ libs/libcommon/src/getMemoryAmount.cpp | 97 +++++++++++++++++++ 9 files changed, 156 insertions(+), 31 deletions(-) create mode 100644 libs/libcommon/include/common/getMemoryAmount.h create mode 100644 libs/libcommon/src/getMemoryAmount.cpp diff --git a/dbms/src/Common/iostream_debug_helpers.cpp b/dbms/src/Common/iostream_debug_helpers.cpp index d4cded2fab7..94fb4e211e3 100644 --- a/dbms/src/Common/iostream_debug_helpers.cpp +++ b/dbms/src/Common/iostream_debug_helpers.cpp @@ -62,7 +62,7 @@ std::ostream & operator<<(std::ostream & stream, const DB::Block & what) { stream << "Block(" << "size = " << what.getColumns().size() - << ")"; + << "){" << what.dumpStructure() << "}"; return stream; } @@ -79,3 +79,15 @@ std::ostream & operator<<(std::ostream & stream, const DB::IColumn & what) << ")"; return stream; } + +std::ostream & operator<<(std::ostream & stream, const DB::Connection::Packet & what) { + stream << "Connection::Packet(" + << "type = " << what.type; + // types description: Core/Protocol.h + if (what.exception) + stream << "exception = " << what.exception.get(); + //TODO: profile_info + stream << ") {" << what.block << "}"; + return stream; +} + diff --git a/dbms/src/Common/iostream_debug_helpers.h b/dbms/src/Common/iostream_debug_helpers.h index c2cf913273d..5f9f7b5692b 100644 --- a/dbms/src/Common/iostream_debug_helpers.h +++ b/dbms/src/Common/iostream_debug_helpers.h @@ -32,6 +32,9 @@ std::ostream & operator<<(std::ostream & stream, const DB::ColumnWithTypeAndName namespace DB { class IColumn; } std::ostream & operator<<(std::ostream & stream, const DB::IColumn & what); +#include +std::ostream & operator<<(std::ostream & stream, const DB::Connection::Packet & what); + /// some operator<< should be declared before operator<<(... 
std::shared_ptr<>) #include diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp index cc69d5ff93a..59f4f2fb8d7 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -7,6 +8,8 @@ #include #include #include +#include +#include namespace ProfileEvents diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h index f4f1c9a5c9b..6c82bc5f8cd 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h @@ -3,15 +3,12 @@ #include #include #include -#include #include #include #include #include #include -#include #include -#include #include #include #include diff --git a/dbms/src/Server/PerformanceTest.cpp b/dbms/src/Server/PerformanceTest.cpp index af2d2e2adcb..d609f4374ec 100644 --- a/dbms/src/Server/PerformanceTest.cpp +++ b/dbms/src/Server/PerformanceTest.cpp @@ -2,9 +2,6 @@ #include #include #include -#if __has_include() - #include -#endif #include #include @@ -23,6 +20,7 @@ #include #include #include +#include #include @@ -662,34 +660,24 @@ private: for (const String & precondition : preconditions) { if (precondition == "flush_disk_cache") - if (system("(>&2 echo 'Flushing disk cache...') && (sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches') && (>&2 echo 'Flushed.')")) { + if (system("(>&2 echo 'Flushing disk cache...') && (sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches') && (>&2 echo 'Flushed.')")) + { std::cerr << "Failed to flush disk cache" << std::endl; return false; } if (precondition == "ram_size") { -#if __has_include() - struct sysinfo *system_information = new struct sysinfo(); - if (sysinfo(system_information)) + size_t ram_size_needed = config->getUInt64("preconditions.ram_size"); + size_t actual_ram = getMemoryAmount(); + if (!actual_ram) + throw DB::Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED); + + if (ram_size_needed > actual_ram) { - std::cerr << "Failed to check system RAM size" << std::endl; - delete system_information; + std::cerr << "Not enough RAM: need = " << ram_size_needed << ", present = " << actual_ram << std::endl; + return false; } - else - { - size_t ram_size_needed = config->getUInt64("preconditions.ram_size"); - size_t actual_ram = system_information->totalram / 1024 / 1024; - if (ram_size_needed > actual_ram) - { - std::cerr << "Not enough RAM" << std::endl; - delete system_information; - return false; - } - } -#else - throw DB::Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED); -#endif } if (precondition == "table_exists") @@ -706,11 +694,15 @@ private: { Connection::Packet packet = connection.receivePacket(); - if (packet.type == Protocol::Server::Data) { + if (packet.type == Protocol::Server::Data) + { for (const ColumnWithTypeAndName & column : packet.block.getColumns()) { - if (column.name == "result" && column.column->getDataAt(0).data != nullptr) { + if (column.name == "result" && column.column->size() > 0) + { exist = column.column->get64(0); + if (exist) + break; } } } @@ -719,8 +711,9 @@ private: break; } - if (exist == 0) { - std::cerr << "Table " + table_to_check + " doesn't exist" << std::endl; + if (!exist) + { + std::cerr << "Table " << table_to_check << " doesn't exist" << std::endl; return false; } } diff --git 
a/libs/libcommon/CMakeLists.txt b/libs/libcommon/CMakeLists.txt index a86e6f25529..ed90c67e049 100644 --- a/libs/libcommon/CMakeLists.txt +++ b/libs/libcommon/CMakeLists.txt @@ -26,6 +26,7 @@ add_library (common src/DateLUTImpl.cpp src/exp10.cpp src/JSON.cpp + src/getMemoryAmount.cpp include/common/ApplicationServerExt.h include/common/Types.h @@ -41,6 +42,7 @@ add_library (common include/common/singleton.h include/common/strong_typedef.h include/common/JSON.h + include/common/getMemoryAmount.h include/ext/bit_cast.h include/ext/collection_cast.h diff --git a/libs/libcommon/include/common/getMemoryAmount.h b/libs/libcommon/include/common/getMemoryAmount.h new file mode 100644 index 00000000000..5139c39debd --- /dev/null +++ b/libs/libcommon/include/common/getMemoryAmount.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +/** +* Returns the size of physical memory (RAM) in bytes. +* Returns 0 on unsupported platform +*/ +uint64_t getMemoryAmount(); diff --git a/libs/libcommon/include/common/iostream_debug_helpers.h b/libs/libcommon/include/common/iostream_debug_helpers.h index 39e16672ef7..fdeb92ad5d2 100644 --- a/libs/libcommon/include/common/iostream_debug_helpers.h +++ b/libs/libcommon/include/common/iostream_debug_helpers.h @@ -160,4 +160,13 @@ std::ostream & operator<<(std::ostream & stream, const std::experimental::option } +#include + +std::ostream & operator<<(std::ostream & stream, const std::exception & what) +{ + stream << "exception{" << what.what() << "}"; + return stream; +} + + // TODO: add more types diff --git a/libs/libcommon/src/getMemoryAmount.cpp b/libs/libcommon/src/getMemoryAmount.cpp new file mode 100644 index 00000000000..36551db62e3 --- /dev/null +++ b/libs/libcommon/src/getMemoryAmount.cpp @@ -0,0 +1,97 @@ +#include "common/getMemoryAmount.h" + +// http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system + +/* + * Author: David Robert Nadeau + * Site: http://NadeauSoftware.com/ + * License: Creative Commons Attribution 3.0 Unported License + * http://creativecommons.org/licenses/by/3.0/deed.en_US + */ + +#if defined(WIN32) || defined(_WIN32) +#include +#else +#include +#include +#include +#if defined(BSD) +#include +#endif +#endif + + +/** + * Returns the size of physical memory (RAM) in bytes. + * Returns 0 on unsupported platform + */ +uint64_t getMemoryAmount() +{ +#if defined(_WIN32) && (defined(__CYGWIN__) || defined(__CYGWIN32__)) + /* Cygwin under Windows. ------------------------------------ */ + /* New 64-bit MEMORYSTATUSEX isn't available. Use old 32.bit */ + MEMORYSTATUS status; + status.dwLength = sizeof(status); + GlobalMemoryStatus( &status ); + return status.dwTotalPhys; + +#elif defined(WIN32) || defined(_WIN32) + /* Windows. ------------------------------------------------- */ + /* Use new 64-bit MEMORYSTATUSEX, not old 32-bit MEMORYSTATUS */ + MEMORYSTATUSEX status; + status.dwLength = sizeof(status); + GlobalMemoryStatusEx( &status ); + return status.ullTotalPhys; + +#else + /* UNIX variants. ------------------------------------------- */ + /* Prefer sysctl() over sysconf() except sysctl() HW_REALMEM and HW_PHYSMEM */ + +#if defined(CTL_HW) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM64)) + int mib[2]; + mib[0] = CTL_HW; +#if defined(HW_MEMSIZE) + mib[1] = HW_MEMSIZE; /* OSX. --------------------- */ +#elif defined(HW_PHYSMEM64) + mib[1] = HW_PHYSMEM64; /* NetBSD, OpenBSD. 
--------- */
+#endif
+    uint64_t size = 0;      /* 64-bit */
+    size_t len = sizeof(size);
+    if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 ) {
+        return size;
+    }
+    return 0;               /* Failed? */
+
+#elif defined(_SC_AIX_REALMEM)
+    /* AIX. ----------------------------------------------------- */
+    return sysconf( _SC_AIX_REALMEM ) * 1024;
+
+#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
+    /* FreeBSD, Linux, OpenBSD, and Solaris. -------------------- */
+    return (uint64_t)sysconf( _SC_PHYS_PAGES )
+        * (uint64_t)sysconf( _SC_PAGESIZE );
+
+#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE)
+    /* Legacy. -------------------------------------------------- */
+    return (uint64_t)sysconf( _SC_PHYS_PAGES )
+        * (uint64_t)sysconf( _SC_PAGE_SIZE );
+
+#elif defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM))
+    /* DragonFly BSD, FreeBSD, NetBSD, OpenBSD, and OSX. -------- */
+    int mib[2];
+    mib[0] = CTL_HW;
+#if defined(HW_REALMEM)
+    mib[1] = HW_REALMEM;    /* FreeBSD. ----------------- */
+#elif defined(HW_PHYSMEM)
+    mib[1] = HW_PHYSMEM;    /* Others. ------------------ */
+#endif
+    unsigned int size = 0;  /* 32-bit */
+    size_t len = sizeof( size );
+    if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 ) {
+        return size;
+    }
+    return 0;               /* Failed? */
+#endif /* sysctl and sysconf variants */
+
+#endif
+}
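For completeness, a minimal usage sketch of the new helper: a hypothetical standalone program, not part of the patch, assuming the libcommon include directory is on the include path as for the other common/ headers used above. The relevant contract is the one stated in getMemoryAmount.h: the function returns the physical memory size in bytes and 0 on an unsupported platform, so callers, like the ram_size precondition in PerformanceTest.cpp, must treat 0 as "unknown" rather than as "no memory".

#include <cstdint>
#include <iostream>

#include <common/getMemoryAmount.h>

int main()
{
    uint64_t ram = getMemoryAmount();   /// bytes, or 0 if no platform branch applied

    if (ram == 0)
        std::cerr << "Physical memory size is not available on this platform" << std::endl;
    else
        std::cout << "Physical memory: " << (ram / (1024 * 1024)) << " MiB" << std::endl;

    return 0;
}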