Merge branch 'ClickHouse:master' into master

This commit is contained in:
Roman Antonov 2024-10-29 13:51:41 +03:00 committed by GitHub
commit ac546572e6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 18 additions and 3 deletions

View File

@@ -168,6 +168,7 @@ namespace ServerSetting
{
extern const ServerSettingsUInt32 asynchronous_heavy_metrics_update_period_s;
extern const ServerSettingsUInt32 asynchronous_metrics_update_period_s;
extern const ServerSettingsBool asynchronous_metrics_enable_heavy_metrics;
extern const ServerSettingsBool async_insert_queue_flush_on_shutdown;
extern const ServerSettingsUInt64 async_insert_threads;
extern const ServerSettingsBool async_load_databases;
@@ -1061,6 +1062,7 @@ try
ServerAsynchronousMetrics async_metrics(
global_context,
server_settings[ServerSetting::asynchronous_metrics_update_period_s],
server_settings[ServerSetting::asynchronous_metrics_enable_heavy_metrics],
server_settings[ServerSetting::asynchronous_heavy_metrics_update_period_s],
[&]() -> std::vector<ProtocolServerMetrics>
{

View File

@@ -58,6 +58,7 @@ namespace DB
DECLARE(Double, cannot_allocate_thread_fault_injection_probability, 0, "For testing purposes.", 0) \
DECLARE(Int32, max_connections, 1024, "Max server connections.", 0) \
DECLARE(UInt32, asynchronous_metrics_update_period_s, 1, "Period in seconds for updating asynchronous metrics.", 0) \
DECLARE(Bool, asynchronous_metrics_enable_heavy_metrics, false, "Enable the calculation of heavy asynchronous metrics.", 0) \
DECLARE(UInt32, asynchronous_heavy_metrics_update_period_s, 120, "Period in seconds for updating heavy asynchronous metrics.", 0) \
DECLARE(String, default_database, "default", "Default database name.", 0) \
DECLARE(String, tmp_policy, "", "Policy for storage with temporary data.", 0) \

View File

@@ -54,12 +54,14 @@ void calculateMaxAndSum(Max & max, Sum & sum, T x)
ServerAsynchronousMetrics::ServerAsynchronousMetrics(
ContextPtr global_context_,
unsigned update_period_seconds,
bool update_heavy_metrics_,
unsigned heavy_metrics_update_period_seconds,
const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
bool update_jemalloc_epoch_,
bool update_rss_)
: WithContext(global_context_)
, AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_, update_jemalloc_epoch_, update_rss_)
, update_heavy_metrics(update_heavy_metrics_)
, heavy_metric_update_period(heavy_metrics_update_period_seconds)
{
/// sanity check
@@ -412,7 +414,8 @@ void ServerAsynchronousMetrics::updateImpl(TimePoint update_time, TimePoint curr
}
#endif
updateHeavyMetricsIfNeeded(current_time, update_time, force_update, first_run, new_values);
if (update_heavy_metrics)
updateHeavyMetricsIfNeeded(current_time, update_time, force_update, first_run, new_values);
}
void ServerAsynchronousMetrics::logImpl(AsynchronousMetricValues & new_values)
@@ -459,10 +462,10 @@ void ServerAsynchronousMetrics::updateDetachedPartsStats()
void ServerAsynchronousMetrics::updateHeavyMetricsIfNeeded(TimePoint current_time, TimePoint update_time, bool force_update, bool first_run, AsynchronousMetricValues & new_values)
{
const auto time_since_previous_update = current_time - heavy_metric_previous_update_time;
const bool update_heavy_metrics = (time_since_previous_update >= heavy_metric_update_period) || force_update || first_run;
const bool need_update_heavy_metrics = (time_since_previous_update >= heavy_metric_update_period) || force_update || first_run;
Stopwatch watch;
if (update_heavy_metrics)
if (need_update_heavy_metrics)
{
heavy_metric_previous_update_time = update_time;
if (first_run)

View File

@@ -13,6 +13,7 @@ public:
ServerAsynchronousMetrics(
ContextPtr global_context_,
unsigned update_period_seconds,
bool update_heavy_metrics_,
unsigned heavy_metrics_update_period_seconds,
const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
bool update_jemalloc_epoch_,
@@ -24,6 +25,7 @@ private:
void updateImpl(TimePoint update_time, TimePoint current_time, bool force_update, bool first_run, AsynchronousMetricValues & new_values) override;
void logImpl(AsynchronousMetricValues & new_values) override;
bool update_heavy_metrics;
const Duration heavy_metric_update_period;
TimePoint heavy_metric_previous_update_time;
double heavy_update_interval = 0.;

View File

@@ -6371,6 +6371,12 @@ DetachedPartsInfo MergeTreeData::getDetachedParts() const
for (const auto & disk : getDisks())
{
/// While it is possible to have detached parts on readonly/write-once disks
/// (if they were produced on another machine, where it wasn't readonly)
/// to avoid wasting resources for slow disks, avoid trying to enumerate them.
if (disk->isReadOnly() || disk->isWriteOnce())
continue;
String detached_path = fs::path(relative_data_path) / DETACHED_DIR_NAME;
/// Note: we don't care about TOCTOU issue here.

View File

@@ -1,4 +1,5 @@
<clickhouse>
<!-- Refresh lightweight asynchronous metrics every second (default is also 1s; pinned here for the test). -->
<asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s>
<!-- Heavy asynchronous metrics are off by default (ServerSettings declares false); enable them for this test. -->
<asynchronous_metrics_enable_heavy_metrics>1</asynchronous_metrics_enable_heavy_metrics>
<!-- Recompute heavy metrics every second instead of the 120s default so the test observes updates quickly. -->
<asynchronous_heavy_metrics_update_period_s>1</asynchronous_heavy_metrics_update_period_s>
</clickhouse>