Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-29 19:12:03 +00:00.

Commit a321d6970c: adjust report and increase time
Parent: 00c924b0dd
@@ -488,18 +488,11 @@ create view query_metric_stats as
 -- Main statistics for queries -- query time as reported in query log.
 create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
     as select
-        -- Comparison mode doesn't make sense for queries that complete
-        -- immediately (on the same order of time as noise). If query duration is
-        -- less that some threshold, we just skip it. If there is a significant
-        -- regression in such query, the time will exceed the threshold, and we
-        -- well process it normally and detect the regression.
-        right < $short_query_threshold as short,
-
-        not short and abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
-        not short and abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
-
-        not short and not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
-        not short and not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
-
+        abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
+        abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
+
+        not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
+        not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
+
         left, right, diff, stat_threshold,
         if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
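For reference, here is a minimal Python sketch of the flag logic after this hunk. The classify helper is hypothetical; it just restates the SQL expressions above, which no longer gate on the removed short flag:

# Hypothetical restatement of the flag logic in the 'queries' table above.
def classify(diff, stat_threshold, report_threshold=0.10):
    # A query "changed" when the relative time difference between the two
    # servers exceeds both the report threshold and the estimated noise level.
    changed_fail = abs(diff) > report_threshold and abs(diff) > stat_threshold
    # A slightly lower bar for merely showing the change in the report.
    changed_show = abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold
    # A query is "unstable" when its noise estimate alone is well above the
    # report threshold, even though no significant change was detected.
    unstable_fail = not changed_fail and stat_threshold > report_threshold + 0.10
    unstable_show = not changed_show and stat_threshold > report_threshold - 0.05
    return changed_fail, changed_show, unstable_fail, unstable_show

# Example: a 12% slowdown with a 3% noise level is reported as a real change.
assert classify(0.12, 0.03) == (True, True, False, False)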
@@ -590,9 +583,9 @@ create table wall_clock_time_per_test engine Memory as select *
 
 create table test_time engine Memory as
     select test, sum(client) total_client_time,
-        maxIf(client, not short) query_max,
-        minIf(client, not short) query_min,
-        count(*) queries, sum(short) short_queries
+        max(client) query_max,
+        min(client) query_min,
+        count(*) queries
     from total_client_time_per_query full join queries using (test, query_index)
     group by test;
 
@@ -600,7 +593,6 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as
 select wall_clock_time_per_test.test, real,
     toDecimal64(total_client_time, 3),
     queries,
-    short_queries,
     toDecimal64(query_max, 3),
     toDecimal64(real / queries, 3) avg_real_per_query,
     toDecimal64(query_min, 3)
@@ -641,17 +633,18 @@ create table unmarked_short_queries_report
     engine File(TSV, 'report/unmarked-short-queries.tsv')
     as select time, test, query_index, query_display_name
     from (
-        select right time, test, query_index from queries where short
+        select right time, test, query_index from queries
         union all
         select time_median, test, query_index from partial_query_times
-        where time_median < $short_query_threshold
     ) times
     left join query_display_names
         on times.test = query_display_names.test
         and times.query_index = query_display_names.query_index
-    where (test, query_index) not in
-        (select * from file('analyze/marked-short-queries.tsv', TSV,
-            'test text, query_index int'))
+    where
+        (test, query_index) not in
+            (select * from file('analyze/marked-short-queries.tsv', TSV,
+                'test text, query_index int'))
+        and time < $short_query_threshold
     order by test, query_index
     ;
 
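The short-query filter is now applied once, in the outer WHERE: since queries no longer carries a short flag, both branches of the union (final and partial query times) are filtered uniformly by time < $short_query_threshold.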
@@ -660,7 +653,7 @@ create table unmarked_short_queries_report
 
 -- keep the table in old format so that we can analyze new and old data together
 create table queries_old_format engine File(TSVWithNamesAndTypes, 'queries.rep')
-    as select short, changed_fail, unstable_fail, left, right, diff,
+    as select 0 short, changed_fail, unstable_fail, left, right, diff,
         stat_threshold, test, query_display_name query
     from queries
 ;
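Note that '0 short' hardcodes the flag to zero: the short column no longer exists in queries, so the old-format queries.rep keeps its column layout and stays comparable with data from older runs.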
@@ -276,11 +276,11 @@ for query_index, q in enumerate(test_queries):
             # time per query per server of about one second. Use this value as a
             # reference for "short" queries.
             if is_short[query_index]:
-                if server_seconds >= 1 * len(this_query_connections):
+                if server_seconds >= 2 * len(this_query_connections):
                     break
                 # Also limit the number of runs, so that we don't go crazy processing
                 # the results -- 'eqmed.sql' is really suboptimal.
-                if run >= 100:
+                if run >= 200:
                     break
             else:
                 if run >= args.runs:
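A minimal sketch of the adaptive run loop this hunk tunes, with names taken from the diff; run_query and the surrounding harness are assumed for illustration. Short queries now accumulate about two seconds of server time per connection (up from one) before stopping, capped at 200 runs (up from 100):

# Sketch of the adaptive run loop; run_query() is an assumed helper that
# executes the query on one connection and returns the elapsed server seconds.
server_seconds = 0.0
run = 0
while True:
    for conn in this_query_connections:
        server_seconds += run_query(conn, q)
    run += 1
    if is_short[query_index]:
        # Short queries: stop once we have ~2 s of server time per connection.
        if server_seconds >= 2 * len(this_query_connections):
            break
        # But never more than 200 runs, because post-processing the results
        # with 'eqmed.sql' is expensive.
        if run >= 200:
            break
    else:
        # Normal queries run the configured number of times.
        if run >= args.runs:
            break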
@@ -126,7 +126,6 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
 .test-times tr :nth-child(5),
 .test-times tr :nth-child(6),
 .test-times tr :nth-child(7),
-.test-times tr :nth-child(8),
 .concurrent-benchmarks tr :nth-child(2),
 .concurrent-benchmarks tr :nth-child(3),
 .concurrent-benchmarks tr :nth-child(4),
@@ -461,10 +460,9 @@ if args.report == 'main':
         'Wall clock time, s', #1
         'Total client time, s', #2
         'Total queries', #3
-        'Ignored short queries', #4
-        'Longest query<br>(sum for all runs), s', #5
-        'Avg wall clock time<br>(sum for all runs), s', #6
-        'Shortest query<br>(sum for all runs), s', #7
+        'Longest query<br>(sum for all runs), s', #4
+        'Avg wall clock time<br>(sum for all runs), s', #5
+        'Shortest query<br>(sum for all runs), s', #6
         ]
 
     text = tableStart('Test times')
@@ -475,20 +473,20 @@ if args.report == 'main':
     attrs = ['' for c in columns]
     for r in rows:
         anchor = f'{currentTableAnchor()}.{r[0]}'
-        if float(r[6]) > 1.5 * total_runs:
+        if float(r[5]) > 1.5 * total_runs:
             # FIXME should be 15s max -- investigate parallel_insert
             slow_average_tests += 1
-            attrs[6] = f'style="background: {color_bad}"'
+            attrs[5] = f'style="background: {color_bad}"'
             errors_explained.append([f'<a href="#{anchor}">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
         else:
-            attrs[6] = ''
+            attrs[5] = ''
 
-        if float(r[5]) > allowed_single_run_time * total_runs:
+        if float(r[4]) > allowed_single_run_time * total_runs:
             slow_average_tests += 1
-            attrs[5] = f'style="background: {color_bad}"'
+            attrs[4] = f'style="background: {color_bad}"'
             errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
         else:
-            attrs[5] = ''
+            attrs[4] = ''
 
         text += tableRow(r, attrs, anchor)
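Dropping the 'Ignored short queries' column shifts the later columns of the test-times table down by one. That is why the highlighting code above moves from r[6]/attrs[6] to r[5]/attrs[5] (average wall clock time per query) and from r[5]/attrs[5] to r[4]/attrs[4] (longest query), and why the .test-times tr :nth-child(8) selector disappears from the CSS.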