#!/bin/bash
# Performance-comparison driver: runs two ClickHouse builds side by side
# ("left" = upstream/master, "right" = patched version) and compares them.
set -exu
set -o pipefail
trap "exit" INT TERM
# The watchdog is in the separate process group, so we have to kill it separately
# if the script terminates earlier.
trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT

# Stage to start from; empty means run everything.
# NOTE(review): presumably consumed by a dispatcher further down the file — confirm.
stage=${stage:-}

# Absolute path of the directory containing this script, so helper files
# (perf.py, eqmed.sql) are found regardless of the current directory.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# upstream/master
LEFT_SERVER_PORT=9001
# patched version
RIGHT_SERVER_PORT=9002
2020-08-19 15:08:23 +00:00
# Wait (up to 60 s) until the ClickHouse server listening on port $1 answers
# queries, or until its process $2 dies, whichever comes first.
# Returns non-zero if the server never became reachable or the pid is gone.
function wait_for_server # port, pid
{
    for _ in {1..60}
    do
        if clickhouse-client --port "$1" --query "select 1" || ! kill -0 "$2"
        then
            break
        fi
        sleep 1
    done

    if ! clickhouse-client --port "$1" --query "select 1"
    then
        echo "Cannot connect to ClickHouse server at $1"
        return 1
    fi

    if ! kill -0 "$2"
    then
        echo "Server pid '$2' is not running"
        return 1
    fi
}
2019-12-26 17:35:41 +00:00
2021-01-02 14:27:03 +00:00
# Resolve a file that may live in either the 'left' or the 'right' workspace.
# $1 -- preferred side ('left' or 'right'); $2 -- file name.
# Prints "$1/$2" when it exists, otherwise falls back to the same file on the
# opposite side (useful when an older package does not ship the file yet).
function left_or_right()
{
    local from=$1 && shift
    local basename=$1 && shift

    if [ -e "$from/$basename" ]; then
        echo "$from/$basename"
        return
    fi

    case "$from" in
        left) echo "right/$basename" ;;
        right) echo "left/$basename" ;;
    esac
}
2019-12-26 17:35:41 +00:00
# Prepare the databases for both servers: rename the benchmark dataset using a
# temporary server, then hardlink-copy the resulting db into left/ and right/.
function configure
{
    # Use the new config for both servers, so that we can change it in a PR.
    rm right/config/config.d/text_log.xml ||:
    cp -rv right/config left ||:

    # Start a temporary server to rename the tables
    while killall clickhouse-server; do echo . ; sleep 1 ; done
    echo all killed

    set -m # Spawn temporary in its own process groups
    local setup_left_server_opts=(
        # server options
        --config-file=left/config/config.xml
        --
        # server *config* directives overrides
        --path db0
        --user_files_path db0/user_files
        --top_level_domains_path "$(left_or_right right top_level_domains)"
        --tcp_port $LEFT_SERVER_PORT
    )
    left/clickhouse-server "${setup_left_server_opts[@]}" &> setup-server-log.log &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid
    set +m

    wait_for_server $LEFT_SERVER_PORT $left_pid
    echo Server for setup started

    clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
    clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:

    while killall clickhouse-server; do echo . ; sleep 1 ; done
    echo all killed

    # Make copies of the original db for both servers. Use hardlinks instead
    # of copying to save space. Before that, remove preprocessed configs and
    # system tables, because sharing them between servers with hardlinks may
    # lead to weird effects.
    rm -r left/db ||:
    rm -r right/db ||:
    rm -r db0/preprocessed_configs ||:
    rm -r db0/{data,metadata}/system ||:
    rm db0/status ||:
    cp -al db0/ left/db/
    cp -al db0/ right/db/
}
# Kill any running servers and start fresh left/right servers on their ports,
# waiting until both respond and verifying they are the processes we spawned.
function restart
{
    while killall clickhouse-server; do echo . ; sleep 1 ; done
    echo all killed

    # Change the jemalloc settings here.
    # https://github.com/jemalloc/jemalloc/wiki/Getting-Started
    export MALLOC_CONF="confirm_conf:true"

    set -m # Spawn servers in their own process groups

    local left_server_opts=(
        # server options
        --config-file=left/config/config.xml
        --
        # server *config* directives overrides
        --path left/db
        --user_files_path left/db/user_files
        --top_level_domains_path "$(left_or_right left top_level_domains)"
        --tcp_port $LEFT_SERVER_PORT
    )
    left/clickhouse-server "${left_server_opts[@]}" &>> left-server-log.log &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid

    local right_server_opts=(
        # server options
        --config-file=right/config/config.xml
        --
        # server *config* directives overrides
        --path right/db
        --user_files_path right/db/user_files
        --top_level_domains_path "$(left_or_right right top_level_domains)"
        --tcp_port $RIGHT_SERVER_PORT
    )
    right/clickhouse-server "${right_server_opts[@]}" &>> right-server-log.log &
    right_pid=$!
    kill -0 $right_pid
    disown $right_pid

    set +m

    unset MALLOC_CONF

    wait_for_server $LEFT_SERVER_PORT $left_pid
    echo left ok

    wait_for_server $RIGHT_SERVER_PORT $right_pid
    echo right ok

    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'"
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"

    # Check again that both servers we started are running -- this is important
    # for running locally, when there might be some other servers started and we
    # will connect to them instead.
    kill -0 $left_pid
    kill -0 $right_pid
}
2019-12-26 19:16:36 +00:00
# Select which performance test files to run (depending on PR vs master and
# what changed), then run them one by one with perf.py, restarting servers
# if they die, and recording wall-clock times and per-test raw results.
function run_tests
{
    # Just check that the script runs at all
    "$script_dir/perf.py" --help > /dev/null

    # Find the directory with test files.
    if [ -v CHPC_TEST_PATH ]
    then
        # Use the explicitly set path to directory with test files.
        test_prefix="$CHPC_TEST_PATH"
    elif [ "$PR_TO_TEST" == "0" ]
    then
        # When testing commits from master, use the older test files. This
        # allows the tests to pass even when we add new functions and tests for
        # them, that are not supported in the old revision.
        test_prefix=left/performance
    else
        # For PRs, use newer test files so we can test these changes.
        test_prefix=right/performance
    fi

    # Determine which tests to run.
    if [ -v CHPC_TEST_GREP ]
    then
        # Run only explicitly specified tests, if any.
        # shellcheck disable=SC2010
        test_files=$(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}")
    elif [ "$PR_TO_TEST" -ne 0 ] \
        && [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \
        && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ] \
        && [ "$(wc -l < other-changed-files.txt)" -eq 0 ]
    then
        # If only the perf tests were changed in the PR, we will run only these
        # tests. The lists of changed files are prepared in entrypoint.sh because
        # it has the repository.
        test_files=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt)
    else
        # The default -- run all tests found in the test dir.
        test_files=$(ls "$test_prefix"/*.xml)
    fi

    # For PRs w/o changes in test definitons and scripts, test only a subset of
    # queries, and run them less times. If the corresponding environment variables
    # are already set, keep those values.
    if [ "$PR_TO_TEST" -ne 0 ] \
        && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ] \
        && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ]
    then
        CHPC_RUNS=${CHPC_RUNS:-7}
        CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-20}
    else
        CHPC_RUNS=${CHPC_RUNS:-13}
        CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
    fi
    export CHPC_RUNS
    export CHPC_MAX_QUERIES

    # Determine which concurrent benchmarks to run. For now, the only test
    # we run as a concurrent benchmark is 'website'. Run it as benchmark if we
    # are also going to run it as a normal test.
    for test in $test_files; do echo "$test"; done | sed -n '/website/p' > benchmarks-to-run.txt

    # Delete old report files.
    for x in {test-times,wall-clock-times}.tsv
    do
        rm -v "$x" ||:
        touch "$x"
    done

    # Randomize test order.
    test_files=$(for f in $test_files; do echo "$f"; done | sort -R)

    # Limit profiling time to 10 minutes, not to run for too long.
    profile_seconds_left=600

    # Run the tests.
    test_name="<none>"
    for test in $test_files
    do
        # Check that both servers are alive, and restart them if they die.
        clickhouse-client --port $LEFT_SERVER_PORT --query "select 1 format Null" \
            || { echo $test_name >> left-server-died.log ; restart ; }
        clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1 format Null" \
            || { echo $test_name >> right-server-died.log ; restart ; }

        test_name=$(basename "$test" ".xml")
        echo test "$test_name"

        # Don't profile if we're past the time limit.
        # Use awk because bash doesn't support floating point arithmetic.
        profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")

        TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
        # The grep is to filter out set -x output and keep only time output.
        # The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
        { \
            time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
                --runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
                --profile-seconds "$profile_seconds" \
                -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
        } 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \
            || echo "Test $test_name failed with error code $?" >> "$test_name-err.log"

        # Subtract the time spent on profiling from the remaining budget.
        profile_seconds_left=$(awk -F'\t' \
            'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
            "$test_name-raw.tsv")
    done

    unset TIMEFORMAT

    wait
}
2020-02-14 12:55:47 +00:00
2020-04-21 21:53:13 +00:00
# Watchdog for the profile-collection phase: if it hasn't finished in 10
# minutes, record an error, dump stack traces of all clickhouse processes
# with gdb, and then kill them.
function get_profiles_watchdog
{
    sleep 600

    echo "The trace collection did not finish in time." >> profile-errors.log

    for pid in $(pgrep -f clickhouse)
    do
        gdb -p "$pid" --batch --ex "info proc all" --ex "thread apply all bt" --ex quit &> "$pid.gdb.log" &
    done
    wait

    for _ in {1..10}
    do
        if ! pkill -f clickhouse
        then
            break
        fi
        sleep 1
    done
}
2020-02-14 19:11:46 +00:00
# Download query/thread/trace/metric logs from both servers into *-log.tsv
# files for later analysis. Queries for each server run concurrently.
function get_profiles
{
    # Collect the profiles
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "system flush logs" &

    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "system flush logs" &

    wait

    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &

    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > right-async-metric-log.tsv ||: &

    wait

    # Just check that the servers are alive so that we return a proper exit code.
    # We don't consistently check the return codes of the above background jobs.
    clickhouse-client --port $LEFT_SERVER_PORT --query "select 1"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
}
2019-12-26 17:35:41 +00:00
2020-05-21 23:03:41 +00:00
# For each downloaded log file, derive a ClickHouse column definition string
# ('"name" Type, ...') from its TSVWithNamesAndTypes header (row 1 = names,
# row 2 = types) and save it next to the file as "<file>.columns".
function build_log_column_definitions
{
    # FIXME This loop builds column definitons from TSVWithNamesAndTypes in an
    # absolutely atrocious way. This should be done by the file() function itself.
    for x in {right,left}-{addresses,{query,query-thread,trace,{async-,}metric}-log}.tsv
    do
        paste -d' ' \
            <(sed -n '1{s/\t/\n/g;p;q}' "$x" | sed 's/\(^.*$\)/"\1"/') \
            <(sed -n '2{s/\t/\n/g;p;q}' "$x") \
            | tr '\n' ', ' | sed 's/,$//' > "$x.columns"
    done
}
# Build and analyze randomization distribution for all queries.
# Splits the raw per-test output into analysis inputs, joins query runs with
# server-side metrics, and computes randomization statistics per query with
# eqmed.sql via GNU parallel.
function analyze_queries
{
    rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.tsv ./*-report.tsv raw-queries.tsv ||:
    rm -rf analyze ||:
    mkdir analyze analyze/tmp ||:

    build_log_column_definitions

    # Split the raw test output into files suitable for analysis.
    # To debug calculations only for a particular test, substitute a suitable
    # wildcard here, e.g. `for test_file in modulo-raw.tsv`.
    for test_file in *-raw.tsv
    do
        test_name=$(basename "$test_file" "-raw.tsv")
        sed -n "s/^query\t/$test_name\t/p" < "$test_file" >> "analyze/query-runs.tsv"
        sed -n "s/^profile\t/$test_name\t/p" < "$test_file" >> "analyze/query-profiles.tsv"
        sed -n "s/^client-time\t/$test_name\t/p" < "$test_file" >> "analyze/client-times.tsv"
        sed -n "s/^report-threshold\t/$test_name\t/p" < "$test_file" >> "analyze/report-thresholds.tsv"
        sed -n "s/^skipped\t/$test_name\t/p" < "$test_file" >> "analyze/skipped-tests.tsv"
        sed -n "s/^display-name\t/$test_name\t/p" < "$test_file" >> "analyze/query-display-names.tsv"
        sed -n "s/^short\t/$test_name\t/p" < "$test_file" >> "analyze/marked-short-queries.tsv"
        sed -n "s/^partial\t/$test_name\t/p" < "$test_file" >> "analyze/partial-queries.tsv"
    done

    # for each query run, prepare array of metrics from query log
    clickhouse-local --query "
create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
    'test text, query_index int, query_id text, version UInt8, time float');

-- Separately process 'partial' queries which we could only run on the new server
-- because they use new functions. We can't make normal stats for them, but still
-- have to show some stats so that the PR author can tweak them.
create view partial_queries as select test, query_index
    from file('analyze/partial-queries.tsv', TSV,
        'test text, query_index int, servers Array(int)');

create table partial_query_times engine File(TSVWithNamesAndTypes,
        'analyze/partial-query-times.tsv')
    as select test, query_index, stddevPop(time) time_stddev, median(time) time_median
    from query_runs
    where (test, query_index) in partial_queries
    group by test, query_index
    ;

-- Process queries that were run normally, on both servers.
create view left_query_log as select *
    from file('left-query-log.tsv', TSVWithNamesAndTypes,
        '$(cat "left-query-log.tsv.columns")');

create view right_query_log as select *
    from file('right-query-log.tsv', TSVWithNamesAndTypes,
        '$(cat "right-query-log.tsv.columns")');

create view query_logs as
    select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
        query_duration_ms, memory_usage from left_query_log
    union all
    select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
        query_duration_ms, memory_usage from right_query_log
    ;

-- This is a single source of truth on all metrics we have for query runs. The
-- metrics include ProfileEvents from system.query_log, and query run times
-- reported by the perf.py test runner.
create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-arrays.tsv')
    as
    with (
        -- sumMapState with the list of all keys with '-0.' values. Negative zero is because
        -- sumMap removes keys with positive zeros.
        with (select groupUniqArrayArray(ProfileEvents.Names) from query_logs) as all_names
            select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
        ) as all_metrics
    select test, query_index, version, query_id,
        (finalizeAggregation(
            arrayReduce('sumMapMergeState',
                [
                    all_metrics,
                    arrayReduce('sumMapState',
                        [(ProfileEvents.Names,
                            arrayMap(x->toFloat64(x), ProfileEvents.Values))]
                    ),
                    arrayReduce('sumMapState', [(
                        ['client_time', 'server_time', 'memory_usage'],
                        arrayMap(x->if(x != 0., x, -0.), [
                            toFloat64(query_runs.time),
                            toFloat64(query_duration_ms / 1000.),
                            toFloat64(memory_usage)]))])
                ]
            )) as metrics_tuple).1 metric_names,
        metrics_tuple.2 metric_values
    from query_logs
    right join query_runs
        on query_logs.query_id = query_runs.query_id
            and query_logs.version = query_runs.version
    where (test, query_index) not in partial_queries
    ;

-- This is just for convenience -- human-readable + easy to make plots.
create table query_run_metrics_denorm engine File(TSV, 'analyze/query-run-metrics-denorm.tsv')
    as select test, query_index, metric_names, version, query_id, metric_values
    from query_run_metric_arrays
    array join metric_names, metric_values
    order by test, query_index, metric_names, version, query_id
    ;

-- Filter out tests that don't have an even number of runs, to avoid breaking
-- the further calculations. This may happen if there was an error during the
-- test runs, e.g. the server died. It will be reported in test errors, so we
-- don't have to report it again.
create view broken_queries as
    select test, query_index
    from query_runs
    group by test, query_index
    having count(*) % 2 != 0
    ;

-- This is for statistical processing with eqmed.sql
create table query_run_metrics_for_stats engine File(
        TSV, -- do not add header -- will parse with grep
        'analyze/query-run-metrics-for-stats.tsv')
    as select test, query_index, 0 run, version,
        -- For debugging, add a filter for a particular metric like this:
        -- arrayFilter(m, n -> n = 'client_time', metric_values, metric_names)
        -- metric_values
        -- Note that further reporting may break, because the metric names are
        -- not filtered.
        metric_values
    from query_run_metric_arrays
    where (test, query_index) not in broken_queries
    order by test, query_index, run, version
    ;

-- This is the list of metric names, so that we can join them back after
-- statistical processing.
create table query_run_metric_names engine File(TSV, 'analyze/query-run-metric-names.tsv')
    as select metric_names from query_run_metric_arrays limit 1
    ;
" 2> >(tee -a analyze/errors.log 1>&2)

    # This is a lateral join in bash... please forgive me.
    # We don't have arrayPermute(), so I have to make random permutations with
    # `order by rand`, and it becomes really slow if I do it for more than one
    # query. We also don't have lateral joins. So I just put all runs of each
    # query into a separate file, and then compute randomization distribution
    # for each file. I do this in parallel using GNU parallel.
    ( set +x # do not bloat the log
    IFS=$'\n'
    for prefix in $(cut -f1,2 "analyze/query-run-metrics-for-stats.tsv" | sort | uniq)
    do
        file="analyze/tmp/${prefix// /_}.tsv"
        grep "^$prefix" "analyze/query-run-metrics-for-stats.tsv" > "$file" &
        printf "%s\0\n" \
            "clickhouse-local \
                --file \"$file\" \
                --structure 'test text, query text, run int, version UInt8, metrics Array(float)' \
                --query \"$(cat "$script_dir/eqmed.sql")\" \
                >> \"analyze/query-metric-stats.tsv\"" \
            2>> analyze/errors.log \
            >> analyze/commands.txt
    done
    wait
    unset IFS
    )

    # The comparison script might be bound to one NUMA node for better test
    # stability, and the calculation runs out of memory because of this. Use
    # all nodes.
    numactl --show
    numactl --cpunodebind=all --membind=all numactl --show
    numactl --cpunodebind=all --membind=all parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log

    clickhouse-local --query "
-- Join the metric names back to the metric statistics we've calculated, and make
-- a denormalized table of them -- statistics for all metrics for all queries.
-- The WITH, ARRAY JOIN and CROSS JOIN do not like each other:
--  https://github.com/ClickHouse/ClickHouse/issues/11868
--  https://github.com/ClickHouse/ClickHouse/issues/11757
-- Because of this, we make a view with arrays first, and then apply all the
-- array joins.
create view query_metric_stat_arrays as
    with (select * from file('analyze/query-run-metric-names.tsv',
        TSV, 'n Array(String)')) as metric_name
    select test, query_index, metric_name, left, right, diff, stat_threshold
    from file('analyze/query-metric-stats.tsv', TSV, 'left Array(float),
        right Array(float), diff Array(float), stat_threshold Array(float),
        test text, query_index int') reports
    order by test, query_index, metric_name
    ;

create table query_metric_stats_denorm engine File(TSVWithNamesAndTypes,
        'analyze/query-metric-stats-denorm.tsv')
    as select test, query_index, metric_name, left, right, diff, stat_threshold
    from query_metric_stat_arrays
    left array join metric_name, left, right, diff, stat_threshold
    order by test, query_index, metric_name
    ;
" 2> >(tee -a analyze/errors.log 1>&2)
}
2019-12-26 19:16:36 +00:00
# Analyze results
2020-02-07 18:34:24 +00:00
function report
{
2020-04-28 07:45:35 +00:00
rm -r report || :
2020-05-25 01:42:56 +00:00
mkdir report report/tmp || :
2020-04-28 07:45:35 +00:00
2020-05-28 07:45:03 +00:00
rm ./*.{ rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv || :
2020-02-17 19:32:40 +00:00
2020-05-21 23:03:41 +00:00
build_log_column_definitions
2020-05-20 02:19:19 +00:00
cat analyze/errors.log >> report/errors.log || :
2020-04-28 07:45:35 +00:00
cat profile-errors.log >> report/errors.log || :
2020-04-22 19:41:40 +00:00
2020-04-17 15:47:01 +00:00
clickhouse-local --query "
2020-05-20 02:19:19 +00:00
create view query_display_names as select * from
file( 'analyze/query-display-names.tsv' , TSV,
'test text, query_index int, query_display_name text' )
;
2020-07-03 08:39:43 +00:00
create view partial_query_times as select * from
file( 'analyze/partial-query-times.tsv' , TSVWithNamesAndTypes,
'test text, query_index int, time_stddev float, time_median float' )
;
2020-07-09 14:45:24 +00:00
-- Report for partial queries that we could only run on the new server ( e.g.
-- queries with new functions added in the tested PR) .
2020-06-27 00:45:00 +00:00
create table partial_queries_report engine File( TSV, 'report/partial-queries-report.tsv' )
2020-07-31 19:58:18 +00:00
as select toDecimal64( time_median, 3) time,
toDecimal64( time_stddev / time_median, 3) relative_time_stddev,
2020-06-27 00:45:00 +00:00
test, query_index, query_display_name
2020-07-03 08:39:43 +00:00
from partial_query_times
2020-06-27 00:45:00 +00:00
join query_display_names using ( test, query_index)
order by test, query_index
;
2020-07-09 14:45:24 +00:00
create view query_metric_stats as
select * from file( 'analyze/query-metric-stats-denorm.tsv' ,
TSVWithNamesAndTypes,
' test text, query_index int, metric_name text, left float, right float,
diff float, stat_threshold float' )
2020-06-25 20:19:27 +00:00
;
2020-05-20 02:19:19 +00:00
-- Main statistics for queries -- query time as reported in query log.
2020-04-28 07:45:35 +00:00
create table queries engine File( TSVWithNamesAndTypes, 'report/queries.tsv' )
2020-03-23 09:39:12 +00:00
as select
2021-03-02 16:21:30 +00:00
-- It is important to have a non-strict inequality with stat_threshold
-- here. The randomization distribution is actually discrete, and when
-- the number of runs is small, the quantile we need ( e.g. 0.99) turns
-- out to be the maximum value of the distribution. We can also hit this
-- maximum possible value with our test run, and this obviously means
-- that we have observed the difference to the best precision possible
-- for the given number of runs. If we use a strict equality here, we
-- will miss such cases. This happened in the wild and lead to some
-- uncaught regressions, because for the default 7 runs we do for PRs,
-- the randomization distribution has only 16 values, so the max quantile
-- is actually 0.9375.
abs( diff) > report_threshold and abs( diff) >= stat_threshold as changed_fail,
abs( diff) > report_threshold - 0.05 and abs( diff) >= stat_threshold as changed_show,
2020-10-27 11:04:03 +00:00
2020-09-03 01:42:25 +00:00
not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
2020-10-27 11:04:03 +00:00
2020-04-28 07:45:35 +00:00
left, right, diff, stat_threshold,
2020-06-27 01:15:58 +00:00
if ( report_threshold > 0, report_threshold, 0.10) as report_threshold,
2020-07-15 13:29:00 +00:00
query_metric_stats.test test, query_metric_stats.query_index query_index,
query_display_name
2020-05-20 02:19:19 +00:00
from query_metric_stats
left join file( 'analyze/report-thresholds.tsv' , TSV,
'test text, report_threshold float' ) thresholds
on query_metric_stats.test = thresholds.test
2020-07-03 08:39:43 +00:00
left join query_display_names
on query_metric_stats.test = query_display_names.test
and query_metric_stats.query_index = query_display_names.query_index
2020-09-01 19:05:57 +00:00
-- 'server_time' is rounded down to ms, which might be bad for very short queries.
-- Use 'client_time' instead.
where metric_name = 'client_time'
2020-05-20 02:19:19 +00:00
order by test, query_index, metric_name
;
2020-04-28 07:45:35 +00:00
2020-07-03 08:39:43 +00:00
create table changed_perf_report engine File( TSV, 'report/changed-perf.tsv' ) as
2020-08-05 23:02:18 +00:00
with
2020-07-31 19:58:18 +00:00
-- server_time is sometimes reported as zero ( if it' s less than 1 ms) ,
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
2020-08-05 23:02:18 +00:00
( left > right ? left / right : right / left) as times_change_float,
isFinite( times_change_float) as times_change_finite,
toDecimal64( times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite
? ( left > right ? '-' : '+' ) || toString( times_change_decimal) || 'x'
: '--' as times_change_str
select
toDecimal64( left, 3) , toDecimal64( right, 3) , times_change_str,
toDecimal64( diff, 3) , toDecimal64( stat_threshold, 3) ,
changed_fail, test, query_index, query_display_name
2020-05-20 02:19:19 +00:00
from queries where changed_show order by abs( diff) desc;
2020-02-11 20:00:53 +00:00
2020-07-03 08:39:43 +00:00
create table unstable_queries_report engine File( TSV, 'report/unstable-queries.tsv' ) as
2020-07-31 19:58:18 +00:00
select
toDecimal64( left, 3) , toDecimal64( right, 3) , toDecimal64( diff, 3) ,
toDecimal64( stat_threshold, 3) , unstable_fail, test, query_index, query_display_name
2020-05-20 02:19:19 +00:00
from queries where unstable_show order by stat_threshold desc;
2020-02-11 20:00:53 +00:00
2020-06-09 13:29:07 +00:00
2020-09-03 00:57:25 +00:00
create view test_speedup as
select
test,
exp2( avg( log2( left / right) ) ) times_speedup,
count( *) queries,
unstable + changed bad,
sum( changed_show) changed,
sum( unstable_show) unstable
2020-06-09 13:29:07 +00:00
from queries
group by test
2020-09-03 00:57:25 +00:00
order by times_speedup desc
;
create view total_speedup as
select
'Total' test,
exp2( avg( log2( times_speedup) ) ) times_speedup,
sum( queries) queries,
unstable + changed bad,
sum( changed) changed,
sum( unstable) unstable
from test_speedup
2020-06-09 13:29:07 +00:00
;
2020-07-03 08:39:43 +00:00
create table test_perf_changes_report engine File( TSV, 'report/test-perf-changes.tsv' ) as
2020-09-03 00:57:25 +00:00
with
( times_speedup >= 1
? '-' || toString( toDecimal64( times_speedup, 3) ) || 'x'
: '+' || toString( toDecimal64( 1 / times_speedup, 3) ) || 'x' )
as times_speedup_str
select test, times_speedup_str, queries, bad, changed, unstable
-- Not sure what' s the precedence of UNION ALL vs WHERE & ORDER BY, hence all
-- the braces.
from (
(
select * from total_speedup
) union all (
select * from test_speedup
where
( times_speedup >= 1 ? times_speedup : ( 1 / times_speedup) ) >= 1.005
or bad
)
)
2020-09-06 23:49:07 +00:00
order by test = 'Total' desc, times_speedup desc
2020-06-09 13:29:07 +00:00
;
2020-02-07 18:34:24 +00:00
2020-09-03 00:57:25 +00:00
2020-07-03 08:39:43 +00:00
create view total_client_time_per_query as select *
2020-05-20 02:19:19 +00:00
from file( 'analyze/client-times.tsv' , TSV,
'test text, query_index int, client float, server float' ) ;
2020-02-07 18:34:24 +00:00
2020-07-03 08:39:43 +00:00
create table slow_on_client_report engine File( TSV, 'report/slow-on-client.tsv' ) as
2020-07-31 19:58:18 +00:00
select client, server, toDecimal64( client/server, 3) p,
test, query_display_name
2020-07-03 08:39:43 +00:00
from total_client_time_per_query left join query_display_names using ( test, query_index)
2020-07-31 19:58:18 +00:00
where p > toDecimal64( 1.02, 3) order by p desc;
2020-02-10 16:34:07 +00:00
2020-07-03 08:39:43 +00:00
create table wall_clock_time_per_test engine Memory as select *
from file( 'wall-clock-times.tsv' , TSV, 'test text, real float, user float, system float' ) ;
2020-02-10 16:34:07 +00:00
create table test_time engine Memory as
2020-02-11 15:01:16 +00:00
select test, sum( client) total_client_time,
2020-09-03 01:42:25 +00:00
max( client) query_max,
min( client) query_min,
count( *) queries
2020-07-03 08:39:43 +00:00
from total_client_time_per_query full join queries using ( test, query_index)
2020-02-11 15:01:16 +00:00
group by test;
2020-02-07 18:34:24 +00:00
2020-09-18 13:27:50 +00:00
create view query_runs as select * from file( 'analyze/query-runs.tsv' , TSV,
'test text, query_index int, query_id text, version UInt8, time float' ) ;
--
-- Guess the number of query runs used for this test. The number is required to
-- calculate and check the average query run time in the report.
-- We have to be careful, because we will encounter:
-- 1) partial queries which run only on one server
-- 2) short queries which run for a much higher number of times
-- 3) some errors that make query run for a different number of times on a
-- particular server.
--
create view test_runs as
select test,
-- Default to 7 runs if there are only 'short' queries in the test, and
-- we can' t determine the number of runs.
if ( ( ceil( medianOrDefaultIf( t.runs, not short) , 0) as r) != 0, r, 7) runs
from (
select
-- The query id is the same for both servers, so no need to divide here.
uniqExact( query_id) runs,
( test, query_index) in
( select * from file( 'analyze/marked-short-queries.tsv' , TSV,
'test text, query_index int' ) )
as short,
test, query_index
from query_runs
group by test, query_index
) t
group by test
;
2020-09-24 11:46:03 +00:00
create view test_times_view as
select
wall_clock_time_per_test.test test,
real,
total_client_time,
2020-02-11 15:01:16 +00:00
queries,
2020-09-24 11:46:03 +00:00
query_max,
2021-03-18 14:59:49 +00:00
real / if ( queries > 0, queries, 1) avg_real_per_query,
2020-09-24 11:46:03 +00:00
query_min,
2020-09-18 13:27:50 +00:00
runs
2020-04-29 16:27:40 +00:00
from test_time
2020-09-18 13:27:50 +00:00
-- wall clock times are also measured for skipped tests, so don' t
-- do full join
left join wall_clock_time_per_test
on wall_clock_time_per_test.test = test_time.test
full join test_runs
on test_runs.test = test_time.test
2020-09-24 11:46:03 +00:00
;
-- WITH TOTALS doesn' t work with INSERT SELECT, so we have to jump through these
-- hoops: https://github.com/ClickHouse/ClickHouse/issues/15227
create view test_times_view_total as
select
'Total' test,
sum( real) ,
sum( total_client_time) ,
sum( queries) ,
max( query_max) ,
2021-03-18 14:59:49 +00:00
sum( real) / if ( sum( queries) > 0, sum( queries) , 1) avg_real_per_query,
2020-09-24 11:46:03 +00:00
min( query_min) ,
-- Totaling the number of runs doesn' t make sense, but use the max so
-- that the reporting script doesn' t complain about queries being too
-- long.
max( runs)
from test_times_view
;
create table test_times_report engine File( TSV, 'report/test-times.tsv' ) as
select
test,
toDecimal64( real, 3) ,
toDecimal64( total_client_time, 3) ,
queries,
toDecimal64( query_max, 3) ,
toDecimal64( avg_real_per_query, 3) ,
toDecimal64( query_min, 3) ,
runs
from (
select * from test_times_view
union all
select * from test_times_view_total
)
order by test = 'Total' desc, avg_real_per_query desc
;
2020-02-10 16:34:07 +00:00
2020-05-20 02:19:19 +00:00
-- report for all queries page, only main metric
2020-07-03 08:39:43 +00:00
create table all_tests_report engine File( TSV, 'report/all-queries.tsv' ) as
2020-08-05 23:02:18 +00:00
with
-- server_time is sometimes reported as zero ( if it' s less than 1 ms) ,
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
( left > right ? left / right : right / left) as times_change_float,
isFinite( times_change_float) as times_change_finite,
toDecimal64( times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite
? ( left > right ? '-' : '+' ) || toString( times_change_decimal) || 'x'
: '--' as times_change_str
2020-04-28 07:45:35 +00:00
select changed_fail, unstable_fail,
2020-08-05 23:02:18 +00:00
toDecimal64( left, 3) , toDecimal64( right, 3) , times_change_str,
2020-07-31 19:58:18 +00:00
toDecimal64( isFinite( diff) ? diff : 0, 3) ,
toDecimal64( isFinite( stat_threshold) ? stat_threshold : 0, 3) ,
test, query_index, query_display_name
2020-06-08 13:57:33 +00:00
from queries order by test, query_index;
2020-04-28 07:45:35 +00:00
2020-09-17 16:21:59 +00:00
2020-09-25 09:52:09 +00:00
-- Report of queries that have inconsistent 'short' markings:
-- 1) have short duration, but are not marked as 'short'
-- 2) the reverse -- marked 'short' but take too long.
-- The threshold for 2) is significantly larger than the threshold for 1) , to
-- avoid jitter.
2020-09-17 16:21:59 +00:00
create view shortness
2020-10-27 11:04:03 +00:00
as select
2020-09-17 16:21:59 +00:00
( test, query_index) in
( select * from file( 'analyze/marked-short-queries.tsv' , TSV,
'test text, query_index int' ) )
as marked_short,
time, test, query_index, query_display_name
2020-07-03 08:39:43 +00:00
from (
2020-09-03 01:42:25 +00:00
select right time, test, query_index from queries
2020-07-03 08:39:43 +00:00
union all
select time_median, test, query_index from partial_query_times
) times
left join query_display_names
on times.test = query_display_names.test
and times.query_index = query_display_names.query_index
;
2020-09-17 16:21:59 +00:00
create table inconsistent_short_marking_report
2020-09-21 12:14:47 +00:00
engine File( TSV, 'report/unexpected-query-duration.tsv' )
2020-09-17 16:21:59 +00:00
as select
2020-09-23 08:21:55 +00:00
multiIf( marked_short and time > 0.1, '\"short\" queries must run faster than 0.02 s' ,
not marked_short and time < 0.02, '\"normal\" queries must run longer than 0.1 s' ,
2020-09-17 16:21:59 +00:00
'' ) problem,
marked_short, time,
test, query_index, query_display_name
from shortness
where problem != ''
;
2020-07-03 08:39:43 +00:00
--------------------------------------------------------------------------------
-- various compatibility data formats follow, not related to the main report
-- keep the table in old format so that we can analyze new and old data together
create table queries_old_format engine File( TSVWithNamesAndTypes, 'queries.rep' )
2020-09-03 01:42:25 +00:00
as select 0 short, changed_fail, unstable_fail, left, right, diff,
2020-07-03 08:39:43 +00:00
stat_threshold, test, query_display_name query
from queries
;
2020-05-20 02:19:19 +00:00
-- new report for all queries with all metrics ( no page yet)
create table all_query_metrics_tsv engine File( TSV, 'report/all-query-metrics.tsv' ) as
select metric_name, left, right, diff,
floor( left > right ? left / right : right / left, 3) ,
stat_threshold, test, query_index, query_display_name
from query_metric_stats
2020-07-03 08:39:43 +00:00
left join query_display_names
on query_metric_stats.test = query_display_names.test
and query_metric_stats.query_index = query_display_names.query_index
2020-05-20 02:19:19 +00:00
order by test, query_index;
" 2> >(tee -a report/errors.log 1>&2)
2020-03-24 17:33:18 +00:00
2020-05-25 01:03:21 +00:00
2020-09-25 09:52:09 +00:00
# Prepare source data for metrics and flamegraphs for queries that were profiled
# by perf.py.
2020-03-24 17:33:18 +00:00
for version in { right,left}
2020-05-25 01:03:21 +00:00
do
2020-05-20 02:19:19 +00:00
rm -rf data
2020-05-25 01:42:56 +00:00
clickhouse-local --query "
2020-09-25 09:52:09 +00:00
create view query_profiles as
2020-05-20 02:19:19 +00:00
with 0 as left, 1 as right
2020-09-25 09:52:09 +00:00
select * from file( 'analyze/query-profiles.tsv' , TSV,
2020-05-20 02:19:19 +00:00
'test text, query_index int, query_id text, version UInt8, time float' )
where version = $version
;
2020-02-17 19:32:40 +00:00
2020-05-20 02:19:19 +00:00
create view query_display_names as select * from
file( 'analyze/query-display-names.tsv' , TSV,
'test text, query_index int, query_display_name text' )
;
2020-02-20 16:28:21 +00:00
2020-03-24 17:33:18 +00:00
create table unstable_query_runs engine File( TSVWithNamesAndTypes,
'unstable-query-runs.$version.rep' ) as
2020-09-25 09:52:09 +00:00
select query_profiles.test test, query_profiles.query_index query_index,
2020-07-15 13:29:00 +00:00
query_display_name, query_id
2020-09-25 09:52:09 +00:00
from query_profiles
2020-05-20 02:19:19 +00:00
left join query_display_names on
2020-09-25 09:52:09 +00:00
query_profiles.test = query_display_names.test
and query_profiles.query_index = query_display_names.query_index
2020-02-17 19:32:40 +00:00
;
2020-05-20 02:19:19 +00:00
create view query_log as select *
from file( '$version-query-log.tsv' , TSVWithNamesAndTypes,
'$(cat "$version-query-log.tsv.columns")' ) ;
2020-02-20 16:28:21 +00:00
2020-03-24 17:33:18 +00:00
create table unstable_run_metrics engine File( TSVWithNamesAndTypes,
'unstable-run-metrics.$version.rep' ) as
2020-05-20 02:19:19 +00:00
select
test, query_index, query_id,
ProfileEvents.Values value, ProfileEvents.Names metric
2020-03-24 17:33:18 +00:00
from query_log array join ProfileEvents
2020-05-20 02:19:19 +00:00
join unstable_query_runs using ( query_id)
2020-02-17 19:32:40 +00:00
;
2020-03-24 17:33:18 +00:00
create table unstable_run_metrics_2 engine File( TSVWithNamesAndTypes,
'unstable-run-metrics-2.$version.rep' ) as
2020-05-20 02:19:19 +00:00
select
test, query_index, query_id,
v, n
from (
select
test, query_index, query_id,
2020-03-17 02:41:47 +00:00
[ 'memory_usage' , 'read_bytes' , 'written_bytes' , 'query_duration_ms' ] n,
2020-05-20 02:19:19 +00:00
[ memory_usage, read_bytes, written_bytes, query_duration_ms] v
2020-03-24 17:33:18 +00:00
from query_log
2020-05-20 02:19:19 +00:00
join unstable_query_runs using ( query_id)
)
array join v, n;
create view trace_log as select *
from file( '$version-trace-log.tsv' , TSVWithNamesAndTypes,
'$(cat "$version-trace-log.tsv.columns")' ) ;
2020-05-25 01:03:21 +00:00
create view addresses_src as select addr,
-- Some functions change name between builds, e.g. '__clone' or 'clone' or
-- even '__GI__clone@@GLIBC_2.32' . This breaks differential flame graphs, so
-- filter them out here.
[ name, 'clone.S (filtered by script)' , 'pthread_cond_timedwait (filtered by script)' ]
-- this line is a subscript operator of the above array
[ 1 + multiSearchFirstIndex( name, [ 'clone.S' , 'pthread_cond_timedwait' ] ) ] name
2020-05-20 02:19:19 +00:00
from file( '$version-addresses.tsv' , TSVWithNamesAndTypes,
'$(cat "$version-addresses.tsv.columns")' ) ;
create table addresses_join_$version engine Join( any, left, address) as
select addr address, name from addresses_src;
2020-02-20 16:28:21 +00:00
2020-03-24 17:33:18 +00:00
create table unstable_run_traces engine File( TSVWithNamesAndTypes,
'unstable-run-traces.$version.rep' ) as
select
2020-05-20 02:19:19 +00:00
test, query_index, query_id,
2020-03-24 17:33:18 +00:00
count( ) value,
2020-05-25 01:03:21 +00:00
joinGet( addresses_join_$version , 'name' , arrayJoin( trace) )
|| '(' || toString( trace_type) || ')' metric
2020-05-20 02:19:19 +00:00
from trace_log
join unstable_query_runs using query_id
group by test, query_index, query_id, metric
2020-02-17 19:32:40 +00:00
order by count( ) desc
;
2020-03-24 17:33:18 +00:00
create table metric_devation engine File( TSVWithNamesAndTypes,
2020-05-25 01:03:21 +00:00
'report/metric-deviation.$version.tsv' ) as
2020-05-20 02:19:19 +00:00
-- first goes the key used to split the file with grep
select test, query_index, query_display_name,
2020-07-31 19:58:18 +00:00
toDecimal64( d, 3) d, q, metric
2020-05-20 02:19:19 +00:00
from (
select
test, query_index,
2020-07-31 19:58:18 +00:00
( q[ 3] - q[ 1] ) /q[ 2] d,
2020-05-20 02:19:19 +00:00
quantilesExact( 0, 0.5, 1) ( value) q, metric
from ( select * from unstable_run_metrics
union all select * from unstable_run_traces
union all select * from unstable_run_metrics_2) mm
group by test, query_index, metric
2020-07-31 19:58:18 +00:00
having isFinite( d) and d > 0.5 and q[ 3] > 5
2020-05-20 02:19:19 +00:00
) metrics
2020-05-25 01:42:56 +00:00
left join query_display_names using ( test, query_index)
2020-05-20 02:19:19 +00:00
order by test, query_index, d desc
2020-02-17 19:32:40 +00:00
;
2020-02-20 16:28:21 +00:00
2020-05-25 01:03:21 +00:00
create table stacks engine File( TSV, 'report/stacks.$version.tsv' ) as
2020-02-20 16:28:21 +00:00
select
2020-05-20 02:19:19 +00:00
-- first goes the key used to split the file with grep
2020-05-25 01:03:21 +00:00
test, query_index, trace_type, any( query_display_name) ,
2020-05-21 23:03:41 +00:00
-- next go the stacks in flamegraph format: 'func1;...;funcN count'
2020-02-20 16:28:21 +00:00
arrayStringConcat(
2020-05-25 01:03:21 +00:00
arrayMap(
addr -> joinGet( addresses_join_$version , 'name' , addr) ,
2020-02-20 16:28:21 +00:00
arrayReverse( trace)
) ,
';'
) readable_trace,
2020-05-20 02:19:19 +00:00
count( ) c
2020-03-24 17:33:18 +00:00
from trace_log
2020-02-20 16:28:21 +00:00
join unstable_query_runs using query_id
2020-05-25 01:03:21 +00:00
group by test, query_index, trace_type, trace
order by test, query_index, trace_type, trace
2020-02-20 16:28:21 +00:00
;
2020-07-31 19:58:18 +00:00
" 2> >(tee -a report/errors.log 1>&2) &
2020-03-24 17:33:18 +00:00
done
wait
2020-02-11 15:01:16 +00:00
2020-05-25 01:42:56 +00:00
# Create per-query flamegraphs
2020-02-20 16:28:21 +00:00
IFS = $'\n'
2020-03-24 17:33:18 +00:00
for version in { right,left}
2020-02-20 16:28:21 +00:00
do
2020-05-25 01:03:21 +00:00
for query in $( cut -d' ' -f1-4 " report/stacks. $version .tsv " | sort | uniq)
2020-03-24 17:33:18 +00:00
do
2020-05-20 02:19:19 +00:00
query_file = $( echo " $query " | cut -c-120 | sed 's/[/ ]/_/g' )
2020-05-21 23:03:41 +00:00
echo " $query_file " >> report/query-files.txt
2020-04-21 18:46:45 +00:00
# Build separate .svg flamegraph for each query.
2020-05-21 23:03:41 +00:00
# -F is somewhat unsafe because it might match not the beginning of the
# string, but this is unlikely and escaping the query for grep is a pain.
2020-05-25 01:03:21 +00:00
grep -F " $query " " report/stacks. $version .tsv " \
| cut -f 5- \
2020-03-24 17:33:18 +00:00
| sed 's/\t/ /g' \
2020-05-25 01:42:56 +00:00
| tee " report/tmp/ $query_file .stacks. $version .tsv " \
2020-05-21 23:03:41 +00:00
| ~/fg/flamegraph.pl --hash > " $query_file . $version .svg " &
2020-03-24 17:33:18 +00:00
done
2020-02-20 16:28:21 +00:00
done
wait
unset IFS
2020-05-21 23:03:41 +00:00
# Create differential flamegraphs.
2020-08-19 15:31:13 +00:00
while IFS = read -r query_file
2020-05-21 23:03:41 +00:00
do
2020-05-25 01:42:56 +00:00
~/fg/difffolded.pl " report/tmp/ $query_file .stacks.left.tsv " \
" report/tmp/ $query_file .stacks.right.tsv " \
| tee " report/tmp/ $query_file .stacks.diff.tsv " \
2020-05-21 23:03:41 +00:00
| ~/fg/flamegraph.pl > " $query_file .diff.svg " &
2020-08-19 15:31:13 +00:00
done < report/query-files.txt
2020-05-21 23:03:41 +00:00
wait
2020-05-25 01:42:56 +00:00
# Create per-query files with metrics. Note that the key is different from flamegraphs.
IFS = $'\n'
for version in { right,left}
do
for query in $( cut -d' ' -f1-3 " report/metric-deviation. $version .tsv " | sort | uniq)
do
query_file = $( echo " $query " | cut -c-120 | sed 's/[/ ]/_/g' )
# Ditto the above comment about -F.
grep -F " $query " " report/metric-deviation. $version .tsv " \
| cut -f4- > " $query_file . $version .metrics.rep " &
done
done
wait
unset IFS
2020-07-02 07:13:02 +00:00
# Prefer to grep for clickhouse_driver exception messages, but if there are none,
# just show a couple of lines from the log.
for log in *-err.log
do
test = $( basename " $log " "-err.log" )
{
2020-09-01 19:05:57 +00:00
# The second grep is a heuristic for error messages like
# "socket.timeout: timed out".
grep -h -m2 -i '\(Exception\|Error\):[^:]' " $log " \
|| grep -h -m2 -i '^[^ ]\+: ' " $log " \
2020-07-02 07:13:02 +00:00
|| head -2 " $log "
} | sed " s/^/ $test \t/ " >> run-errors.tsv || :
done
2020-02-07 18:34:24 +00:00
}
2020-02-10 18:37:46 +00:00
2020-06-25 20:19:27 +00:00
function report_metrics
{
    # Compare server-wide asynchronous metrics (CPU, memory, etc.) between the
    # left and right servers, and plot each metric over time.
    #
    # Inputs:  {left,right}-async-metric-log.tsv and their .columns files,
    #          saved earlier from the servers.
    # Outputs: metrics/metrics.tsv  -- joined per-metric time series;
    #          metrics/changes.tsv  -- metrics whose median changed by > 5%;
    #          metrics/<metric>.png -- one plot per metric;
    #          metrics/errors.log   -- stderr of the queries.

    # Defined elsewhere in this file.
    build_log_column_definitions

    rm -rf metrics ||:
    mkdir metrics

    clickhouse-local --query "
create view right_async_metric_log as
    select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes,
        '$(cat right-async-metric-log.tsv.columns)')
    ;

-- Use the right log as time reference because it may have higher precision.
create table metrics engine File(TSV, 'metrics/metrics.tsv') as
    with (select min(event_time) from right_async_metric_log) as min_time
    select name metric, r.event_time - min_time event_time, l.value as left, r.value as right
    from right_async_metric_log r
    asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
        '$(cat left-async-metric-log.tsv.columns)') l
    on l.name = r.name and r.event_time <= l.event_time
    order by metric, event_time
    ;

-- Show metrics that have changed
create table changes engine File(TSV, 'metrics/changes.tsv') as
    select metric, left, right,
        toDecimal64(diff, 3), toDecimal64(times_diff, 3)
    from (
        select metric, median(left) as left, median(right) as right,
            (right - left) / left diff,
            if(left > right, left / right, right / left) times_diff
        from metrics
        group by metric
        having abs(diff) > 0.05 and isFinite(diff)
    )
    order by diff desc
    ;
" 2> >(tee -a metrics/errors.log 1>&2)

    # Plot each metric into its own png, in parallel. Split the metric list on
    # newlines only, in case a name ever contains other whitespace.
    IFS=$'\n'
    for prefix in $(cut -f1 "metrics/metrics.tsv" | sort | uniq)
    do
        file="metrics/$prefix.tsv"
        # Select the rows of this metric by exact match on the first column.
        # A plain 'grep "^$prefix"' would interpret the name as a regex and
        # would also match metrics that merely start with this name,
        # mixing different metrics into one plot.
        awk -F'\t' -v p="$prefix" '$1 == p' "metrics/metrics.tsv" \
            | cut -f2- > "$file"
        gnuplot -e "
            set datafile separator '\t';
            set terminal png size 960,540;
            set xtics time format '%tH:%tM';
            set title '$prefix' noenhanced offset 0,-3;
            set key left top;
            plot
                '$file' using 1:2 with lines title 'Left'
                , '$file' using 1:3 with lines title 'Right'
            ;
        " \
            | convert - -filter point -resize "200%" "metrics/$prefix.png" &
    done
    wait
    unset IFS
}
2020-11-02 23:10:19 +00:00
function upload_results
{
if ! [ -v CHPC_DATABASE_URL ]
then
echo Database for test results is not specified, will not upload them.
return 0
fi
# Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000
2020-11-04 01:19:53 +00:00
# so I have to extract host and port with clickhouse-local. I tried to use
# Poco URI parser to support this in the client, but it's broken and can't
# parse host:port.
set +x # Don't show password in the log
clickhouse-client \
$( clickhouse-local --query " with ' ${ CHPC_DATABASE_URL } ' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV " ) \
--secure \
--user " ${ CHPC_DATABASE_USER } " \
--password " ${ CHPC_DATABASE_PASSWORD } " \
2020-11-04 15:07:34 +00:00
--config "right/config/client_config.xml" \
2020-11-04 01:19:53 +00:00
--database perftest \
--date_time_input_format= best_effort \
--query "
2020-11-06 10:45:42 +00:00
insert into query_metrics_v2
2020-11-04 01:19:53 +00:00
select
toDate( event_time) event_date,
toDateTime( '$(cd right/ch && git show -s --format=%ci "$SHA_TO_TEST" | cut -d' ' -f-2)' ) event_time,
$PR_TO_TEST pr_number,
'$REF_SHA' old_sha,
'$SHA_TO_TEST' new_sha,
test,
query_index,
query_display_name,
metric_name,
old_value,
new_value,
diff,
stat_threshold
from input( ' metric_name text, old_value float, new_value float, diff float,
ratio_display_text text, stat_threshold float,
test text, query_index int, query_display_name text' )
settings date_time_input_format = 'best_effort'
format TSV
settings date_time_input_format = 'best_effort'
" < report/all-query-metrics.tsv # Don't leave whitespace after INSERT: https://github.com/ClickHouse/ClickHouse/issues/16652
2020-11-02 23:10:19 +00:00
set -x
}
2020-04-17 15:47:01 +00:00
# Check that local and client are in PATH; under 'set -e' a missing tool
# aborts the script right here instead of failing mid-run.
for tool in clickhouse-local clickhouse-client
do
    "$tool" --version > /dev/null
done
2020-02-10 18:37:46 +00:00
# Stage dispatcher. The script can be re-entered at any named stage by setting
# the 'stage' variable (it defaults to empty, i.e. run everything -- see the
# top of the file). Every branch ends with ';&', which falls through to the
# next branch, so starting at a stage runs it and all subsequent stages.
case "$stage" in
"")
    ;&
"configure")
    time configure
    ;&
"restart")
    # Record the NUMA/CPU topology of the machine, to help interpret
    # performance jitter later. Best-effort: the tools may be absent.
    numactl --show ||:
    numactl --hardware ||:
    lscpu ||:
    dmidecode -t 4 ||:
    time restart
    ;&
"run_tests")
    # Ignore the errors to collect the log and build at least some report, anyway
    time run_tests ||:
    ;&
"get_profiles")
    # Check for huge pages.
    cat /sys/kernel/mm/transparent_hugepage/enabled > thp-enabled.txt ||:
    cat /proc/meminfo > meminfo.txt ||:
    for pid in $(pgrep -f clickhouse-server)
    do
        cat "/proc/$pid/smaps" > "$pid-smaps.txt" ||:
    done

    # We had a bug where getting profiles froze sometimes, so try to save some
    # logs if this happens again. Give the servers some time to collect all info,
    # then trace and kill. Start in a subshell, so that both function don't
    # interfere with each other's jobs through `wait`. Also make the subshell
    # have its own process group, so that we can then kill it with all its child
    # processes. Somehow it doesn't kill the children by itself when dying.
    set -m
    ( get_profiles_watchdog ) &
    watchdog_pid=$!
    set +m
    # Check that the watchdog started OK.
    kill -0 $watchdog_pid

    # If the tests fail with OOM or something, still try to restart the servers
    # to collect the logs. Prefer not to restart, because addresses might change
    # and we won't be able to process trace_log data. Start in a subshell, so that
    # it doesn't interfere with the watchdog through `wait`.
    ( get_profiles || { restart && get_profiles ; } ) ||:

    # Kill the whole process group, because somehow when the subshell is killed,
    # the sleep inside remains alive and orphaned.
    while env kill -- -$watchdog_pid ; do sleep 1; done

    # Stop the servers to free memory for the subsequent query analysis.
    while killall clickhouse; do echo . ; sleep 1 ; done
    echo Servers stopped.
    ;&
"analyze_queries")
    time analyze_queries ||:
    ;&
"report")
    time report ||:
    ;&
"report_metrics")
    time report_metrics ||:
    # Fold the metric-stage errors into the main error log for the html report.
    cat metrics/errors.log >> report/errors.log ||:
    ;&
"report_html")
    # Build the 'all queries' page first (best-effort), then the main report.
    time "$script_dir/report.py" --report=all-queries > all-queries.html 2> >(tee -a report/errors.log 1>&2) ||:
    time "$script_dir/report.py" > report.html
    ;&
"upload_results")
    time upload_results ||:
    ;&
esac
2020-04-21 21:53:13 +00:00
# Print some final debug info to help debug Weirdness, of which there is plenty.
jobs # any background jobs still tracked by this shell
pstree -apgT # process tree with arguments, PIDs and process groups