#!/bin/bash
# Driver for ClickHouse performance comparison: downloads two builds
# (identified by PR number + commit SHA), runs perf tests against both,
# and produces an HTML report.
set -ex
set -o pipefail
# Exit on INT/TERM, and kill the whole process group (background wgets,
# spawned servers) when the script exits for any reason.
trap "exit" INT TERM
trap "kill 0" EXIT

# Absolute directory of this script, so perf.py/eqmed.sql/report.py can be
# found regardless of the current working directory.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

# Shared pristine database directory; both servers get hardlinked copies.
mkdir db0 ||:

# Positional arguments: left/right PR numbers and commit SHAs.
left_pr=$1
left_sha=$2
right_pr=$3
right_sha=$4
# Download the two builds' performance packages into left/ and right/,
# and fetch the test datasets into db0/. All downloads run in parallel.
function download
{
    rm -r left ||:
    mkdir left ||:
    rm -r right ||:
    mkdir right ||:

    la="$left_pr-$left_sha.tgz"
    ra="$right_pr-$right_sha.tgz"

    # Might have the same version on left and right, in which case we must
    # not download the same archive twice in parallel into the same file.
    if ! [ "$la" = "$ra" ]
    then
        wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/performance/performance.tgz" -O "$la" && tar -C left --strip-components=1 -zxvf "$la" &
        wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$right_pr/$right_sha/performance/performance.tgz" -O "$ra" && tar -C right --strip-components=1 -zxvf "$ra" &
    else
        # Single download, then unpack the same archive into both trees.
        wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/performance/performance.tgz" -O "$la" && { tar -C left --strip-components=1 -zxvf "$la" & tar -C right --strip-components=1 -zxvf "$ra" & } &
    fi

    # Each dataset fetch runs in its own backgrounded subshell, so the
    # 'cd db0' does not affect the parent shell.
    cd db0 && wget -nv -nd -c "https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar" && tar -xvf hits_10m_single.tar &
    cd db0 && wget -nv -nd -c "https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar" && tar -xvf hits_100m_single.tar &
    cd db0 && wget -nv -nd -c "https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar" && tar -xvf hits_v1.tar &

    wait
}
# Configure both server trees: distinct TCP ports, perf-test config tweaks,
# then start a temporary server on db0 to set up the test database.
function configure
{
    # Use non-default ports so left (9001) and right (9002) can run together.
    sed -i 's/<tcp_port>9000/<tcp_port>9001/g' left/config/config.xml
    sed -i 's/<tcp_port>9000/<tcp_port>9002/g' right/config/config.xml

    # Log to console and disable text/metric logs -- they only add noise
    # and overhead during perf runs.
    cat > right/config/config.d/zz-perf-test-tweaks.xml <<EOF
<yandex>
    <logger>
        <console>true</console>
    </logger>
    <text_log remove="remove">
        <table remove="remove"/>
    </text_log>
    <metric_log remove="remove">
        <table remove="remove"/>
    </metric_log>
</yandex>
EOF

    cp right/config/config.d/zz-perf-test-tweaks.xml left/config/config.d/zz-perf-test-tweaks.xml

    rm left/config/config.d/metric_log.xml ||:
    rm left/config/config.d/text_log.xml ||:
    rm right/config/config.d/metric_log.xml ||:
    rm right/config/config.d/text_log.xml ||:

    # Start a temporary server to rename the tables
    while killall clickhouse; do echo .; sleep 1; done
    echo all killed

    set -m # Spawn temporary in its own process group
    left/clickhouse server --config-file=left/config/config.xml -- --path db0 &> setup-log.txt &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid
    set +m

    # Wait until the server answers queries; kill -0 aborts the loop early
    # (via set -e) if the server process died.
    while ! left/clickhouse client --port 9001 --query "select 1"; do kill -0 $left_pid; echo .; sleep 1; done
    echo server for setup started

    left/clickhouse client --port 9001 --query "create database test" ||:
    left/clickhouse client --port 9001 --query "rename table datasets.hits_v1 to test.hits" ||:
}
# Stop any running servers, give each side a fresh hardlinked copy of the
# db0 data, then start the left (9001) and right (9002) servers.
function restart
{
    while killall clickhouse; do echo .; sleep 1; done
    echo all killed

    # Make copies of the original db for both servers. Use hardlinks instead
    # of copying.
    rm -r left/db ||:
    rm -r right/db ||:
    cp -al db0/ left/db/
    cp -al db0/ right/db/

    set -m # Spawn servers in their own process groups

    left/clickhouse server --config-file=left/config/config.xml -- --path left/db &> left/log.txt &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid

    right/clickhouse server --config-file=right/config/config.xml -- --path right/db &> right/log.txt &
    right_pid=$!
    kill -0 $right_pid
    disown $right_pid

    set +m

    # Poll until each server answers; kill -0 makes the loop abort (set -e)
    # if the server process has died instead of spinning forever.
    while ! left/clickhouse client --port 9001 --query "select 1"; do kill -0 $left_pid; echo .; sleep 1; done
    echo left ok

    while ! right/clickhouse client --port 9002 --query "select 1"; do kill -0 $right_pid; echo .; sleep 1; done
    echo right ok

    # Show non-system tables on both sides as a sanity check.
    left/clickhouse client --port 9001 --query "select * from system.tables where database != 'system'"
    right/clickhouse client --port 9002 --query "select * from system.tables where database != 'system'"
}
# Run every perf test (or the subset selected by $CHPC_TEST_GLOB) with
# perf.py, recording per-test wall-clock times and per-query results.
function run_tests
{
    # Just check that the script runs at all
    "$script_dir/perf.py" --help > /dev/null

    rm -v test-times.tsv ||:

    # FIXME remove some broken long tests
    rm right/performance/{IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format}.xml ||:

    # Run the tests
    for test in right/performance/${CHPC_TEST_GLOB:-*.xml}
    do
        test_name=$(basename "$test" ".xml")
        echo test "$test_name"

        # TIMEFORMAT shapes the output of the 'time' keyword below into a
        # TSV row: test name, real, user, sys ('%%' is printf's literal %).
        TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
        # the grep is to filter out set -x output and keep only time output
        { time "$script_dir/perf.py" "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue

        # Split the raw output into query timings and client-time rows,
        # then compute the comparison report for this test.
        grep ^query "$test_name-raw.tsv" | cut -f2- > "$test_name-queries.tsv"
        grep ^client-time "$test_name-raw.tsv" | cut -f2- > "$test_name-client-time.tsv"
        right/clickhouse local --file "$test_name-queries.tsv" --structure 'query text, run int, version UInt32, time float' --query "$(cat "$script_dir/eqmed.sql")" > "$test_name-report.tsv"
    done
}
# Analyze results
# Aggregate all per-test TSVs with clickhouse-local into the report tables
# (changed-perf, unstable-queries, test-times, ...) and render report.html.
function report
{
    # NOTE(review): appears unused in this function -- kept for
    # compatibility; verify before removing.
    result_structure="left float, right float, diff float, rd Array(float), query text"

    rm test-times.tsv test-dump.tsv unstable.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv ||:

    right/clickhouse local --query "
create table queries engine Memory as select
    replaceAll(_file, '-report.tsv', '') test,
    -- Mark queries as unstable (small diff but wide randomization spread)
    -- or changed (diff larger than the spread).
    if(abs(diff) < 0.05 and rd[3] > 0.05, 1, 0) unstable,
    if(abs(diff) > 0.05 and abs(diff) > rd[3], 1, 0) changed,
    *
from file('*-report.tsv', TSV, 'left float, right float, diff float, rd Array(float), query text')
-- FIXME Comparison mode doesn't make sense for queries that complete
-- immediately, so for now we pretend they don't exist. We don't want to
-- remove them altogether because we want to be able to detect regressions,
-- but the right way to do this is not yet clear.
where left + right > 0.01;

create table changed_perf_tsv engine File(TSV, 'changed-perf.tsv') as
    select left, right, diff, rd, test, query from queries where changed
    order by rd[3] desc;

create table unstable_queries_tsv engine File(TSV, 'unstable-queries.tsv') as
    select left, right, diff, rd, test, query from queries where unstable
    order by rd[3] desc;

create table unstable_tests_tsv engine File(TSV, 'bad-tests.tsv') as
    select test, sum(unstable) u, sum(changed) c, u + c s from queries
    group by test having s > 0 order by s desc;

create table query_time engine Memory as select *, replaceAll(_file, '-client-time.tsv', '') test
    from file('*-client-time.tsv', TSV, 'query text, client float, server float');

create table wall_clock engine Memory as select *
    from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');

create table slow_on_client_tsv engine File(TSV, 'slow-on-client.tsv') as
    select client, server, floor(client/server, 3) p, query
    from query_time where p > 1.02 order by p desc;

create table test_time engine Memory as
    select test, sum(client) total_client_time,
        max(client) query_max, min(client) query_min, count(*) queries
    from query_time
    -- for consistency, filter out everything we filtered out of queries table
    semi join queries using query
    group by test;

create table test_times_tsv engine File(TSV, 'test-times.tsv') as
    select wall_clock.test, real,
        floor(total_client_time, 3),
        queries,
        floor(query_max, 3),
        floor(real / queries, 3) avg_real_per_query,
        floor(query_min, 3)
    from test_time right join wall_clock using test
    order by query_max / query_min desc;

create table all_queries_tsv engine File(TSV, 'all-queries.tsv') as
    select left, right, diff, rd, test, query
    from queries order by rd[3] desc;
"

    # Remember that grep sets error code when nothing is found, hence the
    # trailing '||:'.
    grep Exception:[^:] *-err.log > run-errors.log ||:

    "$script_dir/report.py" > report.html
}
# Stage dispatcher: $stage selects where to (re)start; ';&' falls through
# so every later stage also runs. An empty/unset stage runs everything.
case "$stage" in
"")
    ;&
"download")
    download
    configure
    restart
    run_tests
    ;&
"report")
    report
    ;&
esac