#!/bin/bash
# Fail on errors (-e), trace executed commands (-x), and export all variables (-a)
set -e -x -a
# Choose random timezone for this test run.
TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Choosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
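
# Install the ClickHouse packages that were built for this check.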
dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
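
# Put the test runner on PATH.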
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
# install test configs
/usr/share/clickhouse-test/config/install.sh
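
# Start a local MinIO server: some stateless tests exercise S3-backed storage
# and expect an S3-compatible endpoint to be available.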
./setup_minio.sh
# For the flaky check we also enable the thread fuzzer.
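# The THREAD_FUZZER_* variables below are read by ClickHouse's built-in
# ThreadFuzzer, which randomly injects sleeps and thread migrations around
# pthread_mutex lock/unlock calls to provoke latent race conditions.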
if [ "$NUM_TRIES" -gt "1" ]; then
    export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
    export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
    export THREAD_FUZZER_SLEEP_TIME_US=100000

    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
    # The simplest way to forward env variables to the server
    sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
else
    sudo clickhouse start
fi
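
# For the DatabaseReplicated configuration, start two extra server instances
# (replica r2 and shard s2) with all ports shifted into the 1xxxx and 2xxxx
# ranges, so that a multi-node cluster can run on a single host.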
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
    -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
    --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
    --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
    --mysql_port 19004 --postgresql_port 19005 \
    --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
    --macros.replica r2   # It doesn't work :(
    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
    -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
    --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
    --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
    --mysql_port 29004 --postgresql_port 29005 \
    --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
    --macros.shard s2   # It doesn't work :(

    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000))  # min(MAX_RUN_TIME, 2.5 hours)
    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000))    # set to 2.5 hours if 0 (unlimited)
fi
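
# Give the server(s) some time to finish startup before running tests.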
sleep 5
function run_tests()
{
    set -x

    # We can have several additional options, so we pass them as an array
    # because it's more ideologically correct.
    read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
    # Skip these tests, because they fail when we rerun them multiple times
    if [ "$NUM_TRIES" -gt "1" ]; then
        ADDITIONAL_OPTIONS+=('--order=random')
        ADDITIONAL_OPTIONS+=('--skip')
        ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
        # Note that the flaky check must be run in parallel, but for now we run
        # everything in parallel except DatabaseReplicated. See below.
    fi
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--s3-storage')
fi
2021-02-15 10:26:34 +00:00
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
# Cannot be used with replicated database, due to distributed_ddl_output_mode=none
ADDITIONAL_OPTIONS+=('--no-left-queries-check')
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('2')
else
# Too many tests fail for DatabaseReplicated in parallel. All other
# configurations are OK.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('8')
2021-02-15 10:26:34 +00:00
fi
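
    # Spread the test set across several CI runners by hashing test names.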
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
ADDITIONAL_OPTIONS+=('--run-by-hash-num')
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
ADDITIONAL_OPTIONS+=('--run-by-hash-total')
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
fi
    set +e
    clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
        --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
    set -e
}
export -f run_tests
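
# Cap the whole run at MAX_RUN_TIME seconds; "||:" ignores the exit status so
# that log and artifact collection below still runs on timeout or failure.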
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
echo "Files in current directory"
ls -la ./
echo "Files in root directory"
ls -la /
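
# Turn the raw test log into the status/result files consumed by the CI; if
# parsing fails, record an explicit failure status.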
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
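
# Flush in-memory buffers of the system.* log tables so the exports below see
# all entries.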
clickhouse-client -q "system flush logs" ||:
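
# Surface any fatal server errors directly in the CI log.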
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
# Compress tables.
#
# NOTE:
# - due to tests with s3 storage we cannot use /var/lib/clickhouse/data
#   directly
# - even though the CI auto-compresses some files (but not *.tsv), it does this
#   only for files >64MB, and we want these files to be compressed explicitly
for table in query_log zookeeper_log trace_log transactions_info_log
do
    clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz &
    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.1.tsv.gz &
        clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.2.tsv.gz &
    fi
done
wait ||:
# Also export trace log in flamegraph-friendly format.
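# The query emits "frame1;frame2;...;frameN<TAB>samples" lines, i.e. the
# semicolon-separated collapsed-stack format understood by flame graph tools.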
for trace_type in CPU Memory Real
do
    clickhouse-client -q "
            select
                arrayStringConcat((arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#', demangle(addressToSymbol(x)) ), trace)), ';') AS stack,
                count(*) AS samples
            from system.trace_log
            where trace_type = '$trace_type'
            group by trace
            order by samples desc
            settings allow_introspection_functions = 1
            format TabSeparated" \
        | pigz > "/test_output/trace-log-$trace_type-flamegraph.tsv.gz" &
done
wait ||:
# Already compressed above (FIXME: remove once only GitHub Actions is left)
rm /var/log/clickhouse-server/clickhouse-server.log
mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
fi
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
2021-07-09 14:05:35 +00:00
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
2021-03-18 12:49:31 +00:00
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
# FIXME: remove once only github actions will be left
rm /var/log/clickhouse-server/clickhouse-server1.log
rm /var/log/clickhouse-server/clickhouse-server2.log
2021-03-18 12:49:31 +00:00
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
fi