#!/bin/bash
# shellcheck disable=SC1091
source /setup_export_logs.sh
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
# Avoid overlaps with previous runs
dmesg --clear
# fail on errors, verbose and export all env variables
set -e -x -a
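# MAX_RUN_TIME is the overall time budget (in seconds) handed down by the CI
# environment. Both "unset" and an explicit 0 mean "use the 9000-second default".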
MAX_RUN_TIME=${MAX_RUN_TIME:-9000}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME))
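# 0/1 feature switches provided by CI; ":=" assigns the default when the
# variable is unset or empty.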
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
# Choose random timezone for this test run.
#
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
# (it will choose between default server timezone and something specific).
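#
# zone.tab rows are "country-code coordinates TZ-name [comments]", so the third
# field is the timezone identifier; rg -v '#' drops the comment lines first.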
TZ = " $( rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1) "
echo " Chosen random timezone $TZ "
ln -snf " /usr/share/zoneinfo/ $TZ " /etc/localtime && echo " $TZ " > /etc/timezone
dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
dpkg -i package_folder/clickhouse-library-bridge_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
echo " $BUGFIX_VALIDATE_CHECK "
# Check that the tools are available under short names
if [[ -z "$BUGFIX_VALIDATE_CHECK" ]]; then
    ch --query "SELECT 1" || exit 1
    chl --query "SELECT 1" || exit 1
    chc --version || exit 1
fi
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
# shellcheck disable=SC1091
source /attach_gdb.lib
# shellcheck disable=SC1091
source /utils.lib
# install test configs
/usr/share/clickhouse-test/config/install.sh
./setup_minio.sh stateless
./setup_hdfs_minicluster.sh
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; then
    sudo sed -i "/<use_compression>1<\/use_compression>/d" /etc/clickhouse-server/config.d/zookeeper.xml

    # it contains some new settings, but we can safely remove it
    rm /etc/clickhouse-server/config.d/handlers.yaml
    rm /etc/clickhouse-server/users.d/s3_cache_new.xml
    rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml

    function remove_keeper_config()
    {
        sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
    }
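    # $1 is the tag name and $2 is a regex for its value, so the calls below
    # drop any <commit_logs_cache_size_threshold>{digits}</commit_logs_cache_size_threshold>-style
    # lines from keeper_port.xml.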
    # commit_logs_cache_size_threshold setting doesn't exist on some older versions
    remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"
    remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
fi

export IS_FLAKY_CHECK=0

# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
    export IS_FLAKY_CHECK=1

    export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
    export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
    export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000

    mkdir -p /var/run/clickhouse-server
fi
# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
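# Give the extra server instances their own filesystem cache directories, so
# they don't collide with the main server's cache paths.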
if [ [ " $USE_DATABASE_REPLICATED " -eq 1 ] ] ; then
2024-03-19 16:53:32 +00:00
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_2/</custom_cached_disks_base_directory>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
2023-09-27 09:35:03 +00:00
2022-05-26 10:03:12 +00:00
mkdir -p /var/run/clickhouse-server1
sudo chown clickhouse:clickhouse /var/run/clickhouse-server1
2021-03-18 12:49:31 +00:00
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
2022-05-26 10:03:12 +00:00
--pid-file /var/run/clickhouse-server1/clickhouse-server.pid \
2021-03-18 12:49:31 +00:00
-- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
--logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
2021-03-13 01:25:13 +00:00
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
2021-04-12 18:40:34 +00:00
--mysql_port 19004 --postgresql_port 19005 \
2021-03-29 20:04:50 +00:00
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
2022-08-06 10:00:53 +00:00
--prometheus.port 19988 \
2021-03-13 01:25:13 +00:00
--macros.replica r2 # It doesn't work :(
2021-03-13 10:22:48 +00:00
2022-05-26 10:03:12 +00:00
mkdir -p /var/run/clickhouse-server2
sudo chown clickhouse:clickhouse /var/run/clickhouse-server2
2021-03-18 12:49:31 +00:00
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
2022-05-26 10:03:12 +00:00
--pid-file /var/run/clickhouse-server2/clickhouse-server.pid \
2021-03-18 12:49:31 +00:00
-- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
--logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
2021-03-13 10:22:48 +00:00
--tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
2021-04-12 18:40:34 +00:00
--mysql_port 29004 --postgresql_port 29005 \
2021-03-29 20:04:50 +00:00
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
2022-08-06 10:00:53 +00:00
--prometheus.port 29988 \
2021-03-13 10:22:48 +00:00
--macros.shard s2 # It doesn't work :(
2020-07-08 08:41:39 +00:00
fi

if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    mkdir -p /var/run/clickhouse-server1
    sudo chown clickhouse:clickhouse /var/run/clickhouse-server1
    sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
        --pid-file /var/run/clickhouse-server1/clickhouse-server.pid \
        -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
        --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
        --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
        --mysql_port 19004 --postgresql_port 19005 \
        --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
        --prometheus.port 19988 \
        --macros.replica r2   # It doesn't work :(
fi
# Wait for the server to start, but not for too long.
for _ in {1..100}
do
    clickhouse-client --query "SELECT 1" && break
    sleep 1
done
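# setup_logs_replication (from /setup_export_logs.sh) starts shipping the
# server's system.* log tables to the CI logs cluster.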
setup_logs_replication
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
# create tables for minio log webhooks
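# The event_time column is derived from the raw JSON: JSONExtractRaw(log, 'time')
# returns the quoted "time" value, and substring(..., 2, 29) strips the opening
# quote and keeps the first 29 characters of the timestamp.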
clickhouse-client --query " CREATE TABLE minio_audit_logs
(
log String,
event_time DateTime64( 9) MATERIALIZED parseDateTime64BestEffortOrZero( substring( JSONExtractRaw( log, 'time' ) , 2, 29) , 9, 'UTC' )
)
ENGINE = MergeTree
ORDER BY tuple( ) "
clickhouse-client --query " CREATE TABLE minio_server_logs
(
log String,
event_time DateTime64( 9) MATERIALIZED parseDateTime64BestEffortOrZero( substring( JSONExtractRaw( log, 'time' ) , 2, 29) , 9, 'UTC' )
)
ENGINE = MergeTree
ORDER BY tuple( ) "
# create minio log webhooks for both audit and server logs
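# Each webhook endpoint is the local ClickHouse HTTP interface with a
# URL-encoded "INSERT ... FORMAT LineAsString" query, so every log line that
# minio POSTs becomes one row in the corresponding table.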
./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString"
./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString"

max_retries=100
retry=1
while [ $retry -le $max_retries ]; do
    echo "clickminio restart attempt $retry:"
    output=$(./mc admin service restart clickminio 2>&1)
    echo "$output"
    if echo "$output" | grep -q "Restarted \`clickminio\` successfully in 1 seconds"; then
        echo "Restarted clickminio successfully."
        break
    fi
    sleep 1
    retry=$((retry + 1))
done
if [ $retry -gt $max_retries ]; then
    echo "Failed to restart clickminio after $max_retries attempts."
fi

./mc admin service restart clickminio
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
function fn_exists() {
    declare -F "$1" > /dev/null
}
# FIXME: to not break old builds, clean on 2023-09-01
function try_run_with_retry() {
    local total_retries="$1"
    shift
    if fn_exists run_with_retry; then
        run_with_retry "$total_retries" "$@"
    else
        "$@"
    fi
}
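# Usage: try_run_with_retry <total_retries> <command> [args...]; it delegates
# to run_with_retry when the sourced libs provide it, and otherwise runs the
# command once.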

function run_tests()
{
    set -x
    # We can have several additional options so we pass them as array because it is more ideologically correct.
    read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"

    HIGH_LEVEL_COVERAGE=YES

    # Use random order in flaky check
    if [ "$NUM_TRIES" -gt "1" ]; then
        ADDITIONAL_OPTIONS+=('--order=random')
        HIGH_LEVEL_COVERAGE=NO
    fi

    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--s3-storage')
    fi

    if [[ -n "$USE_AZURE_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
        # to disable the same tests
        ADDITIONAL_OPTIONS+=('--azure-blob-storage')
        # azurite is slow, but with these two settings it can be super slow
        ADDITIONAL_OPTIONS+=('--no-random-settings')
        ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
    fi

    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--shared-catalog')
    fi

    if [[ "$USE_DISTRIBUTED_CACHE" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--distributed-cache')
    fi

    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--replicated-database')
        # Too many tests fail for DatabaseReplicated in parallel.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('3')
    elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
        # Coverage on a per-test basis could only be collected sequentially.
        # Do not set the --jobs parameter.
        echo "Running tests with coverage collection."
    else
        # All other configurations are OK.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('8')
    fi

    if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
        ADDITIONAL_OPTIONS+=('--run-by-hash-num')
        ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
        ADDITIONAL_OPTIONS+=('--run-by-hash-total')
        ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
        HIGH_LEVEL_COVERAGE=NO
    fi

    if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--db-engine=Ordinary')
    fi

    if [[ "${HIGH_LEVEL_COVERAGE}" = "YES" ]]; then
        ADDITIONAL_OPTIONS+=('--report-coverage')
    fi

    ADDITIONAL_OPTIONS+=('--report-logs-stats')

    try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

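    # Reserve 800 seconds of MAX_RUN_TIME for everything that happens after the
    # tests, and never let a single test run exceed 8400 seconds.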
    TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800))
    START_TIME=${SECONDS}
    set +e
    timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s \
        clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
            --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
    set -e
    DURATION=$((SECONDS - START_TIME))

    echo "Elapsed ${DURATION} seconds."
    if [[ $DURATION -ge $TIMEOUT ]]
    then
        echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds."
    fi
}
export -f run_tests

# This should be enough to set up the job and collect artifacts
TIMEOUT=$((MAX_RUN_TIME - 700))

if [ "$NUM_TRIES" -gt "1" ]; then
    # We don't run tests with Ordinary database in PRs, only in master.
    # So run new/changed tests with Ordinary at least once in flaky check.
    timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
        | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' \
        | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' \
        || :
fi

timeout_with_logging "$TIMEOUT" bash -c run_tests || :
echo "Files in current directory"
ls -la ./
echo "Files in root directory"
ls -la /
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
clickhouse-client -q "system flush logs" || :
# stop logs replication to make it possible to dump logs tables via clickhouse-local
stop_logs_replication
# Try to get logs while server is running
failed_to_save_logs=0
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
    if ! clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.zst' format TSVWithNamesAndTypes"; then
        failed_to_save_logs=1
    fi

    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then
            failed_to_save_logs=1
        fi
        if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
            failed_to_save_logs=1
        fi
    fi

    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        # The shared-catalog instance is the one started on port 19000 above
        if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then
            failed_to_save_logs=1
        fi
    fi
done
# collect minio audit and server logs
clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow"
clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow"
# Stop server so we can safely read data with clickhouse-local.
# Why do we read data with clickhouse-local?
# Because it's the simplest way to read it when server has crashed.
sudo clickhouse stop || :
if [ [ " $USE_DATABASE_REPLICATED " -eq 1 ] ] ; then
2022-05-26 10:03:12 +00:00
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 || :
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 || :
2022-05-03 22:10:40 +00:00
fi

if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server1 || :
fi
# Kill minio admin client to stop collecting logs
kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log || :
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log || :

zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
data_path_config="--path=/var/lib/clickhouse/"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
    # We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
    data_path_config="--config-file=/etc/clickhouse-server/config.xml"
fi
# If the server crashed, dump system logs with clickhouse-local
if [ $failed_to_save_logs -ne 0 ]; then
    # Compress tables.
    #
    # NOTE:
    # - due to tests with s3 storage we cannot use /var/lib/clickhouse/data
    #   directly
    # - even though CI auto-compresses some files (but not *.tsv), it does this
    #   only for files >64MB, and we want these files to be compressed explicitly
    for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
    do
        clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst || :

        if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst || :
            clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst || :
        fi

        if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst || :
        fi
    done
fi
# Also export trace log in flamegraph-friendly format.
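# Each output line is "frame1;frame2;...<TAB>samples", a collapsed-stack format
# that tools such as flamegraph.pl can render directly.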
for trace_type in CPU Memory Real
do
    clickhouse-local "$data_path_config" --only-system-tables -q "
        select
            arrayStringConcat((arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#', demangle(addressToSymbol(x))), trace)), ';') AS stack,
            count(*) AS samples
        from system.trace_log
        where trace_type = '$trace_type'
        group by trace
        order by samples desc
        settings allow_introspection_functions = 1
        format TabSeparated" \
    | zstd --threads=0 > "/test_output/trace-log-$trace_type-flamegraph.tsv.zst" || :
done
# Grep logs for sanitizer asserts, crashes and other critical errors
check_logs_for_critical_errors
# Compressed (FIXME: remove once only GitHub Actions is left)
rm /var/log/clickhouse-server/clickhouse-server.log
mv /var/log/clickhouse-server/stderr.log /test_output/ || :
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
    tar --zstd -chf /test_output/clickhouse_coverage.tar.zst /profraw || :
fi
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination || :
rm -rf /var/lib/clickhouse/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse/store || :
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql || :
if [ [ " $USE_DATABASE_REPLICATED " -eq 1 ] ] ; then
2023-02-14 12:12:53 +00:00
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log || :
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log || :
2023-01-01 20:17:43 +00:00
zstd --threads= 0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst || :
zstd --threads= 0 < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.zst || :
2021-03-18 12:49:31 +00:00
mv /var/log/clickhouse-server/stderr1.log /test_output/ || :
2021-03-13 01:25:13 +00:00
mv /var/log/clickhouse-server/stderr2.log /test_output/ || :
2021-04-14 15:04:57 +00:00
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination || :
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination || :
2021-03-13 01:25:13 +00:00
fi
if [ [ " $USE_SHARED_CATALOG " -eq 1 ] ] ; then
2024-04-09 00:33:18 +00:00
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log || :
zstd --threads= 0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst || :
mv /var/log/clickhouse-server/stderr1.log /test_output/ || :
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination || :
fi
collect_core_dumps