Backport #60553 to 24.2: Update shellcheck
Commit 1cacf001a4 (parent 88f0e03bba)
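Most of the hunks below quote shell variables, add targeted "# shellcheck disable=..." directives, rename the stop/start helpers, and install shellcheck 0.9.0 from upstream releases so the docker and test scripts pass the newer checker. A minimal sketch of the main pattern being applied (the variable and path are invented for illustration, not taken from the diff):

    #!/usr/bin/env bash
    # Hypothetical example: a path containing a space
    log="/var/log/example dir/server.log"
    # head -n 10 $log      # unquoted: shellcheck SC2086, word splitting breaks the path
    head -n 10 "$log"      # quoted: the path stays a single argument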
@@ -20,6 +20,8 @@ if [ -n "$WITH_LOCAL_BINARY" ]; then
     clickhouse_source="--clickhouse-source /clickhouse"
 fi
 
+# $TESTS_TO_RUN comes from docker
+# shellcheck disable=SC2153
 tests_count="--test-count $TESTS_TO_RUN"
 tests_to_run="test-all"
 workload=""
@@ -47,6 +49,6 @@ fi
 
 cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"
 
-(lein run server $tests_to_run $workload --keeper "$KEEPER_NODE" $concurrency $nemesis $rate --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 $clickhouse_source $tests_count --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
+(lein run server $tests_to_run "$workload" --keeper "$KEEPER_NODE" "$concurrency" "$nemesis" "$rate" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 "$clickhouse_source" "$tests_count" --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
 
 mv store "$TEST_OUTPUT/"
@@ -1,5 +1,6 @@
 #!/bin/bash
 
+# shellcheck source=./utils.lib
 source /utils.lib
 
 function attach_gdb_to_clickhouse()
@@ -19,7 +19,7 @@ function escaped()
 
 function head_escaped()
 {
-    head -n $FAILURE_CONTEXT_LINES $1 | escaped
+    head -n "$FAILURE_CONTEXT_LINES" "$1" | escaped
 }
 
 function unts()
@@ -29,15 +29,15 @@ function unts()
 
 function trim_server_logs()
 {
-    head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
+    head -n "$FAILURE_CONTEXT_LINES" "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
 }
 
 function install_packages()
 {
-    dpkg -i $1/clickhouse-common-static_*.deb
-    dpkg -i $1/clickhouse-common-static-dbg_*.deb
-    dpkg -i $1/clickhouse-server_*.deb
-    dpkg -i $1/clickhouse-client_*.deb
+    dpkg -i "$1"/clickhouse-common-static_*.deb
+    dpkg -i "$1"/clickhouse-common-static-dbg_*.deb
+    dpkg -i "$1"/clickhouse-server_*.deb
+    dpkg -i "$1"/clickhouse-client_*.deb
 }
 
 function configure()
@@ -54,11 +54,11 @@ function configure()
     sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
 
     function randomize_config_boolean_value {
-        value=$(($RANDOM % 2))
-        sudo cat /etc/clickhouse-server/config.d/$2.xml \
+        value=$((RANDOM % 2))
+        sudo cat "/etc/clickhouse-server/config.d/$2.xml" \
             | sed "s|<$1>[01]</$1>|<$1>$value</$1>|" \
-            > /etc/clickhouse-server/config.d/$2.xml.tmp
-        sudo mv /etc/clickhouse-server/config.d/$2.xml.tmp /etc/clickhouse-server/config.d/$2.xml
+            > "/etc/clickhouse-server/config.d/$2.xml.tmp"
+        sudo mv "/etc/clickhouse-server/config.d/$2.xml.tmp" "/etc/clickhouse-server/config.d/$2.xml"
     }
 
     if [[ -n "$RANDOMIZE_KEEPER_FEATURE_FLAGS" ]] && [[ "$RANDOMIZE_KEEPER_FEATURE_FLAGS" -eq 1 ]]; then
@@ -146,17 +146,17 @@ EOL
 
 }
 
-function stop()
+function stop_server()
 {
-    local max_tries="${1:-90}"
-    local check_hang="${2:-true}"
+    local max_tries=90
+    local check_hang=true
     local pid
     # Preserve the pid, since the server can hung after the PID will be deleted.
     pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
 
     clickhouse stop --max-tries "$max_tries" --do-not-kill && return
 
-    if [ $check_hang == true ]
+    if [ "$check_hang" == true ]
     then
         # We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
         # Add a special status just in case, so it will be possible to find in the CI DB
@@ -165,7 +165,7 @@ function stop()
         sleep 5
 
         # The server could finally stop while we were terminating gdb, let's recheck if it's still running
-        kill -s 0 $pid || return
+        kill -s 0 "$pid" || return
         echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv
         echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
         timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
@@ -176,12 +176,13 @@ function stop()
     fi
 }
 
-function start()
+function start_server()
 {
     counter=0
+    max_attempt=120
     until clickhouse-client --query "SELECT 1"
     do
-        if [ "$counter" -gt ${1:-120} ]
+        if [ "$counter" -gt "$max_attempt" ]
         then
             echo "Cannot start clickhouse-server"
             rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
@@ -286,9 +287,9 @@ function collect_query_and_trace_logs()
 
 function collect_core_dumps()
 {
-    find . -type f -maxdepth 1 -name 'core.*' | while read core; do
-        zstd --threads=0 $core
-        mv $core.zst /test_output/
+    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
+        zstd --threads=0 "$core"
+        mv "$core.zst" /test_output/
     done
 }
 
@@ -16,7 +16,9 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
 
 # Stress tests and upgrade check uses similar code that was placed
 # in a separate bash library. See tests/ci/stress_tests.lib
+# shellcheck source=../stateless/attach_gdb.lib
 source /attach_gdb.lib
+# shellcheck source=../stateless/stress_tests.lib
 source /stress_tests.lib
 
 install_packages package_folder
@@ -55,7 +57,7 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
 
 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
 
-start
+start_server
 
 setup_logs_replication
 
@@ -65,7 +67,7 @@ clickhouse-client --query "SHOW TABLES FROM datasets"
 
 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
 
-stop
+stop_server
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
 
 # Randomize cache policies.
@@ -85,7 +87,7 @@ if [ "$cache_policy" = "SLRU" ]; then
     mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
 fi
 
-start
+start_server
 
 clickhouse-client --query "SHOW TABLES FROM datasets"
 clickhouse-client --query "SHOW TABLES FROM test"
@@ -188,7 +190,7 @@ clickhouse-client --query "SHOW TABLES FROM test"
 
 clickhouse-client --query "SYSTEM STOP THREAD FUZZER"
 
-stop
+stop_server
 
 # Let's enable S3 storage by default
 export USE_S3_STORAGE_FOR_MERGE_TREE=1
@@ -222,7 +224,7 @@ if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
     > /etc/clickhouse-server/config.d/enable_async_load_databases.xml
 fi
 
-start
+start_server
 
 stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
     && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
@@ -232,18 +234,18 @@ stress --hung-check --drop-databases --output-folder test_output --skip-func-tes
 rg -Fa "No queries hung" /test_output/test_results.tsv | grep -Fa "OK" \
     || echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log)" >> /test_output/test_results.tsv
 
-stop
+stop_server
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
 
 # NOTE Disable thread fuzzer before server start with data after stress test.
 # In debug build it can take a lot of time.
 unset "${!THREAD_@}"
 
-start
+start_server
 
 check_server_start
 
-stop
+stop_server
 
 [ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
 [ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"
@@ -272,7 +274,7 @@ clickhouse-local --structure "test String, res String, time Nullable(Float32), d
     (test like '%Signal 9%') DESC,
     (test like '%Fatal message%') DESC,
     rowNumberInAllBlocks()
-    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
+    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo -e "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
 
 # But OOMs in stress test are allowed
@@ -16,7 +16,6 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
     moreutils \
     python3-fuzzywuzzy \
     python3-pip \
-    shellcheck \
    yamllint \
     locales \
     && pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
@@ -30,6 +29,19 @@ ENV LC_ALL en_US.UTF-8
 # Architecture of the image when BuildKit/buildx is used
 ARG TARGETARCH
 
+ARG SHELLCHECK_VERSION=0.9.0
+RUN arch=${TARGETARCH:-amd64} \
+  && case $arch in \
+    amd64) sarch=x86_64 ;; \
+    arm64) sarch=aarch64 ;; \
+  esac \
+  && curl -L \
+    "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.${sarch}.tar.xz" \
+    | tar xJ --strip=1 -C /tmp \
+  && mv /tmp/shellcheck /usr/bin \
+  && rm -rf /tmp/*
+
+
 # Get act and actionlint from releases
 RUN arch=${TARGETARCH:-amd64} \
   && case $arch in \
@@ -16,7 +16,9 @@ ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_pre
 
 # Stress tests and upgrade check uses similar code that was placed
 # in a separate bash library. See tests/ci/stress_tests.lib
+# shellcheck source=../stateless/attach_gdb.lib
 source /attach_gdb.lib
+# shellcheck source=../stateless/stress_tests.lib
 source /stress_tests.lib
 
 azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
@@ -335,7 +337,7 @@ clickhouse-local --structure "test String, res String, time Nullable(Float32), d
     (test like '%Changed settings%') DESC,
     (test like '%New settings%') DESC,
     rowNumberInAllBlocks()
-    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
+    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo -e "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
 
 # But OOMs in stress test are allowed
@@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS json_as_string";
 
 $CLICKHOUSE_CLIENT --query="CREATE TABLE json_as_string (field String) ENGINE = Memory";
 
-echo '
+cat << 'EOF' | $CLICKHOUSE_CLIENT --query="INSERT INTO json_as_string FORMAT JSONAsString";
 {
 "id" : 1,
 "date" : "01.01.2020",
@@ -42,9 +42,10 @@ echo '
 "{" : 1,
 "}}" : 2
 }
-}' | $CLICKHOUSE_CLIENT --query="INSERT INTO json_as_string FORMAT JSONAsString";
+}
+EOF
 
-echo '
+cat << 'EOF' | $CLICKHOUSE_CLIENT --query="INSERT INTO json_as_string FORMAT JSONAsString";
 [
 {
 "id" : 1,
@@ -79,7 +80,8 @@ echo '
 "}}" : 2
 }
 }
-]' | $CLICKHOUSE_CLIENT --query="INSERT INTO json_as_string FORMAT JSONAsString";
+]
+EOF
 
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM json_as_string ORDER BY field";
@@ -7,12 +7,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS line_as_string1";
 $CLICKHOUSE_CLIENT --query="CREATE TABLE line_as_string1(field String) ENGINE = Memory";
 
-echo '"id" : 1,
+cat <<'EOF' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string1 FORMAT LineAsString";
+"id" : 1,
 "date" : "01.01.2020",
 "string" : "123{{{\"\\",
 "array" : [1, 2, 3],
 
-Finally implement this new feature.' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string1 FORMAT LineAsString";
+Finally implement this new feature.
+EOF
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM line_as_string1";
 $CLICKHOUSE_CLIENT --query="DROP TABLE line_as_string1"
@@ -26,7 +28,9 @@ $CLICKHOUSE_CLIENT --query="create table line_as_string2(
 
 $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) values ('ClickHouse')";
 
-echo 'ClickHouse is a `fast` #open-source# (OLAP) database "management" :system:' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) FORMAT LineAsString";
+# Shellcheck thinks `fast` is a shell expansion
+# shellcheck disable=SC2016
+echo -e 'ClickHouse is a `fast` #open-source# (OLAP) database "management" :system:' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) FORMAT LineAsString";
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM line_as_string2 order by c";
 $CLICKHOUSE_CLIENT --query="DROP TABLE line_as_string2"
@@ -45,11 +45,13 @@ query_id=$$-$RANDOM-$SECONDS
 ${CLICKHOUSE_CLIENT} --user=test_01541 --max_block_size=1 --format Null --query_id $query_id -q 'SELECT sleepEachRow(1) FROM numbers(600)' &
 # trap
 sleep_query_pid=$!
+# Shellcheck wrongly process "trap" https://www.shellcheck.net/wiki/SC2317
+# shellcheck disable=SC2317
 function cleanup()
 {
     echo 'KILL sleep'
     # if the timeout will not be enough, it will trigger "No such process" error/message
-    kill $sleep_query_pid
+    kill "$sleep_query_pid"
     # waiting for a query to finish
     while ${CLICKHOUSE_CLIENT} -q "SELECT query_id FROM system.processes WHERE query_id = '$query_id'" | grep -xq "$query_id"; do
         sleep 0.1
@@ -47,7 +47,7 @@ function main()
 {
     # retries, since there is no guarantee that every time query will take ~0.4 second.
     local retries=20 i=0
-    while [ "$(test_query_duration_ms | xargs)" != '1 1' ] && [[ $i < $retries ]]; do
+    while [ "$(test_query_duration_ms | xargs)" != '1 1' ] && (( i < retries )); do
         ((++i))
     done
 }
@@ -18,10 +18,12 @@ CLICKHOUSE_WATCHDOG_ENABLE=0 $CLICKHOUSE_SERVER_BINARY "${server_opts[@]}" >& cl
 server_pid=$!
 
 trap cleanup EXIT
+# Shellcheck wrongly process "trap" https://www.shellcheck.net/wiki/SC2317
+# shellcheck disable=SC2317
 function cleanup()
 {
-    kill -9 $server_pid
-    kill -9 $client_pid
+    kill -9 "$server_pid"
+    kill -9 "$client_pid"
 
     echo "Test failed. Server log:"
     cat clickhouse-server.log
@@ -2,13 +2,13 @@
 ROOT_PATH=$(git rev-parse --show-toplevel)
 NPROC=$(($(nproc) + 3))
 # Check sh tests with Shellcheck
-( cd "$ROOT_PATH/tests/queries/0_stateless/" && \
-    find "$ROOT_PATH/tests/queries/"{0_stateless,1_stateful} -name '*.sh' -print0 | \
-        xargs -0 -P "$NPROC" -n 20 shellcheck --check-sourced --external-sources --severity info --exclude SC1071,SC2086,SC2016
-)
+find "$ROOT_PATH/tests/queries/"{0_stateless,1_stateful} -name '*.sh' -print0 | \
+    xargs -0 -P "$NPROC" -n 20 shellcheck --check-sourced --external-sources --source-path=SCRIPTDIR \
+        --severity info --exclude SC1071,SC2086,SC2016
 
 # Check docker scripts with shellcheck
-find "$ROOT_PATH/docker" -executable -type f -exec file -F' ' --mime-type {} \; | \
-    awk -F' ' '$2==" text/x-shellscript" {print $1}' | \
+# Do not check sourced files, since it causes broken --source-path=SCRIPTDIR
+find "$ROOT_PATH/docker" -type f -exec file -F' ' --mime-type {} + | \
+    awk '$2=="text/x-shellscript" {print $1}' | \
     grep -v "compare.sh" | \
-    xargs -P "$NPROC" -n 20 shellcheck
+    xargs -P "$NPROC" -n 20 shellcheck --external-sources --source-path=SCRIPTDIR
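For context on the last hunk: shellcheck's --source-path=SCRIPTDIR resolves "# shellcheck source=..." directives relative to the directory of the script being checked, so sourced helpers such as the *.lib files above can be followed via --external-sources without first cd-ing into the test directory. A hypothetical standalone invocation against one test script (the file name is illustrative, not from this commit):

    shellcheck --check-sourced --external-sources --source-path=SCRIPTDIR \
        --severity info tests/queries/0_stateless/00000_example_test.sh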