Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 08:02:02 +00:00)

Merge pull request #44879 from ClickHouse/split-bc-check
Split stress test and backward compatibility check

Commit a87ebae3c1

.github/workflows/pull_request.yml (vendored, 140 changed lines)
@@ -3105,10 +3105,10 @@ jobs:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stress_thread
          TEMP_PATH=${{runner.temp}}/stress_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stress test (asan)
          REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
          REPO_COPY=${{runner.temp}}/stress_asan/ClickHouse
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
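Not part of the diff, just context on the mechanism used above: appending KEY=VALUE pairs to the file named by $GITHUB_ENV is how one workflow step publishes variables to all later steps of the same job, which is why the stress_asan rename only has to appear in this one heredoc. A minimal sketch (paths illustrative, only meaningful inside a workflow step):

cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=/tmp/stress_asan
REPO_COPY=/tmp/stress_asan/ClickHouse
EOF
# any later step of the same job then sees ordinary environment variables:
mkdir -p "$TEMP_PATH" && cd "$REPO_COPY/tests/ci"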
@@ -3267,6 +3267,142 @@ jobs:
            docker ps --quiet | xargs --no-run-if-empty docker kill ||:
            docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
            sudo rm -fr "$TEMP_PATH"
  ##############################################################################################
  ######################################### UPGRADE CHECK ######################################
  ##############################################################################################
  UpgradeCheckAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/upgrade_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Upgrade check (asan)
          REPO_COPY=${{runner.temp}}/upgrade_asan/ClickHouse
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Upgrade check
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 upgrade_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
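Commentary, not part of the change: the three jobs that follow differ from UpgradeCheckAsan only in the build they depend on, the runner label, and the asan/tsan/msan/debug strings, so a rough local approximation of any one of them, with placeholder paths standing in for the runner temp directory and $GITHUB_WORKSPACE, would be:

export TEMP_PATH=/tmp/upgrade_asan
export REPORTS_PATH=/tmp/reports_dir           # where the json build reports were downloaded
export CHECK_NAME="Upgrade check (asan)"
export REPO_COPY="$TEMP_PATH/ClickHouse"
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
cp -r /path/to/ClickHouse "$TEMP_PATH"         # placeholder for $GITHUB_WORKSPACE
cd "$REPO_COPY/tests/ci"
python3 upgrade_check.py "$CHECK_NAME"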
UpgradeCheckTsan:
|
||||
needs: [BuilderDebTsan]
|
||||
# same as for stress test with tsan
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/upgrade_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Upgrade check (tsan)
|
||||
REPO_COPY=${{runner.temp}}/upgrade_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Upgrade check
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 upgrade_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UpgradeCheckMsan:
|
||||
needs: [BuilderDebMsan]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/upgrade_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Upgrade check (msan)
|
||||
REPO_COPY=${{runner.temp}}/upgrade_memory/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Upgrade check
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 upgrade_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
UpgradeCheckDebug:
|
||||
needs: [BuilderDebDebug]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/upgrade_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Upgrade check (debug)
|
||||
REPO_COPY=${{runner.temp}}/upgrade_debug/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Upgrade check
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 upgrade_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
##############################################################################################
|
||||
##################################### AST FUZZERS ############################################
|
||||
##############################################################################################
|
||||
|
@@ -54,6 +54,10 @@
        "name": "clickhouse/stress-test",
        "dependent": []
    },
    "docker/test/upgrade": {
        "name": "clickhouse/upgrade-check",
        "dependent": []
    },
    "docker/test/codebrowser": {
        "name": "clickhouse/codebrowser",
        "dependent": []

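As an aside (not part of the diff): this manifest entry ties the docker/test/upgrade directory to the clickhouse/upgrade-check image name, and, per the comment in the Dockerfile added further down, the image can be built locally with:

docker build -t clickhouse/upgrade-check docker/test/upgrade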
@@ -21,10 +21,9 @@ RUN apt-get update -y \
            openssl \
            netcat-openbsd \
            telnet \
            llvm-9 \
            brotli
            brotli \
    && apt-get clean

COPY ./stress /stress
COPY run.sh /

ENV DATASETS="hits visits"

@ -8,229 +8,13 @@ dmesg --clear
|
||||
|
||||
set -x
|
||||
|
||||
# core.COMM.PID-TID
|
||||
sysctl kernel.core_pattern='core.%e.%p-%P'
|
||||
# we mount tests folder from repo to /usr/share
|
||||
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
|
||||
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
|
||||
OK="\tOK\t\\N\t"
|
||||
FAIL="\tFAIL\t\\N\t"
|
||||
|
||||
FAILURE_CONTEXT_LINES=50
|
||||
FAILURE_CONTEXT_MAX_LINE_WIDTH=400
|
||||
|
||||
function escaped()
|
||||
{
|
||||
# That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language.
|
||||
# Also limit lines width just in case (too long lines are not really useful usually)
|
||||
clickhouse local -S 's String' --input-format=LineAsString -q "select substr(s, 1, $FAILURE_CONTEXT_MAX_LINE_WIDTH)
|
||||
from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'"
|
||||
}
|
||||
function head_escaped()
|
||||
{
|
||||
head -n $FAILURE_CONTEXT_LINES $1 | escaped
|
||||
}
|
||||
function unts()
|
||||
{
|
||||
grep -Po "[0-9][0-9]:[0-9][0-9] \K.*"
|
||||
}
|
||||
function trim_server_logs()
|
||||
{
|
||||
head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
|
||||
}
|
||||
|
||||
function install_packages()
|
||||
{
|
||||
dpkg -i $1/clickhouse-common-static_*.deb
|
||||
dpkg -i $1/clickhouse-common-static-dbg_*.deb
|
||||
dpkg -i $1/clickhouse-server_*.deb
|
||||
dpkg -i $1/clickhouse-client_*.deb
|
||||
}
|
||||
|
||||
function configure()
|
||||
{
|
||||
# install test configs
|
||||
export USE_DATABASE_ORDINARY=1
|
||||
export EXPORT_S3_STORAGE_POLICIES=1
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# we mount tests folder from repo to /usr/share
|
||||
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
|
||||
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
|
||||
|
||||
# avoid too slow startup
|
||||
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
|
||||
| sed "s|<snapshot_distance>100000</snapshot_distance>|<snapshot_distance>10000</snapshot_distance>|" \
|
||||
> /etc/clickhouse-server/config.d/keeper_port.xml.tmp
|
||||
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
|
||||
# for clickhouse-server (via service)
|
||||
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
|
||||
# for clickhouse-client
|
||||
export ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'
|
||||
|
||||
# since we run clickhouse from root
|
||||
sudo chown root: /var/lib/clickhouse
|
||||
|
||||
# Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of OOM).
|
||||
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
|
||||
> /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
|
||||
|
||||
local total_mem
|
||||
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
|
||||
total_mem=$(( total_mem*1024 )) # bytes
|
||||
|
||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||
#
|
||||
# But not via max_server_memory_usage but via max_memory_usage_for_user,
|
||||
# so that we can override this setting and execute service queries, like:
|
||||
# - hung check
|
||||
# - show/drop database
|
||||
# - ...
|
||||
#
|
||||
# So max_memory_usage_for_user will be a soft limit, and
|
||||
# max_server_memory_usage will be hard limit, and queries that should be
|
||||
# executed regardless memory limits will use max_memory_usage_for_user=0,
|
||||
# instead of relying on max_untracked_memory
|
||||
|
||||
max_server_memory_usage_to_ram_ratio=0.5
|
||||
echo "Setting max_server_memory_usage_to_ram_ratio to ${max_server_memory_usage_to_ram_ratio}"
|
||||
cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
|
||||
<clickhouse>
|
||||
<max_server_memory_usage_to_ram_ratio>${max_server_memory_usage_to_ram_ratio}</max_server_memory_usage_to_ram_ratio>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
local max_users_mem
|
||||
max_users_mem=$((total_mem*30/100)) # 30%
|
||||
echo "Setting max_memory_usage_for_user=$max_users_mem and max_memory_usage for queries to 10G"
|
||||
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
|
||||
<clickhouse>
|
||||
<profiles>
|
||||
<default>
|
||||
<max_memory_usage>10G</max_memory_usage>
|
||||
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
|
||||
</default>
|
||||
</profiles>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
cat > /etc/clickhouse-server/config.d/core.xml <<EOL
|
||||
<clickhouse>
|
||||
<core_dump>
|
||||
<!-- 100GiB -->
|
||||
<size_limit>107374182400</size_limit>
|
||||
</core_dump>
|
||||
<!-- NOTE: no need to configure core_path,
|
||||
since clickhouse is not started as daemon (via clickhouse start)
|
||||
-->
|
||||
<core_path>$PWD</core_path>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
# Let OOM killer terminate other processes before clickhouse-server:
|
||||
cat > /etc/clickhouse-server/config.d/oom_score.xml <<EOL
|
||||
<clickhouse>
|
||||
<oom_score>-1000</oom_score>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
# Analyzer is not yet ready for testing
|
||||
cat > /etc/clickhouse-server/users.d/no_analyzer.xml <<EOL
|
||||
<clickhouse>
|
||||
<profiles>
|
||||
<default>
|
||||
<constraints>
|
||||
<allow_experimental_analyzer>
|
||||
<readonly/>
|
||||
</allow_experimental_analyzer>
|
||||
</constraints>
|
||||
</default>
|
||||
</profiles>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
}
|
||||
|
||||
function stop()
|
||||
{
|
||||
local max_tries="${1:-90}"
|
||||
local pid
|
||||
# Preserve the pid, since the server can hung after the PID will be deleted.
|
||||
pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
|
||||
|
||||
clickhouse stop --max-tries "$max_tries" --do-not-kill && return
|
||||
|
||||
# We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
|
||||
echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv
|
||||
kill -TERM "$(pidof gdb)" ||:
|
||||
sleep 5
|
||||
echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
|
||||
timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
|
||||
clickhouse stop --force
|
||||
}
|
||||
|
||||
function start()
|
||||
{
|
||||
counter=0
|
||||
until clickhouse-client --query "SELECT 1"
|
||||
do
|
||||
if [ "$counter" -gt ${1:-120} ]
|
||||
then
|
||||
echo "Cannot start clickhouse-server"
|
||||
rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
|
||||
echo -e "Cannot start clickhouse-server$FAIL$(trim_server_logs application_errors.txt)" >> /test_output/test_results.tsv
|
||||
cat /var/log/clickhouse-server/stdout.log
|
||||
tail -n100 /var/log/clickhouse-server/stderr.log
|
||||
tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n100
|
||||
break
|
||||
fi
|
||||
# use root to match with current uid
|
||||
clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>>/var/log/clickhouse-server/stderr.log
|
||||
sleep 0.5
|
||||
counter=$((counter + 1))
|
||||
done
|
||||
|
||||
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
|
||||
# and clickhouse-server can do fork-exec, for example, to run some bridge.
|
||||
# Do not set nostop noprint for all signals, because some it may cause gdb to hang,
|
||||
# explicitly ignore non-fatal signals that are used by server.
|
||||
# Number of SIGRTMIN can be determined only in runtime.
|
||||
RTMIN=$(kill -l SIGRTMIN)
|
||||
echo "
|
||||
set follow-fork-mode parent
|
||||
handle SIGHUP nostop noprint pass
|
||||
handle SIGINT nostop noprint pass
|
||||
handle SIGQUIT nostop noprint pass
|
||||
handle SIGPIPE nostop noprint pass
|
||||
handle SIGTERM nostop noprint pass
|
||||
handle SIGUSR1 nostop noprint pass
|
||||
handle SIGUSR2 nostop noprint pass
|
||||
handle SIG$RTMIN nostop noprint pass
|
||||
info signals
|
||||
continue
|
||||
backtrace full
|
||||
thread apply all backtrace full
|
||||
info registers
|
||||
disassemble /s
|
||||
up
|
||||
disassemble /s
|
||||
up
|
||||
disassemble /s
|
||||
p \"done\"
|
||||
detach
|
||||
quit
|
||||
" > script.gdb
|
||||
|
||||
# FIXME Hung check may work incorrectly because of attached gdb
|
||||
# 1. False positives are possible
|
||||
# 2. We cannot attach another gdb to get stacktraces if some queries hung
|
||||
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
|
||||
sleep 5
|
||||
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
|
||||
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
||||
}
|
||||
# Stress tests and upgrade check uses similar code that was placed
|
||||
# in a separate bash library. See tests/ci/stress_tests.lib
|
||||
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
||||
|
||||
install_packages package_folder
|
||||
|
||||
@ -396,7 +180,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
|
||||
|
||||
start
|
||||
|
||||
./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
|
||||
stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
|
||||
&& echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv
|
||||
|
||||
@ -413,316 +197,27 @@ unset "${!THREAD_@}"
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|
||||
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
|
||||
&& echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(trim_server_logs application_errors.txt)" \
|
||||
>> /test_output/test_results.tsv)
|
||||
check_server_start
|
||||
|
||||
stop
|
||||
|
||||
[ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
|
||||
[ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"
|
||||
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.final.log
|
||||
|
||||
# Grep logs for sanitizer asserts, crashes and other critical errors
|
||||
check_logs_for_critical_errors
|
||||
|
||||
# Sanitizer asserts
|
||||
rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
|
||||
rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
|
||||
rm -f /test_output/tmp
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
# OOM
|
||||
rg -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
|
||||
&& echo -e "Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Logical errors
|
||||
rg -Fa "Code: 49. DB::Exception: " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
|
||||
&& echo -e "Logical error thrown (see clickhouse-server.log or logical_errors.txt)$FAIL$(head_escaped /test_output/logical_errors.txt)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No logical errors$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file logical_errors.txt if it's empty
|
||||
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt
|
||||
|
||||
# No such key errors
|
||||
rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
|
||||
&& echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file no_such_key_errors.txt if it's empty
|
||||
[ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt
|
||||
|
||||
# Crash
|
||||
rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
|
||||
&& echo -e "Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Not crashed$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# It also checks for crash without stacktrace (printed by watchdog)
|
||||
rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
|
||||
&& echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(trim_server_logs fatal_messages.txt)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file fatal_messages.txt if it's empty
|
||||
[ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt
|
||||
|
||||
rg -Fa "########################################" /test_output/* > /dev/null \
|
||||
&& echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv
|
||||
|
||||
function get_gdb_log_context()
|
||||
{
|
||||
rg -A50 -Fa " received signal " /test_output/gdb.log | head_escaped
|
||||
}
|
||||
|
||||
rg -Fa " received signal " /test_output/gdb.log > /dev/null \
|
||||
&& echo -e "Found signal in gdb.log$FAIL$(get_gdb_log_context)" >> /test_output/test_results.tsv
|
||||
|
||||
if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
echo -e "Backward compatibility check\n"
|
||||
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(clickhouse-client --version | rg -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
|
||||
echo "Download clickhouse-server from the previous release"
|
||||
mkdir previous_release_package_folder
|
||||
|
||||
echo $previous_release_tag | download_release_packages && echo -e "Download script exit code$OK" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Download script failed$FAIL" >> /test_output/test_results.tsv
|
||||
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
|
||||
done
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
# Check if we cloned previous release repository successfully
|
||||
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to clone previous release tests$FAIL" >> /test_output/test_results.tsv
|
||||
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to download previous release packages$FAIL" >> /test_output/test_results.tsv
|
||||
else
|
||||
echo -e "Successfully cloned previous release tests$OK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packages$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Uninstall current packages
|
||||
dpkg --remove clickhouse-client
|
||||
dpkg --remove clickhouse-server
|
||||
dpkg --remove clickhouse-common-static-dbg
|
||||
dpkg --remove clickhouse-common-static
|
||||
|
||||
rm -rf /var/lib/clickhouse/*
|
||||
|
||||
# Make BC check more funny by forcing Ordinary engine for system database
|
||||
mkdir /var/lib/clickhouse/metadata
|
||||
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql
|
||||
|
||||
# Install previous release packages
|
||||
install_packages previous_release_package_folder
|
||||
|
||||
# Start server from previous release
|
||||
# Previous version may not be ready for fault injections
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
|
||||
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
||||
rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||:
|
||||
rm -f /etc/clickhouse-server/users.d/marks.xml ||:
|
||||
|
||||
# Remove s3 related configs to avoid "there is no disk type `cache`"
|
||||
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
|
||||
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
|
||||
|
||||
# Turn on after 22.12
|
||||
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
|
||||
# it uses recently introduced settings which previous versions may not have
|
||||
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
|
||||
|
||||
# Turn on after 23.1
|
||||
rm -f /etc/clickhouse-server/users.d/prefetch_settings.xml ||:
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Install new package before running stress test because we should use new
|
||||
# clickhouse-client and new clickhouse-test.
|
||||
#
|
||||
# But we should leave old binary in /usr/bin/ and debug symbols in
|
||||
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
|
||||
# will print sane stacktraces and also to avoid possible crashes.
|
||||
#
|
||||
# FIXME: those files can be extracted directly from debian package, but
|
||||
# actually better solution will be to use different PATH instead of playing
|
||||
# games with files from packages.
|
||||
mv /usr/bin/clickhouse previous_release_package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
|
||||
install_packages package_folder
|
||||
mv /usr/bin/clickhouse package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
|
||||
mv previous_release_package_folder/clickhouse /usr/bin/
|
||||
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
|
||||
mkdir tmp_stress_output
|
||||
|
||||
./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" \
|
||||
--backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
|
||||
&& echo -e "Backward compatibility check: Test script exit code$OK" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: Test script failed$FAIL" >> /test_output/test_results.tsv
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
# We experienced deadlocks in this command in very rare cases. Let's debug it:
|
||||
timeout 10m clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" ||
|
||||
(
|
||||
echo "thread apply all backtrace (on select tables count)" >> /test_output/gdb.log
|
||||
timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
|
||||
clickhouse stop --force
|
||||
)
|
||||
|
||||
# Use bigger timeout for previous version
|
||||
stop 300
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log
|
||||
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
# Disable fault injections on start (we don't test them here, and it can lead to tons of requests in case of huge number of tables).
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|
||||
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt \
|
||||
&& echo -e "Backward compatibility check: Server failed to start$FAIL$(trim_server_logs bc_check_application_errors.txt)" >> /test_output/test_results.tsv)
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Let the server run for a while before checking log.
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.dirty.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
|
||||
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
|
||||
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
|
||||
echo "Check for Error messages in server log:"
|
||||
rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||
-e "REPLICA_ALREADY_EXISTS" \
|
||||
-e "ALL_REPLICAS_LOST" \
|
||||
-e "DDLWorker: Cannot parse DDL task query" \
|
||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||
-e "UNKNOWN_DATABASE" \
|
||||
-e "NETWORK_ERROR" \
|
||||
-e "UNKNOWN_TABLE" \
|
||||
-e "ZooKeeperClient" \
|
||||
-e "KEEPER_EXCEPTION" \
|
||||
-e "DirectoryMonitor" \
|
||||
-e "TABLE_IS_READ_ONLY" \
|
||||
-e "Code: 1000, e.code() = 111, Connection refused" \
|
||||
-e "UNFINISHED" \
|
||||
-e "NETLINK_ERROR" \
|
||||
-e "Renaming unexpected part" \
|
||||
-e "PART_IS_TEMPORARILY_LOCKED" \
|
||||
-e "and a merge is impossible: we didn't find" \
|
||||
-e "found in queue and some source parts for it was lost" \
|
||||
-e "is lost forever." \
|
||||
-e "Unknown index: idx." \
|
||||
-e "Cannot parse string 'Hello' as UInt64" \
|
||||
-e "} <Error> TCPHandler: Code:" \
|
||||
-e "} <Error> executeQuery: Code:" \
|
||||
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
|
||||
-e "[Queue = DB::DynamicRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
-e "TOO_MANY_PARTS" \
|
||||
-e "Container already exists" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.dirty.log | rg -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e "Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)$FAIL$(trim_server_logs bc_check_error_messages.txt)" \
|
||||
>> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: No Error messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_error_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt
|
||||
|
||||
# Sanitizer asserts
|
||||
rg -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e "Backward compatibility check: Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: No sanitizer asserts$OK" >> /test_output/test_results.tsv
|
||||
rm -f /test_output/tmp
|
||||
|
||||
# OOM
|
||||
rg -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e "Backward compatibility check: Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Logical errors
|
||||
echo "Check for Logical errors in server log:"
|
||||
rg -Fa -A20 "Code: 49. DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
|
||||
&& echo -e "Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)$FAIL$(trim_server_logs bc_check_logical_errors.txt)" \
|
||||
>> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: No logical errors$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_logical_errors.txt if it's empty
|
||||
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt
|
||||
|
||||
# Crash
|
||||
rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e "Backward compatibility check: Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: Not crashed$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# It also checks for crash without stacktrace (printed by watchdog)
|
||||
echo "Check for Fatal message in server log:"
|
||||
rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
|
||||
&& echo -e "Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)$FAIL$(trim_server_logs bc_check_fatal_messages.txt)" \
|
||||
>> /test_output/test_results.tsv \
|
||||
|| echo -e "Backward compatibility check: No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_fatal_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
|
||||
|
||||
tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" \
|
||||
| zstd --threads=0 > /test_output/$table.backward.tsv.zst ||:
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
dmesg -T > /test_output/dmesg.log
|
||||
|
||||
# OOM in dmesg -- those are real
|
||||
grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \
|
||||
&& echo -e "OOM in dmesg$FAIL$(head_escaped /test_output/dmesg.log)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No OOM in dmesg$OK" >> /test_output/test_results.tsv
|
||||
collect_query_and_trace_logs
|
||||
|
||||
mv /var/log/clickhouse-server/stderr.log /test_output/
|
||||
|
||||
# Write check result into check_status.tsv
|
||||
# Try to choose most specific error for the whole check status
|
||||
clickhouse-local --structure "test String, res String, time Nullable(Float32), desc String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by
|
||||
(test like 'Backward compatibility check%'), -- BC check goes last
|
||||
(test like '%Sanitizer%') DESC,
|
||||
(test like '%Killed by signal%') DESC,
|
||||
(test like '%gdb.log%') DESC,
|
||||
@ -732,14 +227,8 @@ clickhouse-local --structure "test String, res String, time Nullable(Float32), d
|
||||
(test like '%OOM%') DESC,
|
||||
(test like '%Signal 9%') DESC,
|
||||
(test like '%Fatal message%') DESC,
|
||||
(test like '%Error message%') DESC,
|
||||
(test like '%previous release%') DESC,
|
||||
rowNumberInAllBlocks()
|
||||
LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
|
||||
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
|
||||
|
||||
# Core dumps
|
||||
find . -type f -maxdepth 1 -name 'core.*' | while read core; do
|
||||
zstd --threads=0 $core
|
||||
mv $core.zst /test_output/
|
||||
done
|
||||
collect_core_dumps
|
||||
|
docker/test/upgrade/Dockerfile (new file, 31 lines)
@@ -0,0 +1,31 @@
# rebuild in #33610
# docker build -t clickhouse/upgrade-check .
ARG FROM_TAG=latest
FROM clickhouse/stateful-test:$FROM_TAG

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --yes --no-install-recommends \
            bash \
            tzdata \
            fakeroot \
            debhelper \
            parallel \
            expect \
            python3 \
            python3-lxml \
            python3-termcolor \
            python3-requests \
            curl \
            sudo \
            openssl \
            netcat-openbsd \
            telnet \
            brotli \
    && apt-get clean

COPY run.sh /

ENV EXPORT_S3_STORAGE_POLICIES=1

CMD ["/bin/bash", "/run.sh"]
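Illustration only, not part of the new file: stress_check.py (further down in this diff) assembles the docker run command for these images, so a hand-run of the upgrade image would look roughly like the following; the host paths and the /test_output mount are assumptions based on where the run script writes its results:

docker run --cap-add=SYS_PTRACE --privileged \
    -e S3_URL='https://s3.amazonaws.com/clickhouse-datasets' \
    --volume="$PWD/packages":/package_folder \
    --volume="$PWD/test_output":/test_output \
    clickhouse/upgrade-check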
docker/test/upgrade/run.sh (new file, 200 lines)
@@ -0,0 +1,200 @@
#!/bin/bash
|
||||
# shellcheck disable=SC2094
|
||||
# shellcheck disable=SC2086
|
||||
# shellcheck disable=SC2024
|
||||
|
||||
# Avoid overlaps with previous runs
|
||||
dmesg --clear
|
||||
|
||||
set -x
|
||||
|
||||
# we mount tests folder from repo to /usr/share
|
||||
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
|
||||
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
|
||||
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
|
||||
|
||||
# Stress tests and upgrade check uses similar code that was placed
|
||||
# in a separate bash library. See tests/ci/stress_tests.lib
|
||||
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
||||
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
./setup_minio.sh stateless # to have a proper environment
|
||||
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(dpkg --info package_folder/clickhouse-client*.deb | grep "Version: " | awk '{print $2}' | cut -f1 -d'+' | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
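To make the pipeline above concrete (the version string below is invented): it takes the Version field of the installed client package, strips the build suffix after '+', and pipes the bare version into get_previous_release_tag, e.g.

echo "Version: 23.2.1.1+asan" | grep "Version: " | awk '{print $2}' | cut -f1 -d'+'
# prints: 23.2.1.1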
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
|
||||
echo "Download clickhouse-server from the previous release"
|
||||
mkdir previous_release_package_folder
|
||||
|
||||
echo $previous_release_tag | download_release_packages && echo -e "Download script exit code$OK" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Download script failed$FAIL" >> /test_output/test_results.tsv
|
||||
|
||||
# Check if we cloned previous release repository successfully
|
||||
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
|
||||
then
|
||||
echo -e 'failure\tFailed to clone previous release tests' > /test_output/check_status.tsv
|
||||
exit
|
||||
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
|
||||
then
|
||||
echo -e 'failure\tFailed to download previous release packages' > /test_output/check_status.tsv
|
||||
exit
|
||||
fi
|
||||
|
||||
echo -e "Successfully cloned previous release tests$OK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packages$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Make upgrade check more funny by forcing Ordinary engine for system database
|
||||
mkdir /var/lib/clickhouse/metadata
|
||||
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql
|
||||
|
||||
# Install previous release packages
|
||||
install_packages previous_release_package_folder
|
||||
|
||||
# Start server from previous release
|
||||
# Let's enable S3 storage by default
|
||||
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
||||
# Previous version may not be ready for fault injections
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
|
||||
# But we still need default disk because some tables loaded only into it
|
||||
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
|
||||
| sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
|
||||
> /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
mkdir tmp_stress_output
|
||||
|
||||
stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --upgrade-check --output-folder tmp_stress_output --global-time-limit=1200 \
|
||||
&& echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
|
||||
|| echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv
|
||||
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
# We experienced deadlocks in this command in very rare cases. Let's debug it:
|
||||
timeout 10m clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" ||
|
||||
(
|
||||
echo "thread apply all backtrace (on select tables count)" >> /test_output/gdb.log
|
||||
timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
|
||||
clickhouse stop --force
|
||||
)
|
||||
|
||||
# Use bigger timeout for previous version and disable additional hang check
|
||||
stop 300 false
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
|
||||
|
||||
# Install and start new server
|
||||
install_packages package_folder
|
||||
# Disable fault injections on start (we don't test them here, and it can lead to tons of requests in case of huge number of tables).
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|
||||
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
|
||||
&& echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(trim_server_logs application_errors.txt)" \
|
||||
>> /test_output/test_results.tsv)
|
||||
|
||||
# Remove file application_errors.txt if it's empty
|
||||
[ -s /test_output/application_errors.txt ] || rm /test_output/application_errors.txt
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Let the server run for a while before checking log.
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.upgrade.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
|
||||
# FIXME Not sure if it's expected, but some tests from stress test may not be finished yet when we restarting server.
|
||||
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
|
||||
echo "Check for Error messages in server log:"
|
||||
rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||
-e "REPLICA_ALREADY_EXISTS" \
|
||||
-e "ALL_REPLICAS_LOST" \
|
||||
-e "DDLWorker: Cannot parse DDL task query" \
|
||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||
-e "UNKNOWN_DATABASE" \
|
||||
-e "NETWORK_ERROR" \
|
||||
-e "UNKNOWN_TABLE" \
|
||||
-e "ZooKeeperClient" \
|
||||
-e "KEEPER_EXCEPTION" \
|
||||
-e "DirectoryMonitor" \
|
||||
-e "TABLE_IS_READ_ONLY" \
|
||||
-e "Code: 1000, e.code() = 111, Connection refused" \
|
||||
-e "UNFINISHED" \
|
||||
-e "NETLINK_ERROR" \
|
||||
-e "Renaming unexpected part" \
|
||||
-e "PART_IS_TEMPORARILY_LOCKED" \
|
||||
-e "and a merge is impossible: we didn't find" \
|
||||
-e "found in queue and some source parts for it was lost" \
|
||||
-e "is lost forever." \
|
||||
-e "Unknown index: idx." \
|
||||
-e "Cannot parse string 'Hello' as UInt64" \
|
||||
-e "} <Error> TCPHandler: Code:" \
|
||||
-e "} <Error> executeQuery: Code:" \
|
||||
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
-e "TOO_MANY_PARTS" \
|
||||
-e "Authentication failed" \
|
||||
-e "Container already exists" \
|
||||
/var/log/clickhouse-server/clickhouse-server.upgrade.log | zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \
|
||||
&& echo -e "Error message in clickhouse-server.log (see upgrade_error_messages.txt)$FAIL$(head_escaped /test_output/bc_check_error_messages.txt)" \
|
||||
>> /test_output/test_results.tsv \
|
||||
|| echo -e "No Error messages after server upgrade$OK" >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file upgrade_error_messages.txt if it's empty
|
||||
[ -s /test_output/upgrade_error_messages.txt ] || rm /test_output/upgrade_error_messages.txt
|
||||
|
||||
# Grep logs for sanitizer asserts, crashes and other critical errors
|
||||
check_logs_for_critical_errors
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
collect_query_and_trace_logs
|
||||
|
||||
check_oom_in_dmesg
|
||||
|
||||
mv /var/log/clickhouse-server/stderr.log /test_output/
|
||||
|
||||
# Write check result into check_status.tsv
|
||||
# Try to choose most specific error for the whole check status
|
||||
clickhouse-local --structure "test String, res String, time Nullable(Float32), desc String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by
|
||||
(test like '%Sanitizer%') DESC,
|
||||
(test like '%Killed by signal%') DESC,
|
||||
(test like '%gdb.log%') DESC,
|
||||
(test ilike '%possible deadlock%') DESC,
|
||||
(test like '%start%') DESC,
|
||||
(test like '%dmesg%') DESC,
|
||||
(test like '%OOM%') DESC,
|
||||
(test like '%Signal 9%') DESC,
|
||||
(test like '%Fatal message%') DESC,
|
||||
(test like '%Error message%') DESC,
|
||||
(test like '%previous release%') DESC,
|
||||
rowNumberInAllBlocks()
|
||||
LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
|
||||
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
|
||||
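A small self-contained illustration of the status-selection query above (the two rows are invented): given a test_results.tsv with one OK and one FAIL row, the ORDER BY ... DESC / LIMIT 1 pattern surfaces the highest-priority failure as the overall check status.

printf 'Server successfully started\tOK\t\\N\t\nSanitizer assert (in stderr.log)\tFAIL\t\\N\t\n' > /tmp/test_results.tsv
clickhouse-local --structure "test String, res String, time Nullable(Float32), desc String" \
    -q "SELECT 'failure', test FROM table WHERE res != 'OK' ORDER BY (test like '%Sanitizer%') DESC LIMIT 1" \
    < /tmp/test_results.tsv
# prints: failure   Sanitizer assert (in stderr.log)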
|
||||
collect_core_dumps
|
@@ -289,6 +289,18 @@ CI_CONFIG = {
    "Stress test (debug)": {
        "required_build": "package_debug",
    },
    "Upgrade check (asan)": {
        "required_build": "package_asan",
    },
    "Upgrade check (tsan)": {
        "required_build": "package_tsan",
    },
    "Upgrade check (msan)": {
        "required_build": "package_msan",
    },
    "Upgrade check (debug)": {
        "required_build": "package_debug",
    },
    "Integration tests (asan)": {
        "required_build": "package_asan",
    },
|
@ -8,13 +8,13 @@ import logging
|
||||
import time
|
||||
|
||||
|
||||
def get_options(i, backward_compatibility_check):
|
||||
def get_options(i, upgrade_check):
|
||||
options = []
|
||||
client_options = []
|
||||
if 0 < i:
|
||||
if i > 0:
|
||||
options.append("--order=random")
|
||||
|
||||
if i % 3 == 2 and not backward_compatibility_check:
|
||||
if i % 3 == 2 and not upgrade_check:
|
||||
options.append(
|
||||
'''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i)
|
||||
)
|
||||
@ -38,16 +38,16 @@ def get_options(i, backward_compatibility_check):
|
||||
client_options.append("join_algorithm='full_sorting_merge'")
|
||||
if join_alg_num % 4 == 3:
|
||||
client_options.append("join_algorithm='auto'")
|
||||
client_options.append('max_rows_in_join=1000')
|
||||
client_options.append("max_rows_in_join=1000")
|
||||
|
||||
if i % 5 == 1:
|
||||
client_options.append("memory_tracker_fault_probability=0.001")
|
||||
|
||||
if i % 2 == 1 and not backward_compatibility_check:
|
||||
if i % 2 == 1 and not upgrade_check:
|
||||
client_options.append("group_by_use_nulls=1")
|
||||
|
||||
# 12 % 3 == 0, so it's Atomic database
|
||||
if i == 12 and not backward_compatibility_check:
|
||||
if i == 12 and not upgrade_check:
|
||||
client_options.append("implicit_transaction=1")
|
||||
client_options.append("throw_on_unsupported_query_inside_transaction=0")
|
||||
|
||||
@ -63,11 +63,9 @@ def run_func_test(
|
||||
num_processes,
|
||||
skip_tests_option,
|
||||
global_time_limit,
|
||||
backward_compatibility_check,
|
||||
upgrade_check,
|
||||
):
|
||||
backward_compatibility_check_option = (
|
||||
"--backward-compatibility-check" if backward_compatibility_check else ""
|
||||
)
|
||||
upgrade_check_option = "--upgrade-check" if upgrade_check else ""
|
||||
global_time_limit_option = ""
|
||||
if global_time_limit:
|
||||
global_time_limit_option = "--global_time_limit={}".format(global_time_limit)
|
||||
@ -77,14 +75,14 @@ def run_func_test(
|
||||
for i in range(num_processes)
|
||||
]
|
||||
pipes = []
|
||||
for i in range(0, len(output_paths)):
|
||||
f = open(output_paths[i], "w")
|
||||
for i, path in enumerate(output_paths):
|
||||
f = open(path, "w")
|
||||
full_command = "{} {} {} {} {}".format(
|
||||
cmd,
|
||||
get_options(i, backward_compatibility_check),
|
||||
get_options(i, upgrade_check),
|
||||
global_time_limit_option,
|
||||
skip_tests_option,
|
||||
backward_compatibility_check_option,
|
||||
upgrade_check_option,
|
||||
)
|
||||
logging.info("Run func tests '%s'", full_command)
|
||||
p = Popen(full_command, shell=True, stdout=f, stderr=f)
|
||||
@ -176,7 +174,7 @@ def prepare_for_hung_check(drop_databases):
|
||||
for db in databases:
|
||||
if db == "system":
|
||||
continue
|
||||
command = make_query_command(f'DETACH DATABASE {db}')
|
||||
command = make_query_command(f"DETACH DATABASE {db}")
|
||||
# we don't wait for drop
|
||||
Popen(command, shell=True)
|
||||
break
|
||||
@ -235,7 +233,7 @@ if __name__ == "__main__":
|
||||
parser.add_argument("--output-folder")
|
||||
parser.add_argument("--global-time-limit", type=int, default=1800)
|
||||
parser.add_argument("--num-parallel", type=int, default=cpu_count())
|
||||
parser.add_argument("--backward-compatibility-check", action="store_true")
|
||||
parser.add_argument("--upgrade-check", action="store_true")
|
||||
parser.add_argument("--hung-check", action="store_true", default=False)
|
||||
# make sense only for hung check
|
||||
parser.add_argument("--drop-databases", action="store_true", default=False)
|
||||
@ -250,7 +248,7 @@ if __name__ == "__main__":
|
||||
args.num_parallel,
|
||||
args.skip_func_tests,
|
||||
args.global_time_limit,
|
||||
args.backward_compatibility_check,
|
||||
args.upgrade_check,
|
||||
)
|
||||
|
||||
logging.info("Will wait functests to finish")
|
||||
@ -303,11 +301,12 @@ if __name__ == "__main__":
|
||||
]
|
||||
)
|
||||
hung_check_log = os.path.join(args.output_folder, "hung_check.log")
|
||||
tee = Popen(['/usr/bin/tee', hung_check_log], stdin=PIPE)
|
||||
tee = Popen(["/usr/bin/tee", hung_check_log], stdin=PIPE)
|
||||
res = call(cmd, shell=True, stdout=tee.stdin, stderr=STDOUT)
|
||||
tee.stdin.close()
|
||||
if tee.stdin is not None:
|
||||
tee.stdin.close()
|
||||
if res != 0 and have_long_running_queries:
|
||||
logging.info("Hung check failed with exit code {}".format(res))
|
||||
logging.info("Hung check failed with exit code %d", res)
|
||||
else:
|
||||
hung_check_status = "No queries hung\tOK\t\\N\t\n"
|
||||
with open(
|
||||
@ -316,5 +315,4 @@ if __name__ == "__main__":
|
||||
results.write(hung_check_status)
|
||||
os.remove(hung_check_log)
|
||||
|
||||
|
||||
logging.info("Stress test finished")
|
@@ -36,7 +36,6 @@ def get_run_command(
        "docker run --cap-add=SYS_PTRACE "
        # a static link, don't use S3_URL or S3_DOWNLOAD
        "-e S3_URL='https://s3.amazonaws.com/clickhouse-datasets' "
        f"-e DISABLE_BC_CHECK={os.environ.get('DISABLE_BC_CHECK', '0')} "
        # For dmesg and sysctl
        "--privileged "
        f"--volume={build_path}:/package_folder "
@@ -109,7 +108,7 @@ def process_results(
    return state, description, test_results, additional_files


def main():
def run_stress_test(docker_image_name):
    logging.basicConfig(level=logging.INFO)

    stopwatch = Stopwatch()
@@ -132,7 +131,7 @@ def main():
        logging.info("Check is already finished according to github status, exiting")
        sys.exit(0)

    docker_image = get_image_with_version(reports_path, "clickhouse/stress-test")
    docker_image = get_image_with_version(reports_path, docker_image_name)

    packages_path = os.path.join(temp_path, "packages")
    if not os.path.exists(packages_path):
@@ -199,4 +198,4 @@ def main():


if __name__ == "__main__":
    main()
    run_stress_test("clickhouse/stress-test")
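The diff does not include upgrade_check.py itself, but since the entry point above now takes the image name as a parameter, each check is presumably a thin wrapper around the same function; a sketch of the idea (the contents of the upgrade_check module are an assumption, not shown in this commit):

python3 -c 'import stress_check; stress_check.run_stress_test("clickhouse/stress-test")'
# and, presumably, upgrade_check.py does the equivalent of:
python3 -c 'import stress_check; stress_check.run_stress_test("clickhouse/upgrade-check")'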

tests/ci/stress_tests.lib (new file, 309 lines)
@@ -0,0 +1,309 @@
#!/bin/bash
|
||||
|
||||
# core.COMM.PID-TID
|
||||
sysctl kernel.core_pattern='core.%e.%p-%P'
|
||||
|
||||
OK="\tOK\t\\N\t"
|
||||
FAIL="\tFAIL\t\\N\t"
|
||||
|
||||
FAILURE_CONTEXT_LINES=50
|
||||
FAILURE_CONTEXT_MAX_LINE_WIDTH=400
|
||||
|
||||
function escaped()
|
||||
{
|
||||
# That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language.
|
||||
# Also limit lines width just in case (too long lines are not really useful usually)
|
||||
clickhouse local -S 's String' --input-format=LineAsString -q "select substr(s, 1, $FAILURE_CONTEXT_MAX_LINE_WIDTH)
|
||||
from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'"
|
||||
}
|
||||
|
||||
function head_escaped()
|
||||
{
|
||||
head -n $FAILURE_CONTEXT_LINES $1 | escaped
|
||||
}
|
||||
|
||||
function unts()
|
||||
{
|
||||
grep -Po "[0-9][0-9]:[0-9][0-9] \K.*"
|
||||
}
|
||||
|
||||
function trim_server_logs()
|
||||
{
|
||||
head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
|
||||
}
|
||||
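These helpers turn the first FAILURE_CONTEXT_LINES lines of a log into a single escaped field that fits into the TSV report; a typical call site looks like the following (the check name and log file are placeholders, not part of stress_tests.lib):

# Illustrative usage only:
echo -e "Some check failed$FAIL$(head_escaped /test_output/some_check.log)" >> /test_output/test_results.tsv
echo -e "Some check passed$OK" >> /test_output/test_results.tsv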

function install_packages()
{
    dpkg -i $1/clickhouse-common-static_*.deb
    dpkg -i $1/clickhouse-common-static-dbg_*.deb
    dpkg -i $1/clickhouse-server_*.deb
    dpkg -i $1/clickhouse-client_*.deb
}

function configure()
{
    # install test configs
    export USE_DATABASE_ORDINARY=1
    export EXPORT_S3_STORAGE_POLICIES=1
    /usr/share/clickhouse-test/config/install.sh

    # avoid too slow startup
    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
        | sed "s|<snapshot_distance>100000</snapshot_distance>|<snapshot_distance>10000</snapshot_distance>|" \
        > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
    sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
    sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

    # for clickhouse-server (via service)
    echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
    # for clickhouse-client
    export ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'

    # since we run clickhouse from root
    sudo chown root: /var/lib/clickhouse

    # Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of OOM).
    echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
        > /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml

    local total_mem
    total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
    total_mem=$(( total_mem*1024 )) # bytes

    # Set maximum memory usage as half of total memory (less chance of OOM).
    #
    # But not via max_server_memory_usage but via max_memory_usage_for_user,
    # so that we can override this setting and execute service queries, like:
    # - hung check
    # - show/drop database
    # - ...
    #
    # So max_memory_usage_for_user will be a soft limit, and
    # max_server_memory_usage will be a hard limit, and queries that should be
    # executed regardless of memory limits will use max_memory_usage_for_user=0,
    # instead of relying on max_untracked_memory

    max_server_memory_usage_to_ram_ratio=0.5
    echo "Setting max_server_memory_usage_to_ram_ratio to ${max_server_memory_usage_to_ram_ratio}"
    cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
<clickhouse>
    <max_server_memory_usage_to_ram_ratio>${max_server_memory_usage_to_ram_ratio}</max_server_memory_usage_to_ram_ratio>
</clickhouse>
EOL

    local max_users_mem
    max_users_mem=$((total_mem*30/100)) # 30%
    echo "Setting max_memory_usage_for_user=$max_users_mem and max_memory_usage for queries to 10G"
    cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
<clickhouse>
    <profiles>
        <default>
            <max_memory_usage>10G</max_memory_usage>
            <max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
        </default>
    </profiles>
</clickhouse>
EOL

    cat > /etc/clickhouse-server/config.d/core.xml <<EOL
<clickhouse>
    <core_dump>
        <!-- 100GiB -->
        <size_limit>107374182400</size_limit>
    </core_dump>
    <!-- NOTE: no need to configure core_path,
         since clickhouse is not started as daemon (via clickhouse start)
    -->
    <core_path>$PWD</core_path>
</clickhouse>
EOL

    # Analyzer is not yet ready for testing
    cat > /etc/clickhouse-server/users.d/no_analyzer.xml <<EOL
<clickhouse>
    <profiles>
        <default>
            <constraints>
                <allow_experimental_analyzer>
                    <readonly/>
                </allow_experimental_analyzer>
            </constraints>
        </default>
    </profiles>
</clickhouse>
EOL

}
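To make the memory arithmetic in configure() concrete, assume a hypothetical 32 GiB host; the numbers below are a worked example, not output of the script:

# Worked example for a hypothetical 32 GiB host:
total_mem=$(( 32 * 1024 * 1024 * 1024 ))    # 34359738368 bytes
max_users_mem=$(( total_mem * 30 / 100 ))   # 10307921510 bytes, roughly 9.6 GiB soft per-user limit
# max_server_memory_usage_to_ram_ratio=0.5 caps the whole server at about 16 GiB,
# while each individual query additionally stays under max_memory_usage=10G.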
function stop()
{
    local max_tries="${1:-90}"
    local check_hang="${2:-true}"
    local pid
    # Preserve the pid, since the server can hang even after the pidfile is deleted.
    pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"

    clickhouse stop --max-tries "$max_tries" --do-not-kill && return

    if [ "$check_hang" == true ]
    then
        # We failed to stop the server with SIGTERM. Maybe it hangs, let's collect stacktraces.
        echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv
        kill -TERM "$(pidof gdb)" ||:
        sleep 5
        echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
        timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
        clickhouse stop --force
    fi
}
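Both optional arguments of stop() can be overridden by the caller; illustrative calls, not quoted from the CI scripts:

stop            # default: up to 90 tries, collect gdb backtraces if the server refuses to stop
stop 300 false  # wait longer, but skip the deadlock/backtrace collection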
function start()
{
    counter=0
    until clickhouse-client --query "SELECT 1"
    do
        if [ "$counter" -gt "${1:-120}" ]
        then
            echo "Cannot start clickhouse-server"
            rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
            echo -e "Cannot start clickhouse-server$FAIL$(trim_server_logs application_errors.txt)" >> /test_output/test_results.tsv
            cat /var/log/clickhouse-server/stdout.log
            tail -n100 /var/log/clickhouse-server/stderr.log
            tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n100
            break
        fi
        # use root to match with current uid
        clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>>/var/log/clickhouse-server/stderr.log
        sleep 0.5
        counter=$((counter + 1))
    done

    # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
    # and clickhouse-server can do fork-exec, for example, to run some bridge.
    # Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang;
    # explicitly ignore non-fatal signals that are used by the server.
    # The number of SIGRTMIN can be determined only at runtime.
    RTMIN=$(kill -l SIGRTMIN)
    echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb

    # FIXME Hung check may work incorrectly because of attached gdb
    # 1. False positives are possible
    # 2. We cannot attach another gdb to get stacktraces if some queries hang
    gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
    sleep 5
    # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
    time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
}
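The single optional argument of start() bounds the number of half-second polls before startup is reported as failed; an illustrative call:

start 600   # allow up to 600 polls (about 5 minutes) for a slow sanitizer build to come up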
function check_server_start()
{
    clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
        || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
        && echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(trim_server_logs application_errors.txt)" \
        >> /test_output/test_results.tsv)

    # Remove file application_errors.txt if it's empty
    [ -s /test_output/application_errors.txt ] || rm /test_output/application_errors.txt
}

function check_logs_for_critical_errors()
{
    # Sanitizer asserts
    rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
    rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
    rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
        && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
        || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
    rm -f /test_output/tmp

    # OOM
    rg -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
        && echo -e "Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
        || echo -e "No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

    # Logical errors
    rg -Fa "Code: 49. DB::Exception: " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
        && echo -e "Logical error thrown (see clickhouse-server.log or logical_errors.txt)$FAIL$(head_escaped /test_output/logical_errors.txt)" >> /test_output/test_results.tsv \
        || echo -e "No logical errors$OK" >> /test_output/test_results.tsv
    # Remove file logical_errors.txt if it's empty
    [ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

    # No such key errors
    rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
        && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
        || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv

    # Remove file no_such_key_errors.txt if it's empty
    [ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt

    # Crash
    rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
        && echo -e "Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
        || echo -e "Not crashed$OK" >> /test_output/test_results.tsv

    # It also checks for crash without stacktrace (printed by watchdog)
    rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
        && echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(trim_server_logs fatal_messages.txt)" >> /test_output/test_results.tsv \
        || echo -e "No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

    # Remove file fatal_messages.txt if it's empty
    [ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt

    rg -Fa "########################################" /test_output/* > /dev/null \
        && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv

    function get_gdb_log_context()
    {
        rg -A50 -Fa " received signal " /test_output/gdb.log | head_escaped
    }

    rg -Fa " received signal " /test_output/gdb.log > /dev/null \
        && echo -e "Found signal in gdb.log$FAIL$(get_gdb_log_context)" >> /test_output/test_results.tsv

    dmesg -T > /test_output/dmesg.log

    # OOM in dmesg -- those are real
    grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \
        && echo -e "OOM in dmesg$FAIL$(head_escaped /test_output/dmesg.log)" >> /test_output/test_results.tsv \
        || echo -e "No OOM in dmesg$OK" >> /test_output/test_results.tsv
}

function collect_query_and_trace_logs()
{
    for table in query_log trace_log
    do
        clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
    done
}

function collect_core_dumps()
{
    find . -type f -maxdepth 1 -name 'core.*' | while read core; do
        zstd --threads=0 $core
        mv $core.zst /test_output/
    done
}
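Taken together, a consumer script would source this library and drive a run roughly as sketched below; the source path and the exact ordering are assumptions made for illustration, not a quote from the stress or upgrade scripts.

#!/bin/bash
# Hypothetical consumer of stress_tests.lib (path and ordering are illustrative):
source ./stress_tests.lib

install_packages /package_folder
configure
start
check_server_start
# ... run the actual test workload here ...
stop
check_logs_for_critical_errors
collect_query_and_trace_logs
collect_core_dumps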
4
tests/ci/upgrade_check.py
Normal file
4
tests/ci/upgrade_check.py
Normal file
@ -0,0 +1,4 @@
import stress_check

if __name__ == "__main__":
    stress_check.run_stress_test("clickhouse/upgrade-check")
@ -450,7 +450,7 @@ class FailureReason(enum.Enum):
REPLICATED_DB = "replicated-database"
S3_STORAGE = "s3-storage"
BUILD = "not running for current build"
BACKWARD_INCOMPATIBLE = "test is backward incompatible"
NO_UPGRADE_CHECK = "not running for upgrade check"
NO_PARALLEL_REPLICAS = "something is not supported with parallel replicas"

# UNKNOWN reasons
@ -773,35 +773,6 @@ class TestCase:
else ""
)

# Check if test contains tag "no-backward-compatibility-check" and we should skip it
def check_backward_incompatible_tag(self) -> bool:
for tag in self.tags:
if tag.startswith("no-backward-compatibility-check"):
split = tag.split(":")

# If version is not specified in tag, always skip this test.
if len(split) == 1:
return True
version_from_tag = split[1]

# Check if the string extracted from the tag is a real ClickHouse version; if not, always skip the test.
if re.match(VERSION_PATTERN, version_from_tag) is None:
return True

server_version = str(
clickhouse_execute(args, "SELECT version()").decode()
)
# If the server version is less than or equal to the version specified in the tag, we should skip this test.
version_from_tag_split = list(map(int, version_from_tag.split(".")))
server_version_split = list(map(int, server_version.split(".")))
if (
server_version_split[: len(version_from_tag_split)]
<= version_from_tag_split
):
return True

return False

# should skip test, should increment skipped_total, skip reason
def should_skip_test(self, suite) -> Optional[FailureReason]:
tags = self.tags
@ -852,10 +823,10 @@ class TestCase:
elif tags and ("no-replicated-database" in tags) and args.replicated_database:
return FailureReason.REPLICATED_DB

elif (
args.backward_compatibility_check and self.check_backward_incompatible_tag()
):
return FailureReason.BACKWARD_INCOMPATIBLE
# TODO: remove checking "no-upgrade-check" after 23.1
elif args.upgrade_check and (
"no-upgrade-check" in tags):
return FailureReason.NO_UPGRADE_CHECK

elif tags and ("no-s3-storage" in tags) and args.s3_storage:
return FailureReason.S3_STORAGE
@ -2461,9 +2432,9 @@ def parse_args():
)

group.add_argument(
"--backward-compatibility-check",
"--upgrade-check",
action="store_true",
help="Run tests for further backward compatibility testing by ignoring all"
help="Run tests for further server upgrade testing by ignoring all"
"drop queries in tests for collecting data from new version of server",
)
parser.add_argument(
@ -2614,7 +2585,7 @@ if __name__ == "__main__":
else:
args.client_database = "default"

if args.backward_compatibility_check:
if args.upgrade_check:
args.client += " --fake-drop"

if args.client_option or args.secure:
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

DROP TABLE IF EXISTS alter_00061;
set allow_deprecated_syntax_for_merge_tree=1;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: zookeeper, no-parallel, no-s3-storage, no-backward-compatibility-check
# Tags: zookeeper, no-parallel, no-s3-storage, no-upgrade-check

# Because REPLACE PARTITION does not forces immediate removal of replaced data parts from local filesystem
# (it tries to do it as quick as possible, but it still performed in separate thread asynchronously)
@ -1,4 +1,4 @@
-- Tags: long, zookeeper, no-replicated-database, no-backward-compatibility-check
-- Tags: long, zookeeper, no-replicated-database, no-upgrade-check
-- Tag no-replicated-database: Fails due to additional replicas or shards

SET send_logs_level = 'fatal';
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-backward-compatibility-check
# Tags: no-upgrade-check
# Test fix for issue #5066

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: race, zookeeper, no-parallel, no-backward-compatibility-check
# Tags: race, zookeeper, no-parallel, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: zookeeper, no-parallel, no-fasttest, no-backward-compatibility-check
# Tags: zookeeper, no-parallel, no-fasttest, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: race, zookeeper, no-backward-compatibility-check
# Tags: race, zookeeper, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,4 +1,4 @@
-- Tags: no-parallel, no-backward-compatibility-check
-- Tags: no-parallel, no-upgrade-check

DROP DATABASE IF EXISTS test_01191;
CREATE DATABASE test_01191 ENGINE=Atomic;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: long, zookeeper, no-parallel, no-backward-compatibility-check
# Tags: long, zookeeper, no-parallel, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,4 +1,4 @@
-- Tags: zookeeper, no-backward-compatibility-check
-- Tags: zookeeper, no-upgrade-check

DROP TABLE IF EXISTS table_rename_with_ttl;

@ -1,4 +1,4 @@
-- Tags: no-parallel, no-backward-compatibility-check
-- Tags: no-parallel, no-upgrade-check

DROP DATABASE IF EXISTS db_01391;
CREATE DATABASE db_01391;
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

-- force data path with the user/pass in it
set use_compact_format_in_distributed_parts_names=0;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-backward-compatibility-check
# Tags: no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,4 +1,4 @@
-- Tags: long, no-backward-compatibility-check
-- Tags: long, no-upgrade-check

DROP TABLE IF EXISTS test_01640;
DROP TABLE IF EXISTS restore_01640;
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

SET mutations_sync = 2;

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-backward-compatibility-check
# Tags: no-upgrade-check

set -eu

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-backward-compatibility-check
# Tags: no-upgrade-check

set -eu

@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check, no-fasttest
-- Tags: no-upgrade-check, no-fasttest

DROP TABLE IF EXISTS partslost_0;
DROP TABLE IF EXISTS partslost_1;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-parallel, no-backward-compatibility-check
# Tags: no-fasttest, no-parallel, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: race, no-fasttest, no-parallel, no-backward-compatibility-check
# Tags: race, no-fasttest, no-parallel, no-upgrade-check

# Test tries to reproduce a race between threads:
# - deletes user
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: race, no-fasttest, no-parallel, no-backward-compatibility-check
# Tags: race, no-fasttest, no-parallel, no-upgrade-check

set -e

@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check
SET allow_experimental_hash_functions = 1;

select number, hashid(number) from system.numbers limit 5;
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

SET max_bytes_in_join = '100', join_algorithm = 'auto';

@ -1,2 +1,2 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check
SELECT nth_value(NULL, 1048577) OVER (Rows BETWEEN 1023 FOLLOWING AND UNBOUNDED FOLLOWING)
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

-- https://github.com/ClickHouse/ClickHouse/issues/37561

@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

SET join_algorithm = 'partial_merge';

@ -1,6 +1,3 @@
-- Tags: no-backward-compatibility-check
-- TODO: remove no-backward-compatibility-check after new 22.6 release

SET cast_ipv4_ipv6_default_on_conversion_error = 1;

DROP TABLE IF EXISTS ipv4_test;
@ -1,3 +1,3 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

SELECT 'Play ClickHouse' InterSect SELECT 'Play ClickHouse'
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

select arrayMap(x -> toNullable(1), range(number)) from numbers(3);
select arrayFilter(x -> toNullable(1), range(number)) from numbers(3);
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-backward-compatibility-check
# Tags: no-upgrade-check

CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=trace
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

-- Regression for PartialSortingTransform optimization that requires at least 1500 rows.
SELECT * FROM (SELECT * FROM (SELECT 0 a, toNullable(number) b, toString(number) c FROM numbers(1e6)) ORDER BY a DESC, b DESC, c LIMIT 1500) limit 10;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-backward-compatibility-check
# Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-upgrade-check

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check
DROP TABLE IF EXISTS map_test;
CREATE TABLE map_test(`tags` Map(String, String)) ENGINE = MergeTree PRIMARY KEY tags ORDER BY tags SETTINGS index_granularity = 8192;
INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red'));
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check
-- Tags: no-upgrade-check

SELECT toDate('2022-02-01') AS d1
FROM numbers(18) AS number
@ -1,4 +1,4 @@
-- Tags: no-backward-compatibility-check, no-random-merge-tree-settings
-- Tags: no-upgrade-check, no-random-merge-tree-settings

drop table if exists test_02381;
create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks=false, compress_primary_key=false;
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: race, zookeeper, no-parallel, no-backward-compatibility-check, disabled
# Tags: race, zookeeper, no-parallel, no-upgrade-check, disabled

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: long, no-backward-compatibility-check
# Tags: long, no-upgrade-check

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: long, no-backward-compatibility-check
# Tags: long, no-upgrade-check

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh