ClickHouse/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh

#!/usr/bin/env bash
# Tags: replica, no-parallel, no-fasttest
# This test checks concurrent execution of mutations together with concurrent inserts.
# There was a bug in mutation finalization: a mutation could be considered finished not after all
# MUTATE_PART tasks were executed, but after a GET of an already mutated part from another replica.
# To test it we stop some replicas to delay the fetch of the parts required for the mutation.
# Since our replication queue executes tasks concurrently, it may happen that we download an already
# mutated part before the source part.
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
REPLICAS=5
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_mutate_mt_$i"
done
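# The settings below keep the replication queues permissive for this test: large
# max_replicated_mutations_in_queue / max_replicated_merges_in_queue avoid queue back-pressure,
# number_of_free_entries_in_pool_to_execute_mutation=0 lets mutations start even when the background
# pool is busy, and the short cleanup/temporary-directory periods speed up removal of leftover parts.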
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT -nm --query "
CREATE TABLE concurrent_mutate_mt_$i (key UInt64, value1 UInt64, value2 String)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_mutate_mt', '$i')
ORDER BY key
SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0, max_replicated_merges_in_queue=1000, temporary_directories_lifetime=10, cleanup_delay_period=3, cleanup_delay_period_random_add=0;
"
done
$CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_1 SELECT number, number + 10, toString(number) from numbers(10)"
$CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_1 SELECT number, number + 10, toString(number) from numbers(10, 40)"
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA concurrent_mutate_mt_$i"
done
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "SELECT SUM(value1) FROM concurrent_mutate_mt_$i"
done
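# Remember the initial sum of value1: the mutation only increments value1 and inserts add rows,
# so at the end the per-replica sum must be strictly greater than this value.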
INITIAL_SUM=$($CLICKHOUSE_CLIENT --query "SELECT SUM(value1) FROM concurrent_mutate_mt_1")
# Run mutation on random replica
function correct_alter_thread()
{
REPLICA=$(($RANDOM % 5 + 1))
$CLICKHOUSE_CLIENT --query "ALTER TABLE concurrent_mutate_mt_$REPLICA UPDATE value1 = value1 + 1 WHERE 1"
sleep 1
}
# This thread adds some data to the table.
function insert_thread()
{
VALUES=(7 8 9)
REPLICA=$(($RANDOM % 5 + 1))
VALUE=${VALUES[$RANDOM % ${#VALUES[@]} ]}
$CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_$REPLICA VALUES($RANDOM, $VALUE, toString($VALUE))"
sleep 0.$RANDOM
}
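# Detach and re-attach a random replica to temporarily "stop" it, delaying fetches of mutated parts
# (see the comment at the top of the test).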
function detach_attach_thread()
{
REPLICA=$(($RANDOM % 5 + 1))
$CLICKHOUSE_CLIENT --query "DETACH TABLE concurrent_mutate_mt_$REPLICA"
sleep 0.$RANDOM
sleep 0.$RANDOM
sleep 0.$RANDOM
$CLICKHOUSE_CLIENT --query "ATTACH TABLE concurrent_mutate_mt_$REPLICA"
}
echo "Starting alters"
export -f correct_alter_thread
export -f insert_thread
export -f detach_attach_thread
# We assign a lot of mutations, so the timeout shouldn't be too big
TIMEOUT=15
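# clickhouse_client_loop_timeout comes from shell_config.sh and is expected to keep re-running the
# exported function until the timeout expires; stderr is silenced because the DETACH/ATTACH races
# make transient errors expected here.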
clickhouse_client_loop_timeout $TIMEOUT detach_attach_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT correct_alter_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
clickhouse_client_loop_timeout $TIMEOUT insert_thread 2> /dev/null &
wait
echo "Finishing alters"
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "ATTACH TABLE concurrent_mutate_mt_$i" 2> /dev/null
done
sleep 1
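# Poll until all mutations are finished and all tables are attached again, but give up after
# roughly 120 iterations (about two minutes plus query time).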
counter=0
have_undone_mutations_query="SELECT * FROM system.mutations WHERE table LIKE 'concurrent_mutate_mt_%' AND is_done = 0 AND database = '${CLICKHOUSE_DATABASE}'"
have_all_tables_query="SELECT count() FROM system.tables WHERE name LIKE 'concurrent_mutate_mt_%' AND database = '${CLICKHOUSE_DATABASE}'"
while true ; do
if [ "$counter" -gt 120 ]
then
break
fi
sleep 1
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "ATTACH TABLE concurrent_mutate_mt_$i" 2> /dev/null
done
counter=$(($counter + 1))
# no active mutations and all tables attached
if [[ -z $($CLICKHOUSE_CLIENT --query "$have_undone_mutations_query" 2>&1) && $($CLICKHOUSE_CLIENT --query "$have_all_tables_query" 2>&1) == "$REPLICAS" ]]; then
break
fi
done
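# Every replica must converge: the sum of value1 has to exceed the initial sum, the count of
# unfinished mutations must be 0, and the verbose SELECT from system.mutations must print nothing.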
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "SELECT SUM(toUInt64(value1)) > $INITIAL_SUM FROM concurrent_mutate_mt_$i"
$CLICKHOUSE_CLIENT --query "SELECT COUNT() FROM system.mutations WHERE table='concurrent_mutate_mt_$i' and is_done=0" # all mutations have to be done
$CLICKHOUSE_CLIENT --query "SELECT * FROM system.mutations WHERE table='concurrent_mutate_mt_$i' and is_done=0" # for verbose output
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_mutate_mt_$i"
done