Fix 02434_cancel_insert_when_client_dies
commit 53d93b177e
parent 828c2bc08b
@@ -10,22 +10,26 @@ export DATA_FILE="$CLICKHOUSE_TMP/deduptest.tsv"
 export TEST_MARK="02434_insert_${CLICKHOUSE_DATABASE}_"
 
 $CLICKHOUSE_CLIENT -q 'select * from numbers(5000000) format TSV' > $DATA_FILE
-$CLICKHOUSE_CLIENT -q 'create table dedup_test(A Int64) Engine = MergeTree order by A settings non_replicated_deduplication_window=1000;'
+$CLICKHOUSE_CLIENT -q "create table dedup_test(A Int64) Engine = MergeTree order by A
+    settings non_replicated_deduplication_window=1000
+    , merge_tree_clear_old_temporary_directories_interval_seconds = 1
+    ;"
 $CLICKHOUSE_CLIENT -q "create table dedup_dist(A Int64) Engine = Distributed('test_cluster_one_shard_two_replicas', currentDatabase(), dedup_test)"
 
 function insert_data
 {
-    SETTINGS="query_id=$ID&max_insert_block_size=110000&min_insert_block_size_rows=110000"
+    # send_logs_level: https://github.com/ClickHouse/ClickHouse/issues/67599
+    SETTINGS="query_id=$ID&max_insert_block_size=110000&min_insert_block_size_rows=110000&send_logs_level=fatal"
     # max_block_size=10000, so external table will contain smaller blocks that will be squashed on insert-select (more chances to catch a bug on query cancellation)
     TRASH_SETTINGS="query_id=$ID&input_format_parallel_parsing=0&max_threads=1&max_insert_threads=1&max_insert_block_size=110000&max_block_size=10000&min_insert_block_size_bytes=0&min_insert_block_size_rows=110000&max_insert_block_size=110000"
     TYPE=$(( RANDOM % 5 ))
 
     if [[ "$TYPE" -eq 0 ]]; then
         # client will send 10000-rows blocks, server will squash them into 110000-rows blocks (more chances to catch a bug on query cancellation)
-        $CLICKHOUSE_CLIENT --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" \
+        $CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=fatal --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" \
             -q 'insert into dedup_test settings max_insert_block_size=110000, min_insert_block_size_rows=110000 format TSV' < $DATA_FILE
     elif [[ "$TYPE" -eq 1 ]]; then
-        $CLICKHOUSE_CLIENT --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" --prefer_localhost_replica="$(( RANDOM % 2))" \
+        $CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=fatal --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" --prefer_localhost_replica="$(( RANDOM % 2))" \
             -q 'insert into dedup_dist settings max_insert_block_size=110000, min_insert_block_size_rows=110000 format TSV' < $DATA_FILE
     elif [[ "$TYPE" -eq 2 ]]; then
         $CLICKHOUSE_CURL -sS -X POST --data-binary @- "$CLICKHOUSE_URL&$SETTINGS&query=insert+into+dedup_test+format+TSV" < $DATA_FILE
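
Context for the client-side change: in the stateless-test harness, $CLICKHOUSE_CLIENT already expands to clickhouse-client with a --send_logs_level option, and the client rejects a setting that is given twice unless --allow_repeated_settings is passed. A minimal sketch of the difference, assuming a local clickhouse-client and server (the exact error text may vary by version):

    # Fails: the same setting appears twice on the command line.
    clickhouse-client --send_logs_level=warning --send_logs_level=fatal -q 'select 1'

    # Works: --allow_repeated_settings lets the last occurrence win, which is
    # how the test overrides the harness-provided value with "fatal".
    clickhouse-client --allow_repeated_settings --send_logs_level=warning --send_logs_level=fatal -q 'select 1'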
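
The HTTP paths get the same fix through the $SETTINGS query string: ClickHouse accepts settings as URL parameters, so send_logs_level=fatal applies to that one request only. A minimal sketch, assuming a server listening on the default HTTP port 8123:

    # Per-request settings ride along as URL parameters, exactly like the
    # SETTINGS/TRASH_SETTINGS strings in the test; send_logs_level=fatal keeps
    # server-side log messages out of the response.
    curl -sS "http://localhost:8123/?send_logs_level=fatal&max_insert_block_size=110000&query=select+1"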
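
The other change, merge_tree_clear_old_temporary_directories_interval_seconds = 1, shortens the background sweep that removes stale tmp_* part directories left behind when an INSERT is killed (by default it runs every 60 seconds). The current value and its documentation can be inspected in system.merge_tree_settings; a sketch assuming a local client:

    clickhouse-client -q "
        select name, value, description
        from system.merge_tree_settings
        where name = 'merge_tree_clear_old_temporary_directories_interval_seconds'
        format Vertical"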
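
Finally, the block-size comments describe the squashing setup the test relies on: the client emits small 10000-row blocks, and min_insert_block_size_rows makes the server merge them into roughly 110000-row blocks before parts are written, widening the window in which a dying client can interrupt an in-flight INSERT. A self-contained sketch of the same pattern, assuming a local server (the scratch table t is hypothetical):

    clickhouse-client -q 'create table t(A Int64) engine = MergeTree order by A'
    # Small client-side blocks, squashed server-side into ~110000-row blocks:
    clickhouse-client -q 'select * from numbers(500000) format TSV' \
        | clickhouse-client --max_block_size=10000 --max_insert_block_size=10000 \
            -q 'insert into t settings min_insert_block_size_rows=110000, max_insert_block_size=110000 format TSV'
    # Row counts per part should reflect the squashing, not the 10000-row input blocks:
    clickhouse-client -q "select name, rows from system.parts where table = 't' and active"
    clickhouse-client -q 'drop table t'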