Fix warnings found by Shellcheck

Alexey Milovidov 2020-08-01 03:40:56 +03:00
parent ad675c10cc
commit 0a1f05ffd1
55 changed files with 140 additions and 168 deletions

View File

@ -18,9 +18,9 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO test_optimize_exception_replicated VAL
${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201709 FINAL"
${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201709 FINAL"
echo `${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201710" 2>&1` \
echo "`"${CLICKHOUSE_CLIENT}" --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201710" 2>&1`" \
| grep -c 'Code: 388. DB::Exception: .* DB::Exception: .* Cannot select parts for optimization'
echo `${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201710" 2>&1` \
echo "`"${CLICKHOUSE_CLIENT}" --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201710" 2>&1`" \
| grep -c 'Code: 388. DB::Exception: .* DB::Exception:.* Cannot select parts for optimization'
${CLICKHOUSE_CLIENT} --query="DROP TABLE test_optimize_exception NO DELAY"

View File

@ -13,6 +13,6 @@ echo "
INSERT INTO two_blocks VALUES ('2000-01-02');
" | $CLICKHOUSE_CLIENT -n
for i in {1..10}; do seq 1 100 | sed 's/.*/SELECT count() FROM (SELECT * FROM two_blocks);/' | $CLICKHOUSE_CLIENT -n | grep -vE '^2$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 100 | sed 's/.*/SELECT count() FROM (SELECT * FROM two_blocks);/' | $CLICKHOUSE_CLIENT -n | grep -vE '^2$' && echo 'Fail!' && break; echo -n '.'; done; echo
echo "DROP TABLE two_blocks;" | $CLICKHOUSE_CLIENT -n

View File

@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -o errexit
set -o pipefail
for i in {1..10}; do seq 1 100 | sed 's/.*/SELECT count() FROM (SELECT * FROM (SELECT * FROM system.numbers_mt LIMIT 111) LIMIT 55);/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 | grep -vE '^55$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 100 | sed 's/.*/SELECT count() FROM (SELECT * FROM (SELECT * FROM system.numbers_mt LIMIT 111) LIMIT 55);/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 | grep -vE '^55$' && echo 'Fail!' && break; echo -n '.'; done; echo

View File

@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -o errexit
set -o pipefail
for i in {1..10}; do seq 1 100 | sed 's/.*/SELECT * FROM (SELECT * FROM system.numbers_mt LIMIT 111) LIMIT 55;/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 | wc -l | grep -vE '^5500$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 100 | sed 's/.*/SELECT * FROM (SELECT * FROM system.numbers_mt LIMIT 111) LIMIT 55;/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 | wc -l | grep -vE '^5500$' && echo 'Fail!' && break; echo -n '.'; done; echo

View File

@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -o errexit
set -o pipefail
for i in {1..10}; do seq 1 10 | sed 's/.*/SELECT 1 % ((number + 500) % 1000) FROM numbers_mt(1000);/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 >/dev/null 2>&1 && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 10 | sed 's/.*/SELECT 1 % ((number + 500) % 1000) FROM numbers_mt(1000);/' | $CLICKHOUSE_CLIENT -n --max_block_size=1 >/dev/null 2>&1 && echo 'Fail!' && break; echo -n '.'; done; echo

View File

@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -o errexit
set -o pipefail
for i in {1..10}; do seq 1 100 | sed 's/.*/SELECT * FROM system.numbers_mt LIMIT 111;/' | $CLICKHOUSE_CLIENT -n --max_block_size=$(($RANDOM % 123 + 1)) | wc -l | grep -vE '^11100$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 100 | sed 's/.*/SELECT * FROM system.numbers_mt LIMIT 111;/' | $CLICKHOUSE_CLIENT -n --max_block_size=$(($RANDOM % 123 + 1)) | wc -l | grep -vE '^11100$' && echo 'Fail!' && break; echo -n '.'; done; echo

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
export NO_SHELL_CONFIG=1
for i in {1..4}; do
for _ in {1..4}; do
$CURDIR/00097_long_storage_buffer_race_condition.sh > /dev/null 2>&1 &
done

View File

@ -13,6 +13,6 @@ $CLICKHOUSE_CLIENT -n --query="
INSERT INTO users VALUES (1321770221388956068);
";
for i in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
$CLICKHOUSE_CLIENT --query="DROP TABLE users;";

View File

@ -8,20 +8,25 @@ TABLE_HASH="cityHash64(groupArray(cityHash64(*)))"
function pack_unpack_compare()
{
local buf_file="${CLICKHOUSE_TMP}/buf.'.$3"
local buf_file
buf_file="${CLICKHOUSE_TMP}/buf.'.$3"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS buf_00385"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS buf_file"
${CLICKHOUSE_CLIENT} --query "CREATE TABLE buf_00385 ENGINE = Memory AS $1"
local res_orig=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_00385")
local res_orig
res_orig=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_00385")
${CLICKHOUSE_CLIENT} --max_threads=1 --query "CREATE TABLE buf_file ENGINE = File($3) AS SELECT * FROM buf_00385"
local res_db_file=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_file")
local res_db_file
res_db_file=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_file")
${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT * FROM buf_00385 FORMAT $3" > "$buf_file"
local res_ch_local1=$(${CLICKHOUSE_LOCAL} --structure "$2" --file "$buf_file" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`")
local res_ch_local2=$(${CLICKHOUSE_LOCAL} --structure "$2" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" < "$buf_file")
local res_ch_local1
res_ch_local1=$(${CLICKHOUSE_LOCAL} --structure "$2" --file "$buf_file" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`")
local res_ch_local2
res_ch_local2=$(${CLICKHOUSE_LOCAL} --structure "$2" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" < "$buf_file")
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS buf_00385"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS buf_file"

View File

@ -20,7 +20,8 @@ function ch_url() {
exception_pattern="displayText() = DB::Exception:[[:print:]]*"
function check_only_exception() {
local res=`ch_url "$1" "$2"`
local res
res=`ch_url "$1" "$2"`
#(echo "$res")
#(echo "$res" | wc -l)
#(echo "$res" | grep -c "$exception_pattern")
@ -29,7 +30,8 @@ function check_only_exception() {
}
function check_last_line_exception() {
local res=`ch_url "$1" "$2"`
local res
res=`ch_url "$1" "$2"`
#echo "$res" > res
#echo "$res" | wc -c
#echo "$res" | tail -n -2

View File

@ -35,16 +35,16 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere"
# Depend on 00282_merging test
pushd `dirname $0` > /dev/null
pushd "`dirname $0`" > /dev/null
SCRIPTPATH=`pwd`
popd > /dev/null
#SCRIPTDIR=`dirname "$SCRIPTPATH"`
SCRIPTDIR=$SCRIPTPATH
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 --merge_tree_uniform_read_distribution=1 -n 2>&1 > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 --merge_tree_uniform_read_distribution=1 -n > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout 2>&1
cmp "$SCRIPTDIR"/00282_merging.reference ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 --merge_tree_uniform_read_distribution=0 -n 2>&1 > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 --merge_tree_uniform_read_distribution=0 -n > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout 2>&1
cmp "$SCRIPTDIR"/00282_merging.reference ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED
rm ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout
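The redirection reorder in the two hunks above matters because redirections apply left to right: in "cmd 2>&1 > file", stderr is duplicated onto the current stdout (the terminal) before stdout is pointed at the file, so errors never reach the file. "cmd > file 2>&1" captures both streams (ShellCheck SC2069). Compare:

    sh -c 'echo out; echo err >&2' 2>&1 > /dev/null   # "err" still reaches the terminal
    sh -c 'echo out; echo err >&2' > /dev/null 2>&1   # fully silent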

View File

@ -21,7 +21,7 @@ set -e
$ch "INSERT INTO clear_column1 VALUES ('2000-01-01', 1, 'a'), ('2000-02-01', 2, 'b')"
$ch "INSERT INTO clear_column1 VALUES ('2000-01-01', 3, 'c'), ('2000-02-01', 4, 'd')"
for i in `seq 10`; do
for _ in `seq 10`; do
$ch "INSERT INTO clear_column1 VALUES ('2000-02-01', 0, ''), ('2000-02-01', 0, '')" & # insert into the same partition
$ch "ALTER TABLE clear_column1 CLEAR COLUMN i IN PARTITION '200001'" --replication_alter_partitions_sync=2
$ch "ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200001'" --replication_alter_partitions_sync=2

View File

@ -91,7 +91,7 @@ sleep 3
check "$url$session&session_check=1" "$select" "Exception.*Session not found" 1 "Session did not expire on time."
create_temporary_table "$url$session&session_timeout=2"
for i in $(seq 1 3); do
for _ in $(seq 1 3); do
check "$url$session&session_timeout=2" "$select_from_temporary_table" "Hello" 1 "Session expired too early."
sleep 1
done
@ -99,7 +99,7 @@ sleep 3
check "$url$session&session_check=1" "$select" "Exception.*Session not found" 1 "Session did not expire on time."
create_temporary_table "$url$session&session_timeout=2"
for i in $(seq 1 5); do
for _ in $(seq 1 5); do
check "$url$session&session_timeout=2" "$select_from_non_existent_table" "Exception.*Table .* doesn't exist." 1 "Session expired too early."
sleep 1
done

View File

@ -45,7 +45,7 @@ echo $QUERY
URL=$(python -c 'print "'${CLICKHOUSE_URL}'&query=" + __import__("urllib").quote("'"$QUERY"'")')
set +e
for i in 1 2 3; do
for _ in 1 2 3; do
echo run by native protocol
printf "$DATA" | $CLICKHOUSE_CLIENT --query "$QUERY"

View File

@ -10,7 +10,7 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_00575;"
${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_00575 (dt Date DEFAULT now(), id UInt32, id2 UInt32 DEFAULT id + 1) ENGINE = MergeTree(dt, dt, 8192);"
${CLICKHOUSE_CLIENT} --query "INSERT INTO test_00575(dt,id) VALUES ('2018-02-22',3), ('2018-02-22',4), ('2018-02-22',5);"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00575 ORDER BY id;"
echo `${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id;" --server_logs_file=/dev/null 2>&1 | grep -c "$exception_pattern"`
echo "`"${CLICKHOUSE_CLIENT}" --query "ALTER TABLE test_00575 DROP COLUMN id;" --server_logs_file=/dev/null 2>&1 | grep -c "$exception_pattern"`"
${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id2;"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00575 ORDER BY id;"
${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id;"

View File

@ -12,7 +12,7 @@ function wait_for_query_to_start()
}
$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' 2>&1 > /dev/null &
$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 &
wait_for_query_to_start 'hello'
# Replace it

View File

@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} --query "INSERT INTO test_truncate.test_view_depend VALUES(
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_truncate.test_view;"
${CLICKHOUSE_CLIENT} --query "SELECT '========Execute Truncate========';"
echo `${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE test_truncate.test_view;" --server_logs_file=/dev/null 2>&1 | grep -c "Code: 48.*Truncate is not supported by storage View"`
echo "`"${CLICKHOUSE_CLIENT}" --query "TRUNCATE TABLE test_truncate.test_view;" --server_logs_file=/dev/null 2>&1 | grep -c "Code: 48.*Truncate is not supported by storage View"`"
${CLICKHOUSE_CLIENT} --query "SELECT '========After Truncate========';"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_truncate.test_view;"

View File

@ -19,11 +19,11 @@ settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_event
# Check that logs from remote servers are passed from client
# SELECT
> "$server_logs_file"
true > "$server_logs_file"
$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM system.one FORMAT Null"
lines_one_server=`cat "$server_logs_file" | wc -l`
> "$server_logs_file"
true > "$server_logs_file"
$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM remote('127.0.0.2,127.0.0.3', system, one) FORMAT Null"
lines_two_servers=`cat "$server_logs_file" | wc -l`
@ -33,11 +33,11 @@ lines_two_servers=`cat "$server_logs_file" | wc -l`
$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS null_00634_1"
$CLICKHOUSE_CLIENT $settings -q "CREATE TABLE null_00634_1 (i Int8) ENGINE = Null"
> "$server_logs_file"
true > "$server_logs_file"
$CLICKHOUSE_CLIENT $settings -q "INSERT INTO null_00634_1 VALUES (0)"
lines_one_server=`cat "$server_logs_file" | wc -l`
> "$server_logs_file"
true > "$server_logs_file"
$CLICKHOUSE_CLIENT $settings -q "INSERT INTO TABLE FUNCTION remote('127.0.0.2', '${CLICKHOUSE_DATABASE}', 'null_00634_1') VALUES (0)"
lines_two_servers=`cat "$server_logs_file" | wc -l`
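Prefixing the bare truncations with true addresses ShellCheck's SC2188 ("this redirection doesn't have a command"): "> file" alone is legal bash but easy to misread as an accidental fragment. Adding an explicit no-op command keeps the effect, emptying the file, while making the intent obvious; ": > file" is an equivalent idiom:

    true > "$server_logs_file"   # truncate to zero length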

View File

@ -50,29 +50,5 @@ SELECT
(SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-1 AND type=2 ORDER BY event_time DESC LIMIT 1)
ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV"
# Check per-thread and per-query ProfileEvents consistency
$CLICKHOUSE_CLIENT $settings --any_join_distinct_right_table_keys=1 -q "
SELECT PN, PVq, PVt FROM
(
SELECT PN, sum(PV) AS PVt
FROM system.query_thread_log
ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV
WHERE event_date >= today()-1 AND query_id='$query_id'
GROUP BY PN
) js1
ANY INNER JOIN
(
SELECT PN, PV AS PVq
FROM system.query_log
ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV
WHERE event_date >= today()-1 AND query_id='$query_id'
) js2
USING PN
WHERE
NOT PN IN ('ContextLock') AND
NOT (PVq <= PVt AND PVt <= 1.1 * PVq)
"
# Clean
rm "$server_logs_file"

View File

@ -3,10 +3,10 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
user=readonly
user="readonly"
address=${CLICKHOUSE_HOST}
port=${CLICKHOUSE_PORT_HTTP}
url="${CLICKHOUSE_PORT_HTTP_PROTO}://readonly@$address:$port/?session_id=test"
url="${CLICKHOUSE_PORT_HTTP_PROTO}://${user}@${address}:${port}/?session_id=test"
select="SELECT name, value, changed FROM system.settings WHERE name = 'readonly'"
${CLICKHOUSE_CURL} -sS $url --data-binary "$select"

View File

@ -22,7 +22,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT TOP 2 * FROM test_00687 ORDER BY val;"
${CLICKHOUSE_CLIENT} --query "SELECT TOP (2) * FROM test_00687 ORDER BY val;"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2 OFFSET 2;"
echo `${CLICKHOUSE_CLIENT} --query "SELECT TOP 2 * FROM test_00687 ORDER BY val LIMIT 2;" 2>&1 | grep -c "Code: 406"`
echo `${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2,3 OFFSET 2;" 2>&1 | grep -c "Code: 62"`
echo "`"${CLICKHOUSE_CLIENT}" --query "SELECT TOP 2 * FROM test_00687 ORDER BY val LIMIT 2;" 2>&1 | grep -c "Code: 406"`"
echo "`"${CLICKHOUSE_CLIENT}" --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2,3 OFFSET 2;" 2>&1 | grep -c "Code: 62"`"
${CLICKHOUSE_CLIENT} --query "DROP TABLE test_00687;"

View File

@ -15,7 +15,7 @@ function stress()
# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout
export -f stress
for thread in {1..5}; do
for _ in {1..5}; do
# Ten seconds are just barely enough to reproduce the issue in most of runs.
timeout 10 bash -c stress &
done

View File

@ -8,13 +8,13 @@ ${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl"
function query()
{
for i in {1..100}; do
for _ in {1..100}; do
${CLICKHOUSE_CLIENT} --query "CREATE DATABASE IF NOT EXISTS parallel_ddl"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl"
done
}
for i in {1..2}; do
for _ in {1..2}; do
query &
done

View File

@ -8,13 +8,13 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS parallel_ddl"
function query()
{
for i in {1..100}; do
for _ in {1..100}; do
${CLICKHOUSE_CLIENT} --query "CREATE TABLE IF NOT EXISTS parallel_ddl(a Int) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS parallel_ddl"
done
}
for i in {1..2}; do
for _ in {1..2}; do
query &
done

View File

@ -4,12 +4,10 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
cur_name=${BASH_SOURCE[0]}
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS json_parse;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE json_parse (aaa String, bbb String) ENGINE = Memory;"
for n in {1..1000000}; do echo '{"aaa":"aaa","bbb":"bbb"}'; done | curl -sS "${CLICKHOUSE_URL}&query=INSERT%20INTO%20json_parse%20FORMAT%20JSONEachRow" -0 --data-binary @-
for _ in {1..1000000}; do echo '{"aaa":"aaa","bbb":"bbb"}'; done | curl -sS "${CLICKHOUSE_URL}&query=INSERT%20INTO%20json_parse%20FORMAT%20JSONEachRow" -0 --data-binary @-
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM json_parse;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE json_parse;"

View File

@ -4,9 +4,7 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
cur_name=${BASH_SOURCE[0]}
settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1"
settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1"
# Test insert logging on each block and checkPacket() method
@ -18,17 +16,17 @@ CREATE TABLE merge_tree_table (id UInt64, date Date, uid UInt32) ENGINE = MergeT
$CLICKHOUSE_CLIENT $settings -q "INSERT INTO merge_tree_table SELECT (intHash64(number)) % 10000, toDate('2018-08-01'), rand() FROM system.numbers LIMIT 10000000;"
# If merge is already happening, OPTIMIZE will be noop. But we have to ensure that the data is merged.
for i in {1..100}; do $CLICKHOUSE_CLIENT $settings --optimize_throw_if_noop=1 -q "OPTIMIZE TABLE merge_tree_table FINAL;" && break; sleep 1; done
for _ in {1..100}; do $CLICKHOUSE_CLIENT $settings --optimize_throw_if_noop=1 -q "OPTIMIZE TABLE merge_tree_table FINAL;" && break; sleep 1; done
# The query may open more files if query log will be flushed during the query.
# To lower this chance, we also flush logs before the query.
$CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS"
toching_many_parts_query="SELECT count() FROM (SELECT toDayOfWeek(date) AS m, id, count() FROM merge_tree_table GROUP BY id, m ORDER BY count() DESC LIMIT 10 SETTINGS max_threads = 1)"
$CLICKHOUSE_CLIENT $settings -q "$toching_many_parts_query" &> /dev/null
touching_many_parts_query="SELECT count() FROM (SELECT toDayOfWeek(date) AS m, id, count() FROM merge_tree_table GROUP BY id, m ORDER BY count() DESC LIMIT 10 SETTINGS max_threads = 1)"
$CLICKHOUSE_CLIENT $settings -q "$touching_many_parts_query" &> /dev/null
$CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS"
$CLICKHOUSE_CLIENT $settings -q "SELECT pi.Values FROM system.query_log ARRAY JOIN ProfileEvents as pi WHERE query='$toching_many_parts_query' and pi.Names = 'FileOpen' ORDER BY event_time DESC LIMIT 1;"
$CLICKHOUSE_CLIENT $settings -q "SELECT pi.Values FROM system.query_log ARRAY JOIN ProfileEvents as pi WHERE query='$touching_many_parts_query' and pi.Names = 'FileOpen' ORDER BY event_time DESC LIMIT 1;"
$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS merge_tree_table;"

View File

@ -19,7 +19,7 @@ function thread2()
{
while true; do
echo "ALTER TABLE concurrent_alter_column ADD COLUMN d DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter2;
sleep `echo 0.0$RANDOM`;
sleep "`echo 0.0$RANDOM`";
echo "ALTER TABLE concurrent_alter_column DROP COLUMN d" | ${CLICKHOUSE_CLIENT} --query_id=alter2;
done
}
@ -28,7 +28,7 @@ function thread3()
{
while true; do
echo "ALTER TABLE concurrent_alter_column ADD COLUMN e DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter3;
sleep `echo 0.0$RANDOM`;
sleep "`echo 0.0$RANDOM`";
echo "ALTER TABLE concurrent_alter_column DROP COLUMN e" | ${CLICKHOUSE_CLIENT} --query_id=alter3;
done
}
@ -37,7 +37,7 @@ function thread4()
{
while true; do
echo "ALTER TABLE concurrent_alter_column ADD COLUMN f DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter4;
sleep `echo 0.0$RANDOM`;
sleep "`echo 0.0$RANDOM`";
echo "ALTER TABLE concurrent_alter_column DROP COLUMN f" | ${CLICKHOUSE_CLIENT} --query_id=alter4;
done
}
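These three hunks quote the backtick substitution handed to sleep so its result is passed as a single word. Since $RANDOM expands to a plain integer, the intermediate echo is arguably redundant; the same sub-second delay could be built directly, for example:

    sleep "0.0$RANDOM"   # random delay below one second, no subshell needed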

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)'
for i in {1..10}
for _ in {1..10}
do
${CLICKHOUSE_CURL} -sS --data "SELECT count() FROM system.processes WHERE query_id = 'cancel_http_readonly_queries_on_client_close'" "${CLICKHOUSE_URL}" | grep '0' && break
sleep 0.2

View File

@ -5,8 +5,8 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
for i in {1..200}; do echo "drop table if exists view_00840" | $CLICKHOUSE_CLIENT; echo "create view view_00840 as select count(*),database,table from system.columns group by database,table" | $CLICKHOUSE_CLIENT; done &
for i in {1..500}; do echo "select * from view_00840 order by table" | $CLICKHOUSE_CLIENT >/dev/null 2>&1 || true; done &
for _ in {1..200}; do echo "drop table if exists view_00840" | $CLICKHOUSE_CLIENT; echo "create view view_00840 as select count(*),database,table from system.columns group by database,table" | $CLICKHOUSE_CLIENT; done &
for _ in {1..500}; do echo "select * from view_00840 order by table" | $CLICKHOUSE_CLIENT >/dev/null 2>&1 || true; done &
wait

View File

@ -3,11 +3,6 @@
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CUR_DIR/../shell_config.sh
CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY")
[ "$CB_DIR" == "." ] && ROOT_DIR=$CUR_DIR/../../../..
[ "$CB_DIR" != "." ] && BUILD_DIR=$CB_DIR/../..
[ -z "$ROOT_DIR" ] && ROOT_DIR=$CB_DIR/../../..
DATA_FILE=$CUR_DIR/data_orc/test.orc
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load"

View File

@ -31,7 +31,7 @@ DATA_DIR=$CUR_DIR/data_parquet
# BUG! nulls.snappy.parquet - parquet-reader shows wrong structure. Actual structure is {"type":"struct","fields":[{"name":"b_struct","type":{"type":"struct","fields":[{"name":"b_c_int","type":"integer","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}}]}
# why? repeated_no_annotation.parquet
for NAME in `ls -1 $DATA_DIR/*.parquet | xargs -n 1 basename | sort`; do
for NAME in `find $DATA_DIR/*.parquet -print0 | xargs -0 -n 1 basename | sort`; do
echo === Try load data from $NAME
JSON=$DATA_DIR/$NAME.json
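Replacing "ls -1" with find addresses ShellCheck's SC2045 warning that parsing ls output breaks on unusual filenames. The committed form still word-splits the backtick output, though; where the filenames are under the test's control, iterating the glob directly is the usual alternative, roughly:

    for f in "$DATA_DIR"/*.parquet; do
        NAME=$(basename "$f")
        echo "=== Try load data from $NAME"
    done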

View File

@ -6,6 +6,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo 'DROP TABLE IF EXISTS table_for_insert' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
echo 'CREATE TABLE table_for_insert (a UInt8, b UInt8) ENGINE = Memory' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
echo "INSERT INTO table_for_insert VALUES `printf '%*s' "1000000" | sed 's/ /(1, 2)/g'`" | ${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sSg ${CLICKHOUSE_URL} -d @-
echo "INSERT INTO table_for_insert VALUES `printf '%*s' "1000000" "" | sed 's/ /(1, 2)/g'`" | ${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sSg ${CLICKHOUSE_URL} -d @-
echo 'SELECT count(*) FROM table_for_insert' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
echo 'DROP TABLE IF EXISTS table_for_insert' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
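The printf change supplies the string argument that the format string expects (ShellCheck SC2183): '%*s' consumes a width from the argument list and pads the next argument to that width, so padding an empty string to width N yields N spaces, which sed then rewrites into N copies of the row literal:

    printf '%*s' 5 ""                        # five spaces
    printf '%*s' 5 "" | sed 's/ /(1, 2)/g'   # (1, 2)(1, 2)(1, 2)(1, 2)(1, 2)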

View File

@ -37,7 +37,7 @@ sleep 1
timeout 20 $CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query='$query_for_pending' SYNC" &>/dev/null
# Both queries have to be killed, doesn't matter with SYNC or ASYNC kill
for run in {1..15}
for _ in {1..15}
do
sleep 1
no_first_query=`$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes where query='$query_for_pending'"`

View File

@ -7,6 +7,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
for i in {1..100}; do \
for _ in {1..100}; do \
$CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables UNION ALL SELECT name FROM system.columns format Null";
done

View File

@ -3,7 +3,7 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
for i in {1..10}; do
for _ in {1..10}; do
$CLICKHOUSE_BINARY client --send_logs_level="trace" --query="SELECT * from numbers(1000000);" > /dev/null 2> /dev/null &
$CLICKHOUSE_BINARY client --send_logs_level="information" --query="SELECT * from numbers(1000000);" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" &
done

View File

@ -43,7 +43,7 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE ignore(rand())" 2>&1
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 \
| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (x + y) % 2, range(y)) WHERE not ignore()" 2>&1 > /dev/null \
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (x + y) % 2, range(y)) WHERE not ignore()" > /dev/null 2>&1 \
&& echo 'OK' || echo 'FAIL'
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (rand() + x) % 2, range(y)) WHERE not ignore()" 2>&1 \
@ -51,15 +51,15 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (ra
# For regular tables we do not enforce deterministic functions
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" 2>&1 > /dev/null \
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" > /dev/null 2>&1 \
&& echo 'OK' || echo 'FAIL'
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 > /dev/null \
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 UPDATE y = y + rand() % 1 WHERE not ignore()" > /dev/null 2>&1 \
&& echo 'OK' || echo 'FAIL'
# hm... it looks like joinGet condidered determenistic
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = joinGet('${CLICKHOUSE_DATABASE}.lookup_table', 'y_new', y) WHERE x=1" 2>&1 \
| echo 'OK' || echo 'FAIL'
&& echo 'OK' || echo 'FAIL'
${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" 2>&1 \
| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'

View File

@ -35,14 +35,14 @@ function alter_thread()
while true; do
$CLICKHOUSE_CLIENT --allow_experimental_alter_materialized_view_structure=1 -q "${ALTERS[$RANDOM % 2]}"
sleep `echo 0.$RANDOM`;
sleep "`echo 0.$RANDOM`";
done
}
export -f alter_thread;
timeout 10 bash -c alter_thread &
for i in {1..100}; do
for _ in {1..100}; do
# Retry (hopefully retriable (deadlock avoided)) errors.
while true; do
$CLICKHOUSE_CLIENT -q "INSERT INTO src VALUES (1);" 2>/dev/null && break

View File

@ -32,7 +32,7 @@ function insert_thread() {
while true; do
# trigger 50 concurrent inserts at a time
for i in {0..50}; do
for _ in {0..50}; do
# ignore `Possible deadlock avoided. Client should retry`
$CLICKHOUSE_CLIENT -q "${INSERT[$RANDOM % 2]}" 2>/dev/null &
done

View File

@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE b (x UInt64) ENGINE = Memory;"
function thread1()
{
for attempt_thread1 in {1..10}
for _ in {1..10}
do
seq 1 500000 | $CLICKHOUSE_CLIENT --query_id=11 --query="INSERT INTO a FORMAT TSV" &
while true; do
@ -25,7 +25,7 @@ function thread1()
function thread2()
{
for attempt_thread2 in {1..10}
for _ in {1..10}
do
seq 1 500000 | $CLICKHOUSE_CLIENT --query_id=22 --query="INSERT INTO b FORMAT TSV" &
while true; do

View File

@ -7,5 +7,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
${CLICKHOUSE_CLIENT} --query="SELECT avgWeighted(x, weight) FROM (SELECT t.1 AS x, t.2 AS weight FROM (SELECT arrayJoin([(1, 5), (2, 4), (3, 3), (4, 2), (5, 1)]) AS t));"
${CLICKHOUSE_CLIENT} --query="SELECT avgWeighted(x, weight) FROM (SELECT t.1 AS x, t.2 AS weight FROM (SELECT arrayJoin([(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)]) AS t));"
echo `${CLICKHOUSE_CLIENT} --server_logs_file=/dev/null --query="SELECT avgWeighted(toDecimal64(0, 0), toFloat64(0))" 2>&1` \
echo "`"${CLICKHOUSE_CLIENT}" --server_logs_file=/dev/null --query="SELECT avgWeighted(toDecimal64(0, 0), toFloat64(0))" 2>&1`" \
| grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Different types .* of arguments for aggregate function avgWeighted'

View File

@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT --query "SELECT '12 -> ', dictGetInt64('dictdb.dict', 'y', to
$CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (13, 103, now())"
$CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (14, 104, now() - INTERVAL 1 DAY)"
while [ $($CLICKHOUSE_CLIENT --query "SELECT dictGetInt64('dictdb.dict', 'y', toUInt64(13))") = -1 ]
while [ "$("$CLICKHOUSE_CLIENT" --query "SELECT dictGetInt64('dictdb.dict', 'y', toUInt64(13))")" = -1 ]
do
sleep 0.5
done
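Quoting the command substitution inside the [ ] test guards against empty output: unquoted, an empty result leaves "[ = -1 ]", a test syntax error, while the quoted form degrades to a clean string comparison (SC2046 again):

    out=""
    [ $out = -1 ] && echo yes      # error: unary operator expected
    [ "$out" = -1 ] && echo yes    # simply false, prints nothing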

View File

@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT --query="insert into test_01054.ints values (3, 3, 3, 3, 3, 3
function thread1()
{
for attempt_thread1 in {1..100}
for _ in {1..100}
do
RAND_NUMBER_THREAD1=$($CLICKHOUSE_CLIENT --query="SELECT rand() % 100;")
$CLICKHOUSE_CLIENT --query="select dictGet('one_cell_cache_ints', 'i8', toUInt64($RAND_NUMBER_THREAD1));"
@ -26,7 +26,7 @@ function thread1()
function thread2()
{
for attempt_thread2 in {1..100}
for _ in {1..100}
do
RAND_NUMBER_THREAD2=$($CLICKHOUSE_CLIENT --query="SELECT rand() % 100;")
$CLICKHOUSE_CLIENT --query="select dictGet('one_cell_cache_ints', 'i8', toUInt64($RAND_NUMBER_THREAD2));"
@ -36,7 +36,7 @@ function thread2()
function thread3()
{
for attempt_thread3 in {1..100}
for _ in {1..100}
do
RAND_NUMBER_THREAD3=$($CLICKHOUSE_CLIENT --query="SELECT rand() % 100;")
$CLICKHOUSE_CLIENT --query="select dictGet('one_cell_cache_ints', 'i8', toUInt64($RAND_NUMBER_THREAD3));"
@ -46,7 +46,7 @@ function thread3()
function thread4()
{
for attempt_thread4 in {1..100}
for _ in {1..100}
do
RAND_NUMBER_THREAD4=$($CLICKHOUSE_CLIENT --query="SELECT rand() % 100;")
$CLICKHOUSE_CLIENT --query="select dictGet('one_cell_cache_ints', 'i8', toUInt64($RAND_NUMBER_THREAD4));"

View File

@ -20,7 +20,7 @@ $CLICKHOUSE_CLIENT -q "SELECT count() FROM system.parts WHERE table = 'mt_compac
$CLICKHOUSE_CLIENT -q "SYSTEM START MERGES mt_compact"
# Retry because already started concurrent merges may interrupt optimize
for i in {0..10}; do
for _ in {0..10}; do
$CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE mt_compact FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null
if [ $? -eq 0 ]; then
break

View File

@ -35,7 +35,7 @@ LAYOUT(CACHE());
function thread1()
{
for attempt_thread1 in {1..50}
for _ in {1..50}
do
# This query will be ended with exception, because source dictionary has UInt8 as a key type.
$CLICKHOUSE_CLIENT --query="SELECT dictGetFloat64('dictdb_01076.dict_datarace', 'value', toUInt64(1));"
@ -45,7 +45,7 @@ function thread1()
function thread2()
{
for attempt_thread2 in {1..50}
for _ in {1..50}
do
# This query will be ended with exception, because source dictionary has UInt8 as a key type.
$CLICKHOUSE_CLIENT --query="SELECT dictGetFloat64('dictdb_01076.dict_datarace', 'value', toUInt64(2));"

View File

@ -23,7 +23,6 @@ done
function alter_thread()
{
TYPES=(Float64 String UInt8 UInt32)
while true; do
REPLICA=$(($RANDOM % 3 + 1))
ADD=$(($RANDOM % 5 + 1))

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CUR_DIR/../shell_config.sh
for i in $(seq 1 10); do
for _ in $(seq 1 10); do
${CLICKHOUSE_CLIENT} -q "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables'))" 2>/dev/null && break
sleep 0.1
done

View File

@ -3,7 +3,7 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CSV)
PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CSV)
echo '2020-04-21 12:34:56, "Hello", 12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo "CSV"
echo '2020-04-21 12:34:56, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo '2020-04-21 12:34:567, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@ -12,7 +12,7 @@ echo '2020-04-21 12:34:56, "Hello", 12345678,1' | "${PARSER[@]}" 2>&1| grep "ERR
echo '2020-04-21 12:34:56,,123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo -e '2020-04-21 12:34:56, "Hello", 12345678\n' | "${PARSER[@]}" 2>&1| grep "ERROR"
PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparatedIgnoreSpaces --format_custom_escaping_rule CSV --format_custom_field_delimiter ',' --format_custom_row_after_delimiter "")
PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparatedIgnoreSpaces --format_custom_escaping_rule CSV --format_custom_field_delimiter ',' --format_custom_row_after_delimiter "")
echo '2020-04-21 12:34:56, "Hello", 12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nCustomSeparatedIgnoreSpaces"
echo '2020-04-21 12:34:56, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo '2020-04-21 12:34:567, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@ -20,7 +20,7 @@ echo '2020-04-21 12:34:56, "Hello", 12345678,1' | "${PARSER[@]}" 2>&1| grep "ERR
echo '2020-04-21 12:34:56,,123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo -e '2020-04-21 12:34:56, "Hello", 12345678\n\n\n\n ' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo "OK"
PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV)
PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV)
echo -e '2020-04-21 12:34:56\tHello\t12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nTSV"
echo -e '2020-04-21 12:34:56\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo -e '2020-04-21 12:34:567\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@ -28,7 +28,7 @@ echo -e '2020-04-21 12:34:56\tHello\t12345678\t1' | "${PARSER[@]}" 2>&1| grep "E
echo -e '2020-04-21 12:34:56\t\t123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo -e '2020-04-21 12:34:56\tHello\t12345678\n' | "${PARSER[@]}" 2>&1| grep "ERROR"
PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparated)
PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparated)
echo -e '2020-04-21 12:34:56\tHello\t12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nCustomSeparated"
echo -e '2020-04-21 12:34:56\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
echo -e '2020-04-21 12:34:567\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
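Quoting ${CLICKHOUSE_LOCAL} inside the array assignments is ShellCheck's SC2206 fix: an unquoted expansion in arr=( ... ) is word-split and glob-expanded, so a binary path containing a space would shatter into several array elements. With the element quoted, the later "${PARSER[@]}" invocation passes each element as exactly one argument. A sketch with a hypothetical path:

    bin="/opt/click house/clickhouse-local"
    PARSER=($bin --query 'SELECT 1')     # the path splits in two: broken
    PARSER=("$bin" --query 'SELECT 1')   # first element is the whole path
    "${PARSER[@]}"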

View File

@ -3,7 +3,7 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
{ printf "select "; for x in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for x in {1..1000}; do printf ")"; done; } > ${CLICKHOUSE_TMP}/query
{ printf "select "; for _ in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for _ in {1..1000}; do printf ")"; done; } > ${CLICKHOUSE_TMP}/query
cat ${CLICKHOUSE_TMP}/query | $CLICKHOUSE_CLIENT 2>&1 | grep -o -F 'Code: 306'
cat ${CLICKHOUSE_TMP}/query | $CLICKHOUSE_LOCAL 2>&1 | grep -o -F 'Code: 306'

View File

@ -18,7 +18,7 @@ echo "
insert into tableB select number, number % 100000, addDays(toDate('2020-01-01'), number % 90) from numbers(50000000);
" | $CLICKHOUSE_CLIENT -n
for i in {1..1}; do echo "
echo "
SELECT tableName
FROM
(
@ -109,7 +109,7 @@ FROM
) AS a
GROUP BY tableName
ORDER BY tableName ASC;
" | $CLICKHOUSE_CLIENT -n | wc -l ; done;
" | $CLICKHOUSE_CLIENT -n | wc -l
echo "
DROP TABLE tableA;
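Dropping the "for i in {1..1}" wrapper is a simplification rather than a warning fix: a brace range with identical bounds iterates exactly once, so the loop contributed nothing but an unused variable and loop syntax. Schematically, with run_query standing in for the query pipeline:

    for i in {1..1}; do run_query; done   # executes run_query exactly once
    run_query                             # equivalent, and clearer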

View File

@ -5,7 +5,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY")
[ "$CB_DIR" == "." ] && ROOT_DIR=$CUR_DIR/../../../..
[ "$CB_DIR" != "." ] && BUILD_DIR=$CB_DIR/../..
[ -z "$ROOT_DIR" ] && ROOT_DIR=$CB_DIR/../../..
DATA_FILE=$CUR_DIR/data_arrow/test.arrow

View File

@ -7,7 +7,7 @@ $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1"
function optimize()
{
for i in {0..20}; do
for _ in {0..20}; do
$CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE $1 FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null && break
sleep 0.3
done

View File

@ -3,7 +3,7 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
for i in {1..100}; do $CLICKHOUSE_CLIENT --multiquery --query "
for _ in {1..100}; do $CLICKHOUSE_CLIENT --multiquery --query "
DROP TABLE IF EXISTS mt;
CREATE TABLE mt (x UInt8, k UInt8 DEFAULT 0) ENGINE = SummingMergeTree ORDER BY k;

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
function test()
{
for i in {1..1000}; do
for _ in {1..1000}; do
$CLICKHOUSE_CLIENT --max_memory_usage 1G <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10";
done
}

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
function test()
{
for i in {1..250}; do
for _ in {1..250}; do
$CLICKHOUSE_CLIENT --query "SELECT groupArrayIfState(('Hello, world' AS s) || s || s || s || s || s || s || s || s || s, NOT throwIf(number > 10000000, 'Ok')) FROM system.numbers_mt GROUP BY number % 10";
done
}

View File

@ -20,7 +20,7 @@ done | $CLICKHOUSE_CLIENT -n --max_block_size 5
# Randomized test
ITERATIONS=1000
for i in $(seq $ITERATIONS); do
for _ in $(seq $ITERATIONS); do
SIZE=$(($RANDOM % 100))
OFFSET=$(($RANDOM % 111))
LIMIT=$(($RANDOM % 111))