diff --git a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
index 4b960229419..26d44d36a98 100755
--- a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
+++ b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
@@ -18,9 +18,9 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO test_optimize_exception_replicated VAL
 ${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201709 FINAL"
 ${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201709 FINAL"
 
-echo "$("${CLICKHOUSE_CLIENT}" --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201710" 2>&1)" \
+echo "$(${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception PARTITION 201710" 2>&1)" \
 | grep -c 'Code: 388. DB::Exception: .* DB::Exception: .* Cannot select parts for optimization'
-echo "$("${CLICKHOUSE_CLIENT}" --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201710" 2>&1)" \
+echo "$(${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --server_logs_file=/dev/null --query="OPTIMIZE TABLE test_optimize_exception_replicated PARTITION 201710" 2>&1)" \
 | grep -c 'Code: 388. DB::Exception: .* DB::Exception:.* Cannot select parts for optimization'
 
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE test_optimize_exception NO DELAY"
diff --git a/tests/queries/0_stateless/00387_use_client_time_zone.sh b/tests/queries/0_stateless/00387_use_client_time_zone.sh
index e56a1ff6874..a86f866a6ad 100755
--- a/tests/queries/0_stateless/00387_use_client_time_zone.sh
+++ b/tests/queries/0_stateless/00387_use_client_time_zone.sh
@@ -3,4 +3,4 @@
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
-env TZ=UTC "$CLICKHOUSE_CLIENT" --use_client_time_zone=1 --query="SELECT toDateTime(1000000000)"
+env TZ=UTC ${CLICKHOUSE_CLIENT} --use_client_time_zone=1 --query="SELECT toDateTime(1000000000)"
diff --git a/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
index f4a93779bb8..a526fe00e1b 100755
--- a/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
+++ b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
@@ -10,7 +10,7 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_00575;"
 ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_00575 (dt Date DEFAULT now(), id UInt32, id2 UInt32 DEFAULT id + 1) ENGINE = MergeTree(dt, dt, 8192);"
 ${CLICKHOUSE_CLIENT} --query "INSERT INTO test_00575(dt,id) VALUES ('2018-02-22',3), ('2018-02-22',4), ('2018-02-22',5);"
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00575 ORDER BY id;"
-echo "$("${CLICKHOUSE_CLIENT}" --query "ALTER TABLE test_00575 DROP COLUMN id;" --server_logs_file=/dev/null 2>&1 | grep -c "$exception_pattern")"
+echo "$(${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id;" --server_logs_file=/dev/null 2>&1 | grep -c "$exception_pattern")"
 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id2;"
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00575 ORDER BY id;"
 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_00575 DROP COLUMN id;"
diff --git a/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
index ae4b1bab54f..f4e88b21b75 100755
--- a/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
+++ b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
@@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} --query "INSERT INTO test_truncate.test_view_depend VALUES(
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_truncate.test_view;"
 
 ${CLICKHOUSE_CLIENT} --query "SELECT '========Execute Truncate========';"
-echo "$("${CLICKHOUSE_CLIENT}" --query "TRUNCATE TABLE test_truncate.test_view;" --server_logs_file=/dev/null 2>&1 | grep -c "Code: 48.*Truncate is not supported by storage View")"
+echo "$(${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE test_truncate.test_view;" --server_logs_file=/dev/null 2>&1 | grep -c "Code: 48.*Truncate is not supported by storage View")"
 
 ${CLICKHOUSE_CLIENT} --query "SELECT '========After Truncate========';"
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_truncate.test_view;"
diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
index c6f72dee9c0..c4f30d2590c 100755
--- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
+++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
@@ -43,7 +43,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO unsigned_integer_test_table VALUES (0)
 ${CLICKHOUSE_CLIENT} --query="INSERT INTO enum_test_table VALUES ('hello'), ('world'), ('world'), ('yandex'), ('clickhouse'), ('clickhouse');"
 ${CLICKHOUSE_CLIENT} --query="INSERT INTO date_test_table VALUES (1), (2), (2), (256), (257), (257);"
 
-CLICKHOUSE_CLIENT=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g')
+CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g')
 
 ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: String -> UInt64"
 ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM fixed_string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: FixedString -> UInt64"
@@ -78,7 +78,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM date_test_table WHERE toUInt16
 ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM date_test_table WHERE toInt8(val) == 1;" 2>&1 |grep -q "5 marks to read from" && echo "monotonic int case: Date -> Int8"
 ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM date_test_table WHERE toUInt8(val) == 1;" 2>&1 |grep -q "5 marks to read from" && echo "monotonic int case: Date -> UInt8"
 
-CLICKHOUSE_CLIENT=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/--send_logs_level=debug/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/g')
+CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/--send_logs_level=debug/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/g')
 
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS string_test_table;"
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS fixed_string_test_table;"
diff --git a/tests/queries/0_stateless/00687_top_and_offset.sh b/tests/queries/0_stateless/00687_top_and_offset.sh
index e4bda0c4e72..fb7dc2b4d90 100755
--- a/tests/queries/0_stateless/00687_top_and_offset.sh
+++ b/tests/queries/0_stateless/00687_top_and_offset.sh
@@ -22,7 +22,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT TOP 2 * FROM test_00687 ORDER BY val;"
 ${CLICKHOUSE_CLIENT} --query "SELECT TOP (2) * FROM test_00687 ORDER BY val;"
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2 OFFSET 2;"
 
-echo "$("${CLICKHOUSE_CLIENT}" --query "SELECT TOP 2 * FROM test_00687 ORDER BY val LIMIT 2;" 2>&1 | grep -c "Code: 406")"
-echo "$("${CLICKHOUSE_CLIENT}" --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2,3 OFFSET 2;" 2>&1 | grep -c "Code: 62")"
+echo "$(${CLICKHOUSE_CLIENT} --query "SELECT TOP 2 * FROM test_00687 ORDER BY val LIMIT 2;" 2>&1 | grep -c "Code: 406")"
+echo "$(${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_00687 ORDER BY val LIMIT 2,3 OFFSET 2;" 2>&1 | grep -c "Code: 62")"
 
 ${CLICKHOUSE_CLIENT} --query "DROP TABLE test_00687;"
diff --git a/tests/queries/0_stateless/00909_kill_not_initialized_query.sh b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
index bc09dc5d575..bc2a42bd708 100755
--- a/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
+++ b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
@@ -34,7 +34,7 @@ $CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query='$query_to_kill' ASYNC" &>/dev/nul
 sleep 1
 
 # Kill $query_for_pending SYNC. This query is not blocker, so it should be killed fast.
-timeout 20 "$CLICKHOUSE_CLIENT" -q "KILL QUERY WHERE query='$query_for_pending' SYNC" &>/dev/null
+timeout 20 ${CLICKHOUSE_CLIENT} -q "KILL QUERY WHERE query='$query_for_pending' SYNC" &>/dev/null
 
 # Both queries have to be killed, doesn't matter with SYNC or ASYNC kill
 for _ in {1..15}
diff --git a/tests/queries/0_stateless/00995_exception_while_insert.sh b/tests/queries/0_stateless/00995_exception_while_insert.sh
index 60be93fccf2..0c318c727ea 100755
--- a/tests/queries/0_stateless/00995_exception_while_insert.sh
+++ b/tests/queries/0_stateless/00995_exception_while_insert.sh
@@ -3,7 +3,7 @@
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
-CLICKHOUSE_CLIENT=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g')
+CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g')
 
 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS check;"
 
diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
index a3eab48ad99..078b49da940 100755
--- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
+++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
@@ -18,7 +18,7 @@ ${CLICKHOUSE_CLIENT} -n -q "
     INSERT INTO $R1 VALUES (1)
 "
 
-timeout 10s "${CLICKHOUSE_CLIENT}" -n -q "
+timeout 10s ${CLICKHOUSE_CLIENT} -n -q "
     SET receive_timeout=1;
     SYSTEM SYNC REPLICA $R2
 " 2>&1 | grep -F -q "Code: 159. DB::Exception" && echo 'OK' || echo 'Failed!'
diff --git a/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh b/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
index edfb060bc19..0767aef9a92 100755
--- a/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
+++ b/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
@@ -3,7 +3,7 @@
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
-CLICKHOUSE_CLIENT=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g')
+CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g')
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM (SELECT number % 5 AS a, count() AS b, c FROM numbers(10) ARRAY JOIN [1,2] AS c GROUP BY a,c) AS table ORDER BY a LIMIT 3 WITH TIES BY a" 2>&1 | grep -q "Code: 498." && echo 'OK' || echo 'FAIL' ||:
 
diff --git a/tests/queries/0_stateless/01035_avg_weighted.sh b/tests/queries/0_stateless/01035_avg_weighted.sh
index c30731fbbbe..023ec50db2f 100755
--- a/tests/queries/0_stateless/01035_avg_weighted.sh
+++ b/tests/queries/0_stateless/01035_avg_weighted.sh
@@ -7,5 +7,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 ${CLICKHOUSE_CLIENT} --query="SELECT avgWeighted(x, weight) FROM (SELECT t.1 AS x, t.2 AS weight FROM (SELECT arrayJoin([(1, 5), (2, 4), (3, 3), (4, 2), (5, 1)]) AS t));"
 ${CLICKHOUSE_CLIENT} --query="SELECT avgWeighted(x, weight) FROM (SELECT t.1 AS x, t.2 AS weight FROM (SELECT arrayJoin([(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)]) AS t));"
 
-echo "$("${CLICKHOUSE_CLIENT}" --server_logs_file=/dev/null --query="SELECT avgWeighted(toDecimal64(0, 0), toFloat64(0))" 2>&1)" \
+echo "$(${CLICKHOUSE_CLIENT} --server_logs_file=/dev/null --query="SELECT avgWeighted(toDecimal64(0, 0), toFloat64(0))" 2>&1)" \
 | grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Different types .* of arguments for aggregate function avgWeighted'
diff --git a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
index d9fcf388fef..f03f7511a4f 100755
--- a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
+++ b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
@@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT --query "SELECT '12 -> ', dictGetInt64('dictdb.dict', 'y', to
 $CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (13, 103, now())"
 $CLICKHOUSE_CLIENT --query "INSERT INTO dictdb.table VALUES (14, 104, now() - INTERVAL 1 DAY)"
 
-while [ "$("$CLICKHOUSE_CLIENT" --query "SELECT dictGetInt64('dictdb.dict', 'y', toUInt64(13))")" = -1 ]
+while [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('dictdb.dict', 'y', toUInt64(13))")" = -1 ]
 do
   sleep 0.5
 done
diff --git a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
index 84634d7d871..f351b3be8ff 100755
--- a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
+++ b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
@@ -96,7 +96,7 @@ done
 echo "Equal number of columns"
 
 # This alter will finish all previous, but replica 1 maybe still not up-to-date
-while [[ $(timeout 120 "$CLICKHOUSE_CLIENT" --query "ALTER TABLE concurrent_alter_add_drop_1 MODIFY COLUMN value0 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
+while [[ $(timeout 120 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE concurrent_alter_add_drop_1 MODIFY COLUMN value0 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
     sleep 1
 done
 
diff --git a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
index 9c20a1c8fc7..d35f40137e1 100755
--- a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
+++ b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
@@ -90,7 +90,7 @@ for i in $(seq $REPLICAS); do
 done
 
 # This alter will finish all previous, but replica 1 maybe still not up-to-date
-while [[ $(timeout 120 "$CLICKHOUSE_CLIENT" --query "ALTER TABLE concurrent_alter_detach_1 MODIFY COLUMN value1 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
+while [[ $(timeout 120 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE concurrent_alter_detach_1 MODIFY COLUMN value1 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
     sleep 1
     # just try to attach table if it failed for some reason in the code above
     for i in $(seq $REPLICAS); do
diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
index 88d6da6fefe..859425241eb 100755
--- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
+++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
@@ -107,7 +107,7 @@ echo "Finishing alters"
 #
 # 120 seconds is more than enough, but in rare cases for slow builds (debug,
 # thread) it maybe necessary.
-while [[ $(timeout 120 "$CLICKHOUSE_CLIENT" --query "ALTER TABLE concurrent_alter_mt_1 MODIFY COLUMN value1 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
+while [[ $(timeout 120 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE concurrent_alter_mt_1 MODIFY COLUMN value1 String SETTINGS replication_alter_partitions_sync=2" 2>&1) ]]; do
    sleep 1
 done
 
diff --git a/tests/queries/0_stateless/01085_max_distributed_connections.sh b/tests/queries/0_stateless/01085_max_distributed_connections.sh
index 317f1c10cb7..c895b029bac 100755
--- a/tests/queries/0_stateless/01085_max_distributed_connections.sh
+++ b/tests/queries/0_stateless/01085_max_distributed_connections.sh
@@ -10,4 +10,4 @@ opts=(
 )
 # 1.8 less then 2 seconds, but long enough to cover possible load peaks
 # "$@" left to pass manual options (like --experimental_use_processors 0) during manual testing
-timeout 1.8s "$CLICKHOUSE_CLIENT" "${opts[@]}" "$@"
+timeout 1.8s ${CLICKHOUSE_CLIENT} "${opts[@]}" "$@"
diff --git a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
index 4b15156aa0c..23d609cfea6 100755
--- a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
+++ b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
@@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 query="SELECT sleepEachRow(1) FROM remote('127.{2,3}', system.one)"
 # 1.8 less then 2 seconds, but long enough to cover possible load peaks
-timeout 1.8s "${CLICKHOUSE_CURL}" -sS "${CLICKHOUSE_URL}&max_distributed_connections=2&max_threads=1" -d "$query"
+timeout 1.8s ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_distributed_connections=2&max_threads=1" -d "$query"
diff --git a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
index 9bd69694816..30319e7cfea 100755
--- a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
+++ b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # Don't even try to do that. This test should be disabled for sanitizer builds.
 ${CLICKHOUSE_LOCAL} --query "SELECT max(value LIKE '%sanitize%') FROM system.build_options" | grep -q '1' && echo 'Skip test for sanitizer build' && exit
 
-command=$(command -v "${CLICKHOUSE_LOCAL}")
+command=$(command -v ${CLICKHOUSE_LOCAL})
 
 function run_with_cpu()
 {
diff --git a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh
index 453b7826e66..fce0b0ec974 100755
--- a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh
+++ b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh
@@ -3,7 +3,7 @@
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
-PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CSV)
+PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CSV)
 echo '2020-04-21 12:34:56, "Hello", 12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo "CSV"
 echo '2020-04-21 12:34:56, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo '2020-04-21 12:34:567, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@@ -12,7 +12,7 @@ echo '2020-04-21 12:34:56, "Hello", 12345678,1' | "${PARSER[@]}" 2>&1| grep "ERR
 echo '2020-04-21 12:34:56,,123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo -e '2020-04-21 12:34:56, "Hello", 12345678\n' | "${PARSER[@]}" 2>&1| grep "ERROR"
 
-PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparatedIgnoreSpaces --format_custom_escaping_rule CSV --format_custom_field_delimiter ',' --format_custom_row_after_delimiter "")
+PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparatedIgnoreSpaces --format_custom_escaping_rule CSV --format_custom_field_delimiter ',' --format_custom_row_after_delimiter "")
 echo '2020-04-21 12:34:56, "Hello", 12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nCustomSeparatedIgnoreSpaces"
 echo '2020-04-21 12:34:56, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo '2020-04-21 12:34:567, "Hello", 123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@@ -20,7 +20,7 @@ echo '2020-04-21 12:34:56, "Hello", 12345678,1' | "${PARSER[@]}" 2>&1| grep "ERR
 echo '2020-04-21 12:34:56,,123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo -e '2020-04-21 12:34:56, "Hello", 12345678\n\n\n\n ' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo "OK"
 
-PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV)
+PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV)
 echo -e '2020-04-21 12:34:56\tHello\t12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nTSV"
 echo -e '2020-04-21 12:34:56\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo -e '2020-04-21 12:34:567\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
@@ -28,7 +28,7 @@ echo -e '2020-04-21 12:34:56\tHello\t12345678\t1' | "${PARSER[@]}" 2>&1| grep "E
 echo -e '2020-04-21 12:34:56\t\t123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo -e '2020-04-21 12:34:56\tHello\t12345678\n' | "${PARSER[@]}" 2>&1| grep "ERROR"
 
-PARSER=("${CLICKHOUSE_LOCAL}" --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparated)
+PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CustomSeparated)
 echo -e '2020-04-21 12:34:56\tHello\t12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nCustomSeparated"
 echo -e '2020-04-21 12:34:56\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
 echo -e '2020-04-21 12:34:567\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR"
diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh
index 357700d0e26..edace0c3530 100755
--- a/tests/queries/0_stateless/01238_http_memory_tracking.sh
+++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh
@@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)
 yes 'SELECT 1' 2>/dev/null | {
     head -n1000
 } | {
-    xargs -i "${CLICKHOUSE_CURL}" -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}'
+    xargs -i ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}'
 } | grep -x -c 1
 
 wait
diff --git a/tests/queries/0_stateless/01249_flush_interactive.sh b/tests/queries/0_stateless/01249_flush_interactive.sh
index 71b6116f2e5..01b91cbace8 100755
--- a/tests/queries/0_stateless/01249_flush_interactive.sh
+++ b/tests/queries/0_stateless/01249_flush_interactive.sh
@@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # unless the my-program will try to output a thousand more lines overflowing pipe buffer and terminating with Broken Pipe.
 # But if my program just output 5 (or slightly more) lines and hang up, the pipeline is not terminated.
 
-timeout 1 "${CLICKHOUSE_LOCAL}" --max_execution_time 10 --query "SELECT DISTINCT number % 5 FROM system.numbers" ||:
+timeout 1 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query "SELECT DISTINCT number % 5 FROM system.numbers" ||:
 echo '---'
-timeout 1 "${CLICKHOUSE_CURL}" -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary "SELECT DISTINCT number % 5 FROM system.numbers" ||:
+timeout 1 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary "SELECT DISTINCT number % 5 FROM system.numbers" ||:
 echo '---'