Merge pull request #67964 from rschu1ze/multiquery-followup-new2

Remove obsolete `--multiquery` parameter (follow-up to #63898), pt. III
Robert Schulze 2024-08-12 18:42:53 +00:00 committed by GitHub
commit d03b354550
GPG Key ID: B5690EEEBB952194
73 changed files with 186 additions and 186 deletions
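
Since #63898, `clickhouse-client` and `clickhouse-local` split a `--query`/`-q` string into multiple `;`-separated statements by default, so `-n`/`--multiquery` no longer does anything (the "`-n` is a no-op" test below exercises exactly that). A minimal sketch of the before/after invocations, assuming a reachable server for `clickhouse-client` (`clickhouse-local` needs none):

```bash
# Old spelling: an explicit -n / --multiquery used to be required to run
# several ';'-separated statements from a single -q string.
clickhouse-client -n -q "SELECT 1; SELECT 2;"

# New spelling used throughout this PR: plain -q handles multiple statements;
# per the test below, -n is still accepted but is a no-op.
clickhouse-client -q "SELECT 1; SELECT 2;"

# Combined short flags shrink accordingly: -nq -> -q, -nm -> -m, -nmq -> -mq
# (-m / --multiline is unrelated to multiquery and stays).
clickhouse-local -m -q "
    CREATE TABLE t (x UInt64) ENGINE = Memory;
    INSERT INTO t VALUES (1), (2);
    SELECT count() FROM t;
"
```

Each hunk below applies that same flag substitution to a different stateless test script; the query text itself is unchanged.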

View File

@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# $2 - query
function execute_query()
{
${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -nq "
${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -q "
${2}
"
}
@ -18,7 +18,7 @@ function execute_query()
# so we only need to check db.statement
function check_query_span_query_only()
{
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
SYSTEM FLUSH LOGS;
SELECT attribute['db.statement'] as query
FROM system.opentelemetry_span_log
@ -31,7 +31,7 @@ ${CLICKHOUSE_CLIENT} -nq "
function check_query_span()
{
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
SYSTEM FLUSH LOGS;
SELECT attribute['db.statement'] as query,
attribute['clickhouse.read_rows'] as read_rows,
@ -47,7 +47,7 @@ ${CLICKHOUSE_CLIENT} -nq "
#
# Set up
#
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.opentelemetry_test;
CREATE TABLE ${CLICKHOUSE_DATABASE}.opentelemetry_test (id UInt64) Engine=MergeTree Order By id;
"

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists insert_select_progress_tcp;
create table insert_select_progress_tcp(s UInt16) engine = MergeTree order by s;
"

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table
(
@ -74,7 +74,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, arrayMap(lambda_argument -> l
$CLICKHOUSE_CLIENT -q "WITH 1 AS constant_value SELECT (SELECT constant_valu) SETTINGS enable_analyzer = 1;" 2>&1 \
| grep "Maybe you meant: \['constant_value'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS test_table_compound;
CREATE TABLE test_table_compound
(
@ -142,7 +142,7 @@ $CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constan
$CLICKHOUSE_CLIENT -q "WITH cast(tuple(1), 'Tuple(value_1 String)') AS constant_value SELECT (SELECT constant_value.value_) SETTINGS enable_analyzer = 1;" 2>&1 \
| grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS test_table_1;
CREATE TABLE test_table_1
(
@ -185,7 +185,7 @@ $CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.a.c SETTIN
$CLICKHOUSE_CLIENT -q "SELECT 1";
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE test_table;
DROP TABLE test_table_compound;
DROP TABLE test_table_1;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
database=$($CLICKHOUSE_CLIENT -q 'SELECT currentDatabase()')
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS test_02480_table;
DROP VIEW IF EXISTS test_02480_view;
CREATE TABLE test_02480_table (id Int64) ENGINE=MergeTree ORDER BY id;

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n --query "
$CLICKHOUSE_CLIENT --query "
DROP TABLE IF EXISTS load_parts_refcounts SYNC;
CREATE TABLE load_parts_refcounts (id UInt32)

View File

@ -26,15 +26,15 @@ FROM
ORDER BY number DESC
)
ORDER BY number ASC"
$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query"
$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query"
function run_query {
echo "-- query"
echo "$1"
echo "-- explain"
$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1"
$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1"
echo "-- execute"
$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1"
$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1"
}
echo "-- Enabled query_plan_remove_redundant_sorting"

View File

@ -13,6 +13,6 @@ $CLICKHOUSE_LOCAL --storage_file_read_method=mmap --print-profile-events -q "SEL
$CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferMMap" && echo 'Fail' || echo 0
$CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferOrdinary" && echo 1 || echo 'Fail'
$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -nq "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }"
$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -q "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }"
rm $DATA_FILE

View File

@ -24,15 +24,15 @@ FROM
)
)"
$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query"
$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query"
function run_query {
echo "-- query"
echo "$1"
echo "-- explain"
$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1"
$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1"
echo "-- execute"
$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1"
$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1"
}
echo "-- Enabled $OPTIMIZATION_SETTING"

View File

@ -14,8 +14,8 @@ ${CLICKHOUSE_CLIENT} -q "CREATE USER user_${CLICKHOUSE_DATABASE} settings databa
${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db"
${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}"

View File

@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key Int) engine=MergeTree order by tuple() settings min_bytes_for_wide_part = '1G', compress_marks = 1;
insert into data values (1);

View File

@ -46,7 +46,7 @@ tables["wrong_metadata_compact"]="min_bytes_for_wide_part = 10000000"
for table in "${!tables[@]}"; do
settings="${tables[$table]}"
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
DROP TABLE IF EXISTS $table;
CREATE TABLE $table(
@ -69,7 +69,7 @@ for table in "${!tables[@]}"; do
wait_column "$table" "\`a1\` UInt64" || exit 2
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
-- { echoOn }
SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null;
@ -82,7 +82,7 @@ for table in "${!tables[@]}"; do
wait_mutation_loaded "$table" "b1 TO a" || exit 2
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
-- { echoOn }
SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null;
@ -94,7 +94,7 @@ for table in "${!tables[@]}"; do
wait_for_all_mutations "$table"
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
-- { echoOn }
SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null;

View File

@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists buffer_02572;
${CLICKHOUSE_CLIENT} --ignore-error --query "drop table if exists buffer_02572;
drop table if exists data_02572; drop table if exists copy_02572; drop table if exists mv_02572;"
${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();"
@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);"
if [ $(( $(date +%s) - start )) -gt 6 ]; then # clickhouse test cluster is overloaded, will skip
# ensure that the flush was not direct
${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;"
${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;"
fi
# we cannot use OPTIMIZE, since it will attach a query context, so let's wait
@ -31,7 +31,7 @@ for _ in {1..100}; do
done
${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;"
${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;"
${CLICKHOUSE_CLIENT} --query="system flush logs;"
${CLICKHOUSE_CLIENT} --query="select count() > 0, lower(status::String), errorCodeToName(exception_code)

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
@ -26,7 +26,7 @@ read_methods=(
for read_method in "${read_methods[@]}"; do
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "select * from data format Null settings max_local_read_bandwidth='1M', local_filesystem_read_method='$read_method'"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT
'$read_method',

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10)
# writes 1e6*8 bytes; with 1M bandwidth it should take (8-1)/1=7 seconds
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data select * from numbers(1e6) settings max_local_write_bandwidth='1M'"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT
query_duration_ms >= 7e3,

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -q "insert into data select * from numbers(1e6)"
query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to Disk('backups', '$CLICKHOUSE_DATABASE/data/backup1')" --max_backup_bandwidth=1M > /dev/null
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS;
SELECT
query_duration_ms >= 7e3,

View File

@ -23,7 +23,7 @@ function wait_for_alter()
done
}
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
DROP TABLE IF EXISTS t_limit_mutations SYNC;
CREATE TABLE t_limit_mutations (id UInt64, v UInt64)
@ -48,14 +48,14 @@ SELECT count() FROM system.mutations WHERE database = currentDatabase() AND tabl
SHOW CREATE TABLE t_limit_mutations;
"
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
ALTER TABLE t_limit_mutations UPDATE v = 6 WHERE 1 SETTINGS number_of_mutations_to_throw = 100;
ALTER TABLE t_limit_mutations MODIFY COLUMN v String SETTINGS number_of_mutations_to_throw = 100, alter_sync = 0;
"
wait_for_alter "String"
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
SELECT * FROM t_limit_mutations ORDER BY id;
SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done;
SHOW CREATE TABLE t_limit_mutations;
@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} --query "SYSTEM START MERGES t_limit_mutations"
wait_for_mutation "t_limit_mutations" "0000000003"
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
SELECT * FROM t_limit_mutations ORDER BY id;
SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done;
SHOW CREATE TABLE t_limit_mutations;

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
DROP TABLE IF EXISTS t_mt_async_insert;
DROP TABLE IF EXISTS t_mt_sync_insert;
@ -19,7 +19,7 @@ url="${CLICKHOUSE_URL}&async_insert=0&wait_for_async_insert=1"
${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_async_insert VALUES (1, 'aa'), (2, 'bb')"
${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_sync_insert VALUES (1, 'aa'), (2, 'bb')"
${CLICKHOUSE_CLIENT} -n --query "
${CLICKHOUSE_CLIENT} --query "
SELECT count() FROM t_mt_async_insert;
SELECT count() FROM t_mt_sync_insert;

View File

@ -7,12 +7,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
echo "create table test (x UInt64) engine=Memory;
insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -nm
insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -m
echo "create table test (x UInt64) engine=Memory;
insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -nm --ignore-error
insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -m --ignore-error
echo "create table test (x UInt64) engine=Memory;
insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}
select 1" | $CLICKHOUSE_LOCAL -nm
select 1" | $CLICKHOUSE_LOCAL -m

View File

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm -q """
${CLICKHOUSE_CLIENT} -m -q """
DROP TABLE IF EXISTS with_lonely;
CREATE TABLE with_lonely
@ -23,7 +23,7 @@ ORDER BY (id);
"""
create_optimize_partition() {
${CLICKHOUSE_CLIENT} -nm -q """
${CLICKHOUSE_CLIENT} -m -q """
INSERT INTO with_lonely SELECT number, '$1', number*10, 0 FROM numbers(10);
INSERT INTO with_lonely SELECT number+500000, '$1', number*10, 1 FROM numbers(10);
"""
@ -39,7 +39,7 @@ create_optimize_partition "2022-10-29"
create_optimize_partition "2022-10-30"
create_optimize_partition "2022-10-31"
${CLICKHOUSE_CLIENT} -nm -q """
${CLICKHOUSE_CLIENT} -m -q """
SYSTEM STOP MERGES with_lonely;
INSERT INTO with_lonely SELECT number, '2022-11-01', number*10, 0 FROM numbers(10);

View File

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# head by default prints 10 rows, but querying only 11 rows is not enough, since
# we need to overflow the default pipe size, hence 1 million rows (around
# 6 MiB in text representation, which should definitely be enough).
$CLICKHOUSE_CLIENT --ignore-error -nm --pager head -q "
$CLICKHOUSE_CLIENT --ignore-error -m --pager head -q "
select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR }
select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR }
"

View File

@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "select 1; select 2;"
$CLICKHOUSE_LOCAL -q "select 1; select 2;"
# -n is a no-op
$CLICKHOUSE_CLIENT -n -q "select 1; select 2;"
$CLICKHOUSE_LOCAL -n -q "select 1; select 2;"
$CLICKHOUSE_CLIENT -q "select 1; select 2;"
$CLICKHOUSE_LOCAL -q "select 1; select 2;"
exit 0

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nq "
$CLICKHOUSE_CLIENT -q "
CREATE TABLE t1
(
a UInt32,
@ -57,7 +57,7 @@ ORDER BY
b DESC
FORMAT Null;"
$CLICKHOUSE_CLIENT -nq "
$CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;
SELECT ProfileEvents['SelectedMarks']

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_LOCAL -q "select 42 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native
$CLICKHOUSE_LOCAL -n -q "
$CLICKHOUSE_LOCAL -q "
create table test (x UInt64, y UInt64) engine=Memory;
insert into test (x) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native');
insert into test (y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native');

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_LOCAL -q "select 1 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native
$CLICKHOUSE_LOCAL -n -q "
$CLICKHOUSE_LOCAL -q "
create table test (x UInt64, y UInt64 default 42) engine=Memory;
insert into test select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native');
select * from test;

View File

@ -8,7 +8,7 @@ u1="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection1"
u2="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection2"
u3="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection3"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP NAMED COLLECTION IF EXISTS $u1;
DROP NAMED COLLECTION IF EXISTS $u2;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_async_insert_native_1;
CREATE TABLE t_async_insert_native_1 (id UInt64, s String) ENGINE = MergeTree ORDER BY id;
"
@ -22,7 +22,7 @@ echo '{"id": 1, "s": "aaa"}' \
| $CLICKHOUSE_CLIENT $async_insert_options -q 'INSERT INTO t_async_insert_native_1 FORMAT JSONEachRow {"id": 2, "s": "bbb"}' 2>&1 \
| grep -o "NOT_IMPLEMENTED"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SELECT sum(length(entries.bytes)) FROM system.asynchronous_inserts
WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_1';

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_async_insert_native_2;
CREATE TABLE t_async_insert_native_2 (id UInt64, s String) ENGINE = MergeTree ORDER BY id;
"
@ -18,7 +18,7 @@ echo "(3, 'ccc') (4, 'ddd') (5, 'eee')" | $CLICKHOUSE_CLIENT $async_insert_optio
wait
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SELECT * FROM t_async_insert_native_2 ORDER BY id;
SYSTEM FLUSH LOGS;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_async_insert_native_3;
CREATE TABLE t_async_insert_native_3 (id UInt64, s String) ENGINE = MergeTree ORDER BY id;
"
@ -21,7 +21,7 @@ $CLICKHOUSE_CLIENT $async_insert_options -q "INSERT INTO t_async_insert_native_3
wait
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SELECT format, length(entries.bytes) FROM system.asynchronous_inserts
WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_3'
ORDER BY format;

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_async_insert_native_4;
CREATE TABLE t_async_insert_native_4 (id UInt64) ENGINE = MergeTree ORDER BY id;
"
@ -20,7 +20,7 @@ echo "(2) (3) (4) (5)" | $CLICKHOUSE_CLIENT_WITH_LOG $async_insert_options --asy
-q 'INSERT INTO t_async_insert_native_4 FORMAT Values' 2>&1 \
| grep -c "too much data"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SELECT * FROM t_async_insert_native_4 ORDER BY id;
SYSTEM FLUSH LOGS;

View File

@ -9,7 +9,7 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', a
$CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Alias y, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS"
$CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Materialized 42, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS"
$CLICKHOUSE_LOCAL -n -q "
$CLICKHOUSE_LOCAL -q "
create table test (x UInt64 Ephemeral, y UInt64 default x + 1) engine=Memory;
insert into test (x, y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl');
select * from test;

View File

@ -9,7 +9,7 @@ mkdir -p ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/
rm -rf ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/*
chmod 777 ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/
${CLICKHOUSE_CLIENT} -n -q --ignore-error "
${CLICKHOUSE_CLIENT} -q --ignore-error "
DROP DATABASE IF EXISTS npy_output_02895;
CREATE DATABASE IF NOT EXISTS npy_output_02895;

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS data;
DROP TABLE IF EXISTS data2;
DROP VIEW IF EXISTS mv1;

View File

@ -5,13 +5,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
# setting disabled and no order by or primary key; expect error
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192;
" 2>&1 \ | grep -F -q "You must provide an ORDER BY or PRIMARY KEY expression in the table definition." && echo 'OK' || echo 'FAIL'
# setting disabled and primary key in table definition
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() PRIMARY KEY a SETTINGS index_granularity = 8192;
SHOW CREATE TABLE test_empty_order_by;

View File

@ -15,7 +15,7 @@ if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where clust
cluster=test_cluster_database_replicated
fi
$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q "
$CLICKHOUSE_CLIENT -m --distributed_ddl_output_mode=none -q "
drop table if exists rmt1;
drop table if exists rmt2;
@ -46,7 +46,7 @@ part_name='%'
# wait until there is at least one 'No active replica has part all_0_1_1 or covering part' message in the logs
for _ in {0..50}; do
no_active_repilica_messages=$($CLICKHOUSE_CLIENT -nm -q "
no_active_repilica_messages=$($CLICKHOUSE_CLIENT -m -q "
system flush logs;
select count()
@ -65,7 +65,7 @@ for _ in {0..50}; do
sleep 1
done
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
system start pulling replication log rmt2;
system flush logs;

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
# setting enabled and no order by or primary key
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SET create_table_empty_primary_key_by_default = true;
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192;
@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
" 2>&1 \ | grep -F -q "ORDER BY tuple()" && echo 'OK' || echo 'FAIL'
# setting enabled and per-column primary key
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SET create_table_empty_primary_key_by_default = true;
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8 PRIMARY KEY, b String PRIMARY KEY) ENGINE = MergeTree() SETTINGS index_granularity = 8192;
@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
" 2>&1 \ | grep -F -q "ORDER BY (a, b)" && echo 'OK' || echo 'FAIL'
# setting enabled and primary key in table definition (not per-column or order by)
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SET create_table_empty_primary_key_by_default = true;
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() PRIMARY KEY (a) SETTINGS index_granularity = 8192;
@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
" 2>&1 \ | grep -F -q "ORDER BY a" && echo 'OK' || echo 'FAIL'
# setting enabled and order by in table definition (no primary key)
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SET create_table_empty_primary_key_by_default = true;
DROP TABLE IF EXISTS test_empty_order_by;
CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() ORDER BY (a, b) SETTINGS index_granularity = 8192;

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists src;
create table src (a Int32) engine = MergeTree() order by tuple();
@ -15,14 +15,14 @@ create materialized view mv (a Int32) engine = MergeTree() order by tuple() as s
uuid=$(${CLICKHOUSE_CLIENT} --query "select uuid from system.tables where table='mv' and database == currentDatabase()")
inner_table=".inner_id.${uuid}"
${CLICKHOUSE_CLIENT} -nm --query "drop table \`$inner_table\` sync"
${CLICKHOUSE_CLIENT} -m --query "drop table \`$inner_table\` sync"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
set send_logs_level = 'error';
backup table ${CLICKHOUSE_DATABASE}.\`mv\` to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table mv;
restore table ${CLICKHOUSE_DATABASE}.\`mv\` from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
" | grep -o "RESTORED"

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists src;
create table src (a Int32) engine = MergeTree() order by tuple();
@ -15,18 +15,18 @@ drop table if exists mv;
create materialized view mv to dst (a Int32) as select * from src;
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table src;
backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table mv;
set allow_deprecated_database_ordinary=1;
restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
" | grep -o "RESTORED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists src;
create table src (a Int32) engine = MergeTree() order by tuple();
@ -37,13 +37,13 @@ drop table if exists mv;
create materialized view mv to dst (a Int32) as select * from src;
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table src;
drop table dst;
backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2');
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table mv;
set allow_deprecated_database_ordinary=1;
restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2');

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists test;
set data_type_default_nullable = 0;
create table test (test String) ENGINE = MergeTree() ORDER BY tuple();
@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk
${CLICKHOUSE_CLIENT} --query "show create table test"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table test sync;
set data_type_default_nullable = 1;
restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists test;
set flatten_nested = 0;
create table test (test Array(Tuple(foo String, bar Float64))) ENGINE = MergeTree() ORDER BY tuple();
@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk
${CLICKHOUSE_CLIENT} --query "show create table test"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists test2;
set flatten_nested = 0;
create table test2 (test Nested(foo String, bar Float64)) ENGINE = MergeTree() ORDER BY tuple();
@ -22,7 +22,7 @@ backup table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost to Dis
${CLICKHOUSE_CLIENT} --query "show create table test2"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table test sync;
set flatten_nested = 1;
restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
@ -30,7 +30,7 @@ restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from D
${CLICKHOUSE_CLIENT} --query "show create table test"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table test2 sync;
set flatten_nested = 1;
restore table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2');

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
DROP DICTIONARY IF EXISTS 02907_dictionary;
DROP TABLE IF EXISTS 02907_table;

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
drop table if exists test;
create table test (a Int32) engine = MergeTree() order by tuple();
"
@ -12,10 +12,10 @@ create table test (a Int32) engine = MergeTree() order by tuple();
backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME}
backup_name="Disk('backups', '$backup_id')";
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
backup table ${CLICKHOUSE_DATABASE}.test to $backup_name;
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
select ProfileEvents['BackupEntriesCollectorMicroseconds'] > 10 from system.backups where name='Disk(\'backups\', \'$backup_id\')'
"

View File

@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=0"
$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=1"
$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy', auto, 'array Int64') settings optimize_count_from_files=1"
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
desc file('$CURDIR/data_npy/one_dim.npy');
select number_of_rows from system.schema_inference_cache where format='Npy';
"
$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=0"
$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=1"
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
desc file('$CURDIR/data_npy/npy_big.npy');
select number_of_rows from system.schema_inference_cache where format='Npy';
"

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS 02908_dependent;
DROP TABLE IF EXISTS 02908_main;
@ -14,11 +14,11 @@ $CLICKHOUSE_CLIENT -nm -q "
CREATE TABLE 02908_dependent (a UInt32, ts DateTime) ENGINE = MergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM ${CLICKHOUSE_DATABASE}.02908_main);
"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE 02908_main;
" 2>&1 | grep -F -q "HAVE_DEPENDENT_OBJECTS"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE 02908_dependent;
DROP TABLE 02908_main;
"

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=1;
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=0;
SELECT count() from system.schema_inference_cache where format = 'JSON' and additional_format_info like '%schema_inference_make_columns_nullable%';"

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
CREATE TABLE IF NOT EXISTS ts_data_double_raw
(
device_id UInt32 NOT NULL CODEC(ZSTD),

View File

@ -13,40 +13,40 @@ b_backup="Disk('backups', '$b_backup_id')"
c_backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_c
c_backup="Disk('backups', '$c_backup_id')"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS tbl1;
DROP TABLE IF EXISTS tbl2;
DROP TABLE IF EXISTS tbl3;
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
CREATE TABLE tbl1 (a Int32) ENGINE = MergeTree() ORDER BY tuple();
"
# The following BACKUP command must write backup 'a'.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $a_backup SETTINGS id='$a_backup_id';
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
CREATE TABLE tbl2 (a Int32) ENGINE = MergeTree() ORDER BY tuple();
"
# The following BACKUP command must read backup 'a' and write backup 'b'.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $b_backup SETTINGS id='$b_backup_id', base_backup=$a_backup;
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
CREATE TABLE tbl3 (a Int32) ENGINE = MergeTree() ORDER BY tuple();
"
# The following BACKUP command must read only backup 'b' (and not 'a') and write backup 'c'.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $c_backup SETTINGS id='$c_backup_id', base_backup=$b_backup;
" | grep -o "BACKUP_CREATED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE tbl1;
DROP TABLE tbl2;
DROP TABLE tbl3;
@ -57,28 +57,28 @@ r2_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r2
r3_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r3
# The following RESTORE command must read all 3 backups 'a', 'b', 'c' because the table 'tbl1' was in the first backup.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl1 FROM $c_backup SETTINGS id='$r1_restore_id';
" | grep -o "RESTORED"
# The following RESTORE command must read only 2 backups 'b', 'c' (and not 'a') because the table 'tbl2' was in the second backup.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl2 FROM $c_backup SETTINGS id='$r2_restore_id';
" | grep -o "RESTORED"
# The following RESTORE command must read only 1 backup 'c' (and not 'a' or 'b') because the table 'tbl3' was in the third backup.
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl3 FROM $c_backup SETTINGS id='$r3_restore_id';
" | grep -o "RESTORED"
all_ids="['$a_backup_id', '$b_backup_id', '$c_backup_id', '$r1_restore_id', '$r2_restore_id', '$r3_restore_id']"
id_prefix_len=`expr "${CLICKHOUSE_TEST_UNIQUE_NAME}_" : '.*'`
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
SELECT substr(id, 1 + $id_prefix_len) as short_id, ProfileEvents['BackupsOpenedForRead'], ProfileEvents['BackupsOpenedForWrite'] FROM system.backups WHERE id IN ${all_ids} ORDER BY short_id
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE tbl1;
DROP TABLE tbl2;
DROP TABLE tbl3;

View File

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}"
dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
CREATE DICTIONARY IF NOT EXISTS ${dictname}
(
id UInt64,
@ -23,15 +23,15 @@ ${CLICKHOUSE_CLIENT} -nm --query "
SELECT dictGet(${dictname}, 'value', 1);
"
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
$CLICKHOUSE_CLIENT -m --user="${username}" --query "
SELECT * FROM dictionary(${dictname});
" 2>&1 | grep -o ACCESS_DENIED | uniq
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
$CLICKHOUSE_CLIENT -m --user="${username}" --query "
SELECT dictGet(${dictname}, 'value', 1);
" 2>&1 | grep -o ACCESS_DENIED | uniq
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP DICTIONARY IF EXISTS ${dictname};
DROP USER IF EXISTS ${username};
"

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# We test the dependency on the DROP
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS Sub_distributed;
DROP TABLE IF EXISTS Sub;
DROP TABLE IF EXISTS Mapping;
@ -20,7 +20,7 @@ $CLICKHOUSE_CLIENT -q "
DROP TABLE Mapping;
" 2>&1 | grep -cm1 "HAVE_DEPENDENT_OBJECTS"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE Sub_distributed;
DROP TABLE Sub;
DROP TABLE Mapping;

View File

@ -14,7 +14,7 @@ echo -n 'select 4242' >> "$file2"
$CLICKHOUSE_CLIENT --queries-file "$file1" "$file2" <<<'select 42'
$CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 424242'
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
system flush logs;
select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42' and type != 'QueryStart';
select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242' and type != 'QueryStart';

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
create table test (x UInt64, y UInt32, size UInt64) engine=Memory;
insert into test select c1, c2, _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') settings use_structure_from_insertion_table_in_table_functions=1;
select * from test;

View File

@ -9,12 +9,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# no message at all
echo "thread = 0"
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to'
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to'
# send messages to some threads
echo "thread != 0"
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)'
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)'
# there is no thread with comm="foo", so no signals will be sent
echo "thread_name = 'foo'"
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)'
$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)'

View File

@ -26,7 +26,7 @@ function wait_part()
function restore_failpoints()
{
# restore entry error with failpoints (to avoid endless errors in logs)
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
system enable failpoint replicated_queue_unfail_entries;
system sync replica $failed_replica;
system disable failpoint replicated_queue_unfail_entries;
@ -34,7 +34,7 @@ function restore_failpoints()
}
trap restore_failpoints EXIT
$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q "
$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q "
drop table if exists data_r1;
drop table if exists data_r2;
@ -45,7 +45,7 @@ $CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q "
"
# will fail ALTER_METADATA on one of replicas
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
system enable failpoint replicated_queue_fail_next_entry;
alter table data_r1 drop index value_idx settings alter_sync=0; -- part all_0_0_0_1
@ -80,7 +80,7 @@ fi
# This will create MERGE_PARTS; on the failed replica it will be fetched from the source replica (since it does not have all parts to execute the merge)
$CLICKHOUSE_CLIENT -q "optimize table $success_replica final settings optimize_throw_if_noop=1, alter_sync=1" # part all_0_0_1_1
$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q "
$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q "
insert into $success_replica (key) values (2); -- part all_2_2_0
-- Avoid 'Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet'
system sync replica $success_replica pull;

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
user_name="${CLICKHOUSE_DATABASE}_test_user_02947"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_merge_tree_index;
DROP USER IF EXISTS $user_name;
@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.size
$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK"
$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS t_merge_tree_index;
DROP USER IF EXISTS $user_name;
"

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
CREATE TABLE source_table
(
id UInt64,

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
# CREATE TABLE local (x UInt8) Engine=Memory;
# CREATE TABLE distributed ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), x)
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS local;
DROP TABLE IF EXISTS distributed;
CREATE TABLE local (x UInt8) Engine=Memory;

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS tbl;
CREATE TABLE tbl (a Int32) ENGINE = MergeTree() ORDER BY tuple();
INSERT INTO tbl VALUES (2), (80), (-12345);
@ -14,7 +14,7 @@ backup_name="Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}')"
${CLICKHOUSE_CLIENT} --query "BACKUP TABLE tbl TO ${backup_name} FORMAT Null"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE tbl;
RESTORE ALL FROM ${backup_name} FORMAT Null
"

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nq """
${CLICKHOUSE_CLIENT} -q """
CREATE TABLE t1_local
(
n UInt64,

View File

@ -35,7 +35,7 @@ THIS_RUN+=" deduplicate_src_table=$deduplicate_src_table"
THIS_RUN+=" deduplicate_dst_table=$deduplicate_dst_table"
THIS_RUN+=" insert_unique_blocks=$insert_unique_blocks"
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
$(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
--insert-method $insert_method \
--table-engine $engine \
@ -48,7 +48,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
)
" 1>/dev/null 2>&1 && echo 'insert_several_blocks_into_table OK' || echo "FAIL: insert_several_blocks_into_table ${THIS_RUN}"
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
$(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \
--insert-method $insert_method \
--table-engine $engine \
@ -61,7 +61,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
)
" 1>/dev/null 2>&1 && echo 'mv_generates_several_blocks OK' || echo "FAIL: mv_generates_several_blocks ${THIS_RUN}"
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
$(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \
--insert-method $insert_method \
--table-engine $engine \

View File

@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
${CLICKHOUSE_CLIENT} --query "drop table if exists 03008_test_local_mt sync"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
create table 03008_test_local_mt (a Int32, b Int64, c Int64)
engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b)
settings disk = disk(
@ -19,35 +19,35 @@ settings disk = disk(
path = '/var/lib/clickhouse/disks/local_plain_rewritable/')
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
insert into 03008_test_local_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5);
insert into 03008_test_local_mt (*) select number, number, number from numbers_mt(10000);
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
select count(*) from 03008_test_local_mt;
select (*) from 03008_test_local_mt order by tuple(a, b) limit 10;
"
${CLICKHOUSE_CLIENT} --query "optimize table 03008_test_local_mt final;"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
alter table 03008_test_local_mt modify setting disk = '03008_local_plain_rewritable', old_parts_lifetime = 3600;
select engine_full from system.tables WHERE database = currentDatabase() AND name = '03008_test_local_mt';
" | grep -c "old_parts_lifetime = 3600"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
select count(*) from 03008_test_local_mt;
select (*) from 03008_test_local_mt order by tuple(a, b) limit 10;
"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
alter table 03008_test_local_mt update c = 0 where a % 2 = 1;
alter table 03008_test_local_mt add column d Int64 after c;
alter table 03008_test_local_mt drop column c;
" 2>&1 | grep -Fq "SUPPORT_IS_DISABLED"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
truncate table 03008_test_local_mt;
select count(*) from 03008_test_local_mt;
"

View File

@ -6,15 +6,15 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
tmp_file="$CUR_DIR/$CLICKHOUSE_DATABASE.txt"
echo '# foo'
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" <<<foo
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" <<<foo
cat "$tmp_file"
echo '# !foo'
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') where x != 'foo' format LineAsString" <<<foo
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') where x != 'foo' format LineAsString" <<<foo
cat "$tmp_file"
echo '# bar'
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select y from input('x String, y String') format TSV" <<<$'foo\tbar'
$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select y from input('x String, y String') format TSV" <<<$'foo\tbar'
cat "$tmp_file"
echo '# defaults'
$CLICKHOUSE_LOCAL --input_format_tsv_empty_as_default=1 --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select y from input('x String, y String DEFAULT \\'bam\\'') format TSV" <<<$'foo\t'
$CLICKHOUSE_LOCAL --input_format_tsv_empty_as_default=1 --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select y from input('x String, y String DEFAULT \\'bam\\'') format TSV" <<<$'foo\t'
cat "$tmp_file"
rm -f "${tmp_file:?}"

View File

@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS tbl;
DROP TABLE IF EXISTS tbl2;
CREATE TABLE tbl (a Int32) ENGINE = MergeTree() ORDER BY tuple();
@ -51,7 +51,7 @@ wait_status "${restore_operation_id}" "RESTORED"
# Check the result of that restoration.
${CLICKHOUSE_CLIENT} --query "SELECT * FROM tbl2"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE tbl;
DROP TABLE tbl2;
"

View File

@ -21,7 +21,7 @@ CHANGED_SETTING_VALUE="42"
SHOW_CURRENT_ROLES_QUERY="SELECT role_name FROM system.current_roles ORDER BY role_name ASC"
SHOW_CHANGED_SETTINGS_QUERY="SELECT name, value FROM system.settings WHERE changed = 1 AND name = '$CHANGED_SETTING_NAME' ORDER BY name ASC"
$CLICKHOUSE_CLIENT -n --query "
$CLICKHOUSE_CLIENT --query "
DROP USER IF EXISTS $TEST_USER;
DROP ROLE IF EXISTS $TEST_ROLE1;
DROP ROLE IF EXISTS $TEST_ROLE2;
@ -94,7 +94,7 @@ OUT=$($CLICKHOUSE_CURL -u $TEST_USER_AUTH -sS "$CLICKHOUSE_URL&role=$TEST_ROLE1&
echo -ne $OUT | grep -o "Code: 512" || echo "expected code 512, got: $OUT"
echo -ne $OUT | grep -o "SET_NON_GRANTED_ROLE" || echo "expected SET_NON_GRANTED_ROLE error, got: $OUT"
$CLICKHOUSE_CLIENT -n --query "
$CLICKHOUSE_CLIENT --query "
DROP USER $TEST_USER;
DROP ROLE $TEST_ROLE1;
DROP ROLE $TEST_ROLE2;

View File

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
INPUT_FILE=$CUR_DIR/$CLICKHOUSE_DATABASE.tsv
echo "foo" > "$INPUT_FILE"
$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -nm -q "
$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -m -q "
select * from t;
select * from t;
"

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
DROP TABLE IF EXISTS t;
CREATE TABLE t(a UInt32, b UInt32, c UInt32, d UInt32) ENGINE=MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part=0, min_rows_for_wide_part=0;
@ -25,7 +25,7 @@ client_opts=(
--max_threads 8
)
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -nq "
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -q "
SELECT *
FROM t
PREWHERE (b % 8192) = 42
@ -33,7 +33,7 @@ PREWHERE (b % 8192) = 42
FORMAT Null
"
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -nq "
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -q "
SELECT *
FROM t
PREWHERE (b % 8192) = 42 AND (c % 8192) = 42
@ -42,7 +42,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42
settings enable_multiple_prewhere_read_steps=1;
"
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -nq "
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -q "
SELECT *
FROM t
PREWHERE (b % 8192) = 42 AND (c % 16384) = 42
@ -51,7 +51,7 @@ PREWHERE (b % 8192) = 42 AND (c % 16384) = 42
settings enable_multiple_prewhere_read_steps=0;
"
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -nq "
${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -q "
SELECT b, c
FROM t
PREWHERE (b % 8192) = 42 AND (c % 8192) = 42
@ -59,7 +59,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42
settings enable_multiple_prewhere_read_steps=1;
"
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
SYSTEM FLUSH LOGS;
-- 52503 which is 43 * number of granules, 10000000

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists tp_1;
create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100) settings max_parts_to_merge_at_once=1;
insert into tp_1 select number, number from numbers(3);
@ -25,7 +25,7 @@ alter table tp_1 drop projection pp;
alter table tp_1 attach partition '0';
"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
set send_logs_level='fatal';
check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj"
@ -34,19 +34,19 @@ $CLICKHOUSE_CLIENT -q "
backup table tp_1 to Disk('backups', '$backup_id');
" | grep -o "BACKUP_CREATED"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
set send_logs_level='fatal';
drop table tp_1;
restore table tp_1 from Disk('backups', '$backup_id');
" | grep -o "RESTORED"
$CLICKHOUSE_CLIENT -q "select count() from tp_1;"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
set send_logs_level='fatal';
check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
set send_logs_level='fatal';
check table tp_1"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
set send_logs_level='fatal';
drop table tp_1"

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
CREATE TABLE event_envoy
(
timestamp_interval DateTime CODEC(DoubleDelta),
@ -18,7 +18,7 @@ ${CLICKHOUSE_CLIENT} -nq "
INSERT INTO event_envoy SELECT now() - number, 'us-east-1', 'ch_super_fast' FROM numbers_mt(1e5);
"
${CLICKHOUSE_CLIENT} -nq "
${CLICKHOUSE_CLIENT} -q "
CREATE TABLE event_envoy_remote
(
timestamp_interval DateTime CODEC(DoubleDelta),

View File

@ -53,6 +53,6 @@ SELECT * FROM TEST2 ORDER BY value;
DROP TABLE TEST1; DROP TABLE TEST2;
EOF
$CLICKHOUSE_CLIENT -m -n < "$SQL_FILE_NAME"
$CLICKHOUSE_CLIENT -m < "$SQL_FILE_NAME"
rm "$SQL_FILE_NAME"

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv
sleep 1
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
select _size, (dateDiff('millisecond', _time, now()) < 600000 AND dateDiff('millisecond', _time, now()) > 0) from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv');
"
rm $CLICKHOUSE_TEST_UNIQUE_NAME.csv

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nq "
$CLICKHOUSE_CLIENT -q "
CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple();
INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever');

View File

@ -5,25 +5,25 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
echo -e 'a,b,c\n1,2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=1;
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=0;
SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%skip_first_lines%';"
echo -e 'a,b,c\n"1",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=1;
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=0;
SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_numbers_from_strings%';"
echo -e 'a,b,c\n"(1,2,3)",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=1;
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=0;
SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_strings_from_quoted_tuples%';"
echo -e 'a\tb\tc\n1\t2\t3' > $CLICKHOUSE_TEST_UNIQUE_NAME.tsv
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=1;
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=0;
SELECT count() from system.schema_inference_cache where format = 'TSV' and additional_format_info like '%skip_first_lines%';"

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n "
$CLICKHOUSE_CLIENT "
DROP TABLE IF EXISTS t_unload_primary_key;
CREATE TABLE t_unload_primary_key (a UInt64, b UInt64)
@ -26,7 +26,7 @@ for _ in {1..100}; do
sleep 0.3
done
$CLICKHOUSE_CLIENT -n "
$CLICKHOUSE_CLIENT "
SELECT name, active, primary_key_bytes_in_memory FROM system.parts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_unload_primary_key' ORDER BY name;
DROP TABLE IF EXISTS t_unload_primary_key;
"

View File

@ -8,7 +8,7 @@ username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}"
dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}"
dicttablename="dict_table_${CLICKHOUSE_TEST_UNIQUE_NAME}"
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
CREATE DICTIONARY IF NOT EXISTS ${dictname}
(
id UInt64,
@ -26,15 +26,15 @@ ${CLICKHOUSE_CLIENT} -nm --query "
SELECT * FROM ${dicttablename};
"
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
$CLICKHOUSE_CLIENT -m --user="${username}" --query "
SELECT * FROM ${dictname};
" 2>&1 | grep -o ACCESS_DENIED | uniq
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
$CLICKHOUSE_CLIENT -m --user="${username}" --query "
SELECT * FROM ${dicttablename};
" 2>&1 | grep -o ACCESS_DENIED | uniq
${CLICKHOUSE_CLIENT} -nm --query "
${CLICKHOUSE_CLIENT} -m --query "
DROP TABLE IF EXISTS ${dicttablename} SYNC;
DROP DICTIONARY IF EXISTS ${dictname};
DROP USER IF EXISTS ${username};

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json
$CLICKHOUSE_LOCAL -nm -q "
$CLICKHOUSE_LOCAL -m -q "
DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS input_format_max_bytes_to_read_for_schema_inference=1000;
SELECT additional_format_info from system.schema_inference_cache"