commit 0eb0830a13
Robert Schulze, 2024-09-16 15:26:08 +02:00, committed by GitHub
83 changed files with 298 additions and 299 deletions
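The change is mechanical across the whole commit: clickhouse-client and clickhouse-local invocations drop the -n / --multiquery flag, and combined short options shrink accordingly (-nm becomes -m, -nq becomes -q), apparently because multi-statement input no longer has to be requested explicitly. A minimal before/after sketch of the pattern (the statements are placeholders):

# before: multi-statement input had to be enabled explicitly
clickhouse-client --multiquery --query "SELECT 1; SELECT 2;"

# after: the flag is gone and the same input still runs both statements
clickhouse-client --query "SELECT 1; SELECT 2;"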

View File

@@ -105,7 +105,7 @@ setup_logs_replication
 clickhouse-client --query "SHOW DATABASES"
 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@@ -62,7 +62,7 @@ start_server
 setup_logs_replication
 clickhouse-client --query "CREATE DATABASE datasets"
-clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
+clickhouse-client < /repo/tests/docker_scripts/create.sql
 clickhouse-client --query "SHOW TABLES FROM datasets"
 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

View File

@@ -89,7 +89,6 @@ class Client:
         command = self.command[:]
         if stdin is None:
-            command += ["--multiquery"]
             stdin = sql
         else:
             command += ["--query", sql]
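The helper above used to append --multiquery whenever it fed the SQL through stdin; after the change the statements are piped in bare. A rough shell equivalent of that stdin path, assuming the new default behavior (the statements are placeholders):

# several statements on stdin, no extra flag needed
echo "SELECT 1; SELECT 2; SELECT 3;" | clickhouse-client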

View File

@@ -427,7 +427,7 @@ do
 done
 # for each query run, prepare array of metrics from query log
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
 'test text, query_index int, query_id text, version UInt8, time float');
@@ -582,7 +582,7 @@ numactl --cpunodebind=all --membind=all numactl --show
 # If the available memory falls below 2 * size, GNU parallel will suspend some of the running jobs.
 numactl --cpunodebind=all --membind=all parallel -v --joblog analyze/parallel-log.txt --memsuspend 15G --null < analyze/commands.txt 2>> analyze/errors.log
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 -- Join the metric names back to the metric statistics we've calculated, and make
 -- a denormalized table of them -- statistics for all metrics for all queries.
 -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other:
@@ -680,7 +680,7 @@ rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.ts
 cat analyze/errors.log >> report/errors.log ||:
 cat profile-errors.log >> report/errors.log ||:
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_display_names as select * from
 file('analyze/query-display-names.tsv', TSV,
 'test text, query_index int, query_display_name text')
@@ -981,7 +981,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts
 for version in {right,left}
 do
 rm -rf data
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view query_profiles as
 with 0 as left, 1 as right
 select * from file('analyze/query-profiles.tsv', TSV,
@@ -1151,7 +1151,7 @@ function report_metrics
 rm -rf metrics ||:
 mkdir metrics
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view right_async_metric_log as
 select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
 ;
@@ -1211,7 +1211,7 @@ function upload_results
 # Prepare info for the CI checks table.
 rm -f ci-checks.tsv
-clickhouse-local --multiquery --query "
+clickhouse-local --query "
 create view queries as select * from file('report/queries.tsv', TSVWithNamesAndTypes);
 create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')

View File

@@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'
+$CLICKHOUSE_CLIENT -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'

View File

@@ -12,14 +12,14 @@ echo "
 DROP TABLE IF EXISTS rocksdb_race;
 CREATE TABLE rocksdb_race (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key);
 INSERT INTO rocksdb_race SELECT '1_' || toString(number), number FROM numbers(100000);
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 function read_stat_thread()
 {
 while true; do
 echo "
 SELECT * FROM system.rocksdb FORMAT Null;
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 done
 }
@@ -29,7 +29,7 @@ function truncate_thread()
 sleep 3s;
 echo "
 TRUNCATE TABLE rocksdb_race;
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 done
 }

View File

@@ -12,7 +12,7 @@ opts=(
 --join_algorithm='parallel_hash'
 )
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 CREATE TABLE t1(a UInt32, b UInt32) ENGINE=MergeTree ORDER BY ();
 INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6);

View File

@@ -5,12 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -m -q "
 SELECT sleep(1) FROM numbers(100) FORMAT Null;
 SELECT 'FAIL';
 "
-timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -nm -q "
+timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -m -q "
 SELECT sleep(1) FROM numbers(100) FORMAT Null;
 SELECT 'FAIL';
 "

View File

@@ -16,7 +16,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE"
-$CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+$CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
 FROM
 (
 SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -37,7 +37,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)"
-$CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+$CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
 FROM
 (
 SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -70,7 +70,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)"
-$CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
+$CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state
 FROM
 (
 SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
@@ -109,7 +109,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS"
-$CLICKHOUSE_CLIENT -n --query "SELECT
+$CLICKHOUSE_CLIENT --query "SELECT
 query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read
 FROM
 system.query_log

View File

@@ -15,7 +15,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP ROLE IF EXISTS test_role_02242;
 CREATE ROLE test_role_02242;
 "

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP ROLE IF EXISTS test_role_02244;
 CREATE ROLE test_role_02244;
 DROP USER IF EXISTS kek_02243;
@@ -37,4 +37,4 @@ $CLICKHOUSE_CLIENT --user kek_02243 -q "SELECT * FROM test" 2>&1| grep -Fa "Exce
 $CLICKHOUSE_CLIENT -q "DROP ROLE IF EXISTS test_role_02243"
 $CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS test_user_02243"
 $CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243"

View File

@@ -44,7 +44,7 @@ protobuf_info() {
 fi
 }
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS $MAIN_TABLE;
 DROP TABLE IF EXISTS $ROUNDTRIP_TABLE;
 DROP TABLE IF EXISTS $COMPATIBILITY_TABLE;
@@ -78,14 +78,14 @@ echo $SET_OUTPUT
 echo
 echo "Insert $INITIAL_INSERT_VALUES into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 INSERT INTO $MAIN_TABLE VALUES $INITIAL_INSERT_VALUES;
 SELECT * FROM $MAIN_TABLE;
 "
 echo
 echo "Protobuf representation of the second row:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH
 echo
@@ -101,12 +101,12 @@ hexdump -C $MESSAGE_FILE_PATH
 echo
 echo "Insert proto message into table (Nullable(String), Int32):"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $ROUNDTRIP_TABLE"
 echo
 echo "Proto output of the table using Google wrapper:"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH"
 hexdump -C $BINARY_FILE_PATH
 echo
@@ -124,14 +124,14 @@ echo
 echo "Insert $MULTI_WRAPPER_VALUES and reinsert using Google wrappers into:"
 echo "Table (Nullable(Int32), Nullable(Int32), Int32):"
 $CLICKHOUSE_CLIENT --query "INSERT INTO $MULTI_TABLE VALUES $MULTI_WRAPPER_VALUES"
-$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
-$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH"
+$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH"
 $CLICKHOUSE_CLIENT --query "SELECT * FROM $MULTI_TABLE"
 rm "$BINARY_FILE_PATH"
 rm "$MESSAGE_FILE_PATH"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE $MAIN_TABLE;
 DROP TABLE $ROUNDTRIP_TABLE;
 DROP TABLE $COMPATIBILITY_TABLE;

View File

@@ -11,7 +11,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 echo "Using storage policy: $STORAGE_POLICY"
 $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286"
-$CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_02286 (key UInt32, value String)
+$CLICKHOUSE_CLIENT --query "CREATE TABLE test_02286 (key UInt32, value String)
 Engine=MergeTree()
 ORDER BY key
 SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760"
@@ -38,7 +38,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02286 FORMAT Null"
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
-$CLICKHOUSE_CLIENT -n --query "SELECT count()
+$CLICKHOUSE_CLIENT --query "SELECT count()
 FROM (
 SELECT
 arrayJoin(cache_paths) AS cache_path,
@@ -54,7 +54,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
 $CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache"
-$CLICKHOUSE_CLIENT -n --query "SELECT cache_path, local_path
+$CLICKHOUSE_CLIENT --query "SELECT cache_path, local_path
 FROM (
 SELECT
 arrayJoin(cache_paths) AS cache_path,

View File

@@ -23,7 +23,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d
 ORDER BY tuple()
 SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null
-$CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313
+$CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02313
 SELECT * FROM
 generateRandom('id Int32, val String')
 LIMIT 100000"

View File

@@ -9,7 +9,7 @@ function check_refcnt_for_table()
 {
 local table=$1 && shift
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 system stop merges $table;
 -- cleanup thread may hold the parts lock
 system stop cleanup $table;
@@ -66,14 +66,14 @@ function check_refcnt_for_table()
 # NOTE: index_granularity=1 to cancel ASAP
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 drop table if exists data_02340;
 create table data_02340 (key Int, part Int) engine=MergeTree() partition by part order by key settings index_granularity=1;
 " || exit 1
 check_refcnt_for_table data_02340
 $CLICKHOUSE_CLIENT -q "drop table data_02340 sync"
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 drop table if exists data_02340_rep sync;
 create table data_02340_rep (key Int, part Int) engine=ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') partition by part order by key settings index_granularity=1;
 " || exit 1

View File

@@ -7,14 +7,14 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="02344_describe_cache_test"
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree() ORDER BY tuple()
 SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0);
 """
-$CLICKHOUSE_CLIENT -nm --query """
+$CLICKHOUSE_CLIENT -m --query """
 SELECT count() FROM system.disks WHERE name = '$disk_name'
 """

View File

@@ -24,7 +24,7 @@ function wait_query_by_id_started()
 # wait for query to be started
 while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do
 if [ "$(
-$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q "
+$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -m -q "
 system flush logs;
 select count() from system.query_log
@@ -52,7 +52,7 @@ $CLICKHOUSE_CLIENT -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_ordinary Engine=Or
 # debug build on CI, so if this will happen, then DROP query will be
 # finished instantly, and to avoid flakiness we will retry in this case
 while :; do
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}_ordinary.data_02352;
 CREATE TABLE ${CLICKHOUSE_DATABASE}_ordinary.data_02352 (key Int) Engine=Null();
 "

View File

@@ -9,13 +9,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "ATTACH TABLE mv" |& {
+$CLICKHOUSE_CLIENT -m -q "ATTACH TABLE mv" |& {
 # CANNOT_GET_CREATE_TABLE_QUERY -- ATTACH TABLE IF EXISTS
 # TABLE_ALREADY_EXISTS -- ATTACH TABLE IF NOT EXISTS
 grep -F -m1 Exception | grep -v -e CANNOT_GET_CREATE_TABLE_QUERY -e TABLE_ALREADY_EXISTS
 }
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS null;
 CREATE TABLE null (key Int) ENGINE = Null;
 DROP TABLE IF EXISTS mv;

View File

@@ -27,7 +27,7 @@ function insert()
 function check_span()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 SYSTEM FLUSH LOGS;
 SELECT operation_name,
@@ -50,7 +50,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 # $2 - value of distributed_foreground_insert
 function check_span_kind()
 {
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 SYSTEM FLUSH LOGS;
 SELECT count()
@@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} -nq "
 #
 # Prepare tables for tests
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.local_opentelemetry;
@@ -122,7 +122,7 @@ check_span_kind $trace_id 'CLIENT'
 #
 # Cleanup
 #
-${CLICKHOUSE_CLIENT} -nq "
+${CLICKHOUSE_CLIENT} -q "
 DROP TABLE ${CLICKHOUSE_DATABASE}.dist_opentelemetry;
 DROP TABLE ${CLICKHOUSE_DATABASE}.local_opentelemetry;
 "

View File

@@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS 02419_test SYNC;"
 test_primary_key()
 {
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 CREATE TABLE 02419_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY($1);
 INSERT INTO 02419_test VALUES (1, 1.1), (2, 2.2);
 SELECT value FROM 02419_test WHERE key = 1;

View File

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/replication.lib
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS alter_table0;
 DROP TABLE IF EXISTS alter_table1;

View File

@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
 echo "$res"
 }
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE IF EXISTS test_without_merge;
 DROP TABLE IF EXISTS test_with_merge;
@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"
 wait_for_number_of_parts 'test_without_merge' 1 10
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_without_merge;
 SELECT 'With merge any part range';
@@ -47,7 +47,7 @@ INSERT INTO test_with_merge SELECT 3;"
 wait_for_number_of_parts 'test_with_merge' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_with_merge;
 SELECT 'With merge partition only';
@@ -60,7 +60,7 @@ INSERT INTO test_with_merge SELECT 3;"
 wait_for_number_of_parts 'test_with_merge' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
 SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;

View File

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 -- Limit S3 PUT request per second rate
 SET s3_max_put_rps = 2;
 SET s3_max_put_burst = 1;

View File

@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "
@@ -60,7 +60,7 @@ wait
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat1 WHERE NOT ignore(*)"
 $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat2 WHERE NOT ignore(*)"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS wikistat1 SYNC;
 DROP TABLE IF EXISTS wikistat2 SYNC;
 "

View File

@@ -11,7 +11,7 @@ cp $CURDIR/data_ua_parser/os.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/browser.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
 cp $CURDIR/data_ua_parser/device.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;
@@ -61,10 +61,10 @@ create table user_agents
 Engine = Log();
 "
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 select ua, device,
 concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser ,
 concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os
@@ -74,7 +74,7 @@ from (
 dictGet('regexp_device', 'device_replacement', ua) device from user_agents) order by ua;
 "
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_os;
 drop dictionary if exists regexp_browser;
 drop dictionary if exists regexp_device;

View File

@@ -27,7 +27,7 @@ cat > "$yaml" <<EOL
 version: '10'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_dict1;
 create dictionary regexp_dict1
 (
@@ -69,7 +69,7 @@ cat > "$yaml" <<EOL
 lucky: 'abcde'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 system reload dictionary regexp_dict1; -- { serverError 489 }
 "
@@ -79,7 +79,7 @@ cat > "$yaml" <<EOL
 version: '\1'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 system reload dictionary regexp_dict1; -- { serverError 318 }
 "
@@ -92,7 +92,7 @@ cat > "$yaml" <<EOL
 version: '\2.\3'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 system reload dictionary regexp_dict1;
 select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+');
 select dictGet('regexp_dict1', ('name', 'version'), 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+');
@@ -107,7 +107,7 @@ cat > "$yaml" <<EOL
 col_array: '[1,2,3,-1,-2,-3]'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 create dictionary regexp_dict2
 (
 regexp String,
@@ -147,7 +147,7 @@ cat > "$yaml" <<EOL
 EOL
 # dictGetAll
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_dict3;
 create dictionary regexp_dict3
 (
@@ -192,7 +192,7 @@ cat > "$yaml" <<EOL
 tag: 'Documentation'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_dict3;
 create dictionary regexp_dict3
 (
@@ -252,7 +252,7 @@ cat > "$yaml" <<EOL
 pattern: '(?-i)hello.*world'
 EOL
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary if exists regexp_dict4;
 create dictionary regexp_dict4
 (
@@ -291,7 +291,7 @@ select dictGetAll('regexp_dict4', 'pattern', 'HELLO WORLD');
 select dictGetAll('regexp_dict4', 'pattern', 'HELLO\nWORLD');
 "
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 drop dictionary regexp_dict1;
 drop dictionary regexp_dict2;
 drop dictionary regexp_dict3;

View File

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # Check that if the background cleanup thread works correctly.
 CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
 DROP TABLE IF EXISTS t_async_insert_cleanup SYNC;
 CREATE TABLE t_async_insert_cleanup (
 KeyID UInt32
@@ -27,7 +27,7 @@ old_answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper W
 for i in {1..300}; do
 answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'")
 if [ $answer == '10' ]; then
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"
 exit 0
 fi
 sleep 1
@@ -36,4 +36,4 @@ done
 $CLICKHOUSE_CLIENT --query "SELECT count(*) FROM t_async_insert_cleanup"
 echo $old_answer
 $CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'"
-$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
+$CLICKHOUSE_CLIENT --query "DROP TABLE t_async_insert_cleanup SYNC;"

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1"
-$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q "
+$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -q "
 create temporary table tmp as select * from numbers(100000000);
 select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null &

View File

@@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 function get_query_id() { random_str 10; }
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists buf;
 drop table if exists dist;
 drop table if exists data;
@@ -31,7 +31,7 @@ query_id="$(get_query_id)"
 # test, since we care about the difference between NOW() and there should
 # not be any significant difference.
 $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
 "
@@ -42,25 +42,25 @@ query_id="$(get_query_id)"
 # this query (and all subsequent) should reuse the previous connection (at least most of the time)
 $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
 "
 echo "INSERT"
 query_id="$(get_query_id)"
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q "
+$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -m -q "
 insert into dist_dist values (1),(2);
 select * from data;
 "
 sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist_dist"
 sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist"
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist"
 echo "CHECK"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "
 select * from data order by key;
 system flush logs;
 select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 echo "INSERT TO S3"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1;
 " 2>&1 | $CLICKHOUSE_LOCAL -q "
 WITH '(\\w+): (\\d+)' AS pattern,
@@ -30,7 +30,7 @@ SELECT * FROM (
 "
 echo "CHECK WITH query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 SYSTEM FLUSH LOGS;
 SELECT type,
 'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'],
@@ -45,7 +45,7 @@ ORDER BY query_start_time DESC;
 "
 echo "CREATE"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS times;
 CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
 SETTINGS
@@ -56,29 +56,29 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
 "
 echo "INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "READ"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "INSERT and READ INSERT"
-$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
+$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q "
 INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
 SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
 INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
 " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* '
 echo "DROP"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE times;
 "
 echo "CHECK with query_log"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
 SYSTEM FLUSH LOGS;
 SELECT type,
 query,

View File

@@ -21,7 +21,7 @@ wait_for_number_of_parts() {
 echo "$res"
 }
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE IF EXISTS test_without_merge;
 DROP TABLE IF EXISTS test_replicated;
@@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;"
 wait_for_number_of_parts 'test_without_merge' 1 10
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_without_merge;
 SELECT 'With merge replicated any part range';
@@ -47,7 +47,7 @@ INSERT INTO test_replicated SELECT 3;"
 wait_for_number_of_parts 'test_replicated' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 DROP TABLE test_replicated;
 SELECT 'With merge replicated partition only';
@@ -60,7 +60,7 @@ INSERT INTO test_replicated SELECT 3;"
 wait_for_number_of_parts 'test_replicated' 1 100
-$CLICKHOUSE_CLIENT -nmq "
+$CLICKHOUSE_CLIENT -mq "
 SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one
 SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active;

View File

@@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 echo "
 DROP USER IF EXISTS postgresql_user;
 CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password;
-" | $CLICKHOUSE_CLIENT -n
+" | $CLICKHOUSE_CLIENT
 psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT 1.23::Decimal256(70) AS test;"

View File

@@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # too slow with this.
 #
 # Unfortunately, the test has to buffer it in memory.
-$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q "
+$CLICKHOUSE_CLIENT --max_memory_usage 16G -m -q "
 INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV')
 SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024)
 SETTINGS s3_max_single_part_upload_size = '5Gi';
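Note that in hunks like the one above only the n is dropped from the combined -nm option: -m is kept, which (assuming the usual short-option meaning, --multiline) still lets the quoted query text span several lines. A sketch of the surviving form, with placeholder statements:

# -n (--multiquery) is gone; -m (--multiline) remains
clickhouse-client -m -q "
SELECT 1;
SELECT 2;
"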

View File

@@ -10,7 +10,7 @@ set -e
 NUM_REPLICAS=5
 for i in $(seq 1 $NUM_REPLICAS); do
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 DROP TABLE IF EXISTS r$i SYNC;
 CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1;
 "

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 for DISK in s3_disk s3_cache
 do
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (id Int32, empty Array(Int32))
 ENGINE=MergeTree ORDER BY id
@@ -17,13 +17,13 @@ do
 SELECT * FROM test;
 "
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 BACKUP TABLE test TO Disk('backups', 'test_s3_backup');
 DROP TABLE test;
 RESTORE TABLE test FROM Disk('backups', 'test_s3_backup');
 " &>/dev/null
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 SELECT * FROM test;
 SELECT empty FROM test;
 "

View File

@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
 CREATE TEMPORARY TABLE IF NOT EXISTS aboba
 (
 user_id UInt32,

View File

@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 DROP TABLE IF EXISTS test_s3;
 CREATE TABLE test_s3 (a UInt64, b UInt64)
@@ -17,7 +17,7 @@ INSERT INTO test_s3 SELECT number, number FROM numbers(1000000);
 query="SELECT sum(b) FROM test_s3 WHERE a >= 100000 AND a <= 102000"
 query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 SELECT
 ProfileEvents['S3ReadRequestsCount'],
 ProfileEvents['ReadBufferFromS3Bytes'],

View File

@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, b String)
 ENGINE = MergeTree()
@@ -22,7 +22,7 @@ INSERT INTO test SELECT number, randomString(100) FROM numbers(1000000);
 "
 QUERY_ID=$RANDOM
-$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -nm -q "
+$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -m -q "
 SET enable_filesystem_cache_log = 1;
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT * FROM test WHERE NOT ignore() LIMIT 1 FORMAT Null;
@@ -49,14 +49,14 @@ WHERE query_id = '$QUERY_ID' "
 # File segments cannot be less that 20Mi,
 # except for last file segment in a file or if file size is less.
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size
 AND end_offset + 1 != file_size
 AND file_segment_size < 20 * 1024 * 1024;
 "
-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size AND end_offset + 1 != file_size;
 ")
@@ -68,7 +68,7 @@ else
 echo "FAIL"
 fi
-count=$($CLICKHOUSE_CLIENT -nm -q "
+count=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query)
 WHERE file_segment_size < file_size
 AND end_offset + 1 != file_size
@@ -87,21 +87,21 @@ FROM (SELECT * FROM ($query)) AS cache_log
 INNER JOIN system.filesystem_cache AS cache
 ON cache_log.cache_path = cache.cache_path "
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size
 AND downloaded_size < 20 * 1024 * 1024;
 "
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size
 AND formatReadableSize(downloaded_size) not in ('20.00 MiB', '40.00 MiB');
 "
-all=$($CLICKHOUSE_CLIENT -nm -q "
+all=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_size < file_size AND file_segment_range_end + 1 != file_size;
 ")
@@ -112,7 +112,7 @@ else
 echo "FAIL"
 fi
-count2=$($CLICKHOUSE_CLIENT -nm -q "
+count2=$($CLICKHOUSE_CLIENT -m -q "
 SELECT count() FROM ($query2)
 WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size
 AND file_segment_range_end + 1 != file_size

View File

@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 DROP TABLE IF EXISTS test_s3;
 CREATE TABLE test_s3 (a UInt64, b UInt64)
@@ -25,7 +25,7 @@ do
 query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
 SELECT ProfileEvents['DiskConnectionsPreserved'] > 0
 FROM system.query_log
 WHERE type = 'QueryFinish'
@@ -41,7 +41,7 @@ done
 while true
 do
-query_id=$(${CLICKHOUSE_CLIENT} -nq "
+query_id=$(${CLICKHOUSE_CLIENT} -q "
 create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n;
 set insert_keeper_fault_injection_probability=0;
 insert into mut values (1, 2, 3), (10, 20, 30);
@@ -60,7 +60,7 @@ do
 ) limit 1 settings max_threads=1;
 " 2>&1)
 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
-RES=$(${CLICKHOUSE_CLIENT} -nm --query "
+RES=$(${CLICKHOUSE_CLIENT} -m --query "
 SELECT ProfileEvents['StorageConnectionsPreserved'] > 0
 FROM system.query_log
 WHERE type = 'QueryFinish'

View File

@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e set -e
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
drop table if exists data; drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
insert into data select * from numbers(10); insert into data select * from numbers(10);
@ -16,28 +16,28 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
" "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
" "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
" "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
" "

View File

@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e set -e
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
drop table if exists data; drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
insert into data select * from numbers(10); insert into data select * from numbers(10);

View File

@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh . "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test; DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String) CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple() ENGINE = MergeTree() ORDER BY tuple()
@ -17,17 +17,17 @@ SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path =
disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.disks WHERE name = '$disk_name' SELECT count() FROM system.disks WHERE name = '$disk_name'
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test; DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String) CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple() ENGINE = MergeTree() ORDER BY tuple()
SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk);
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.disks WHERE name = '$disk_name' SELECT count() FROM system.disks WHERE name = '$disk_name'
""" """

View File

@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}"
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
DROP TABLE IF EXISTS test; DROP TABLE IF EXISTS test;
CREATE TABLE test (a Int32, b String) CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY tuple() ENGINE = MergeTree() ORDER BY tuple()
@ -22,29 +22,29 @@ query_id=$RANDOM
$CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1"
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek; SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek;
""" 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL" """ 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL"
${CLICKHOUSE_CLIENT} -q " system flush logs" ${CLICKHOUSE_CLIENT} -q " system flush logs"
key=$($CLICKHOUSE_CLIENT -nm --query """ key=$($CLICKHOUSE_CLIENT -m --query """
SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""") """)
offset=$($CLICKHOUSE_CLIENT -nm --query """ offset=$($CLICKHOUSE_CLIENT -m --query """
SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""") """)
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset; SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset;
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset;
""" """
@ -54,18 +54,18 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Nul
${CLICKHOUSE_CLIENT} -q " system flush logs" ${CLICKHOUSE_CLIENT} -q " system flush logs"
key=$($CLICKHOUSE_CLIENT -nm --query """ key=$($CLICKHOUSE_CLIENT -m --query """
SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1;
""") """)
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key'; SELECT count() FROM system.filesystem_cache WHERE key = '$key';
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key
""" """
$CLICKHOUSE_CLIENT -nm --query """ $CLICKHOUSE_CLIENT -m --query """
SELECT count() FROM system.filesystem_cache WHERE key = '$key'; SELECT count() FROM system.filesystem_cache WHERE key = '$key';
""" """

View File

@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh # shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh . "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
DROP TABLE IF EXISTS data; DROP TABLE IF EXISTS data;
DROP TABLE IF EXISTS data_1; DROP TABLE IF EXISTS data_1;
DROP TABLE IF EXISTS data_2; DROP TABLE IF EXISTS data_2;

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh # shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh . "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
drop table if exists data; drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple(); create table data (key Int) engine=MergeTree() order by tuple();
insert into data select * from numbers(10); insert into data select * from numbers(10);

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh # shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh . "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
drop table if exists data; drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk'; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk';
-- reading 1e6*8 bytes with 1M bandwidth it should take (8-1)/1=7 seconds -- reading 1e6*8 bytes with 1M bandwidth it should take (8-1)/1=7 seconds
@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT SELECT
'native_copy', 'native_copy',
@ -26,7 +26,7 @@ $CLICKHOUSE_CLIENT -nm -q "
query_id=$(random_str 10) query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT SELECT
'no_native_copy', 'no_native_copy',

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh # shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh . "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -mn -q """ $CLICKHOUSE_CLIENT -m -q """
DROP TABLE IF EXISTS t1_02867; DROP TABLE IF EXISTS t1_02867;
CREATE TABLE t1_02867 (x UInt64) ENGINE=Set(); CREATE TABLE t1_02867 (x UInt64) ENGINE=Set();
""" """
@ -39,4 +39,4 @@ repeat_truncate_insert &
sleep 10 sleep 10
$CLICKHOUSE_CLIENT -mn -q "DROP TABLE IF EXISTS t1_02867;" $CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t1_02867;"

View File

@ -10,14 +10,14 @@ echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNI
echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl
echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl
$CLICKHOUSE_LOCAL -nm -q " $CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union'; set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
" "
$CLICKHOUSE_LOCAL -nm -q " $CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union'; set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
@ -25,14 +25,14 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl');
cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd .. cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd ..
$CLICKHOUSE_LOCAL -nm -q " $CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union'; set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow;
select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file;
" "
$CLICKHOUSE_LOCAL -nm -q " $CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union'; set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
@ -41,7 +41,7 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl');
echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl
$CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE" $CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE"
$CLICKHOUSE_LOCAL -nm -q " $CLICKHOUSE_LOCAL -m -q "
set schema_inference_mode = 'union'; set schema_inference_mode = 'union';
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl');
desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl');
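A minimal standalone illustration of what schema_inference_mode = 'union' does (file names hypothetical, not part of this commit): each file contributes its own columns, the inferred schema is the union of all of them, and columns absent from a given file read as NULL.

echo '{"a" : 1}' > u1.jsonl
echo '{"b" : "x"}' > u2.jsonl
clickhouse-local -m -q "
    set schema_inference_mode = 'union';
    desc file('u{1,2}.jsonl');
    select * from file('u{1,2}.jsonl');
"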

View File

@ -67,7 +67,7 @@ curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader
wait; wait;
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
-- Check that the number of ZK requests is less than half of (total replicas * concurrency) -- Check that the number of ZK requests is less than half of (total replicas * concurrency)

View File

@ -8,7 +8,7 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$CURDIR"/../shell_config.sh . "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
drop table if exists r1; drop table if exists r1;
drop table if exists r2; drop table if exists r2;
@ -64,7 +64,7 @@ function insert_duplicates() {
wait wait
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
system sync replica r1; system sync replica r1;
system sync replica r2; system sync replica r2;
" "
@ -84,7 +84,7 @@ function loop()
do do
while ! insert_duplicates while ! insert_duplicates
do do
$CLICKHOUSE_CLIENT -nm -q " $CLICKHOUSE_CLIENT -m -q "
truncate table r1; truncate table r1;
truncate table r2; truncate table r2;
system sync replica r1; system sync replica r1;
@ -137,8 +137,8 @@ function list_keeper_nodes() {
list_keeper_nodes "${table_shared_id}" list_keeper_nodes "${table_shared_id}"
$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & $CLICKHOUSE_CLIENT -m -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" &
$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & $CLICKHOUSE_CLIENT -m -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" &
wait wait
list_keeper_nodes "${table_shared_id}" list_keeper_nodes "${table_shared_id}"

View File

@ -10,11 +10,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`"
$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" $CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
# Basic refreshing. # Basic refreshing.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view a create materialized view a
refresh after 2 second refresh after 2 second
engine Memory engine Memory
@ -23,41 +23,41 @@ $CLICKHOUSE_CLIENT -nq "
select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes;
show create a;" show create a;"
# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) # Wait for any refresh. (xargs trims the string and turns \t and \n into spaces)
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ]
do do
sleep 0.5 sleep 0.5
done done
start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" start_time="`$CLICKHOUSE_CLIENT -q "select reinterpret(now64(), 'Int64')"`"
# Check table contents. # Check table contents.
$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" $CLICKHOUSE_CLIENT -q "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a"
# Wait for table contents to change. # Wait for table contents to change.
res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" res1="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values'`"
while : while :
do do
res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" res2="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
[ "$res2" == "$res1" ] || break [ "$res2" == "$res1" ] || break
sleep 0.5 sleep 0.5
done done
# Wait for another change. # Wait for another change.
while : while :
do do
res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" res3="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`"
[ "$res3" == "$res2" ] || break [ "$res3" == "$res2" ] || break
sleep 0.5 sleep 0.5
done done
# Check that the two changes were at least 1 second apart, in particular that we're not refreshing # Check that the two changes were at least 1 second apart, in particular that we're not refreshing
# like crazy. This is potentially flaky, but we need at least one test that uses a non-mocked timer # like crazy. This is potentially flaky, but we need at least one test that uses a non-mocked timer
# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. # to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000);
select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;"
# Create a source table from which views will read. # Create a source table from which views will read.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create table src (x Int8) engine Memory as select 1;" create table src (x Int8) engine Memory as select 1;"
# Switch to fake clock, change refresh schedule, change query. # Switch to fake clock, change refresh schedule, change query.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
system test view a set fake time '2050-01-01 00:00:01'; system test view a set fake time '2050-01-01 00:00:01';
system wait view a; system wait view a;
system refresh view a; system refresh view a;
@ -68,19 +68,19 @@ $CLICKHOUSE_CLIENT -nq "
select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes;
show create a;" show create a;"
# Advance time to trigger the refresh. # Advance time to trigger the refresh.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<5: no refresh>', count() from a; select '<5: no refresh>', count() from a;
system test view a set fake time '2052-02-03 04:05:06';" system test view a set fake time '2052-02-03 04:05:06';"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<6: refreshed>', * from a; select '<6: refreshed>', * from a;
select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;"
# Create a dependent view, refresh it once. # Create a dependent view, refresh it once.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a;
show create b; show create b;
system test view b set fake time '2052-11-11 11:11:11'; system test view b set fake time '2052-11-11 11:11:11';
@ -88,89 +88,89 @@ $CLICKHOUSE_CLIENT -nq "
system wait view b; system wait view b;
select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';"
# Next refresh shouldn't start until the dependency refreshes. # Next refresh shouldn't start until the dependency refreshes.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<8: refreshed>', * from b; select '<8: refreshed>', * from b;
select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes;
system test view b set fake time '2054-01-24 23:22:21';" system test view b set fake time '2054-01-24 23:22:21';"
while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ]
do do
sleep 0.5 sleep 0.5
done done
# Drop the source table, check that refresh fails and doesn't leave a temp table behind. # Drop the source table, check that refresh fails and doesn't leave a temp table behind.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();
drop table src; drop table src;
system refresh view a;" system refresh view a;"
$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" $CLICKHOUSE_CLIENT -q "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();"
# Create the source table again, check that refresh succeeds (in particular that tables are looked # Create the source table again, check that refresh succeeds (in particular that tables are looked
# up by name rather than uuid). # up by name rather than uuid).
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes;
create table src (x Int16) engine Memory as select 2; create table src (x Int16) engine Memory as select 2;
system test view a set fake time '2054-01-01 00:00:01';" system test view a set fake time '2054-01-01 00:00:01';"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ]
do do
sleep 0.5 sleep 0.5
done done
# Both tables should've refreshed. # Both tables should've refreshed.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<11: chain-refreshed a>', * from a; select '<11: chain-refreshed a>', * from a;
select '<12: chain-refreshed b>', * from b; select '<12: chain-refreshed b>', * from b;
select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;"
# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to # Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to
# catch up to the same cycle. # catch up to the same cycle.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
system test view b set fake time '2059-01-01 00:00:00'; system test view b set fake time '2059-01-01 00:00:00';
system refresh view b;" system refresh view b;"
while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
system test view b set fake time '2061-01-01 00:00:00'; system test view b set fake time '2061-01-01 00:00:00';
system test view a set fake time '2057-01-01 00:00:00';" system test view a set fake time '2057-01-01 00:00:00';"
while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes;
truncate src; truncate src;
insert into src values (3); insert into src values (3);
system test view a set fake time '2060-02-02 02:02:02';" system test view a set fake time '2060-02-02 02:02:02';"
while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<15: chain-refreshed a>', * from a; select '<15: chain-refreshed a>', * from a;
select '<16: chain-refreshed b>', * from b; select '<16: chain-refreshed b>', * from b;
select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;"
# Get to WaitingForDependencies state and remove the dependency. # Get to WaitingForDependencies state and remove the dependency.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
system test view b set fake time '2062-03-03 03:03:03'" system test view b set fake time '2062-03-03 03:03:03'"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
alter table b modify refresh every 2 year" alter table b modify refresh every 2 year"
while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] while [ "`$CLICKHOUSE_CLIENT -q "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b';
show create b;" show create b;"
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
drop table src; drop table src;
drop table a; drop table a;
drop table b; drop table b;
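The wait loops repeated throughout this test all follow one idiom; a hypothetical helper that captures it (not part of this commit):

# Poll the 'refreshes' view until a column reaches the expected value.
function wait_for_refreshes()
{
    local column=$1 expected=$2
    while [ "$($CLICKHOUSE_CLIENT -q "select $column from refreshes" | xargs)" != "$expected" ]
    do
        sleep 0.5
    done
}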

View File

@ -12,29 +12,29 @@ CLICKHOUSE_LOG_COMMENT=
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`"
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`"
$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" $CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view"
# Select from a table that doesn't exist, get an exception. # Select from a table that doesn't exist, get an exception.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create table src (x Int8) engine Memory as select 1; create table src (x Int8) engine Memory as select 1;
create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src;
drop table src;" drop table src;"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ]
do do
sleep 0.5 sleep 0.5
done done
# Check exception, create src, expect successful refresh. # Check exception, create src, expect successful refresh.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c'; select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c';
create table src (x Int64) engine Memory as select 1; create table src (x Int64) engine Memory as select 1;
system refresh view c;" system refresh view c;"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do do
sleep 0.5 sleep 0.5
done done
# Rename table. # Rename table.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<20: unexception>', * from c; select '<20: unexception>', * from c;
rename table c to d; rename table c to d;
select '<21: rename>', * from d; select '<21: rename>', * from d;
@ -42,130 +42,130 @@ $CLICKHOUSE_CLIENT -nq "
# Do various things during a refresh. # Do various things during a refresh.
# First make a nonempty view. # First make a nonempty view.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
drop table d; drop table d;
truncate src; truncate src;
insert into src values (1); insert into src values (1);
create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do do
sleep 0.5 sleep 0.5
done done
# Stop refreshes. # Stop refreshes.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<23: simple refresh>', * from e; select '<23: simple refresh>', * from e;
system stop view e;" system stop view e;"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ]
do do
sleep 0.5 sleep 0.5
done done
# Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure # Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure
# we wait for a slow refresh, not a previous fast one.) # we wait for a slow refresh, not a previous fast one.)
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
insert into src select * from numbers(1000) settings max_block_size=1; insert into src select * from numbers(1000) settings max_block_size=1;
system start view e;" system start view e;"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Running' ]
do do
sleep 0.5 sleep 0.5
done done
# Rename. # Rename.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
rename table e to f; rename table e to f;
select '<24: rename during refresh>', * from f; select '<24: rename during refresh>', * from f;
select '<25: rename during refresh>', view, status from refreshes where view = 'f'; select '<25: rename during refresh>', view, status from refreshes where view = 'f';
alter table f modify refresh after 10 year;" alter table f modify refresh after 10 year;"
# Cancel. # Cancel.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
system cancel view f;" system cancel view f;"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ]
do do
sleep 0.5 sleep 0.5
done done
# Check that another refresh doesn't immediately start after the cancelled one. # Check that another refresh doesn't immediately start after the cancelled one.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f'; select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f';
system refresh view f;" system refresh view f;"
while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ]
do do
sleep 0.5 sleep 0.5
done done
# Drop. # Drop.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
drop table f; drop table f;
select '<28: drop during refresh>', view, status from refreshes; select '<28: drop during refresh>', view, status from refreshes;
select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()" select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()"
# Try OFFSET and RANDOMIZE FOR. # Try OFFSET and RANDOMIZE FOR.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x; create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x;
show create g; show create g;
system test view g set fake time '2050-02-03 15:30:13';" system test view g set fake time '2050-02-03 15:30:13';"
while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
with '2050-02-10 04:00:00'::DateTime as expected with '2050-02-10 04:00:00'::DateTime as expected
select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;"
# Send data 'TO' an existing table. # Send data 'TO' an existing table.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
drop table g; drop table g;
create table dest (x Int64) engine MergeTree order by x; create table dest (x Int64) engine MergeTree order by x;
truncate src; truncate src;
insert into src values (1); insert into src values (1);
create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; create materialized view h refresh every 1 second to dest empty as select x*10 as x from src;
show create h;" show create h;"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<30: to existing table>', * from dest; select '<30: to existing table>', * from dest;
insert into src values (2);" insert into src values (2);"
while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] while [ "`$CLICKHOUSE_CLIENT -q "select count() from dest -- $LINENO" | xargs`" != '2' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<31: to existing table>', * from dest; select '<31: to existing table>', * from dest;
drop table dest; drop table dest;
drop table h;" drop table h;"
# Retries. # Retries.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;" create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;"
$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" $CLICKHOUSE_CLIENT -q "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes;
create table src2 (x Int8) engine Memory; create table src2 (x Int8) engine Memory;
insert into src2 values (1); insert into src2 values (1);
exchange tables src and src2; exchange tables src and src2;
drop table src2;" drop table src2;"
while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<31.6: did retry>', x from h2; select '<31.6: did retry>', x from h2;
drop table h2" drop table h2"
# EMPTY # EMPTY
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2);
create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);"
while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] while [ "`$CLICKHOUSE_CLIENT -q "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ]
do do
sleep 0.5 sleep 0.5
done done
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view;
drop table i; drop table i;
drop table j;" drop table j;"
# APPEND # APPEND
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src; create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src;
select '<33: append>', * from k; select '<33: append>', * from k;
system refresh view k; system refresh view k;
@ -177,7 +177,7 @@ $CLICKHOUSE_CLIENT -nq "
system wait view k; system wait view k;
select '<35: append>', * from k order by x;" select '<35: append>', * from k order by x;"
# ALTER to non-APPEND # ALTER to non-APPEND
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
alter table k modify refresh every 10 year; alter table k modify refresh every 10 year;
system wait view k; system wait view k;
system refresh view k; system refresh view k;
@ -187,7 +187,7 @@ $CLICKHOUSE_CLIENT -nq "
truncate table src;" truncate table src;"
# APPEND + TO + regular materialized view reading from it. # APPEND + TO + regular materialized view reading from it.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create table mid (x Int64) engine MergeTree order by x; create table mid (x Int64) engine MergeTree order by x;
create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src; create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src;
create materialized view m (x Int64) engine Memory as select x*10 as x from mid; create materialized view m (x Int64) engine Memory as select x*10 as x from mid;
@ -204,19 +204,19 @@ $CLICKHOUSE_CLIENT -nq "
drop table mid;" drop table mid;"
# Failing to create inner table. # Failing to create inner table.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected" create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected"
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2);
drop table n;" drop table n;"
# Reading from table that doesn't exist yet. # Reading from table that doesn't exist yet.
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE }
create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE } create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE }
create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1; create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1;
drop table o;" drop table o;"
$CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -q "
drop table refreshes;" drop table refreshes;"

View File

@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
function test1_insert() function test1_insert()
{ {
echo "test1 insert" echo "test1 insert"
$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); $CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
insert into test select number + 3, number from numbers(3); insert into test select number + 3, number from numbers(3);
insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3);
insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3); insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3);
@ -21,7 +21,7 @@ insert into test select number + 15, range(number + 1)::Array(UInt64) from numbe
function test1_select() function test1_select()
{ {
echo "test1 select" echo "test1 select"
$CH_CLIENT -nmq "select v from test order by id; $CH_CLIENT -mq "select v from test order by id;
select v.String from test order by id; select v.String from test order by id;
select v.UInt64 from test order by id; select v.UInt64 from test order by id;
select v.\`LowCardinality(String)\` from test order by id; select v.\`LowCardinality(String)\` from test order by id;
@ -36,7 +36,7 @@ select v.\`Array(UInt64)\`.size0 from test order by id;"
function test2_insert() function test2_insert()
{ {
echo "test2 insert" echo "test2 insert"
$CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); $CH_CLIENT -mq "insert into test select number, NULL from numbers(3);
insert into test select number + 3, number % 2 ? NULL : number from numbers(3); insert into test select number + 3, number % 2 ? NULL : number from numbers(3);
insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3);
insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3); insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3);
@ -47,7 +47,7 @@ insert into test select number + 15, number % 2 ? CAST(NULL, 'Variant(String, UI
function test2_select() function test2_select()
{ {
echo "test2 select" echo "test2 select"
$CH_CLIENT -nmq "select v from test order by id; $CH_CLIENT -mq "select v from test order by id;
select v.String from test order by id; select v.String from test order by id;
select v.UInt64 from test order by id; select v.UInt64 from test order by id;
select v.\`LowCardinality(String)\` from test order by id; select v.\`LowCardinality(String)\` from test order by id;
@ -68,7 +68,7 @@ function test3_insert()
function test3_select() function test3_select()
{ {
echo "test3 select" echo "test3 select"
$CH_CLIENT -nmq "select v from test order by id; $CH_CLIENT -mq "select v from test order by id;
select v.String from test order by id; select v.String from test order by id;
select v.UInt64 from test order by id; select v.UInt64 from test order by id;
select v.\`LowCardinality(String)\` from test order by id; select v.\`LowCardinality(String)\` from test order by id;
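A self-contained illustration of the Variant subcolumn reads these tests exercise (table name hypothetical, not part of this commit): each per-type subcolumn yields NULL on rows that hold a different type.

clickhouse-local -m -q "
    set allow_experimental_variant_type = 1;
    create table t (v Variant(String, UInt64)) engine = Memory;
    insert into t select 'str_0'::Variant(String, UInt64);
    insert into t select 42::UInt64::Variant(String, UInt64);
    select v, v.String, v.UInt64 from t;
"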

View File

@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
function test4_insert() function test4_insert()
{ {
echo "test4 insert" echo "test4 insert"
$CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000); $CH_CLIENT -mq "insert into test select number, NULL from numbers(100000);
insert into test select number + 100000, number from numbers(100000); insert into test select number + 100000, number from numbers(100000);
insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000); insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000);
insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000); insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000);
@ -21,7 +21,7 @@ insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) f
function test4_select function test4_select
{ {
echo "test4 select" echo "test4 select"
$CH_CLIENT -nmq "select v from test format Null; $CH_CLIENT -mq "select v from test format Null;
select count() from test where isNotNull(v); select count() from test where isNotNull(v);
select v.String from test format Null; select v.String from test format Null;
select count() from test where isNotNull(v.String); select count() from test where isNotNull(v.String);

View File

@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic
function test5_insert() function test5_insert()
{ {
echo "test5 insert" echo "test5 insert"
$CH_CLIENT -nmq " $CH_CLIENT -mq "
insert into test select number, NULL from numbers(200000); insert into test select number, NULL from numbers(200000);
insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000); insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000);
insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000); insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000);
@ -22,7 +22,7 @@ insert into test select number + 1000000, number % 2 ? CAST(NULL, 'Variant(Strin
function test5_select() function test5_select()
{ {
echo "test5 select" echo "test5 select"
$CH_CLIENT -nmq " $CH_CLIENT -mq "
select v from test format Null; select v from test format Null;
select count() from test where isNotNull(v); select count() from test where isNotNull(v);
select v.String from test format Null; select v.String from test format Null;

View File

@ -17,7 +17,7 @@ function test6_insert()
function test6_select() function test6_select()
{ {
echo "test6 select" echo "test6 select"
$CH_CLIENT -nmq "select v from test format Null; $CH_CLIENT -mq "select v from test format Null;
select count() from test where isNotNull(v); select count() from test where isNotNull(v);
select v.String from test format Null; select v.String from test format Null;
select count() from test where isNotNull(v.String); select count() from test where isNotNull(v.String);

View File

@@ -10,7 +10,7 @@ disk_name="s3_cache_02944"
 $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name';
 INSERT INTO test SELECT randomString(100);
@@ -33,7 +33,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -47,7 +47,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -63,7 +63,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"
@@ -77,7 +77,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
 $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'"

View File

@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_LOCAL -nm -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
+$CLICKHOUSE_LOCAL -m -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom;
 INSERT INTO test_table SELECT * FROM file('$CURDIR/data_parquet/02960_polygon_bound_bug.parquet', Parquet);
 CREATE DICTIONARY test_dict (geom MultiPolygon) PRIMARY KEY geom SOURCE (CLICKHOUSE(TABLE 'test_table')) LIFETIME(MIN 0 MAX 0) LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1));
 SELECT dictHas(test_dict,(174.84729269276494,-36.99524960275426));"

View File

@@ -24,7 +24,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy'
@@ -40,7 +40,7 @@ cat $config_path \
 > $config_path_tmp
 mv $config_path_tmp $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='error';
 SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps'

View File

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="02963_remote_read_bug"
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a Int32, s String)
@@ -22,7 +22,7 @@ OPTIMIZE TABLE test FINAL;
 query_id=$(random_str 10)
-$CLICKHOUSE_CLIENT -nm --query_id "$query_id" --query "
+$CLICKHOUSE_CLIENT -m --query_id "$query_id" --query "
 WITH RANDOM_SET AS (
 SELECT rand32() % 10000 FROM numbers(100)
 )
@@ -37,7 +37,7 @@ SETTINGS
 merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 1, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 1;
 "
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 SYSTEM FLUSH LOGS;
 -- This threshold was determined experimentally - before the fix this ratio had values around 50K

View File

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

View File

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

View File

@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists num_1;
 drop table if exists num_2;

View File

@@ -24,12 +24,12 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE', auto, 'a UInt64, b String, c Array(UInt64), d Tuple(a UInt64, b String)')"
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE');
 desc file('$DATA_FILE');
 "
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE', JSONEachRow);
 desc file('$DATA_FILE');
 "
@@ -39,7 +39,7 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array(
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}')"
 $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}') settings schema_inference_mode='union'" 2>&1 | grep -c "CANNOT_DETECT_FORMAT"
-$CLICKHOUSE_LOCAL -nmq "
+$CLICKHOUSE_LOCAL -mq "
 desc file('$DATA_FILE.2');
 desc file('$DATA_FILE.{1,2}');
 "

View File

@@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data;
 create table data (key Int) engine=MergeTree() order by key;
 insert into data values (1);
@@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data;
 attach table data (key Int) engine=MergeTree() order by key
 settings

View File

@@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n
 CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database}
 CLICKHOUSE_DATABASE="$new_database"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data_read;
 drop table if exists data_write;
@@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -nm -q "
 # suppress output
 $CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table data_read;
 attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key
 settings
@@ -57,7 +57,7 @@ echo "Files before DETACH TABLE"
 # sed to match any part, since in case of fault injection part name may not be all_0_0_0 but all_1_1_0
 clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive $path" | tail -n+2 | sed 's/all_[^_]*_[^_]*_0/all_X_X_X/g'
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 detach table data_read;
 detach table data_write;
 "

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do
 fi
 echo "$THIS_RUN"
-$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq "
+$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq "
 $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \
 --insert-method $insert_method \
 --table-engine $ENGINE \

View File

@@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 ${CLICKHOUSE_CLIENT} --query "drop table if exists test_s3_mt"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 create table test_s3_mt (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b)
 settings disk = disk(
 name = 03008_s3_plain_rewritable,
@@ -19,7 +19,7 @@ settings disk = disk(
 secret_access_key = clickhouse);
 "
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 insert into test_s3_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5);
 insert into test_s3_mt (*) select number, number, number from numbers_mt(10000);
 select count(*) from test_s3_mt;
@@ -31,13 +31,13 @@ ${CLICKHOUSE_CLIENT} --query "optimize table test_s3_mt final"
 ${CLICKHOUSE_CLIENT} -m --query "
 alter table test_s3_mt add projection test_s3_mt_projection (select * order by b)" 2>&1 | grep -Fq "SUPPORT_IS_DISABLED"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 alter table test_s3_mt update c = 0 where a % 2 = 1;
 alter table test_s3_mt add column d Int64 after c;
 alter table test_s3_mt drop column c;
 " 2>&1 | grep -Fq "SUPPORT_IS_DISABLED"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 detach table test_s3_mt;
 attach table test_s3_mt;
 "

View File

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 disk_name="s3_cache"
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name';
 INSERT INTO test SELECT randomString(1000);
@@ -26,7 +26,7 @@ sed -i "s|<max_size>$prev_max_size<\/max_size>|<max_size>$new_max_size<\/max_siz
 # echo $prev_max_size
 # echo $new_max_size
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"
@@ -36,7 +36,7 @@ $CLICKHOUSE_CLIENT --query "SELECT current_size <= max_size FROM system.filesyst
 sed -i "s|<max_size>$new_max_size<\/max_size>|<max_size>$prev_max_size<\/max_size>|" $config_path
-$CLICKHOUSE_CLIENT -nm --query "
+$CLICKHOUSE_CLIENT -m --query "
 set send_logs_level='fatal';
 SYSTEM RELOAD CONFIG"

View File

@@ -17,7 +17,7 @@ function test()
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()"
-$CH_CLIENT -nm -q "system start merges test; optimize table test final"
+$CH_CLIENT -m -q "system start merges test; optimize table test final"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()"
 $CH_CLIENT -q "drop table test"

View File

@@ -16,7 +16,7 @@ function test()
 $CH_CLIENT -q "insert into test select number, -1, 'str_' || toString(number) from numbers(50000, 100000)"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
-$CH_CLIENT -nm -q "system start merges test; optimize table test final"
+$CH_CLIENT -m -q "system start merges test; optimize table test final"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "drop table test"
 }

View File

@@ -17,7 +17,7 @@ function test()
 $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(50000, 100000)"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
-$CH_CLIENT -nm -q "system start merges test; optimize table test final"
+$CH_CLIENT -m -q "system start merges test; optimize table test final"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "drop table test"
 }

View File

@@ -17,7 +17,7 @@ function test()
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
-$CH_CLIENT -nm -q "system start merges test; optimize table test final"
+$CH_CLIENT -m -q "system start merges test; optimize table test final"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
 $CH_CLIENT -q "drop table test"

View File

@@ -17,7 +17,7 @@ function test()
 $CH_CLIENT -q "insert into test select number, -1, number >= 75000 ? 2 : 1, 'str_' || toString(number) from numbers(50000, 100000)"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
-$CH_CLIENT -nm -q "system start merges test; optimize table test final"
+$CH_CLIENT -m -q "system start merges test; optimize table test final"
 $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
 $CH_CLIENT -q "drop table test"
 }

View File

@@ -42,8 +42,8 @@ function thread_alter_settings()
 {
 local TIMELIMIT=$((SECONDS+$1))
 while [ $SECONDS -lt "$TIMELIMIT" ]; do
-$CLICKHOUSE_CLIENT -n --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM"
-$CLICKHOUSE_CLIENT -n --query "SYSTEM UNLOAD PRIMARY KEY t"
+$CLICKHOUSE_CLIENT --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM"
+$CLICKHOUSE_CLIENT --query "SYSTEM UNLOAD PRIMARY KEY t"
 sleep 0.0$RANDOM
 done
 }
@@ -52,7 +52,7 @@ function thread_query_table()
 {
 local TIMELIMIT=$((SECONDS+$1))
 while [ $SECONDS -lt "$TIMELIMIT" ]; do
-COUNT=$($CLICKHOUSE_CLIENT -n --query "SELECT count() FROM t where not ignore(*);")
+COUNT=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM t where not ignore(*);")
 if [ "$COUNT" -ne "2000" ]; then
 echo "$COUNT"
 fi
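In this file, -n was attached to single-statement invocations, where it was effectively a no-op even before its removal, so dropping it leaves the commands equivalent. A sketch, assuming a reachable server and an existing table t:

    # one statement per call: no multiquery flag needed before or after this commit
    clickhouse-client --query "SELECT count() FROM t WHERE NOT ignore(*)"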

View File

@@ -15,7 +15,7 @@ errors_222=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHER
 errors_333=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHERE code = 333")
 # Throw three random errors: 111, 222 and 333 and wait for more than collect_interval_milliseconds to ensure system.error_log is flushed
-$CLICKHOUSE_CLIENT -mn -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 }
 SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 }
 SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 }
@@ -24,14 +24,14 @@ SYSTEM FLUSH LOGS;
 "
 # Check that the three random errors are propagated
-$CLICKHOUSE_CLIENT -mn -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT sum(value) > $errors_111 FROM system.error_log WHERE code = 111;
 SELECT sum(value) > $errors_222 FROM system.error_log WHERE code = 222;
 SELECT sum(value) > $errors_333 FROM system.error_log WHERE code = 333;
 "
 # Ensure that if we throw them again, they're still propagated
-$CLICKHOUSE_CLIENT -mn -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 }
 SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 }
 SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 }
@@ -39,7 +39,7 @@ SELECT sleep(2) format NULL;
 SYSTEM FLUSH LOGS;
 "
-$CLICKHOUSE_CLIENT -mn -q "
+$CLICKHOUSE_CLIENT -m -q "
 SELECT sum(value) > $(($errors_111+1)) FROM system.error_log WHERE code = 111;
 SELECT sum(value) > $(($errors_222+1)) FROM system.error_log WHERE code = 222;
 SELECT sum(value) > $(($errors_333+1)) FROM system.error_log WHERE code = 333;

View File

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'"
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 1;
 SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
@@ -22,20 +22,20 @@ SELECT toTypeName(array), toTypeName(float) FROM file('$CURDIR/data_hive/partiti
 SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE number = 42;
 """
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 1;
 SELECT identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2;
 SELECT a FROM file('$CURDIR/data_hive/partitioning/a=b/a=b/sample.parquet') LIMIT 1;
 """
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 1;
 SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth1/sample.parquet') LIMIT 10;
 """ 2>&1 | grep -c "INCORRECT_DATA"
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 0;
 SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;
@@ -45,14 +45,14 @@ SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_exis
 $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'"
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 1;
 SELECT *, column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
 SELECT *, non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;"""
-$CLICKHOUSE_LOCAL -n -q """
+$CLICKHOUSE_LOCAL -q """
 set use_hive_partitioning = 0;
 SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
@@ -62,7 +62,7 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum
 $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'"
-$CLICKHOUSE_CLIENT -n -q """
+$CLICKHOUSE_CLIENT -q """
 set use_hive_partitioning = 1;
 SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
@@ -71,7 +71,7 @@ SELECT *, non_existing_column FROM s3('http://localhost:11111/test/hive_partitio
 SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = 'Elizabeth' LIMIT 10;
 """
-$CLICKHOUSE_CLIENT -n -q """
+$CLICKHOUSE_CLIENT -q """
 set use_hive_partitioning = 0;
 SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
@@ -79,7 +79,7 @@ SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column
 $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3CLUSTER PARTITIONING'"
-$CLICKHOUSE_CLIENT -n -q """
+$CLICKHOUSE_CLIENT -q """
 set use_hive_partitioning = 1;
 SELECT *, column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;
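clickhouse-local follows the same rule as clickhouse-client here: a quoted block holding a SET plus several SELECTs runs statement-by-statement without -n. A sketch under that assumption, using placeholder statements rather than this test's parquet paths:

    clickhouse-local -q "
        set use_hive_partitioning = 1;
        select 'statements run in order';
    "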

View File

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # In this test we restore from "/tests/queries/0_stateless/backups/mt_250_parts.zip"
 backup_name="$($CURDIR/helpers/install_predefined_backup.sh mt_250_parts.zip)"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 DROP TABLE IF EXISTS manyparts;
 CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merge_tree_clear_old_temporary_directories_interval_seconds=1, temporary_directories_lifetime=1;
 "
@@ -16,7 +16,7 @@ CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merg
 # RESTORE must protect its temporary directories from removing.
 ${CLICKHOUSE_CLIENT} --query "RESTORE TABLE default.mt_250_parts AS manyparts FROM Disk('backups', '${backup_name}') SETTINGS allow_different_table_def=true" | grep -o "RESTORED"
-${CLICKHOUSE_CLIENT} -nm --query "
+${CLICKHOUSE_CLIENT} -m --query "
 SELECT count(), sum(x) FROM manyparts;
 DROP TABLE manyparts;
 "