diff --git a/tests/queries/0_stateless/00002_system_numbers.sql b/tests/queries/0_stateless/00002_system_numbers.sql
index 95f75573201..d5934c7d387 100644
--- a/tests/queries/0_stateless/00002_system_numbers.sql
+++ b/tests/queries/0_stateless/00002_system_numbers.sql
@@ -6,7 +6,7 @@ SELECT number FROM system.numbers WHERE number >= 5 LIMIT 2;
 SELECT * FROM system.numbers WHERE number == 7 LIMIT 1;
 SELECT number AS n FROM system.numbers WHERE number IN(8, 9) LIMIT 2;
 select number from system.numbers limit 0;
-select x from system.numbers limit 1; -- { clientError 0 serverError 47 }
+select x from system.numbers limit 1; -- { serverError UNKNOWN_IDENTIFIER }
 SELECT x, number FROM system.numbers LIMIT 1; -- { serverError 47 }
 SELECT * FROM system.number LIMIT 1; -- { serverError 60 }
 SELECT * FROM system LIMIT 1; -- { serverError 60 }
diff --git a/tests/queries/0_stateless/00386_has_column_in_table.sql b/tests/queries/0_stateless/00386_has_column_in_table.sql
index d543bb42ca7..7347293e05b 100644
--- a/tests/queries/0_stateless/00386_has_column_in_table.sql
+++ b/tests/queries/0_stateless/00386_has_column_in_table.sql
@@ -21,11 +21,11 @@ SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', '
 SELECT hasColumnInTable('system', 'one', '');
 
 /* bad queries */
-SELECT hasColumnInTable('', '', ''); -- { serverError 60; }
-SELECT hasColumnInTable('', 't', 'c'); -- { serverError 81; }
-SELECT hasColumnInTable(currentDatabase(), '', 'c'); -- { serverError 60; }
-SELECT hasColumnInTable('d', 't', 's'); -- { serverError 81; }
-SELECT hasColumnInTable(currentDatabase(), 't', 's'); -- { serverError 60; }
+SELECT hasColumnInTable('', '', ''); -- { serverError 60 }
+SELECT hasColumnInTable('', 't', 'c'); -- { serverError 81 }
+SELECT hasColumnInTable(currentDatabase(), '', 'c'); -- { serverError 60 }
+SELECT hasColumnInTable('d', 't', 's'); -- { serverError 81 }
+SELECT hasColumnInTable(currentDatabase(), 't', 's'); -- { serverError 60 }
 
 DROP TABLE has_column_in_table;
diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql
index 74ec03d83d3..3f8c927dfe7 100644
--- a/tests/queries/0_stateless/00718_format_datetime.sql
+++ b/tests/queries/0_stateless/00718_format_datetime.sql
@@ -1,14 +1,14 @@
 SET send_logs_level = 'fatal';
 
-SELECT formatDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH (42) }
-SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), 'good format pattern', now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), 'unescaped %'); -- { serverError BAD_ARGUMENTS (36) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%v'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%x'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%X'); -- { serverError NOT_IMPLEMENTED (48) }
+SELECT formatDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), 'good format pattern', now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), 'unescaped %'); -- { serverError BAD_ARGUMENTS }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%v'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%x'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%X'); -- { serverError NOT_IMPLEMENTED }
 
 SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%a'), formatDateTime(toDate32('2018-01-02'), '%a');
 SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%b'), formatDateTime(toDate32('2018-01-02'), '%b');
diff --git a/tests/queries/0_stateless/00975_values_list.sql b/tests/queries/0_stateless/00975_values_list.sql
index 40c86898966..35afc99e93e 100644
--- a/tests/queries/0_stateless/00975_values_list.sql
+++ b/tests/queries/0_stateless/00975_values_list.sql
@@ -12,8 +12,8 @@ SELECT * FROM VALUES('n UInt64, s String, ss String', (1 + 22, '23', toString(23
 
 SELECT * FROM VALUES('a Decimal(4, 4), b String, c String', (divide(toDecimal32(5, 3), 3), 'a', 'b'));
 
-SELECT * FROM VALUES('x Float64', toUInt64(-1)); -- { serverError 69; }
-SELECT * FROM VALUES('x Float64', NULL); -- { serverError 53; }
+SELECT * FROM VALUES('x Float64', toUInt64(-1)); -- { serverError 69 }
+SELECT * FROM VALUES('x Float64', NULL); -- { serverError 53 }
 SELECT * FROM VALUES('x Nullable(Float64)', NULL);
 
 DROP TABLE values_list;
diff --git a/tests/queries/0_stateless/01056_create_table_as.sql b/tests/queries/0_stateless/01056_create_table_as.sql
index 62db8282ac0..2e146d67ca9 100644
--- a/tests/queries/0_stateless/01056_create_table_as.sql
+++ b/tests/queries/0_stateless/01056_create_table_as.sql
@@ -19,12 +19,12 @@ DROP TABLE t3;
 -- live view
 SET allow_experimental_live_view=1;
 CREATE LIVE VIEW lv AS SELECT * FROM t1;
-CREATE TABLE t3 AS lv; -- { serverError 80; }
+CREATE TABLE t3 AS lv; -- { serverError 80 }
 DROP TABLE lv;
 
 -- view
 CREATE VIEW v AS SELECT * FROM t1;
-CREATE TABLE t3 AS v; -- { serverError 80; }
+CREATE TABLE t3 AS v; -- { serverError 80 }
 DROP TABLE v;
 
 -- dictionary
@@ -43,7 +43,7 @@ SOURCE(CLICKHOUSE(
 TABLE 'dict_data' DB 'test_01056_dict_data' USER 'default' PASSWORD ''))
 LIFETIME(MIN 0 MAX 0)
 LAYOUT(SPARSE_HASHED());
-CREATE TABLE t3 AS dict; -- { serverError 80; }
+CREATE TABLE t3 AS dict; -- { serverError 80 }
 
 DROP TABLE IF EXISTS t1;
 DROP TABLE IF EXISTS t3;
diff --git a/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
index 85c239765bc..24eaaacb8bd 100644
--- a/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
+++ b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
@@ -16,16 +16,16 @@ select * from dist_01072 where key=toInt32OrZero(toString(xxHash64(0)));
 select * from dist_01072 where key=toInt32(xxHash32(0));
 select * from dist_01072 where key=toInt32(toInt32(xxHash32(0)));
 select * from dist_01072 where key=toInt32(toInt32(toInt32(xxHash32(0))));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;
 
 drop table dist_01072;
 create table dist_01072 (key Int, value Nullable(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;
 
@@ -34,16 +34,16 @@ set allow_suspicious_low_cardinality_types=1;
 drop table dist_01072;
 create table dist_01072 (key Int, value LowCardinality(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;
 
 drop table dist_01072;
 create table dist_01072 (key Int, value LowCardinality(Nullable(Int)), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;
diff --git a/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql
index 65adaf3ad71..de41132df62 100644
--- a/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql
+++ b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql
@@ -9,7 +9,7 @@ create table data_02000 (key Int) Engine=Null();
 create table dist_02000 as data_02000 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_02000, key);
 
 select * from data_02000 where key = 0xdeadbeafdeadbeaf;
-select * from dist_02000 where key = 0xdeadbeafdeadbeaf settings force_optimize_skip_unused_shards=2; -- { serverError 507; }
+select * from dist_02000 where key = 0xdeadbeafdeadbeaf settings force_optimize_skip_unused_shards=2; -- { serverError 507 }
 select * from dist_02000 where key = 0xdeadbeafdeadbeaf;
 
 drop table data_02000;
diff --git a/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql b/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql
index 513ecbd4ed4..be2f7b2a9bf 100644
--- a/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql
+++ b/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql
@@ -16,7 +16,7 @@ LAYOUT(FLAT());
 
 SYSTEM RELOAD DICTIONARY dict_db_01225.dict;
 
-DROP TABLE dict_db_01225.dict; -- { serverError 520; }
+DROP TABLE dict_db_01225.dict; -- { serverError 520 }
 DROP DICTIONARY dict_db_01225.dict;
 DROP DATABASE dict_db_01225;
diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql
index 09cde642ed2..bc733a0c546 100644
--- a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql
+++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql
@@ -18,7 +18,7 @@ LIFETIME(MIN 0 MAX 0)
 LAYOUT(FLAT());
 
 SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` FORMAT TSVRaw;
-SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 487; }
+SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 487 }
 
 DROP DATABASE dict_db_01225;
 DROP DATABASE dict_db_01225_dictionary;
diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql
index c2470bb9a56..0ed5e3e605c 100644
--- a/tests/queries/0_stateless/01231_log_queries_min_type.sql
+++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql
@@ -15,7 +15,7 @@ select count() from system.query_log where current_database = currentDatabase()
 
 set max_rows_to_read='100K';
 set log_queries_min_type='EXCEPTION_WHILE_PROCESSING';
-select '01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158; }
+select '01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158 }
 set max_rows_to_read=0;
 system flush logs;
 select count() from system.query_log where current_database = currentDatabase()
@@ -23,7 +23,7 @@ select count() from system.query_log where current_database = currentDatabase()
     and event_date >= yesterday() and type = 'ExceptionWhileProcessing';
 
 set max_rows_to_read='100K';
-select '01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158; }
+select '01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158 }
 system flush logs;
 set max_rows_to_read=0;
 select count() from system.query_log where
diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
index 0e258bbbb09..08cc97c84bf 100755
--- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
+++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
@@ -76,7 +76,7 @@ insert into data_01278 select
     reinterpretAsString(number), // s6
     reinterpretAsString(number), // s7
    reinterpretAsString(number) // s8
-from numbers(100000); -- { serverError 241; }" > /dev/null 2>&1
+from numbers(100000); -- { serverError 241 }" > /dev/null 2>&1
 
     local ret_code=$?
    if [[ $ret_code -eq 0 ]]; then
diff --git a/tests/queries/0_stateless/01284_port.sql.j2 b/tests/queries/0_stateless/01284_port.sql.j2
index 6f78b3b8e3b..50e096c6deb 100644
--- a/tests/queries/0_stateless/01284_port.sql.j2
+++ b/tests/queries/0_stateless/01284_port.sql.j2
@@ -19,9 +19,9 @@ select port{{ suffix }}('http://127.0.0.1/', toUInt16(80));
 select port{{ suffix }}('http://foobar.com/', toUInt16(80));
 
 -- unsupported
-/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; }
-/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43; }
-/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42; }
+/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43 }
+/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43 }
+/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42 }
 
 --
 -- Known limitations of domain() (getURLHost())
diff --git a/tests/queries/0_stateless/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS.sql b/tests/queries/0_stateless/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS.sql
index 88a2b25c2db..8ff9cd2b9f2 100644
--- a/tests/queries/0_stateless/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS.sql
+++ b/tests/queries/0_stateless/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS.sql
@@ -1 +1 @@
-SELECT dictGetString(concat('default', '.countryId'), 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36; }
+SELECT dictGetString(concat('default', '.countryId'), 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36 }
diff --git a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql
index 258d96829a5..29ffcb46fbf 100644
--- a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql
+++ b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql
@@ -1,7 +1,7 @@
 -- Tags: no-parallel
 
 -- https://github.com/ClickHouse/ClickHouse/issues/11469
-SELECT dictGet('default.countryId', 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36; }
+SELECT dictGet('default.countryId', 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36 }
 
 
 -- with real dictionary
diff --git a/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql
index 3b53e593095..b8b5370515a 100644
--- a/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql
+++ b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql
@@ -5,9 +5,9 @@ SELECT CAST(CAST(NULL AS Nullable(String)) AS Nullable(Enum8('Hello' = 1)));
 SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1)));
 
 -- empty string still not acceptable
-SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
-SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
+SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36 }
+SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36 }
 
 -- non-Nullable Enum() still not acceptable
-SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError 349; }
-SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError 349; }
+SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError 349 }
+SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError 349 }
diff --git a/tests/queries/0_stateless/01404_roundUpToPowerOfTwoOrZero_safety.sql b/tests/queries/0_stateless/01404_roundUpToPowerOfTwoOrZero_safety.sql
index 4ee6e1fa5e4..d61a35c9999 100644
--- a/tests/queries/0_stateless/01404_roundUpToPowerOfTwoOrZero_safety.sql
+++ b/tests/queries/0_stateless/01404_roundUpToPowerOfTwoOrZero_safety.sql
@@ -1,4 +1,4 @@
 -- repeat() with this length and this number of rows will allocation huge enough region (MSB set),
 -- which will cause roundUpToPowerOfTwoOrZero() returns 0 for such allocation (before the fix),
 -- and later repeat() will try to use this memory and will got SIGSEGV.
-SELECT repeat('0.0001048576', number * (number * (number * 255))) FROM numbers(65535); -- { serverError 131; }
+SELECT repeat('0.0001048576', number * (number * (number * 255))) FROM numbers(65535); -- { serverError 131 }
diff --git a/tests/queries/0_stateless/01407_lambda_arrayJoin.sql b/tests/queries/0_stateless/01407_lambda_arrayJoin.sql
index 363b1d92dbb..e1b8c1d5a76 100644
--- a/tests/queries/0_stateless/01407_lambda_arrayJoin.sql
+++ b/tests/queries/0_stateless/01407_lambda_arrayJoin.sql
@@ -1,5 +1,5 @@
 SELECT arrayFilter((a) -> ((a, arrayJoin([])) IN (Null, [Null])), []);
 SELECT arrayFilter((a) -> ((a, arrayJoin([[]])) IN (Null, [Null])), []);
-SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError 43; }
+SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError 43 }
 
 SELECT * FROM numbers(1) LEFT ARRAY JOIN arrayFilter((x_0, x_1) -> (arrayJoin([]) IN (NULL)), [], []) AS arr_x;
diff --git a/tests/queries/0_stateless/01408_range_overflow.sql b/tests/queries/0_stateless/01408_range_overflow.sql
index 2107e8c3f36..d26507f8358 100644
--- a/tests/queries/0_stateless/01408_range_overflow.sql
+++ b/tests/queries/0_stateless/01408_range_overflow.sql
@@ -1,7 +1,7 @@
 -- executeGeneric()
 SELECT range(1025, 1048576 + 9223372036854775807, 9223372036854775807);
 SELECT range(1025, 1048576 + (9223372036854775807 AS i), i);
-SELECT range(1025, 18446744073709551615, 1); -- { serverError 69; }
+SELECT range(1025, 18446744073709551615, 1); -- { serverError 69 }
 
 -- executeConstStep()
 SELECT range(number, 1048576 + 9223372036854775807, 9223372036854775807) FROM system.numbers LIMIT 1 OFFSET 1025;
diff --git a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
index e4e2e3dd76a..e8643a4468c 100644
--- a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
+++ b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
@@ -7,16 +7,16 @@ insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2),
 
 set max_rows_to_read = 1;
 -- non-optimized
-select count() from test1 settings max_parallel_replicas = 3; -- { serverError 158; }
+select count() from test1 settings max_parallel_replicas = 3; -- { serverError 158 }
 -- optimized (toYear is monotonic and we provide the partition expr as is)
 select count() from test1 where toYear(toDate(p)) = 1999;
 -- non-optimized (toDate(DateTime) is always monotonic, but we cannot relaxing the predicates to do trivial count())
-select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError 158; }
+select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError 158 }
 -- optimized (partition expr wrapped with non-monotonic functions)
 select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 1;
 select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 0;
 -- non-optimized (some predicate depends on non-partition_expr columns)
-select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError 158; }
+select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError 158 }
 -- optimized
 select count() from test1 where toDate(p) > '2020-09-01';
 -- non-optimized
@@ -35,10 +35,10 @@ select count() from test_tuple where i > 2;
 -- optimized
 select count() from test_tuple where i < 1;
 -- non-optimized
-select count() from test_tuple array join [p,p] as c where toDate(p) = '2020-09-01'; -- { serverError 158; }
+select count() from test_tuple array join [p,p] as c where toDate(p) = '2020-09-01'; -- { serverError 158 }
 select count() from test_tuple array join [1,2] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 4;
 -- non-optimized
-select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01'; -- { serverError 158; }
+select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01'; -- { serverError 158 }
 select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 6;
 
 create table test_two_args(i int, j int, k int) engine MergeTree partition by i + j order by k settings index_granularity = 1;
@@ -48,7 +48,7 @@ insert into test_two_args values (1, 2, 3), (2, 1, 3), (0, 3, 4);
 -- optimized
 select count() from test_two_args where i + j = 3;
 -- non-optimized
-select count() from test_two_args where i = 1; -- { serverError 158; }
+select count() from test_two_args where i = 1; -- { serverError 158 }
 
 drop table test1;
 drop table test_tuple;
diff --git a/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql b/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql
index 228e4d73167..3d57518d0f4 100644
--- a/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql
+++ b/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql
@@ -13,9 +13,9 @@ set max_memory_usage='500M';
 set max_threads=1;
 set max_block_size=500;
 
-select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241; }
+select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241 }
 select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=1;
 -- for WITH TOTALS previous groups should be kept.
-select key, groupArray(repeat('a', 200)), count() from data_01513 group by key with totals format Null settings optimize_aggregation_in_order=1; -- { serverError 241; }
+select key, groupArray(repeat('a', 200)), count() from data_01513 group by key with totals format Null settings optimize_aggregation_in_order=1; -- { serverError 241 }
 
 drop table data_01513;
diff --git a/tests/queries/0_stateless/01516_create_table_primary_key.sql b/tests/queries/0_stateless/01516_create_table_primary_key.sql
index b2b9f288eab..630c573c2cc 100644
--- a/tests/queries/0_stateless/01516_create_table_primary_key.sql
+++ b/tests/queries/0_stateless/01516_create_table_primary_key.sql
@@ -35,7 +35,7 @@ ATTACH TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDE
 SELECT * FROM primary_key_test FINAL;
 DROP TABLE primary_key_test;
 
-CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError 36; }
+CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError 36 }
 
 CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY (v1, gcd(v1, v2));
diff --git a/tests/queries/0_stateless/01528_allow_nondeterministic_optimize_skip_unused_shards.sql b/tests/queries/0_stateless/01528_allow_nondeterministic_optimize_skip_unused_shards.sql
index 08fba7480d1..ac04178e585 100644
--- a/tests/queries/0_stateless/01528_allow_nondeterministic_optimize_skip_unused_shards.sql
+++ b/tests/queries/0_stateless/01528_allow_nondeterministic_optimize_skip_unused_shards.sql
@@ -5,7 +5,7 @@ create table dist_01528 as system.one engine=Distributed('test_cluster_two_shard
 set optimize_skip_unused_shards=1;
 set force_optimize_skip_unused_shards=1;
 
-select * from dist_01528 where dummy = 2; -- { serverError 507; }
+select * from dist_01528 where dummy = 2; -- { serverError 507 }
 select * from dist_01528 where dummy = 2 settings allow_nondeterministic_optimize_skip_unused_shards=1;
 
 drop table dist_01528;
diff --git a/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql b/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql
index 7a2e64742cf..13b4a4e331b 100644
--- a/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql
+++ b/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql
@@ -30,7 +30,7 @@ create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickho
 drop database db_01530_atomic;
 
 create database db_01530_atomic Engine=Atomic;
-create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253; }
+create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253 }
 
 set database_atomic_wait_for_drop_and_detach_synchronously=1;
diff --git a/tests/queries/0_stateless/01555_system_distribution_queue_mask.sql b/tests/queries/0_stateless/01555_system_distribution_queue_mask.sql
index fea75e1439f..f19c77c68a3 100644
--- a/tests/queries/0_stateless/01555_system_distribution_queue_mask.sql
+++ b/tests/queries/0_stateless/01555_system_distribution_queue_mask.sql
@@ -17,7 +17,7 @@ create table dist_01555 (key Int) Engine=Distributed(test_cluster_with_incorrect
 
 insert into dist_01555 values (1)(2);
 -- since test_cluster_with_incorrect_pw contains incorrect password ignore error
-system flush distributed dist_01555; -- { serverError 516; }
+system flush distributed dist_01555; -- { serverError 516 }
 select length(splitByChar('*', data_path)), replaceRegexpOne(data_path, '^.*/([^/]*)/' , '\\1'), extract(last_exception, 'AUTHENTICATION_FAILED'), dateDiff('s', last_exception_time, now()) < 5 from system.distribution_queue where database = currentDatabase() and table = 'dist_01555' format CSV;
 
 drop table dist_01555;
diff --git a/tests/queries/0_stateless/01592_toUnixTimestamp_Date.sql b/tests/queries/0_stateless/01592_toUnixTimestamp_Date.sql
index 5dc87e31f75..e8411484d71 100644
--- a/tests/queries/0_stateless/01592_toUnixTimestamp_Date.sql
+++ b/tests/queries/0_stateless/01592_toUnixTimestamp_Date.sql
@@ -1 +1 @@
-select toUnixTimestamp(today()); -- { serverError 44; }
+select toUnixTimestamp(today()); -- { serverError 44 }
diff --git a/tests/queries/0_stateless/01595_countMatches.sql b/tests/queries/0_stateless/01595_countMatches.sql
index 6374fe7bc5b..0b170945d44 100644
--- a/tests/queries/0_stateless/01595_countMatches.sql
+++ b/tests/queries/0_stateless/01595_countMatches.sql
@@ -25,5 +25,5 @@ select countMatchesCaseInsensitive('foo.com BAR.COM baz.com bam.com', '([^. ]+)\
 select countMatchesCaseInsensitive('foo.com@foo.com bar.com@foo.com BAZ.com@foo.com bam.com@foo.com', '([^. ]+)\.([^. ]+)@([^. ]+)\.([^. ]+)');
 
 select 'errors';
-select countMatches(1, 'foo') from numbers(1); -- { serverError 43; }
-select countMatches('foobarfoo', toString(number)) from numbers(1); -- { serverError 44; }
+select countMatches(1, 'foo') from numbers(1); -- { serverError 43 }
+select countMatches('foobarfoo', toString(number)) from numbers(1); -- { serverError 44 }
diff --git a/tests/queries/0_stateless/01709_inactive_parts_to_throw_insert.sql b/tests/queries/0_stateless/01709_inactive_parts_to_throw_insert.sql
index 6de0d4f4e0c..2bb92aec713 100644
--- a/tests/queries/0_stateless/01709_inactive_parts_to_throw_insert.sql
+++ b/tests/queries/0_stateless/01709_inactive_parts_to_throw_insert.sql
@@ -7,6 +7,6 @@ insert into data_01709 values (2);
 
 optimize table data_01709 final;
 
-insert into data_01709 values (3); -- { serverError 252; }
+insert into data_01709 values (3); -- { serverError 252 }
 
 drop table data_01709;
diff --git a/tests/queries/0_stateless/01710_projection_with_mixed_pipeline.sql b/tests/queries/0_stateless/01710_projection_with_mixed_pipeline.sql
index 734aa659146..5169c667b81 100644
--- a/tests/queries/0_stateless/01710_projection_with_mixed_pipeline.sql
+++ b/tests/queries/0_stateless/01710_projection_with_mixed_pipeline.sql
@@ -4,6 +4,6 @@ create table t (x UInt32) engine = MergeTree order by tuple() settings index_gra
 insert into t select number from numbers(100);
 alter table t add projection p (select uniqHLL12(x));
 insert into t select number + 100 from numbers(100);
-select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307; }
+select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
 
 drop table if exists t;
diff --git a/tests/queries/0_stateless/01888_read_int_safe.sql b/tests/queries/0_stateless/01888_read_int_safe.sql
index 3aea8e38ab0..197338775c4 100644
--- a/tests/queries/0_stateless/01888_read_int_safe.sql
+++ b/tests/queries/0_stateless/01888_read_int_safe.sql
@@ -1,10 +1,10 @@
-select toInt64('--1'); -- { serverError 72; }
-select toInt64('+-1'); -- { serverError 72; }
-select toInt64('++1'); -- { serverError 72; }
-select toInt64('++'); -- { serverError 72; }
-select toInt64('+'); -- { serverError 72; }
-select toInt64('1+1'); -- { serverError 6; }
-select toInt64('1-1'); -- { serverError 6; }
-select toInt64(''); -- { serverError 32; }
+select toInt64('--1'); -- { serverError 72 }
+select toInt64('+-1'); -- { serverError 72 }
+select toInt64('++1'); -- { serverError 72 }
+select toInt64('++'); -- { serverError 72 }
+select toInt64('+'); -- { serverError 72 }
+select toInt64('1+1'); -- { serverError 6 }
+select toInt64('1-1'); -- { serverError 6 }
+select toInt64(''); -- { serverError 32 }
 select toInt64('1');
 select toInt64('-1');
diff --git a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
index 59987a86590..1f6026bb61e 100644
--- a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
+++ b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
@@ -19,7 +19,7 @@ INSERT INTO test02008 VALUES (tuple(3.3, 5.5, 6.6));
 SELECT untuple(arrayJoin(tupleToNameValuePairs(col))) from test02008;
 DROP TABLE IF EXISTS test02008;
 
-SELECT tupleToNameValuePairs(tuple(1, 1.3)); -- { serverError 43; }
-SELECT tupleToNameValuePairs(tuple(1, [1,2])); -- { serverError 43; }
-SELECT tupleToNameValuePairs(tuple(1, 'a')); -- { serverError 43; }
-SELECT tupleToNameValuePairs(33); -- { serverError 43; }
+SELECT tupleToNameValuePairs(tuple(1, 1.3)); -- { serverError 43 }
+SELECT tupleToNameValuePairs(tuple(1, [1,2])); -- { serverError 43 }
+SELECT tupleToNameValuePairs(tuple(1, 'a')); -- { serverError 43 }
+SELECT tupleToNameValuePairs(33); -- { serverError 43 }