mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)
Try manually fixing tests
commit 6c43781d11 (parent eb2ed1b123)
@@ -6,7 +6,7 @@ SELECT number FROM system.numbers WHERE number >= 5 LIMIT 2;
 SELECT * FROM system.numbers WHERE number == 7 LIMIT 1;
 SELECT number AS n FROM system.numbers WHERE number IN(8, 9) LIMIT 2;
 select number from system.numbers limit 0;
-select x from system.numbers limit 1; -- { clientError 0 serverError 47 }
+select x from system.numbers limit 1; -- { serverError UNKNOWN_IDENTIFIER }
 SELECT x, number FROM system.numbers LIMIT 1; -- { serverError 47 }
 SELECT * FROM system.number LIMIT 1; -- { serverError 60 }
 SELECT * FROM system LIMIT 1; -- { serverError 60 }
@@ -21,11 +21,11 @@ SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', '
 SELECT hasColumnInTable('system', 'one', '');

 /* bad queries */
-SELECT hasColumnInTable('', '', ''); -- { serverError 60; }
-SELECT hasColumnInTable('', 't', 'c'); -- { serverError 81; }
-SELECT hasColumnInTable(currentDatabase(), '', 'c'); -- { serverError 60; }
-SELECT hasColumnInTable('d', 't', 's'); -- { serverError 81; }
-SELECT hasColumnInTable(currentDatabase(), 't', 's'); -- { serverError 60; }
+SELECT hasColumnInTable('', '', ''); -- { serverError 60 }
+SELECT hasColumnInTable('', 't', 'c'); -- { serverError 81 }
+SELECT hasColumnInTable(currentDatabase(), '', 'c'); -- { serverError 60 }
+SELECT hasColumnInTable('d', 't', 's'); -- { serverError 81 }
+SELECT hasColumnInTable(currentDatabase(), 't', 's'); -- { serverError 60 }


 DROP TABLE has_column_in_table;
@@ -1,14 +1,14 @@
 SET send_logs_level = 'fatal';

-SELECT formatDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH (42) }
-SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), 'good format pattern', now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT (43) }
-SELECT formatDateTime(now(), 'unescaped %'); -- { serverError BAD_ARGUMENTS (36) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%v'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%x'); -- { serverError NOT_IMPLEMENTED (48) }
-SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%X'); -- { serverError NOT_IMPLEMENTED (48) }
+SELECT formatDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), 'good format pattern', now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT formatDateTime(now(), 'unescaped %'); -- { serverError BAD_ARGUMENTS }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%v'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%x'); -- { serverError NOT_IMPLEMENTED }
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%X'); -- { serverError NOT_IMPLEMENTED }

 SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%a'), formatDateTime(toDate32('2018-01-02'), '%a');
 SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%b'), formatDateTime(toDate32('2018-01-02'), '%b');
@@ -12,8 +12,8 @@ SELECT * FROM VALUES('n UInt64, s String, ss String', (1 + 22, '23', toString(23

 SELECT * FROM VALUES('a Decimal(4, 4), b String, c String', (divide(toDecimal32(5, 3), 3), 'a', 'b'));

-SELECT * FROM VALUES('x Float64', toUInt64(-1)); -- { serverError 69; }
-SELECT * FROM VALUES('x Float64', NULL); -- { serverError 53; }
+SELECT * FROM VALUES('x Float64', toUInt64(-1)); -- { serverError 69 }
+SELECT * FROM VALUES('x Float64', NULL); -- { serverError 53 }
 SELECT * FROM VALUES('x Nullable(Float64)', NULL);

 DROP TABLE values_list;
@@ -19,12 +19,12 @@ DROP TABLE t3;
 -- live view
 SET allow_experimental_live_view=1;
 CREATE LIVE VIEW lv AS SELECT * FROM t1;
-CREATE TABLE t3 AS lv; -- { serverError 80; }
+CREATE TABLE t3 AS lv; -- { serverError 80 }
 DROP TABLE lv;

 -- view
 CREATE VIEW v AS SELECT * FROM t1;
-CREATE TABLE t3 AS v; -- { serverError 80; }
+CREATE TABLE t3 AS v; -- { serverError 80 }
 DROP TABLE v;

 -- dictionary
@@ -43,7 +43,7 @@ SOURCE(CLICKHOUSE(
 TABLE 'dict_data' DB 'test_01056_dict_data' USER 'default' PASSWORD ''))
 LIFETIME(MIN 0 MAX 0)
 LAYOUT(SPARSE_HASHED());
-CREATE TABLE t3 AS dict; -- { serverError 80; }
+CREATE TABLE t3 AS dict; -- { serverError 80 }

 DROP TABLE IF EXISTS t1;
 DROP TABLE IF EXISTS t3;
@@ -16,16 +16,16 @@ select * from dist_01072 where key=toInt32OrZero(toString(xxHash64(0)));
 select * from dist_01072 where key=toInt32(xxHash32(0));
 select * from dist_01072 where key=toInt32(toInt32(xxHash32(0)));
 select * from dist_01072 where key=toInt32(toInt32(toInt32(xxHash32(0))));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;

 drop table dist_01072;
 create table dist_01072 (key Int, value Nullable(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;

@@ -34,16 +34,16 @@ set allow_suspicious_low_cardinality_types=1;
 drop table dist_01072;
 create table dist_01072 (key Int, value LowCardinality(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;

 drop table dist_01072;
 create table dist_01072 (key Int, value LowCardinality(Nullable(Int)), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2);
 select * from dist_01072 where key=toInt32(xxHash32(0));
-select * from dist_01072 where key=value; -- { serverError 507; }
-select * from dist_01072 where key=toInt32(value); -- { serverError 507; }
+select * from dist_01072 where key=value; -- { serverError 507 }
+select * from dist_01072 where key=toInt32(value); -- { serverError 507 }
 select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0;
 select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0;

@@ -9,7 +9,7 @@ create table data_02000 (key Int) Engine=Null();
 create table dist_02000 as data_02000 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_02000, key);

 select * from data_02000 where key = 0xdeadbeafdeadbeaf;
-select * from dist_02000 where key = 0xdeadbeafdeadbeaf settings force_optimize_skip_unused_shards=2; -- { serverError 507; }
+select * from dist_02000 where key = 0xdeadbeafdeadbeaf settings force_optimize_skip_unused_shards=2; -- { serverError 507 }
 select * from dist_02000 where key = 0xdeadbeafdeadbeaf;

 drop table data_02000;
@@ -16,7 +16,7 @@ LAYOUT(FLAT());

 SYSTEM RELOAD DICTIONARY dict_db_01225.dict;

-DROP TABLE dict_db_01225.dict; -- { serverError 520; }
+DROP TABLE dict_db_01225.dict; -- { serverError 520 }
 DROP DICTIONARY dict_db_01225.dict;

 DROP DATABASE dict_db_01225;
@@ -18,7 +18,7 @@ LIFETIME(MIN 0 MAX 0)
 LAYOUT(FLAT());

 SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` FORMAT TSVRaw;
-SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 487; }
+SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 487 }

 DROP DATABASE dict_db_01225;
 DROP DATABASE dict_db_01225_dictionary;
@@ -15,7 +15,7 @@ select count() from system.query_log where current_database = currentDatabase()

 set max_rows_to_read='100K';
 set log_queries_min_type='EXCEPTION_WHILE_PROCESSING';
-select '01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158; }
+select '01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158 }
 set max_rows_to_read=0;
 system flush logs;
 select count() from system.query_log where current_database = currentDatabase()
@@ -23,7 +23,7 @@ select count() from system.query_log where current_database = currentDatabase()
 and event_date >= yesterday() and type = 'ExceptionWhileProcessing';

 set max_rows_to_read='100K';
-select '01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158; }
+select '01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError 158 }
 system flush logs;
 set max_rows_to_read=0;
 select count() from system.query_log where
@@ -76,7 +76,7 @@ insert into data_01278 select
 reinterpretAsString(number), // s6
 reinterpretAsString(number), // s7
 reinterpretAsString(number) // s8
-from numbers(100000); -- { serverError 241; }" > /dev/null 2>&1
+from numbers(100000); -- { serverError 241 }" > /dev/null 2>&1
 local ret_code=$?
 if [[ $ret_code -eq 0 ]];
 then
@@ -19,9 +19,9 @@ select port{{ suffix }}('http://127.0.0.1/', toUInt16(80));
 select port{{ suffix }}('http://foobar.com/', toUInt16(80));

 -- unsupported
-/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; }
-/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43; }
-/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42; }
+/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43 }
+/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43 }
+/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42 }

 --
 -- Known limitations of domain() (getURLHost())
@@ -1 +1 @@
-SELECT dictGetString(concat('default', '.countryId'), 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36; }
+SELECT dictGetString(concat('default', '.countryId'), 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36 }
@@ -1,7 +1,7 @@
 -- Tags: no-parallel

 -- https://github.com/ClickHouse/ClickHouse/issues/11469
-SELECT dictGet('default.countryId', 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36; }
+SELECT dictGet('default.countryId', 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError 36 }


 -- with real dictionary
@@ -5,9 +5,9 @@ SELECT CAST(CAST(NULL AS Nullable(String)) AS Nullable(Enum8('Hello' = 1)));
 SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1)));

 -- empty string still not acceptable
-SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
-SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
+SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36 }
+SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36 }

 -- non-Nullable Enum() still not acceptable
-SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError 349; }
-SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError 349; }
+SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError 349 }
+SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError 349 }
@@ -1,4 +1,4 @@
 -- repeat() with this length and this number of rows will allocation huge enough region (MSB set),
 -- which will cause roundUpToPowerOfTwoOrZero() returns 0 for such allocation (before the fix),
 -- and later repeat() will try to use this memory and will got SIGSEGV.
-SELECT repeat('0.0001048576', number * (number * (number * 255))) FROM numbers(65535); -- { serverError 131; }
+SELECT repeat('0.0001048576', number * (number * (number * 255))) FROM numbers(65535); -- { serverError 131 }
@@ -1,5 +1,5 @@
 SELECT arrayFilter((a) -> ((a, arrayJoin([])) IN (Null, [Null])), []);
 SELECT arrayFilter((a) -> ((a, arrayJoin([[]])) IN (Null, [Null])), []);

-SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError 43; }
+SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError 43 }
 SELECT * FROM numbers(1) LEFT ARRAY JOIN arrayFilter((x_0, x_1) -> (arrayJoin([]) IN (NULL)), [], []) AS arr_x;
@@ -1,7 +1,7 @@
 -- executeGeneric()
 SELECT range(1025, 1048576 + 9223372036854775807, 9223372036854775807);
 SELECT range(1025, 1048576 + (9223372036854775807 AS i), i);
-SELECT range(1025, 18446744073709551615, 1); -- { serverError 69; }
+SELECT range(1025, 18446744073709551615, 1); -- { serverError 69 }

 -- executeConstStep()
 SELECT range(number, 1048576 + 9223372036854775807, 9223372036854775807) FROM system.numbers LIMIT 1 OFFSET 1025;
@@ -7,16 +7,16 @@ insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2),

 set max_rows_to_read = 1;
 -- non-optimized
-select count() from test1 settings max_parallel_replicas = 3; -- { serverError 158; }
+select count() from test1 settings max_parallel_replicas = 3; -- { serverError 158 }
 -- optimized (toYear is monotonic and we provide the partition expr as is)
 select count() from test1 where toYear(toDate(p)) = 1999;
 -- non-optimized (toDate(DateTime) is always monotonic, but we cannot relaxing the predicates to do trivial count())
-select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError 158; }
+select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError 158 }
 -- optimized (partition expr wrapped with non-monotonic functions)
 select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 1;
 select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 0;
 -- non-optimized (some predicate depends on non-partition_expr columns)
-select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError 158; }
+select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError 158 }
 -- optimized
 select count() from test1 where toDate(p) > '2020-09-01';
 -- non-optimized
@@ -35,10 +35,10 @@ select count() from test_tuple where i > 2;
 -- optimized
 select count() from test_tuple where i < 1;
 -- non-optimized
-select count() from test_tuple array join [p,p] as c where toDate(p) = '2020-09-01'; -- { serverError 158; }
+select count() from test_tuple array join [p,p] as c where toDate(p) = '2020-09-01'; -- { serverError 158 }
 select count() from test_tuple array join [1,2] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 4;
 -- non-optimized
-select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01'; -- { serverError 158; }
+select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01'; -- { serverError 158 }
 select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 6;

 create table test_two_args(i int, j int, k int) engine MergeTree partition by i + j order by k settings index_granularity = 1;
@@ -48,7 +48,7 @@ insert into test_two_args values (1, 2, 3), (2, 1, 3), (0, 3, 4);
 -- optimized
 select count() from test_two_args where i + j = 3;
 -- non-optimized
-select count() from test_two_args where i = 1; -- { serverError 158; }
+select count() from test_two_args where i = 1; -- { serverError 158 }

 drop table test1;
 drop table test_tuple;
@@ -13,9 +13,9 @@ set max_memory_usage='500M';
 set max_threads=1;
 set max_block_size=500;

-select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241; }
+select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241 }
 select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=1;
 -- for WITH TOTALS previous groups should be kept.
-select key, groupArray(repeat('a', 200)), count() from data_01513 group by key with totals format Null settings optimize_aggregation_in_order=1; -- { serverError 241; }
+select key, groupArray(repeat('a', 200)), count() from data_01513 group by key with totals format Null settings optimize_aggregation_in_order=1; -- { serverError 241 }

 drop table data_01513;
@@ -35,7 +35,7 @@ ATTACH TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDE
 SELECT * FROM primary_key_test FINAL;
 DROP TABLE primary_key_test;

-CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError 36; }
+CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError 36 }

 CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY (v1, gcd(v1, v2));

@@ -5,7 +5,7 @@ create table dist_01528 as system.one engine=Distributed('test_cluster_two_shard

 set optimize_skip_unused_shards=1;
 set force_optimize_skip_unused_shards=1;
-select * from dist_01528 where dummy = 2; -- { serverError 507; }
+select * from dist_01528 where dummy = 2; -- { serverError 507 }
 select * from dist_01528 where dummy = 2 settings allow_nondeterministic_optimize_skip_unused_shards=1;

 drop table dist_01528;
@@ -30,7 +30,7 @@ create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickho
 drop database db_01530_atomic;

 create database db_01530_atomic Engine=Atomic;
-create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253; }
+create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253 }

 set database_atomic_wait_for_drop_and_detach_synchronously=1;

@@ -17,7 +17,7 @@ create table dist_01555 (key Int) Engine=Distributed(test_cluster_with_incorrect

 insert into dist_01555 values (1)(2);
 -- since test_cluster_with_incorrect_pw contains incorrect password ignore error
-system flush distributed dist_01555; -- { serverError 516; }
+system flush distributed dist_01555; -- { serverError 516 }
 select length(splitByChar('*', data_path)), replaceRegexpOne(data_path, '^.*/([^/]*)/' , '\\1'), extract(last_exception, 'AUTHENTICATION_FAILED'), dateDiff('s', last_exception_time, now()) < 5 from system.distribution_queue where database = currentDatabase() and table = 'dist_01555' format CSV;

 drop table dist_01555;
@@ -1 +1 @@
-select toUnixTimestamp(today()); -- { serverError 44; }
+select toUnixTimestamp(today()); -- { serverError 44 }
@@ -25,5 +25,5 @@ select countMatchesCaseInsensitive('foo.com BAR.COM baz.com bam.com', '([^. ]+)\
 select countMatchesCaseInsensitive('foo.com@foo.com bar.com@foo.com BAZ.com@foo.com bam.com@foo.com', '([^. ]+)\.([^. ]+)@([^. ]+)\.([^. ]+)');

 select 'errors';
-select countMatches(1, 'foo') from numbers(1); -- { serverError 43; }
-select countMatches('foobarfoo', toString(number)) from numbers(1); -- { serverError 44; }
+select countMatches(1, 'foo') from numbers(1); -- { serverError 43 }
+select countMatches('foobarfoo', toString(number)) from numbers(1); -- { serverError 44 }
@@ -7,6 +7,6 @@ insert into data_01709 values (2);

 optimize table data_01709 final;

-insert into data_01709 values (3); -- { serverError 252; }
+insert into data_01709 values (3); -- { serverError 252 }

 drop table data_01709;
@@ -4,6 +4,6 @@ create table t (x UInt32) engine = MergeTree order by tuple() settings index_gra
 insert into t select number from numbers(100);
 alter table t add projection p (select uniqHLL12(x));
 insert into t select number + 100 from numbers(100);
-select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307; }
+select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }

 drop table if exists t;
@@ -1,10 +1,10 @@
-select toInt64('--1'); -- { serverError 72; }
-select toInt64('+-1'); -- { serverError 72; }
-select toInt64('++1'); -- { serverError 72; }
-select toInt64('++'); -- { serverError 72; }
-select toInt64('+'); -- { serverError 72; }
-select toInt64('1+1'); -- { serverError 6; }
-select toInt64('1-1'); -- { serverError 6; }
-select toInt64(''); -- { serverError 32; }
+select toInt64('--1'); -- { serverError 72 }
+select toInt64('+-1'); -- { serverError 72 }
+select toInt64('++1'); -- { serverError 72 }
+select toInt64('++'); -- { serverError 72 }
+select toInt64('+'); -- { serverError 72 }
+select toInt64('1+1'); -- { serverError 6 }
+select toInt64('1-1'); -- { serverError 6 }
+select toInt64(''); -- { serverError 32 }
 select toInt64('1');
 select toInt64('-1');
@@ -19,7 +19,7 @@ INSERT INTO test02008 VALUES (tuple(3.3, 5.5, 6.6));
 SELECT untuple(arrayJoin(tupleToNameValuePairs(col))) from test02008;

 DROP TABLE IF EXISTS test02008;
-SELECT tupleToNameValuePairs(tuple(1, 1.3)); -- { serverError 43; }
-SELECT tupleToNameValuePairs(tuple(1, [1,2])); -- { serverError 43; }
-SELECT tupleToNameValuePairs(tuple(1, 'a')); -- { serverError 43; }
-SELECT tupleToNameValuePairs(33); -- { serverError 43; }
+SELECT tupleToNameValuePairs(tuple(1, 1.3)); -- { serverError 43 }
+SELECT tupleToNameValuePairs(tuple(1, [1,2])); -- { serverError 43 }
+SELECT tupleToNameValuePairs(tuple(1, 'a')); -- { serverError 43 }
+SELECT tupleToNameValuePairs(33); -- { serverError 43 }