Merge pull request #49420 from ClickHouse/fix_00002
Fix some bad error messages
Commit: 1ec3040d0c
@@ -554,7 +554,8 @@ public:
         if (capacity < size_to_reserve)
         {
             if (unlikely(MAX_STRING_SIZE < size_to_reserve))
-                throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", size_to_reserve);
+                throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({}), maximum: {}",
+                    size_to_reserve, MAX_STRING_SIZE);

             size_t rounded_capacity = roundUpToPowerOfTwoOrZero(size_to_reserve);
             chassert(rounded_capacity <= MAX_STRING_SIZE + 1); /// rounded_capacity <= 2^31
@@ -624,7 +625,8 @@ public:
     void changeImpl(StringRef value, Arena * arena)
     {
         if (unlikely(MAX_STRING_SIZE < value.size))
-            throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", value.size);
+            throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({}), maximum: {}",
+                value.size, MAX_STRING_SIZE);

         UInt32 value_size = static_cast<UInt32>(value.size);

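Both hunks above follow the same pattern: the violated limit is now formatted into the exception text next to the offending value, so the log line is actionable on its own. Below is a minimal standalone sketch of that style, assuming std::format (C++20) and std::runtime_error as stand-ins for ClickHouse's fmt-based Exception, with a purely illustrative MAX_STRING_SIZE constant:

#include <cstddef>
#include <format>
#include <stdexcept>

/// Illustrative limit only; the real MAX_STRING_SIZE is defined in the ClickHouse sources.
static constexpr std::size_t MAX_STRING_SIZE = 1ULL << 30;

void reserveChecked(std::size_t size_to_reserve)
{
    if (MAX_STRING_SIZE < size_to_reserve)
        /// Before the PR the message carried only the value; including the limit makes the
        /// error self-explanatory without a trip to the source code.
        throw std::runtime_error(std::format(
            "String size is too big ({}), maximum: {}", size_to_reserve, MAX_STRING_SIZE));
    /// ... the actual reservation logic would follow here ...
}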
@@ -1048,7 +1048,7 @@ void DatabaseReplicated::dropReplica(
     assert(!database || database_zookeeper_path == database->zookeeper_path);

     if (full_replica_name.find('/') != std::string::npos)
-        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid replica name: {}", full_replica_name);
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid replica name, '/' is not allowed: {}", full_replica_name);

     auto zookeeper = Context::getGlobalContextInstance()->getZooKeeper();

@@ -150,7 +150,7 @@ namespace
         if (text == "bc")
             throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Era BC exceeds the range of DateTime");
         else if (text != "ad")
-            throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Unknown era {}", text);
+            throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Unknown era {} (expected 'ad' or 'bc')", text);
     }

     void setCentury(Int32 century)
@@ -33,7 +33,7 @@ size_t HTTPChunkedReadBuffer::readChunkHeader()
     } while (!in->eof() && isHexDigit(*in->position()));

     if (res > max_chunk_size)
-        throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Chunk size exceeded the limit");
+        throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Chunk size exceeded the limit (max size: {})", max_chunk_size);

     /// NOTE: If we want to read any chunk extensions, it should be done here.

@@ -2012,7 +2012,7 @@ struct WindowFunctionNtile final : public WindowFunction

             if (!buckets)
             {
-                throw Exception(ErrorCodes::BAD_ARGUMENTS, "ntile's argument must > 0");
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "ntile's argument must be greater than 0");
             }
         }
         // new partition
@@ -2117,7 +2117,14 @@ def reportLogStats(args):
     'Column ''{}'' already exists', 'No macro {} in config', 'Invalid origin H3 index: {}',
     'Invalid session timeout: ''{}''', 'Tuple cannot be empty', 'Database name is empty',
     'Table {} is not a Dictionary', 'Expected function, got: {}', 'Unknown identifier: ''{}''',
-    'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist'
+    'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist',
+    'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line',
+    'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse object', 'Invalid date: {}',
+    'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}',
+    'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid',
+    'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid',
+    'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.',
+    'Attempt to read after EOF.', 'Replication was stopped', '{} building file infos', 'Cannot parse uuid {}'
     ) AS known_short_messages
     SELECT count() AS c, message_format_string, substr(any(message), 1, 120)
     FROM system.text_log
@@ -2,7 +2,7 @@ runtime messages 0.001
 runtime exceptions 0.05
 messages shorter than 10 1
 messages shorter than 16 3
-exceptions shorter than 30 30
+exceptions shorter than 30 3
 noisy messages 0.3
 noisy Trace messages 0.16
 noisy Debug messages 0.09
@@ -49,7 +49,14 @@ create temporary table known_short_messages (s String) as select * from (select
 'Column ''{}'' already exists', 'No macro {} in config', 'Invalid origin H3 index: {}',
 'Invalid session timeout: ''{}''', 'Tuple cannot be empty', 'Database name is empty',
 'Table {} is not a Dictionary', 'Expected function, got: {}', 'Unknown identifier: ''{}''',
-'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist'
+'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist',
+'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line',
+'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse object', 'Invalid date: {}',
+'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}',
+'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid',
+'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid',
+'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.',
+'Attempt to read after EOF.', 'Replication was stopped', '{} building file infos', 'Cannot parse uuid {}'
 ] as arr) array join arr;

 -- Check that we don't have too many short meaningless message patterns.
@@ -59,7 +66,7 @@ select 'messages shorter than 10', max2(countDistinctOrDefault(message_format_st
 select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages;

 -- Same as above, but exceptions must be more informative. Feel free to update the threshold or remove this query if really necessary
-select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 30) from logs where length(message_format_string) < 30 and message ilike '%DB::Exception%' and message_format_string not in known_short_messages;
+select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 30 and message ilike '%DB::Exception%' and message_format_string not in known_short_messages;


 -- Avoid too noisy messages: top 1 message frequency must be less than 30%. We should reduce the threshold
@@ -98,7 +105,9 @@ select 'incorrect patterns', max2(countDistinct(message_format_string), 15) from
     where ((rand() % 8) = 0)
     and message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s)
     and message not like (s || ' (skipped % similar messages)')
-    and message not like ('%Exception: '||s||'%') group by message_format_string
+    and message not like ('%Exception: '||s||'%')
+    and message not like ('%(skipped % similar messages)%')
+    group by message_format_string
 ) where any_message not like '%Poco::Exception%';

 drop table logs;
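For context on the unchanged lines in this hunk: the test matches each logged message back against its format string by turning fmt placeholders such as {} or {:.3f} into SQL LIKE wildcards, which is what replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') does. Below is a minimal standalone C++ sketch of that substitution (not part of the PR; it uses std::regex, and the character class is reordered to '[.:0-9dfx]' only to keep std::regex parsing unambiguous):

#include <iostream>
#include <regex>
#include <string>

int main()
{
    const std::string format_string = "String size is too big ({}), maximum: {}";
    /// Replace every placeholder like {}, {:.3f} or {:x} with the LIKE wildcard '%'.
    const std::regex placeholder("\\{[.:0-9dfx]*\\}");
    std::cout << std::regex_replace(format_string, placeholder, "%") << '\n';
    /// Prints: String size is too big (%), maximum: %
}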