Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)
Fixed wording #245
Commit 3f8ef38b41 (parent 97b30d0b33)
@@ -20,7 +20,7 @@ namespace ErrorCodes
 {
     extern const int TOO_SLOW;
     extern const int TOO_LESS_ARGUMENTS_FOR_FUNCTION;
-    extern const int TOO_MUCH_ARGUMENTS_FOR_FUNCTION;
+    extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
     extern const int SYNTAX_ERROR;
     extern const int BAD_ARGUMENTS;
     extern const int LOGICAL_ERROR;
@@ -152,7 +152,7 @@ public:
         if (arg_count - 1 > AggregateFunctionSequenceMatchData::max_events)
             throw Exception{"Aggregate function " + derived().getName() + " supports up to " +
                 toString(AggregateFunctionSequenceMatchData::max_events) + " event arguments.",
-                ErrorCodes::TOO_MUCH_ARGUMENTS_FOR_FUNCTION};
+                ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION};
 
         const auto time_arg = arguments.front().get();
         if (!typeid_cast<const DataTypeDateTime *>(time_arg))
@@ -41,7 +41,7 @@ namespace ErrorCodes
     extern const int CANNOT_READ_COMPRESSED_CHUNK = 31;
     extern const int ATTEMPT_TO_READ_AFTER_EOF = 32;
     extern const int CANNOT_READ_ALL_DATA = 33;
-    extern const int TOO_MUCH_ARGUMENTS_FOR_FUNCTION = 34;
+    extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION = 34;
     extern const int TOO_LESS_ARGUMENTS_FOR_FUNCTION = 35;
     extern const int BAD_ARGUMENTS = 36;
     extern const int UNKNOWN_ELEMENT_IN_AST = 37;
@@ -164,15 +164,15 @@ namespace ErrorCodes
     extern const int UNKNOWN_RELATION = 155;
     extern const int DICTIONARIES_WAS_NOT_LOADED = 156;
     extern const int ILLEGAL_OVERFLOW_MODE = 157;
-    extern const int TOO_MUCH_ROWS = 158;
+    extern const int TOO_MANY_ROWS = 158;
     extern const int TIMEOUT_EXCEEDED = 159;
     extern const int TOO_SLOW = 160;
-    extern const int TOO_MUCH_COLUMNS = 161;
+    extern const int TOO_MANY_COLUMNS = 161;
     extern const int TOO_DEEP_SUBQUERIES = 162;
     extern const int TOO_DEEP_PIPELINE = 163;
     extern const int READONLY = 164;
-    extern const int TOO_MUCH_TEMPORARY_COLUMNS = 165;
-    extern const int TOO_MUCH_TEMPORARY_NON_CONST_COLUMNS = 166;
+    extern const int TOO_MANY_TEMPORARY_COLUMNS = 165;
+    extern const int TOO_MANY_TEMPORARY_NON_CONST_COLUMNS = 166;
     extern const int TOO_DEEP_AST = 167;
     extern const int TOO_BIG_AST = 168;
     extern const int BAD_TYPE_OF_FIELD = 169;
@@ -207,7 +207,7 @@ namespace ErrorCodes
     extern const int UNKNOWN_QUOTA = 199;
     extern const int QUOTA_DOESNT_ALLOW_KEYS = 200;
     extern const int QUOTA_EXPIRED = 201;
-    extern const int TOO_MUCH_SIMULTANEOUS_QUERIES = 202;
+    extern const int TOO_MANY_SIMULTANEOUS_QUERIES = 202;
     extern const int NO_FREE_CONNECTION = 203;
     extern const int CANNOT_FSYNC = 204;
     extern const int NESTED_TYPE_TOO_DEEP = 205;
@@ -255,10 +255,10 @@ namespace ErrorCodes
     extern const int INVALID_PARTITION_VALUE = 248;
     extern const int NOT_ENOUGH_BLOCK_NUMBERS = 250;
     extern const int NO_SUCH_REPLICA = 251;
-    extern const int TOO_MUCH_PARTS = 252;
+    extern const int TOO_MANY_PARTS = 252;
     extern const int REPLICA_IS_ALREADY_EXIST = 253;
     extern const int NO_ACTIVE_REPLICAS = 254;
-    extern const int TOO_MUCH_RETRIES_TO_FETCH_PARTS = 255;
+    extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS = 255;
     extern const int PARTITION_ALREADY_EXISTS = 256;
     extern const int PARTITION_DOESNT_EXIST = 257;
     extern const int UNION_ALL_RESULT_STRUCTURES_MISMATCH = 258;
@@ -308,7 +308,7 @@ namespace ErrorCodes
     extern const int CANNOT_WAITPID = 304;
     extern const int TABLE_WAS_NOT_DROPPED = 305;
     extern const int TOO_DEEP_RECURSION = 306;
-    extern const int TOO_MUCH_BYTES = 307;
+    extern const int TOO_MANY_BYTES = 307;
     extern const int UNEXPECTED_NODE_IN_ZOOKEEPER = 308;
     extern const int FUNCTION_CANNOT_HAVE_PARAMETERS = 309;
     extern const int INVALID_SHARD_WEIGHT = 317;
@@ -342,7 +342,7 @@ namespace ErrorCodes
     extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS = 364;
     extern const int OUTPUT_IS_NOT_SORTED = 365;
     extern const int SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT = 366;
-    extern const int TOO_MUCH_FETCHES = 367;
+    extern const int TOO_MANY_FETCHES = 367;
     extern const int BAD_CAST = 368;
     extern const int ALL_REPLICAS_ARE_STALE = 369;
     extern const int DATA_TYPE_CANNOT_BE_USED_IN_TABLES = 370;
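The hunks above touch both halves of ClickHouse's error-code convention: ErrorCodes.cpp is the one place where each code is defined with a stable numeric value, while every other translation unit only re-declares the codes it actually throws. A minimal sketch of that pattern using one of the renamed codes (illustrative, not part of this commit):

    // In ErrorCodes.cpp: the single definition, carrying the stable numeric value.
    namespace DB { namespace ErrorCodes
    {
        extern const int TOO_MANY_ROWS = 158;
    }}

    // In any other .cpp that throws it: a declaration only; the linker resolves
    // it against the definition above.
    namespace DB { namespace ErrorCodes
    {
        extern const int TOO_MANY_ROWS;
    }}

    // Typical use: the code travels with the exception, so callers can branch on
    // e.code(), as the ExpressionAnalyzer::executeScalarSubqueriesImpl hunk later
    // in this commit does.
    //     throw Exception(message, ErrorCodes::TOO_MANY_ROWS);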
@@ -11,8 +11,8 @@ namespace DB
 
 namespace ErrorCodes
 {
-    extern const int TOO_MUCH_ROWS;
-    extern const int TOO_MUCH_BYTES;
+    extern const int TOO_MANY_ROWS;
+    extern const int TOO_MANY_BYTES;
     extern const int TIMEOUT_EXCEEDED;
     extern const int TOO_SLOW;
     extern const int LOGICAL_ERROR;
@@ -203,14 +203,14 @@ bool IProfilingBlockInputStream::checkDataSizeLimits()
                 std::string("Limit for result rows")
                     + " exceeded: read " + toString(info.rows)
                     + " rows, maximum: " + toString(limits.max_rows_to_read),
-                ErrorCodes::TOO_MUCH_ROWS);
+                ErrorCodes::TOO_MANY_ROWS);
 
         if (limits.max_bytes_to_read && info.bytes > limits.max_bytes_to_read)
             return handleOverflowMode(limits.read_overflow_mode,
                 std::string("Limit for result bytes (uncompressed)")
                     + " exceeded: read " + toString(info.bytes)
                     + " bytes, maximum: " + toString(limits.max_bytes_to_read),
-                ErrorCodes::TOO_MUCH_BYTES);
+                ErrorCodes::TOO_MANY_BYTES);
     }
 
     return true;
@@ -286,11 +286,11 @@ void IProfilingBlockInputStream::progressImpl(const Progress & value)
             if (limits.max_rows_to_read && total_rows_estimate > limits.max_rows_to_read)
                 throw Exception("Limit for rows to read exceeded: " + toString(total_rows_estimate)
                     + " rows read (or to read), maximum: " + toString(limits.max_rows_to_read),
-                    ErrorCodes::TOO_MUCH_ROWS);
+                    ErrorCodes::TOO_MANY_ROWS);
             else
                 throw Exception("Limit for (uncompressed) bytes to read exceeded: " + toString(progress.bytes)
                     + " bytes read, maximum: " + toString(limits.max_bytes_to_read),
-                    ErrorCodes::TOO_MUCH_BYTES);
+                    ErrorCodes::TOO_MANY_BYTES);
             break;
         }
 
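Both limit checks in checkDataSizeLimits return through handleOverflowMode, whose body is not part of this diff. Based on the OverflowMode::THROW and OverflowMode::BREAK arms visible in the Aggregator::checkLimits hunk further down, a plausible self-contained sketch of that helper looks like this (stand-in types; an assumption, not the actual implementation):

    #include <stdexcept>
    #include <string>

    // Stand-ins for DB::OverflowMode and DB::Exception used by the real code.
    enum class OverflowMode { THROW, BREAK, ANY };

    static bool handleOverflowMode(OverflowMode mode, const std::string & message, int code)
    {
        switch (mode)
        {
            case OverflowMode::THROW:
                // The real code throws DB::Exception carrying the error code,
                // e.g. TOO_MANY_ROWS or TOO_MANY_BYTES after this rename.
                throw std::runtime_error(message + " (code " + std::to_string(code) + ")");
            case OverflowMode::BREAK:
                return false;   // tell the caller to stop producing data instead of failing
            default:
                return true;    // other modes: let the caller continue
        }
    }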
@@ -73,7 +73,7 @@ Block MergeSortingBlockInputStream::readImpl()
 
     /** Algorithm:
       * - read to memory blocks from source stream;
-      * - if too much of them and if external sorting is enabled,
+      * - if too many of them and if external sorting is enabled,
       * - merge all blocks to sorted stream and write it to temporary file;
       * - at the end, merge all sorted streams from temporary files and also from rest of blocks in memory.
       */
@@ -93,7 +93,7 @@ Block MergeSortingBlockInputStream::readImpl()
         blocks.push_back(block);
         sum_bytes_in_blocks += block.bytes();
 
-        /** If too much of them and if external sorting is enabled,
+        /** If too many of them and if external sorting is enabled,
           * will merge blocks that we have in memory at this moment and write merged stream to temporary (compressed) file.
           * NOTE. It's possible to check free space in filesystem.
           */
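The algorithm comment in this hunk describes a standard external sort: buffer sorted blocks in memory, spill a merged run to a temporary file when the buffer grows too large, and merge everything at the end. A condensed sketch of that control flow with stand-in names (Block, readNextBlock, mergeAndWriteToTemporaryFile and the byte threshold are illustrative, not taken from this diff):

    #include <cstddef>
    #include <vector>

    struct Block
    {
        std::size_t size = 0;
        std::size_t bytes() const { return size; }
        bool empty() const { return size == 0; }
    };

    Block readNextBlock() { return {}; }                        // stub: reads from the source stream
    void mergeAndWriteToTemporaryFile(std::vector<Block> &) {}  // stub: merge in-memory blocks into one sorted run on disk

    void accumulateAndSpill(std::size_t max_bytes_before_external_sort)
    {
        std::vector<Block> blocks;
        std::size_t sum_bytes_in_blocks = 0;

        for (Block block = readNextBlock(); !block.empty(); block = readNextBlock())
        {
            blocks.push_back(block);
            sum_bytes_in_blocks += block.bytes();

            /// Too many buffered blocks and external sorting is enabled: merge what is
            /// in memory and write the sorted run to a temporary (compressed) file.
            if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort)
            {
                mergeAndWriteToTemporaryFile(blocks);
                blocks.clear();
                sum_bytes_in_blocks = 0;
            }
        }

        /// At the end: merge all sorted runs from temporary files together with the blocks still in memory.
    }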
@@ -6,8 +6,8 @@ namespace DB
 
 namespace ErrorCodes
 {
-    extern const int TOO_MUCH_COLUMNS;
-    extern const int TOO_MUCH_ROWS;
+    extern const int TOO_MANY_COLUMNS;
+    extern const int TOO_MANY_ROWS;
     extern const int RECEIVED_EMPTY_DATA;
 }
 
@@ -22,13 +22,13 @@ std::string readInvalidateQuery(IProfilingBlockInputStream & block_input_stream)
 
     auto columns = block.columns();
     if (columns > 1)
-        throw Exception("Expected single column in resultset, got " + std::to_string(columns), ErrorCodes::TOO_MUCH_COLUMNS);
+        throw Exception("Expected single column in resultset, got " + std::to_string(columns), ErrorCodes::TOO_MANY_COLUMNS);
 
     auto rows = block.rows();
     if (rows == 0)
         throw Exception("Expected single row in resultset, got 0", ErrorCodes::RECEIVED_EMPTY_DATA);
     if (rows > 1)
-        throw Exception("Expected single row in resultset, got at least " + std::to_string(rows), ErrorCodes::TOO_MUCH_ROWS);
+        throw Exception("Expected single row in resultset, got at least " + std::to_string(rows), ErrorCodes::TOO_MANY_ROWS);
 
     auto column = block.getByPosition(0).column;
     response = column->getDataAt(0).toString();
@@ -36,7 +36,7 @@ std::string readInvalidateQuery(IProfilingBlockInputStream & block_input_stream)
     while ((block = block_input_stream.read()))
     {
         if (block.rows() > 0)
-            throw Exception("Expected single row in resultset, got at least " + std::to_string(rows + 1), ErrorCodes::TOO_MUCH_ROWS);
+            throw Exception("Expected single row in resultset, got at least " + std::to_string(rows + 1), ErrorCodes::TOO_MANY_ROWS);
     }
 
     block_input_stream.readSuffix();
@@ -44,7 +44,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int CANNOT_COMPILE_CODE;
-    extern const int TOO_MUCH_ROWS;
+    extern const int TOO_MANY_ROWS;
     extern const int EMPTY_DATA_PASSED;
     extern const int CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS;
 }
@@ -973,7 +973,7 @@ bool Aggregator::checkLimits(size_t result_size, bool & no_more_keys) const
         case OverflowMode::THROW:
            throw Exception("Limit for rows to GROUP BY exceeded: has " + toString(result_size)
                + " rows, maximum: " + toString(params.max_rows_to_group_by),
-                ErrorCodes::TOO_MUCH_ROWS);
+                ErrorCodes::TOO_MANY_ROWS);
 
         case OverflowMode::BREAK:
             return false;
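The Aggregator::checkLimits hunk shows only the THROW and BREAK arms of the overflow switch. For GROUP BY there is also a mode that stops creating new keys once the limit is hit; how the three arms plausibly fit together is sketched below (the ANY behaviour and the helper's shape are inferred here, not shown in this diff):

    #include <cstddef>
    #include <stdexcept>

    enum class OverflowMode { THROW, BREAK, ANY };  // stand-in, as in the earlier sketch

    // Sketch: returns false to stop aggregation early; may flip no_more_keys instead.
    bool checkGroupByLimit(std::size_t result_size, std::size_t max_rows_to_group_by,
                           OverflowMode mode, bool & no_more_keys)
    {
        if (!no_more_keys && max_rows_to_group_by && result_size > max_rows_to_group_by)
        {
            switch (mode)
            {
                case OverflowMode::THROW:
                    // TOO_MANY_ROWS in the real code after this rename.
                    throw std::runtime_error("Limit for rows to GROUP BY exceeded");
                case OverflowMode::BREAK:
                    return false;          // return the partial result accumulated so far
                case OverflowMode::ANY:
                    no_more_keys = true;   // keep aggregating, but only for keys already present
                    break;
            }
        }
        return true;
    }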
@@ -27,8 +27,8 @@ namespace ErrorCodes
     extern const int UNKNOWN_ACTION;
     extern const int NOT_FOUND_COLUMN_IN_BLOCK;
     extern const int SIZES_OF_ARRAYS_DOESNT_MATCH;
-    extern const int TOO_MUCH_TEMPORARY_COLUMNS;
-    extern const int TOO_MUCH_TEMPORARY_NON_CONST_COLUMNS;
+    extern const int TOO_MANY_TEMPORARY_COLUMNS;
+    extern const int TOO_MANY_TEMPORARY_NON_CONST_COLUMNS;
 }
 
 
@@ -503,7 +503,7 @@ void ExpressionActions::checkLimits(Block & block) const
     if (limits.max_temporary_columns && block.columns() > limits.max_temporary_columns)
         throw Exception("Too many temporary columns: " + block.dumpNames()
             + ". Maximum: " + limits.max_temporary_columns.toString(),
-            ErrorCodes::TOO_MUCH_TEMPORARY_COLUMNS);
+            ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS);
 
     if (limits.max_temporary_non_const_columns)
     {
@@ -521,7 +521,7 @@ void ExpressionActions::checkLimits(Block & block) const
 
             throw Exception("Too many temporary non-const columns:" + list_of_non_const_columns.str()
                 + ". Maximum: " + limits.max_temporary_non_const_columns.toString(),
-                ErrorCodes::TOO_MUCH_TEMPORARY_NON_CONST_COLUMNS);
+                ErrorCodes::TOO_MANY_TEMPORARY_NON_CONST_COLUMNS);
         }
     }
 }
@@ -71,7 +71,7 @@ namespace ErrorCodes
     extern const int UNKNOWN_IDENTIFIER;
     extern const int CYCLIC_ALIASES;
     extern const int INCORRECT_RESULT_OF_SCALAR_SUBQUERY;
-    extern const int TOO_MUCH_ROWS;
+    extern const int TOO_MANY_ROWS;
     extern const int NOT_FOUND_COLUMN_IN_BLOCK;
     extern const int INCORRECT_ELEMENT_OF_SET;
     extern const int ALIAS_REQUIRED;
@@ -1236,7 +1236,7 @@ void ExpressionAnalyzer::executeScalarSubqueriesImpl(ASTPtr & ast)
         }
         catch (const Exception & e)
         {
-            if (e.code() == ErrorCodes::TOO_MUCH_ROWS)
+            if (e.code() == ErrorCodes::TOO_MANY_ROWS)
                 throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
             else
                 throw;
@@ -61,7 +61,7 @@ namespace ErrorCodes
     extern const int SAMPLING_NOT_SUPPORTED;
     extern const int ILLEGAL_FINAL;
     extern const int ILLEGAL_PREWHERE;
-    extern const int TOO_MUCH_COLUMNS;
+    extern const int TOO_MANY_COLUMNS;
     extern const int LOGICAL_ERROR;
 }
 
@@ -577,7 +577,7 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline
         throw Exception("Limit for number of columns to read exceeded. "
             "Requested: " + toString(required_columns.size())
             + ", maximum: " + settings.limits.max_columns_to_read.toString(),
-            ErrorCodes::TOO_MUCH_COLUMNS);
+            ErrorCodes::TOO_MANY_COLUMNS);
 
     size_t limit_length = 0;
     size_t limit_offset = 0;
@@ -13,7 +13,7 @@ namespace DB
 
 namespace ErrorCodes
 {
-    extern const int TOO_MUCH_SIMULTANEOUS_QUERIES;
+    extern const int TOO_MANY_SIMULTANEOUS_QUERIES;
     extern const int QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING;
     extern const int LOGICAL_ERROR;
 }
@@ -33,7 +33,7 @@ ProcessList::EntryPtr ProcessList::insert(
 
         if (!is_kill_query && max_size && cur_size >= max_size
             && (!settings.queue_max_wait_ms.totalMilliseconds() || !have_space.tryWait(mutex, settings.queue_max_wait_ms.totalMilliseconds())))
-            throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MUCH_SIMULTANEOUS_QUERIES);
+            throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
 
         /** Why we use current user?
           * Because initial one is passed by client and credentials for it is not verified,
@@ -55,7 +55,7 @@ ProcessList::EntryPtr ProcessList::insert(
                 throw Exception("Too many simultaneous queries for user " + client_info.current_user
                     + ". Current: " + toString(user_process_list->second.queries.size())
                     + ", maximum: " + settings.max_concurrent_queries_for_user.toString(),
-                    ErrorCodes::TOO_MUCH_SIMULTANEOUS_QUERIES);
+                    ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
 
             auto range = user_process_list->second.queries.equal_range(client_info.current_query_id);
             if (range.first != range.second)
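The first ProcessList::insert hunk combines the admission check with a bounded wait: if the server is already running max_size queries, the caller may wait up to queue_max_wait_ms on the have_space condition for a slot before giving up with TOO_MANY_SIMULTANEOUS_QUERIES. A simplified, self-contained sketch of that gate using standard primitives (the class, names and types are stand-ins for Poco's condition/mutex pair used in the real code):

    #include <chrono>
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <stdexcept>

    class QueryGate
    {
    public:
        QueryGate(std::size_t max_size_, std::chrono::milliseconds queue_max_wait_)
            : max_size(max_size_), queue_max_wait(queue_max_wait_) {}

        void admit()
        {
            std::unique_lock<std::mutex> lock(mutex);
            // Wait (up to queue_max_wait) for a running query to finish and free a slot.
            if (cur_size >= max_size
                && !have_space.wait_for(lock, queue_max_wait, [&] { return cur_size < max_size; }))
                throw std::runtime_error("Too many simultaneous queries");  // TOO_MANY_SIMULTANEOUS_QUERIES
            ++cur_size;
        }

        void release()
        {
            std::lock_guard<std::mutex> lock(mutex);
            --cur_size;
            have_space.notify_one();
        }

    private:
        std::mutex mutex;
        std::condition_variable have_space;
        std::size_t cur_size = 0;
        const std::size_t max_size;
        const std::chrono::milliseconds queue_max_wait;
    };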
@@ -20,7 +20,7 @@ namespace ErrorCodes
     extern const int POCO_EXCEPTION;
     extern const int STD_EXCEPTION;
     extern const int UNKNOWN_EXCEPTION;
-    extern const int TOO_MUCH_SIMULTANEOUS_QUERIES;
+    extern const int TOO_MANY_SIMULTANEOUS_QUERIES;
 }
 
 void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
@@ -71,7 +71,7 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
     catch (Exception & e)
     {
 
-        if (e.code() == ErrorCodes::TOO_MUCH_SIMULTANEOUS_QUERIES)
+        if (e.code() == ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES)
         {
             if (!response.sent())
                 response.send();
@@ -22,7 +22,6 @@ namespace ErrorCodes
 {
     extern const int ABORTED;
     extern const int BAD_SIZE_OF_FILE_IN_DATA_PART;
-    extern const int TOO_MUCH_SIMULTANEOUS_QUERIES;
     extern const int CANNOT_WRITE_TO_OSTREAM;
 }
 
@@ -74,6 +74,7 @@ namespace ErrorCodes
     extern const int INVALID_PARTITION_VALUE;
     extern const int METADATA_MISMATCH;
     extern const int PART_IS_TEMPORARILY_LOCKED;
+    extern const int TOO_MANY_PARTS;
 }
 
 
@@ -1726,7 +1727,7 @@ void MergeTreeData::delayInsertIfNeeded(Poco::Event * until)
     if (parts_count >= settings.parts_to_throw_insert)
     {
         ProfileEvents::increment(ProfileEvents::RejectedInserts);
-        throw Exception("Too many parts (" + toString(parts_count) + "). Merges are processing significantly slower than inserts.", ErrorCodes::TOO_MUCH_PARTS);
+        throw Exception("Too many parts (" + toString(parts_count) + "). Merges are processing significantly slower than inserts.", ErrorCodes::TOO_MANY_PARTS);
     }
 
     const size_t max_k = settings.parts_to_throw_insert - settings.parts_to_delay_insert; /// always > 0
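delayInsertIfNeeded works with two thresholds: at parts_to_throw_insert the INSERT is rejected outright with TOO_MANY_PARTS, while between parts_to_delay_insert and that ceiling the insert is merely slowed down, with max_k (the width of the window, as computed in the last context line above) scaling the delay. A hedged sketch of that shape (the exact delay formula is not shown in this diff and is assumed here):

    #include <algorithm>
    #include <chrono>
    #include <cstddef>
    #include <stdexcept>
    #include <thread>

    // Sketch: throttle or reject an INSERT depending on how many active parts exist.
    void delayInsertIfNeeded(std::size_t parts_count,
                             std::size_t parts_to_delay_insert,   // start slowing down here
                             std::size_t parts_to_throw_insert,   // reject outright here
                             std::chrono::milliseconds max_delay)
    {
        if (parts_count >= parts_to_throw_insert)
            throw std::runtime_error("Too many parts. Merges are processing significantly slower than inserts.");  // TOO_MANY_PARTS

        if (parts_count < parts_to_delay_insert)
            return;  // fast path: below the delay threshold, nothing to do

        // Width of the throttling window; always > 0, as the original comment notes.
        const std::size_t max_k = parts_to_throw_insert - parts_to_delay_insert;
        const std::size_t k = parts_count - parts_to_delay_insert + 1;

        // Assumed shape: delay grows with k / max_k; the real formula may differ.
        const auto delay = std::chrono::milliseconds(max_delay.count() * k / max_k);
        std::this_thread::sleep_for(std::min(delay, max_delay));
    }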
@@ -27,7 +27,6 @@ namespace ErrorCodes
 {
     extern const int LOGICAL_ERROR;
     extern const int INVALID_PARTITION_NAME;
-    extern const int TOO_MUCH_PARTS;
     extern const int NO_SUCH_DATA_PART;
     extern const int DUPLICATE_DATA_PART;
     extern const int DIRECTORY_ALREADY_EXISTS;
@@ -81,7 +81,7 @@ namespace ErrorCodes
     extern const int TABLE_IS_READ_ONLY;
     extern const int TABLE_WAS_NOT_DROPPED;
     extern const int PARTITION_ALREADY_EXISTS;
-    extern const int TOO_MUCH_RETRIES_TO_FETCH_PARTS;
+    extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS;
     extern const int RECEIVED_ERROR_FROM_REMOTE_IO_SERVER;
     extern const int PARTITION_DOESNT_EXIST;
     extern const int CHECKSUM_DOESNT_MATCH;
@@ -91,7 +91,7 @@ namespace ErrorCodes
     extern const int UNFINISHED;
     extern const int METADATA_MISMATCH;
     extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS;
-    extern const int TOO_MUCH_FETCHES;
+    extern const int TOO_MANY_FETCHES;
     extern const int BAD_DATA_PART_NAME;
     extern const int PART_IS_TEMPORARILY_LOCKED;
     extern const int INCORRECT_FILE_NAME;
@@ -1271,7 +1271,7 @@ bool StorageReplicatedMergeTree::executeFetch(const StorageReplicatedMergeTree::
     if (data.settings.replicated_max_parallel_fetches && total_fetches >= data.settings.replicated_max_parallel_fetches)
    {
         throw Exception("Too many total fetches from replicas, maximum: " + data.settings.replicated_max_parallel_fetches.toString(),
-            ErrorCodes::TOO_MUCH_FETCHES);
+            ErrorCodes::TOO_MANY_FETCHES);
     }
 
     ++total_fetches;
@@ -1280,7 +1280,7 @@ bool StorageReplicatedMergeTree::executeFetch(const StorageReplicatedMergeTree::
     if (data.settings.replicated_max_parallel_fetches_for_table && current_table_fetches >= data.settings.replicated_max_parallel_fetches_for_table)
     {
         throw Exception("Too many fetches from replicas for table, maximum: " + data.settings.replicated_max_parallel_fetches_for_table.toString(),
-            ErrorCodes::TOO_MUCH_FETCHES);
+            ErrorCodes::TOO_MANY_FETCHES);
     }
 
     ++current_table_fetches;
@@ -3481,7 +3481,7 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const
         LOG_INFO(log, "Some of parts (" << missing_parts.size() << ") are missing. Will try to fetch covering parts.");
 
         if (try_no >= 5)
-            throw Exception("Too many retries to fetch parts from " + best_replica_path, ErrorCodes::TOO_MUCH_RETRIES_TO_FETCH_PARTS);
+            throw Exception("Too many retries to fetch parts from " + best_replica_path, ErrorCodes::TOO_MANY_RETRIES_TO_FETCH_PARTS);
 
         Strings parts = getZooKeeper()->getChildren(best_replica_path + "/parts");
         ActiveDataPartSet active_parts_set(data.format_version, parts);