diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index 9c3506742fd..c9f6bcabcc1 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -783,9 +783,12 @@ void registerStorageKafka(StorageFactory & factory)
 
         if (num_consumers > max_consumers)
         {
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of consumers can not be bigger than {}, it just doesn't make sense. "
-                "Note that kafka_num_consumers is not number of consumers for Kafka partitions -- they are managed by Kafka client library. "
-                "kafka_num_consumers is internal amount of threads for ClickHouse and it shouldn't be big", max_consumers);
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "The number of consumers cannot be bigger than {}. "
+                "A single consumer can read any number of partitions. Extra consumers are relatively expensive, "
+                "and using a lot of them can lead to high memory and CPU usage. To achieve better performance "
+                "when reading from Kafka, consider using the setting kafka_thread_per_consumer=1, "
+                "and ensure you have enough threads in MessageBrokerSchedulePool (background_message_broker_schedule_pool_size). "
+                "See also https://clickhouse.com/docs/integrations/kafka/kafka-table-engine#tuning-performance", max_consumers);
         }
         else if (num_consumers < 1)
        {
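
For context, the settings named in the rewritten message are configured on the Kafka table engine itself. Below is a minimal, hypothetical ClickHouse SQL sketch of what the message recommends; the broker, topic, group, table, and column names are made up for illustration, while the kafka_* settings are existing engine settings.

    -- Hypothetical example: keep kafka_num_consumers modest and give each consumer its own thread.
    CREATE TABLE kafka_events_queue
    (
        message String
    )
    ENGINE = Kafka
    SETTINGS
        kafka_broker_list = 'localhost:9092',
        kafka_topic_list = 'events',
        kafka_group_name = 'clickhouse_events_consumer',
        kafka_format = 'JSONEachRow',
        kafka_num_consumers = 4,
        kafka_thread_per_consumer = 1;

With kafka_thread_per_consumer = 1, each consumer gets its own thread from the MessageBrokerSchedulePool, so background_message_broker_schedule_pool_size in the server configuration should be large enough to cover the total number of consumers across all Kafka tables, as the new exception text points out.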