M(Query,"Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
M(ReadBufferFromFileDescriptorReadBytes,"Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size.") \
M(WriteBufferFromFileDescriptorWrite,"Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \
M(WriteBufferFromFileDescriptorWriteFailed,"Number of times the write (write/pwrite) to a file descriptor has failed.") \
M(WriteBufferFromFileDescriptorWriteBytes,"Number of bytes written to file descriptors. If the file is compressed, this will show the compressed data size.") \
M(ReadCompressedBytes,"Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \
M(CompressedReadBufferBlocks,"Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network).") \
M(CompressedReadBufferBytes,"Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \
M(IOBufferAllocs,"Number of allocations of IO buffers (for ReadBuffer/WriteBuffer).") \
M(IOBufferAllocBytes,"Number of bytes allocated for IO buffers (for ReadBuffer/WriteBuffer).") \
M(ArenaAllocChunks,"Number of chunks allocated for memory Arena (used for GROUP BY and similar operations)") \
M(ArenaAllocBytes,"Number of bytes allocated for memory Arena (used for GROUP BY and similar operations)") \
M(FunctionExecute,"Number of SQL ordinary function calls (SQL functions are called on a per-block basis, so this number represents the number of blocks).") \
M(TableFunctionExecute,"Number of table function calls.") \
M(MarkCacheHits,"Number of times an entry has been found in the mark cache, so we didn't have to load a mark file.") \
M(MarkCacheMisses,"Number of times an entry has not been found in the mark cache, so we had to load a mark file in memory, which is a costly operation, adding to query latency.") \
M(CreatedReadBufferOrdinary,"Number of times ordinary read buffer was created for reading data (while choosing among other read methods).") \
M(CreatedReadBufferDirectIO,"Number of times a read buffer with O_DIRECT was created for reading data (while choosing among other read methods).") \
M(CreatedReadBufferDirectIOFailed,"Number of times a read buffer with O_DIRECT was attempted to be created for reading data (while choosing among other read methods), but the OS did not allow it (due to lack of filesystem support or other reasons) and we fell back to the ordinary reading method.") \
M(CreatedReadBufferMMap,"Number of times a read buffer using 'mmap' was created for reading data (while choosing among other read methods).") \
M(CreatedReadBufferMMapFailed,"Number of times a read buffer with 'mmap' was attempted to be created for reading data (while choosing among other read methods), but the OS did not allow it (due to lack of filesystem support or other reasons) and we fell back to the ordinary reading method.") \
M(NetworkReceiveElapsedMicroseconds,"Total time spent waiting for data to arrive or receiving data from the network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
M(NetworkSendElapsedMicroseconds,"Total time spent waiting for data to be sent to the network or sending data to the network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
M(NetworkReceiveBytes,"Total number of bytes received from the network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
M(NetworkSendBytes,"Total number of bytes sent to the network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \
M(ObsoleteReplicatedParts,"Number of times a data part was covered by another data part that has been fetched from a replica (so, we have marked a covered data part as obsolete and no longer needed).") \
M(ReplicatedPartFetchesOfMerged,"Number of times we preferred to download an already merged part from a replica of a ReplicatedMergeTree table instead of performing the merge ourselves (usually we prefer doing merges ourselves to save network traffic). This happens when we do not have all source parts to perform the merge, or when the data part is old enough.") \
M(ReplicatedPartMutations,"Number of times data parts of ReplicatedMergeTree tables were successfully mutated.") \
M(ReplicatedPartChecks,"Number of times we had to perform an advanced search for a data part on replicas or to clarify the need for an existing data part.") \
M(ReplicatedPartChecksFailed,"Number of times the advanced search for a data part on replicas did not give a result, or when an unexpected part was found and moved away.") \
M(ReplicatedDataLoss,"Number of times a data part that we wanted did not exist on any replica (even on replicas that are offline right now). Those data parts are definitely lost. This is normal with asynchronous replication (if quorum inserts were not enabled): the replica on which the data part was written failed and, when it came back online after the failure, it no longer contained that data part.") \
\
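/* A usage sketch, assuming the standard introspection tables: all counters in this \
   list are cumulative since server start and can be read from system.events, e.g.: \
       SELECT event, value, description FROM system.events WHERE event = 'Query'; \
   Per-query values appear in system.query_log in the ProfileEvents column (its \
   exact shape, a Map or Nested arrays, depends on the server version). */ \
\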
M(InsertedRows,"Number of rows INSERTed to all tables.") \
M(InsertedBytes,"Number of bytes (uncompressed; for columns as they are stored in memory) INSERTed to all tables.") \
M(DelayedInserts,"Number of times the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts in a partition.") \
M(RejectedInserts,"Number of times the INSERT of a block to a MergeTree table was rejected with a 'Too many parts' exception due to a high number of active data parts in a partition.") \
M(DelayedInsertsMilliseconds,"Total number of milliseconds spent while the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts in a partition.") \
M(DistributedDelayedInserts,"Number of times the INSERT of a block to a Distributed table was throttled due to a high number of pending bytes.") \
M(DistributedRejectedInserts,"Number of times the INSERT of a block to a Distributed table was rejected with a 'Too many bytes' exception due to a high number of pending bytes.") \
M(DistributedDelayedInsertsMilliseconds,"Total number of milliseconds spent while the INSERT of a block to a Distributed table was throttled due to a high number of pending bytes.") \
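\
/* A monitoring sketch for the insert backpressure counters above; the usual knobs \
   behind DelayedInserts/RejectedInserts are the MergeTree settings \
   parts_to_delay_insert and parts_to_throw_insert: \
       SELECT event, value FROM system.events \
       WHERE event IN ('DelayedInserts', 'RejectedInserts', 'DelayedInsertsMilliseconds'); */ \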
M(ZooKeeperInit,"Number of times a connection with ZooKeeper has been established.") \
M(ZooKeeperTransactions,"Number of ZooKeeper operations, which include both read and write operations as well as multi-transactions.") \
M(ZooKeeperList,"Number of 'list' (getChildren) requests to ZooKeeper.") \
M(ZooKeeperCreate,"Number of 'create' requests to ZooKeeper.") \
M(ZooKeeperRemove,"Number of 'remove' requests to ZooKeeper.") \
M(ZooKeeperExists,"Number of 'exists' requests to ZooKeeper.") \
M(ZooKeeperGet,"Number of 'get' requests to ZooKeeper.") \
M(ZooKeeperSet,"Number of 'set' requests to ZooKeeper.") \
M(ZooKeeperMulti,"Number of 'multi' requests to ZooKeeper (compound transactions).") \
M(ZooKeeperCheck,"Number of 'check' requests to ZooKeeper. Usually they don't make sense in isolation, only as part of a complex transaction.") \
M(ZooKeeperSync,"Number of 'sync' requests to ZooKeeper. These requests are rarely needed or usable.") \
M(ZooKeeperClose,"Number of times a connection with ZooKeeper has been closed voluntarily.") \
M(ZooKeeperWatchResponse,"Number of times a watch notification has been received from ZooKeeper.") \
M(ZooKeeperUserExceptions,"Number of exceptions while working with ZooKeeper related to the data (no node, bad version or similar).") \
M(ZooKeeperHardwareExceptions,"Number of exceptions while working with ZooKeeper related to network (connection loss or similar).") \
M(ZooKeeperOtherExceptions,"Number of exceptions while working with ZooKeeper other than ZooKeeperUserExceptions and ZooKeeperHardwareExceptions.") \
M(ZooKeeperWaitMicroseconds,"Number of microseconds spent waiting for responses from ZooKeeper after creating a request, summed across all the requesting threads.") \
M(ZooKeeperBytesSent,"Number of bytes sent over the network while communicating with ZooKeeper.") \
M(ZooKeeperBytesReceived,"Number of bytes received over the network while communicating with ZooKeeper.") \
\
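/* For example, the ZooKeeper request mix can be summarized as (a sketch): \
       SELECT event, value FROM system.events \
       WHERE event LIKE 'ZooKeeper%' ORDER BY value DESC; \
   ZooKeeperTransactions should roughly equal the sum of the per-request counters. */ \
\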
M(DistributedConnectionFailTry,"Total count of distributed connection failures that were followed by a retry.") \
M(DistributedConnectionMissingTable,"Number of times we rejected a replica from a distributed query, because it did not contain a table needed for the query.") \
M(DistributedConnectionStaleReplica,"Number of times we rejected a replica from a distributed query, because some table needed for a query had replication lag higher than the configured threshold.") \
M(DistributedConnectionFailAtAll,"Total count of distributed connection failures after all retries were exhausted.") \
M(ExternalProcessingUncompressedBytesTotal,"Amount of data (uncompressed, before compression) written by external processing (sorting/aggregating/joining).") \
M(ReplicaPartialShutdown,"How many times a Replicated table had to deinitialize its state due to session expiration in ZooKeeper. The state is reinitialized every time ZooKeeper becomes available again.") \
M(MergedRows,"Rows read for background merges. This is the number of rows before merge.") \
M(MergedUncompressedBytes,"Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge.") \
M(MergesTimeMilliseconds,"Total time spent for background merges.") \
\
M(MergeTreeDataWriterRows,"Number of rows INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterUncompressedBytes,"Uncompressed bytes (for columns as they are stored in memory) INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterCompressedBytes,"Bytes written to filesystem for data INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterBlocks,"Number of blocks INSERTed to MergeTree tables. Each block forms a data part of level zero.") \
M(MergeTreeDataWriterBlocksAlreadySorted,"Number of blocks INSERTed to MergeTree tables that appeared to be already sorted.") \
M(MergeTreeDataProjectionWriterRows,"Number of rows INSERTed to projections of MergeTree tables.") \
M(MergeTreeDataProjectionWriterUncompressedBytes,"Uncompressed bytes (for columns as they are stored in memory) INSERTed to projections of MergeTree tables.") \
M(MergeTreeDataProjectionWriterCompressedBytes,"Bytes written to filesystem for data INSERTed to projections of MergeTree tables.") \
M(MergeTreeDataProjectionWriterBlocks,"Number of blocks INSERTed to projections of MergeTree tables. Each block forms a data part of level zero.") \
M(MergeTreeDataProjectionWriterBlocksAlreadySorted,"Number of blocks INSERTed to projections of MergeTree tables that appeared to be already sorted.") \
M(CannotRemoveEphemeralNode,"Number of times an error happened while trying to remove an ephemeral node. This is not an issue, because our implementation of the ZooKeeper library guarantees that the session will expire and the node will be removed.") \
\
M(RegexpCreated,"Number of compiled regular expressions. Identical regular expressions are compiled just once and cached forever.") \
M(ContextLock,"Number of times the lock of Context was acquired or an attempt to acquire it was made. This is a global lock.") \
M(StorageBufferFlush,"Number of times a buffer in a 'Buffer' table was flushed.") \
M(StorageBufferErrorOnFlush,"Number of times a buffer in the 'Buffer' table could not be flushed due to an error writing to the destination table.") \
M(StorageBufferPassedAllMinThresholds,"Number of times the criteria on all min thresholds were reached to flush a buffer in a 'Buffer' table.") \
M(StorageBufferPassedTimeMaxThreshold,"Number of times the criterion on the max time threshold was reached to flush a buffer in a 'Buffer' table.") \
M(StorageBufferPassedRowsMaxThreshold,"Number of times the criterion on the max rows threshold was reached to flush a buffer in a 'Buffer' table.") \
M(StorageBufferPassedBytesMaxThreshold,"Number of times the criterion on the max bytes threshold was reached to flush a buffer in a 'Buffer' table.") \
M(StorageBufferPassedTimeFlushThreshold,"Number of times the background-only flush threshold on time was reached to flush a buffer in a 'Buffer' table. This is an expert-only metric. If you read this and you are not an expert, stop reading.") \
M(StorageBufferPassedRowsFlushThreshold,"Number of times the background-only flush threshold on rows was reached to flush a buffer in a 'Buffer' table. This is an expert-only metric. If you read this and you are not an expert, stop reading.") \
M(StorageBufferPassedBytesFlushThreshold,"Number of times the background-only flush threshold on bytes was reached to flush a buffer in a 'Buffer' table. This is an expert-only metric. If you read this and you are not an expert, stop reading.") \
M(StorageBufferLayerLockReadersWaitMilliseconds,"Time spent waiting for a Buffer layer lock during reading.") \
M(StorageBufferLayerLockWritersWaitMilliseconds,"Time spent waiting for a free Buffer layer to write to (can be used to tune the number of Buffer layers).") \
\
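/* A rough way to see which thresholds cause Buffer flushes, using only the \
   counters above: \
       SELECT event, value FROM system.events WHERE event LIKE 'StorageBufferPassed%'; */ \
\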
M(DictCacheKeysRequested,"Number of keys requested from the data source for the dictionaries of 'cache' types.") \
M(DictCacheKeysRequestedMiss,"Number of keys requested from the data source for dictionaries of 'cache' types but not found in the data source.") \
M(DictCacheKeysRequestedFound,"Number of keys requested from the data source for dictionaries of 'cache' types and found in the data source.") \
M(DictCacheKeysExpired,"Number of keys looked up in the dictionaries of 'cache' types and found in the cache but they were obsolete.") \
M(DictCacheKeysNotFound,"Number of keys looked up in the dictionaries of 'cache' types and not found.") \
M(DictCacheKeysHit,"Number of keys looked up in the dictionaries of 'cache' types and found in the cache.") \
M(DictCacheRequestTimeNs,"Number of nanoseconds spent querying the external data sources for the dictionaries of 'cache' types.") \
M(DictCacheRequests,"Number of bulk requests to the external data sources for the dictionaries of 'cache' types.") \
M(DictCacheLockWriteNs,"Number of nanoseconds spent waiting for the write lock to update the data for the dictionaries of 'cache' types.") \
M(DictCacheLockReadNs,"Number of nanoseconds spent waiting for the read lock to look up the data for the dictionaries of 'cache' types.") \
\
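/* One rough hit-rate estimate for 'cache' dictionaries from these counters (a \
   sketch; expired keys are counted as misses here): \
       SELECT sumIf(value, event = 'DictCacheKeysHit') \
            / sumIf(value, event IN ('DictCacheKeysHit', 'DictCacheKeysNotFound', 'DictCacheKeysExpired')) AS hit_ratio \
       FROM system.events; */ \
\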
M(DistributedSyncInsertionTimeoutExceeded,"A timeout was exceeded while waiting for shards during synchronous insertion into a Distributed table (with 'insert_distributed_sync' = 1).") \
M(DataAfterMutationDiffersFromReplica,"Number of times data after mutation is not byte-identical to the data on other replicas. In addition to the reasons described in 'DataAfterMergeDiffersFromReplica', this is also possible due to non-deterministic mutations.") \
M(PolygonsAddedToPool,"Number of polygons added to the cache (pool) for the 'pointInPolygon' function.") \
M(PolygonsInPoolAllocatedBytes,"The number of bytes for polygons added to the cache (pool) for the 'pointInPolygon' function.") \
M(RealTimeMicroseconds,"Total (wall clock) time spent in processing (queries and other tasks) threads (note that this is a sum over all threads).") \
M(UserTimeMicroseconds,"Total time spent in processing (queries and other tasks) threads executing CPU instructions in user space. This includes time when the CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(SystemTimeMicroseconds,"Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This includes time when the CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(SoftPageFaults,"The number of soft page faults in query execution threads. A soft page fault usually means a miss in the memory allocator cache, which required a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \
M(HardPageFaults,"The number of hard page faults in query execution threads. High values indicate either that you forgot to turn off swap on your server, or eviction of memory pages of the ClickHouse binary during very high memory pressure, or successful usage of the 'mmap' read method for the tables' data.") \
M(OSIOWaitMicroseconds,"Total time a thread spent waiting for a result of IO operation, from the OS point of view. This is real IO that doesn't include page cache.") \
M(OSCPUWaitMicroseconds,"Total time a thread was ready for execution but waiting to be scheduled by OS, from the OS point of view.") \
M(OSCPUVirtualTimeMicroseconds,"CPU time spent, as seen by the OS. Does not include involuntary waits due to virtualization.") \
M(OSReadBytes,"Number of bytes read from disks or block devices. Doesn't include bytes read from page cache. May include excessive data due to block size, readahead, etc.") \
M(OSWriteBytes,"Number of bytes written to disks or block devices. Doesn't include bytes that are in page cache dirty pages. May not include data that was written by OS asynchronously.") \
M(OSReadChars,"Number of bytes read from filesystem, including page cache.") \
M(OSWriteChars,"Number of bytes written to filesystem, including page cache.") \
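/* A sketch: since OSReadChars includes page-cache reads and OSReadBytes does not, \
   their ratio gives a rough idea of how much read traffic actually hit the disks: \
       SELECT sumIf(value, event = 'OSReadBytes') \
            / sumIf(value, event = 'OSReadChars') AS disk_read_fraction \
       FROM system.events; */ \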
M(PerfCpuCycles,"Total cycles. Be wary of what happens during CPU frequency scaling.") \
M(PerfInstructions,"Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.") \
M(PerfCacheReferences,"Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \
M(PerfCacheMisses,"Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERF_COUNT_HW_CACHE_REFERENCES event to calculate cache miss rates.") \
M(PerfBranchInstructions,"Retired branch instructions. Prior to Linux 2.6.35, this used the wrong event on AMD processors.") \
M(PerfTaskClock,"A clock count specific to the task that is running.") \
M(PerfContextSwitches,"Number of context switches.") \
M(PerfCpuMigrations,"Number of times the process has migrated to a new CPU.") \
M(PerfAlignmentFaults,"Number of alignment faults. These happen when unaligned memory accesses happen; the kernel can handle these but it reduces performance. This happens only on some architectures (never on x86).") \
M(PerfEmulationFaults,"Number of emulation faults. The kernel sometimes traps on unimplemented instructions and emulates them for user space. This can negatively impact performance.") \
M(CannotWriteToWriteBufferDiscard,"Number of stack traces dropped by the query profiler or signal handler because the pipe was full or the write to the pipe failed.") \
M(QueryProfilerSignalOverruns,"Number of times we dropped processing of a query profiler signal due to overrun, plus the number of signals that the OS has not delivered due to overrun.") \
M(QueryProfilerRuns,"Number of times QueryProfiler has been run.") \
M(ThreadpoolReaderTaskMicroseconds,"Time spent getting the data in asynchronous reading.") \
M(ThreadpoolReaderReadBytes,"Bytes read from a threadpool task in asynchronous reading.") \
\
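/* A per-query view of any of these counters (a sketch; assumes a server version \
   where system.query_log exposes ProfileEvents as a Map): \
       SELECT query_id, ProfileEvents['QueryProfilerRuns'] AS profiler_runs \
       FROM system.query_log \
       WHERE type = 'QueryFinish' ORDER BY event_time DESC LIMIT 10; */ \
\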
M(FileSegmentWaitReadBufferMicroseconds,"Metric per file segment. Time spent waiting for the internal read buffer (includes cache waiting).") \
M(FileSegmentReadMicroseconds,"Metric per file segment. Time spent reading from the file.") \
M(FileSegmentCacheWriteMicroseconds,"Metric per file segment. Time spent writing data to the cache.") \
M(FileSegmentPredownloadMicroseconds,"Metric per file segment. Time spent predownloading data to the cache (predownloading means finishing a file segment download, started by someone else who failed to complete it, up to the point from which the current thread was requested to read).") \
M(FileSegmentUsedBytes,"Metric per file segment. How many bytes were actually used from the current file segment.") \
M(ThreadPoolReaderPageCacheHit,"Number of times the read inside ThreadPoolReader was done from page cache.") \
M(ThreadPoolReaderPageCacheHitBytes,"Number of bytes read inside ThreadPoolReader when it was done from page cache.") \
M(ThreadPoolReaderPageCacheHitElapsedMicroseconds,"Time spent reading data from page cache in ThreadPoolReader.") \
M(ThreadPoolReaderPageCacheMiss,"Number of times the read inside ThreadPoolReader was not done from the page cache and was handed off to the thread pool.") \
M(ThreadPoolReaderPageCacheMissBytes,"Number of bytes read inside ThreadPoolReader when the read was not done from the page cache and was handed off to the thread pool.") \
M(ThreadPoolReaderPageCacheMissElapsedMicroseconds,"Time spent reading data inside the asynchronous job in ThreadPoolReader - when read was not done from page cache.") \
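/* E.g., a rough page-cache hit ratio for ThreadPoolReader from the counters above: \
       SELECT sumIf(value, event = 'ThreadPoolReaderPageCacheHit') \
            / sumIf(value, event IN ('ThreadPoolReaderPageCacheHit', 'ThreadPoolReaderPageCacheMiss')) AS hit_ratio \
       FROM system.events; */ \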
M(KafkaRebalanceRevocations,"Number of partition revocations (the first stage of consumer group rebalance)") \
M(KafkaRebalanceAssignments,"Number of partition assignments (the final stage of consumer group rebalance)") \
M(KafkaRebalanceErrors,"Number of failed consumer group rebalances") \
M(KafkaMessagesPolled,"Number of Kafka messages polled from librdkafka to ClickHouse") \
M(KafkaMessagesRead,"Number of Kafka messages already processed by ClickHouse") \
M(KafkaMessagesFailed,"Number of Kafka messages ClickHouse failed to parse") \
M(KafkaRowsRead,"Number of rows parsed from Kafka messages") \
M(KafkaRowsRejected,"Number of parsed rows which were later rejected (due to rebalances / errors or similar reasons). Those rows will be consumed again after the rebalance.") \
M(KafkaDirectReads,"Number of direct selects from Kafka tables since server start") \
M(KafkaBackgroundReads,"Number of background reads populating materialized views from Kafka since server start") \
M(KafkaCommits,"Number of successful commits of consumed offsets to Kafka (normally should be the same as KafkaBackgroundReads)") \
M(KafkaCommitFailures,"Number of failed commits of consumed offsets to Kafka (usually is a sign of some data duplication)") \
M(KafkaConsumerErrors,"Number of errors reported by librdkafka during polls") \
M(KafkaWrites,"Number of writes (inserts) to Kafka tables") \
M(KafkaRowsWritten,"Number of rows inserted into Kafka tables") \
M(KafkaProducerFlushes,"Number of explicit flushes to Kafka producer") \
M(KafkaMessagesProduced,"Number of messages produced to Kafka") \
M(KafkaProducerErrors,"Number of errors while producing messages to Kafka") \
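/* A health-check sketch for the Kafka counters above (parse failures and commit \
   failures are usually the first things to look at): \
       SELECT sumIf(value, event = 'KafkaMessagesFailed') \
            / sumIf(value, event = 'KafkaMessagesPolled') AS parse_failure_rate, \
              sumIf(value, event = 'KafkaCommitFailures') AS commit_failures \
       FROM system.events; */ \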
M(OverflowBreak,"Number of times data processing was cancelled by a query complexity limitation with the setting '*_overflow_mode' = 'break' and the result is incomplete.") \
M(OverflowThrow,"Number of times data processing was cancelled by a query complexity limitation with the setting '*_overflow_mode' = 'throw' and an exception was thrown.") \
M(OverflowAny,"Number of times approximate GROUP BY was in effect: when aggregation was performed only on top of the first 'max_rows_to_group_by' unique keys and other keys were ignored due to 'group_by_overflow_mode' = 'any'.") \
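\
/* The overflow counters above correspond to the '*_overflow_mode' settings; for \
   instance (hypothetical table 't'), the following increments OverflowAny once the \
   limit on unique GROUP BY keys is reached: \
       SELECT key, count() FROM t GROUP BY key \
       SETTINGS max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; */ \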