Merge pull request #56936 from jrdi/fs-cache-hit-profile-events

Add CachedReadBufferReadFromCache{Hits,Misses} profile events
Kseniia Sumarokova, 2023-11-22 11:26:00 +01:00 (committed by GitHub)
commit 2880e6437e
4 changed files with 36 additions and 24 deletions
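
For context: the two new counters join the existing CachedReadBuffer* family and, like all ProfileEvents, are visible both server-wide and per query. A minimal sketch of a server-wide check (standard system.events schema; only the event names come from this commit):

SELECT event, value, description
FROM system.events
WHERE event IN ('CachedReadBufferReadFromCacheHits', 'CachedReadBufferReadFromCacheMisses');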


@@ -395,6 +395,8 @@ The server successfully detected this situation and will download merged part fr
     M(WriteBufferFromS3WaitInflightLimitMicroseconds, "Time spent on waiting while some of the current requests are done when its number reached the limit defined by s3_max_inflight_parts_for_one_file.") \
     M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \
     \
+    M(CachedReadBufferReadFromCacheHits, "Number of times the read from filesystem cache hit the cache.") \
+    M(CachedReadBufferReadFromCacheMisses, "Number of times the read from filesystem cache missed the cache.") \
     M(CachedReadBufferReadFromSourceMicroseconds, "Time reading from filesystem cache source (from remote filesystem, etc)") \
     M(CachedReadBufferReadFromCacheMicroseconds, "Time reading from filesystem cache") \
     M(CachedReadBufferReadFromSourceBytes, "Bytes read from filesystem cache source (from remote fs, etc)") \


@@ -28,6 +28,9 @@ extern const Event CachedReadBufferReadFromSourceBytes;
 extern const Event CachedReadBufferReadFromCacheBytes;
 extern const Event CachedReadBufferCacheWriteBytes;
 extern const Event CachedReadBufferCreateBufferMicroseconds;
+extern const Event CachedReadBufferReadFromCacheHits;
+extern const Event CachedReadBufferReadFromCacheMisses;
 }

 namespace DB
@@ -949,11 +952,13 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
         if (read_type == ReadType::CACHED)
         {
+            ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheHits);
             ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheBytes, size);
             ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheMicroseconds, elapsed);
         }
         else
         {
+            ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheMisses);
             ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, size);
             ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceMicroseconds, elapsed);
         }
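
Together, the two counters make a per-query hit ratio straightforward to derive. A hedged sketch against the standard system.query_log schema (the query_id placeholder is illustrative; ClickHouse permits reusing SELECT aliases in later expressions):

SELECT
    ProfileEvents['CachedReadBufferReadFromCacheHits'] AS hits,
    ProfileEvents['CachedReadBufferReadFromCacheMisses'] AS misses,
    hits / greatest(hits + misses, 1) AS cache_hit_ratio
FROM system.query_log
WHERE type = 'QueryFinish' AND query_id = '<query id>';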


@@ -1,15 +1,15 @@
 Using storage policy: s3_cache
-1 0 1
-0 1 0
-0 1 0
+0 1 1 0 1
+1 0 0 1 0
+1 0 0 1 0
 0
 Using storage policy: local_cache
-1 0 1
-0 1 0
-0 1 0
+0 1 1 0 1
+1 0 0 1 0
+1 0 0 1 0
 0
 Using storage policy: azure_cache
-1 0 1
-0 1 0
-0 1 0
+0 1 1 0 1
+1 0 0 1 0
+1 0 0 1 0
 0
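
Reading the updated reference rows: the five flags per line are, in order, remote_fs_cache_hit, remote_fs_cache_miss, remote_fs_read, remote_fs_cache_read and remote_fs_read_and_download, matching the SELECT in the test script below. Under each storage policy the first run misses and downloads (0 1 1 0 1), while the two repeat runs with the 'read'/'pread' and 'threadpool' read methods are pure cache hits (1 0 0 1 0).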


@@ -7,11 +7,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

 for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     echo "Using storage policy: $STORAGE_POLICY"
-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     SET max_memory_usage='20G';
     SET enable_filesystem_cache_on_write_operations = 0;
@@ -24,11 +23,13 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     query="SELECT * FROM test_02226 LIMIT 10"
-    query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1)
+    query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1)

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     SYSTEM FLUSH LOGS;
-    SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
+    SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit,
+           ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss,
+           ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
            ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read,
            ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download
     FROM system.query_log
@@ -39,16 +40,18 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     LIMIT 1;
     """

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     set remote_filesystem_read_method = 'read';
     set local_filesystem_read_method = 'pread';
     """

-    query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1)
+    query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1)

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     SYSTEM FLUSH LOGS;
-    SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
+    SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit,
+           ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss,
+           ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
            ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read,
            ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download
     FROM system.query_log
@@ -60,15 +63,17 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     """

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     set remote_filesystem_read_method='threadpool';
     """

-    query_id=$(clickhouse client --query "select queryID() from ($query) limit 1")
+    query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1")

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     SYSTEM FLUSH LOGS;
-    SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
+    SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit,
+           ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss,
+           ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read,
            ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read,
            ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download
     FROM system.query_log
@@ -79,7 +84,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     LIMIT 1;
     """

-    clickhouse client --multiquery --multiline --query """
+    $CLICKHOUSE_CLIENT --multiquery --multiline --query """
     SELECT * FROM test_02226 WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null;

     SET enable_filesystem_cache_on_write_operations = 1;
@@ -92,5 +97,5 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
     INSERT INTO test_02226 SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000;
     """

-    clickhouse client --query "DROP TABLE test_02226"
+    $CLICKHOUSE_CLIENT --query "DROP TABLE test_02226"
done
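
Outside the test harness, the same check can be reproduced by hand. A rough sketch, assuming clickhouse-client can reach the server and a table (here test_02226) sits on a cache-backed storage policy:

# Run a query, capture its query_id, then look up the new counters for it.
query_id=$(clickhouse-client --query "SELECT queryID() FROM (SELECT * FROM test_02226 LIMIT 10) LIMIT 1")
clickhouse-client --query "SYSTEM FLUSH LOGS"
clickhouse-client --query "
    SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] AS hits,
           ProfileEvents['CachedReadBufferReadFromCacheMisses'] AS misses
    FROM system.query_log
    WHERE query_id = '$query_id' AND type = 'QueryFinish'
    ORDER BY event_time DESC
    LIMIT 1"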