Merge branch 's3-streams-scheduler' of github.com:ClickHouse/ClickHouse into s3-streams-scheduler

commit eaf91c1bb2
serxa, 2024-06-14 16:11:01 +00:00


@@ -49,9 +49,9 @@ def test_s3_resource_request_granularity():
         """
     )
-    total_bytes = 50000000  # Approximate data size
-    max_bytes_per_request = 2000000  # Should be ~1MB or less in general
-    min_bytes_per_request = 6000  # Small requests are ok, but we don't want hurt performance with too often resource requests
+    total_bytes = 50000000  # Approximate data size
+    max_bytes_per_request = 2000000  # Should be ~1MB or less in general
+    min_bytes_per_request = 6000  # Small requests are ok, but we don't want hurt performance with too often resource requests
     writes_before = int(
         node.query(
@@ -68,7 +68,9 @@ def test_s3_resource_request_granularity():
             f"select budget from system.scheduler where resource='network_write' and path='/prio/admin'"
         ).strip()
     )
-    node.query(f"insert into data select number, randomString(10000000) from numbers(5) SETTINGS workload='admin'")
+    node.query(
+        f"insert into data select number, randomString(10000000) from numbers(5) SETTINGS workload='admin'"
+    )
     writes_after = int(
         node.query(
             f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/admin'"
@@ -111,7 +113,9 @@ def test_s3_resource_request_granularity():
             f"select budget from system.scheduler where resource='network_read' and path='/prio/admin'"
         ).strip()
     )
-    node.query(f"select count() from data where not ignore(*) SETTINGS workload='admin'")
+    node.query(
+        f"select count() from data where not ignore(*) SETTINGS workload='admin'"
+    )
     reads_after = int(
         node.query(
             f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/admin'"