Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)

Commit d0677e2343 (parent 707210ed38)
Added more extensive testing to new settings.
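The test hunks below assume the usual ClickHouse integration-test scaffolding: a ClickHouseCluster with a single `node` instance, a module-scoped `start_cluster` fixture, and `QueryRuntimeException` for queries that are expected to fail. None of that preamble is part of this commit; the following is only a minimal sketch of what it typically looks like, and the config file name is an assumption rather than something taken from the diff (the `only_disk1` storage policy referenced by the tests would be defined there).

import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
# "configs/storage_policy.xml" is a placeholder name; it would define the
# small disk behind the 'only_disk1' storage policy used by the tests.
node = cluster.add_instance("node", main_configs=["configs/storage_policy.xml"])


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()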
@@ -108,7 +108,7 @@ static constexpr auto DEFAULT_QUERY_CACHE_MAX_SIZE = 1_GiB;
 static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRIES = 1024uz;
 static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_BYTES = 1_MiB;
 static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_ROWS = 30'000'000uz;
-static constexpr Float64 DEFAULT_MIN_FREE_DISK_RATIO = 0.0;
+static constexpr auto DEFAULT_MIN_FREE_DISK_RATIO = 0.0;
 
 /// Query profiler cannot work with sanitizers.
 /// Sanitizers are using quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer)
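The hunk above only changes the spelling of the DEFAULT_MIN_FREE_DISK_RATIO declaration; per the constants and the comments in the new test, both settings still default to 0 bytes / 0.0 ratio, i.e. the free-disk checks are off unless explicitly configured. As a rough sketch, assuming the `node` fixture above and that both settings are exposed through system.settings (which this diff does not show), a smoke check that they are at least visible on the test instance could look like this:

def test_min_free_disk_settings_visible(start_cluster):
    # Hypothetical check, not part of this commit: both new settings should
    # be readable from system.settings on the test node.
    for name in (
        "min_free_disk_bytes_to_throw_insert",
        "min_free_disk_ratio_to_throw_insert",
    ):
        value = node.query(
            f"SELECT value FROM system.settings WHERE name = '{name}'"
        ).strip()
        assert value != ""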
@@ -21,7 +21,67 @@ def start_cluster():
         cluster.shutdown()
 
 
+def test_min_free_disk_settings(start_cluster):
+    # min_free_disk_bytes_to_throw_insert (default 0)
+    # min_free_disk_ratio_to_throw_insert (default 0.0)
+
+    node.query("DROP TABLE IF EXISTS test_table")
+
+    node.query(
+        f"""
+        CREATE TABLE test_table (
+            id UInt32,
+            data String
+        ) ENGINE = MergeTree()
+        ORDER BY id
+        SETTINGS storage_policy = 'only_disk1'
+        """
+    )
+
+    node.query("INSERT INTO test_table (id, data) values (1, 'a')")
+
+    free_bytes = 7 * 1024 * 1024  # 7MB -- size of disk
+    node.query(f"SET min_free_disk_bytes_to_throw_insert = {free_bytes}")
+
+    try:
+        node.query("INSERT INTO test_table (id, data) values (1, 'a')")
+    except QueryRuntimeException as e:
+        assert "NOT_ENOUGH_SPACE" in str(e)
+
+    node.query("SET min_free_disk_bytes_to_throw_insert = 0")
+    node.query("INSERT INTO test_table (id, data) values (1, 'a')")
+
+    free_ratio = 1.0
+    node.query(f"SET min_free_disk_ratio_to_throw_insert = {free_ratio}")
+
+    try:
+        node.query("INSERT INTO test_table (id, data) values (1, 'a')")
+    except QueryRuntimeException as e:
+        assert "NOT_ENOUGH_SPACE" in str(e)
+
+    node.query("DROP TABLE test_table")
+
+    # server setting for min_free_disk_ratio_to_throw_insert is 1 but we can overwrite at table level
+    node.query(
+        f"""
+        CREATE TABLE test_table (
+            id UInt32,
+            data String
+        ) ENGINE = MergeTree()
+        ORDER BY id
+        SETTINGS storage_policy = 'only_disk1', min_free_disk_ratio_to_throw_insert = 0.0
+        """
+    )
+
+    node.query("INSERT INTO test_table (id, data) values (1, 'a')")
+
+    node.query("DROP TABLE test_table")
+    node.query("SET min_free_disk_ratio_to_throw_insert = 0.0")
+
+
 def test_insert_stops_when_disk_full(start_cluster):
+    node.query("DROP TABLE IF EXISTS test_table")
+
     min_free_bytes = 3 * 1024 * 1024  # 3 MiB
 
     node.query(
@@ -58,4 +118,4 @@ def test_insert_stops_when_disk_full(start_cluster):
     rows = int(node.query("SELECT count() from test_table").strip())
     assert rows == count
 
     node.query("DROP TABLE test_table")
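One thing worth noting about the new test: each try/except block only checks for NOT_ENOUGH_SPACE when an exception is actually raised, so if the INSERT unexpectedly succeeds the block falls through and the test still passes. A hedged alternative, not part of this commit, would use pytest's raises helper to make that case fail explicitly:

import pytest

from helpers.client import QueryRuntimeException


def assert_insert_rejected(node, query):
    # Fail the test if the query does not raise at all, and check that the
    # error it does raise mentions NOT_ENOUGH_SPACE.
    with pytest.raises(QueryRuntimeException) as exc_info:
        node.query(query)
    assert "NOT_ENOUGH_SPACE" in str(exc_info.value)

Each try/except pair in test_min_free_disk_settings could then be replaced by a single call such as assert_insert_rejected(node, "INSERT INTO test_table (id, data) values (1, 'a')").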