Update test_s3_plain_rewritable test

- add cache_s3_plain_rewritable caching disk
- simplify, don't look up counters

parent b0d6b0ffdf
commit d4e6f2e8d6
Storage configuration (XML config):

@@ -8,6 +8,13 @@
                 <access_key_id>minio</access_key_id>
                 <secret_access_key>minio123</secret_access_key>
             </disk_s3_plain_rewritable>
+            <disk_cache_s3_plain_rewritable>
+                <type>cache</type>
+                <disk>disk_s3_plain_rewritable</disk>
+                <path>/var/lib/clickhouse/disks/s3_plain_rewritable_cache/</path>
+                <max_size>1000000000</max_size>
+                <cache_on_write_operations>1</cache_on_write_operations>
+            </disk_cache_s3_plain_rewritable>
         </disks>
         <policies>
             <s3_plain_rewritable>
@@ -17,6 +24,13 @@
                     </main>
                 </volumes>
             </s3_plain_rewritable>
+            <cache_s3_plain_rewritable>
+                <volumes>
+                    <main>
+                        <disk>disk_cache_s3_plain_rewritable</disk>
+                    </main>
+                </volumes>
+            </cache_s3_plain_rewritable>
         </policies>
     </storage_configuration>
 </clickhouse>
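A table opts into the new cache-backed policy the same way the test below does, via the storage_policy setting. A minimal sketch, not part of the commit; `node` is assumed to be a ClickHouseCluster instance node as used elsewhere in this test, and `cache_demo` is a hypothetical table name:

    # Illustrative only: `node` is an assumed ClickHouseCluster instance node.
    node.query(
        """
        CREATE TABLE cache_demo (
            id Int64,
            data String
        ) ENGINE=MergeTree()
        ORDER BY id
        SETTINGS storage_policy='cache_s3_plain_rewritable'
        """
    )
    # Writes pass through the cache disk (cache_on_write_operations=1),
    # then land on the underlying s3_plain_rewritable disk.
    node.query("INSERT INTO cache_demo VALUES (1, 'hello')")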
test.py:

@@ -8,11 +8,8 @@ from helpers.cluster import ClickHouseCluster
 cluster = ClickHouseCluster(__file__)
 
 NUM_WORKERS = 5
 
 MAX_ROWS = 1000
 
-dirs_created = []
-
-
 def gen_insert_values(size):
     return ",".join(
@@ -46,8 +43,14 @@ def start_cluster():
     cluster.shutdown()
 
 
-@pytest.mark.order(0)
-def test_insert():
+@pytest.mark.parametrize(
+    "storage_policy",
+    [
+        pytest.param("s3_plain_rewritable"),
+        pytest.param("cache_s3_plain_rewritable"),
+    ],
+)
+def test(storage_policy):
     def create_insert(node, insert_values):
         node.query(
             """
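With this change the previous order-chained tests collapse into one parametrized test, which pytest runs once per storage policy. A standalone sketch of the same pattern; the function name and assertion are illustrative, not from the commit:

    import pytest


    @pytest.mark.parametrize(
        "storage_policy",
        [
            pytest.param("s3_plain_rewritable"),
            pytest.param("cache_s3_plain_rewritable"),
        ],
    )
    def test_policy_name(storage_policy):
        # pytest invokes this body once per parameter value.
        assert storage_policy.endswith("s3_plain_rewritable")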
@@ -56,8 +59,10 @@
                 data String
             ) ENGINE=MergeTree()
             ORDER BY id
-            SETTINGS storage_policy='s3_plain_rewritable'
-            """
+            SETTINGS storage_policy='{}'
+            """.format(
+                storage_policy
+            )
         )
         node.query("INSERT INTO test VALUES {}".format(insert_values))
 
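For clarity, a tiny standalone sketch, illustrative and not from the commit, of what the `.format` call renders for one parameter:

    template = (
        "CREATE TABLE test (id Int64, data String) ENGINE=MergeTree() "
        "ORDER BY id SETTINGS storage_policy='{}'"
    )
    # Each parametrized run substitutes one policy name into the statement.
    print(template.format("cache_s3_plain_rewritable"))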
@@ -107,25 +112,6 @@ def test_insert():
         != -1
     )
 
-    created = int(
-        node.query(
-            "SELECT value FROM system.events WHERE event = 'DiskPlainRewritableS3DirectoryCreated'"
-        )
-    )
-    assert created > 0
-    dirs_created.append(created)
-    assert (
-        int(
-            node.query(
-                "SELECT value FROM system.metrics WHERE metric = 'DiskPlainRewritableS3DirectoryMapSize'"
-            )
-        )
-        == created
-    )
-
-
-@pytest.mark.order(1)
-def test_restart():
     insert_values_arr = []
     for i in range(NUM_WORKERS):
         node = cluster.instances[f"node{i + 1}"]
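The dropped assertions read cumulative counters from system.events and system.metrics; per the commit message, the test is simplified by no longer looking them up. For reference, a helper-style sketch of the removed lookup pattern; the helper name is illustrative, not from the commit:

    def get_profile_event(node, event):
        # Reads a cumulative counter from system.events; returns 0 if absent.
        # `node` is a ClickHouseCluster instance node as used in this test.
        result = node.query(
            f"SELECT value FROM system.events WHERE event = '{event}'"
        ).strip()
        return int(result) if result else 0


    # e.g. created = get_profile_event(node, "DiskPlainRewritableS3DirectoryCreated")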
@@ -138,6 +124,7 @@ def test_restart():
 
     threads = []
     for i in range(NUM_WORKERS):
+        node = cluster.instances[f"node{i + 1}"]
         t = threading.Thread(target=restart, args=(node,))
         threads.append(t)
         t.start()
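Beyond merging the tests, the added line appears to fix a latent issue: without rebinding `node` inside the loop, every thread would capture whatever `node` last referred to. A standalone sketch of the corrected pattern; the stub worker is illustrative:

    import threading


    def restart(node):
        # Stub; in the test this would restart the ClickHouse node.
        print(f"restarting {node}")


    threads = []
    for i in range(5):
        node = f"node{i + 1}"  # rebind per iteration so each thread gets its own node
        t = threading.Thread(target=restart, args=(node,))
        threads.append(t)
        t.start()

    for t in threads:
        t.join()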
@@ -152,21 +139,10 @@ def test_restart():
             == insert_values_arr[i]
         )
 
-
-@pytest.mark.order(2)
-def test_drop():
     for i in range(NUM_WORKERS):
         node = cluster.instances[f"node{i + 1}"]
         node.query("DROP TABLE IF EXISTS test SYNC")
 
-        removed = int(
-            node.query(
-                "SELECT value FROM system.events WHERE event = 'DiskPlainRewritableS3DirectoryRemoved'"
-            )
-        )
-
-        assert dirs_created[i] == removed
-
     it = cluster.minio_client.list_objects(
         cluster.minio_bucket, "data/", recursive=True
     )