Clean up storage_conf.xml, use dynamic disks for tests

kssenii 2023-05-05 15:43:23 +02:00
parent 30464b9397
commit 9032e23f87
6 changed files with 51 additions and 140 deletions
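
The dedicated per-test S3 disks (s3_disk_2 … s3_disk_6) and their caches (s3_cache_2 … s3_cache_6, s3_cache_small_segment_size) are removed from storage_conf.xml along with their storage policies; the tests that relied on them now declare the cache disk inline with the dynamic disk = disk(...) clause on top of the remaining s3_disk. A minimal sketch of the pattern the tests switch to (table name and cache path below are illustrative, not taken from this diff):

CREATE TABLE sketch_table (key UInt32, value String)
Engine=MergeTree()
ORDER BY key
SETTINGS disk = disk(
    type = cache,            -- cache disk created on the fly by the test
    max_size = '128Mi',
    path = '/var/lib/clickhouse/sketch_table_cache/',   -- illustrative path
    disk = 's3_disk');       -- layered over the preconfigured s3_disk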

View File

@@ -10,46 +10,6 @@
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk>
<s3_disk_2>
<type>s3</type>
<path>s3_disk_2/</path>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk_2>
<s3_disk_3>
<type>s3</type>
<path>s3_disk_3/</path>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk_3>
<s3_disk_4>
<type>s3</type>
<path>s3_disk_4/</path>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk_4>
<s3_disk_5>
<type>s3</type>
<path>s3_disk_5/</path>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk_5>
<s3_disk_6>
<type>s3</type>
<path>s3_disk_6/</path>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<request_timeout_ms>20000</request_timeout_ms>
</s3_disk_6>
<!-- cache for s3 disks -->
<s3_cache>
<type>cache</type>
@@ -60,54 +20,6 @@
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache>
<s3_cache_2>
<type>cache</type>
<disk>s3_disk_2</disk>
<path>s3_cache_2/</path>
<max_size>128Mi</max_size>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<max_file_segment_size>100Mi</max_file_segment_size>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_2>
<s3_cache_3>
<type>cache</type>
<disk>s3_disk_3</disk>
<path>s3_disk_3_cache/</path>
<max_size>128Mi</max_size>
<data_cache_max_size>22548578304</data_cache_max_size>
<cache_on_write_operations>1</cache_on_write_operations>
<enable_cache_hits_threshold>1</enable_cache_hits_threshold>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_3>
<s3_cache_4>
<type>cache</type>
<disk>s3_disk_4</disk>
<path>s3_cache_4/</path>
<max_size>128Mi</max_size>
<cache_on_write_operations>1</cache_on_write_operations>
<enable_filesystem_query_cache_limit>1</enable_filesystem_query_cache_limit>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_4>
<s3_cache_5>
<type>cache</type>
<disk>s3_disk_5</disk>
<path>s3_cache_5/</path>
<max_size>128Mi</max_size>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_5>
<s3_cache_6>
<type>cache</type>
<disk>s3_disk_6</disk>
<path>s3_cache_6/</path>
<max_size>128Mi</max_size>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<enable_bypass_cache_with_threashold>1</enable_bypass_cache_with_threashold>
<bypass_cache_threashold>100</bypass_cache_threashold>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_6>
<s3_cache_small>
<type>cache</type>
<disk>s3_disk_6</disk>
@@ -116,16 +28,6 @@
<do_not_evict_index_and_mark_files>1</do_not_evict_index_and_mark_files>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_small>
<s3_cache_small_segment_size>
<type>cache</type>
<disk>s3_disk_6</disk>
<path>s3_cache_small_segment_size/</path>
<max_size>128Mi</max_size>
<max_file_segment_size>10Ki</max_file_segment_size>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
<cache_on_write_operations>1</cache_on_write_operations>
<delayed_cleanup_interval_ms>100</delayed_cleanup_interval_ms>
</s3_cache_small_segment_size>
<!-- local disks -->
<local_disk>
<type>local_blob_storage</type>
@@ -193,34 +95,6 @@
</main>
</volumes>
</s3_cache>
<s3_cache_2>
<volumes>
<main>
<disk>s3_cache_2</disk>
</main>
</volumes>
</s3_cache_2>
<s3_cache_3>
<volumes>
<main>
<disk>s3_cache_3</disk>
</main>
</volumes>
</s3_cache_3>
<s3_cache_4>
<volumes>
<main>
<disk>s3_cache_4</disk>
</main>
</volumes>
</s3_cache_4>
<s3_cache_6>
<volumes>
<main>
<disk>s3_cache_6</disk>
</main>
</volumes>
</s3_cache_6>
<s3_cache_multi>
<volumes>
<main>
@@ -256,13 +130,6 @@
</main>
</volumes>
</local_cache_3>
<s3_cache_small_segment_size>
<volumes>
<main>
<disk>s3_cache_small_segment_size</disk>
</main>
</volumes>
</s3_cache_small_segment_size>
</policies>
</storage_configuration>
</clickhouse>

View File

@@ -6,7 +6,24 @@ SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
CREATE TABLE test (key UInt32, value String)
Engine=MergeTree()
ORDER BY key
SETTINGS min_bytes_for_wide_part = 10485760,
compress_marks=false,
compress_primary_key=false,
disk = disk(
type = cache,
max_size = '128Mi',
path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
enable_bypass_cache_with_threashold = 1,
bypass_cache_threashold = 100,
cache_on_write_operations = 1,
enable_filesystem_query_cache_limit = 1,
do_not_evict_index_and_mark_files = 0,
delayed_cleanup_interval_ms = 100,
disk = 's3_disk');
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;

View File

@@ -8,7 +8,23 @@ SET skip_download_if_exceeds_query_cache=1;
SET filesystem_cache_max_download_size=128;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
CREATE TABLE test (key UInt32, value String)
Engine=MergeTree()
ORDER BY key
SETTINGS min_bytes_for_wide_part = 10485760,
compress_marks=false,
compress_primary_key=false,
disk = disk(
type = cache,
max_size = '128Mi',
path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
cache_on_write_operations = 1,
enable_filesystem_query_cache_limit = 1,
do_not_evict_index_and_mark_files = 0,
delayed_cleanup_interval_ms = 100,
disk = 's3_disk');
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;

View File

@@ -1,2 +1 @@
134217728 1048576 104857600 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/ 0
134217728 1048576 104857600 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/ 0

View File

@@ -1,7 +1,4 @@
-- Tags: no-fasttest, no-parallel
SYSTEM DROP FILESYSTEM CACHE 's3_cache';
SYSTEM DROP FILESYSTEM CACHE 's3_cache_2';
DESCRIBE FILESYSTEM CACHE 's3_cache';
DESCRIBE FILESYSTEM CACHE 's3_cache_2';

View File

@@ -13,7 +13,22 @@ function random {
${CLICKHOUSE_CLIENT} --multiline --multiquery -q "
drop table if exists ttt;
create table ttt (id Int32, value String) engine=MergeTree() order by tuple() settings storage_policy='s3_cache_small_segment_size', min_bytes_for_wide_part=0;
CREATE TABLE ttt (id Int32, value String)
Engine=MergeTree()
ORDER BY tuple()
SETTINGS min_bytes_for_wide_part = 0,
disk = disk(
type = cache,
max_size = '128Mi',
max_file_segment_size = '10Ki',
path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
cache_on_write_operations = 1,
enable_filesystem_query_cache_limit = 1,
do_not_evict_index_and_mark_files = 0,
delayed_cleanup_interval_ms = 100,
disk = 's3_disk');
insert into ttt select number, toString(number) from numbers(100000) settings throw_on_error_from_cache_on_write_operations = 1;
"