fix more tests
commit ac9ba23bdf (parent 8a201a28c0)
@@ -24,7 +24,8 @@ def started_cluster():

         node1.query('''
            CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32)
-           ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id;
+           ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id
+           SETTINGS min_bytes_for_wide_part=0;
         ''')

         yield cluster
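Note: with min_rows_for_wide_part left at its default of 0, adding min_bytes_for_wide_part=0 on the table forces every part of non_replicated_mt into the Wide format (one data file per column), which is what this test assumes. A minimal sketch of how that could be checked explicitly; the helper is hypothetical and not part of this commit, and it assumes the standard part_type column of system.parts:

    def assert_wide_parts(node, table):
        # part_type is 'Wide' or 'Compact' for each active MergeTree part.
        part_types = node.query(
            "SELECT DISTINCT part_type FROM system.parts "
            "WHERE table = '{}' AND active".format(table)
        ).strip().splitlines()
        assert part_types == ['Wide']

    # e.g. assert_wide_parts(node1, 'non_replicated_mt')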
@@ -0,0 +1,6 @@
+<yandex>
+    <merge_tree>
+        <min_rows_for_wide_part>0</min_rows_for_wide_part>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
+</yandex>
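Note: this new config file (presumably the configs/wide_parts_only.xml referenced in the hunks below) sets server-level MergeTree defaults, so every MergeTree table on an instance that loads it writes Wide parts unless a table overrides the settings. A sketch of how such a file is wired into a test instance, following the add_instance pattern used in this commit; the system.merge_tree_settings check is illustrative and not part of the commit:

    from helpers.cluster import ClickHouseCluster

    cluster = ClickHouseCluster(__file__)
    node = cluster.add_instance('node', main_configs=['configs/wide_parts_only.xml'])

    def check_wide_parts_default(node):
        # system.merge_tree_settings reflects the server-level defaults set in the XML above.
        value = node.query("SELECT value FROM system.merge_tree_settings "
                           "WHERE name = 'min_bytes_for_wide_part'").strip()
        assert value == '0'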
@@ -6,9 +6,9 @@ from helpers.cluster import ClickHouseCluster

 cluster = ClickHouseCluster(__file__)

-node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml'], with_zookeeper=True)
-node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml'], with_zookeeper=True)
-node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True)
+node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
+node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
+node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True)

 @pytest.fixture(scope="module")
 def start_cluster():
@@ -18,4 +18,8 @@
             </s3>
         </policies>
     </storage_configuration>
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
 </yandex>
@@ -40,7 +40,8 @@ def get_query_stat(instance, hint):
     return result


-def test_write_is_cached(cluster):
+@pytest.mark.parametrize("min_rows_for_wide_part,read_requests", [(0, 2), (8192, 1)])
+def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests):
     node = cluster.instances["node"]

     node.query(
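Note: the two parameter sets encode the part format. With min_rows_for_wide_part=0 the tiny part is written Wide, so each of the two columns has its own .bin file and the SELECT issues 2 S3 reads; with 8192 the part stays Compact with a single shared data.bin and only 1 read. A sketch of that arithmetic, assuming the s3_test table keeps exactly two columns and marks are served from the local cache; the helper is illustrative, not part of the commit:

    def expected_read_requests(min_rows_for_wide_part, n_columns=2):
        # Wide part: one <column>.bin per column; Compact part: one shared data.bin.
        return n_columns if min_rows_for_wide_part == 0 else 1

    assert expected_read_requests(0) == 2
    assert expected_read_requests(8192) == 1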
@@ -50,8 +51,8 @@ def test_write_is_cached(cluster):
             data String
         ) ENGINE=MergeTree()
         ORDER BY id
-        SETTINGS storage_policy='s3'
-        """
+        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
+        """.format(min_rows_for_wide_part)
     )

     node.query("SYSTEM FLUSH LOGS")
@@ -63,12 +64,12 @@ def test_write_is_cached(cluster):
     assert node.query(select_query) == "(0,'data'),(1,'data')"

     stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == 2  # Only .bin files should be accessed from S3.
+    assert stat["S3ReadRequestsCount"] == read_requests  # Only .bin files should be accessed from S3.

     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")


-def test_read_after_cache_is_wiped(cluster):
+@pytest.mark.parametrize("min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)])
+def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, bin_files):
     node = cluster.instances["node"]

     node.query(
@@ -78,8 +79,8 @@ def test_read_after_cache_is_wiped(cluster):
             data String
         ) ENGINE=MergeTree()
         ORDER BY id
-        SETTINGS storage_policy='s3'
-        """
+        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
+        """.format(min_rows_for_wide_part)
     )

     node.query("SYSTEM FLUSH LOGS")
@@ -93,12 +94,12 @@ def test_read_after_cache_is_wiped(cluster):
     select_query = "SELECT * FROM s3_test"
     node.query(select_query)
     stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == 4  # .mrk and .bin files should be accessed from S3.
+    assert stat["S3ReadRequestsCount"] == all_files  # .mrk and .bin files should be accessed from S3.

     # After cache is populated again, only .bin files should be accessed from S3.
     select_query = "SELECT * FROM s3_test order by id FORMAT Values"
     assert node.query(select_query) == "(0,'data'),(1,'data')"
     stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == 2
+    assert stat["S3ReadRequestsCount"] == bin_files

     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
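Note: on a cold cache both data and mark files are fetched from S3, so the Wide case costs 4 requests (id.bin and data.bin plus their mark files) and the Compact case 2 (data.bin plus its mark file); once the mark cache is warm, only the .bin files are read again, giving 2 and 1. A hypothetical summary of those expectations (not part of the commit), assuming two columns and one mark file per data file:

    EXPECTED = {
        # min_rows_for_wide_part: (cold-cache reads, warm-cache reads)
        0:    (4, 2),  # Wide part: per-column .bin files and their mark files
        8192: (2, 1),  # Compact part: single data.bin and its mark file
    }
    for min_rows, (all_files, bin_files) in EXPECTED.items():
        assert all_files == 2 * bin_files  # each data file pairs with exactly one mark file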
@@ -0,0 +1,6 @@
+<yandex>
+    <merge_tree>
+        <min_rows_for_wide_part>0</min_rows_for_wide_part>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
+</yandex>
@@ -9,7 +9,7 @@ from multiprocessing.dummy import Pool

 cluster = ClickHouseCluster(__file__)

-node1 = cluster.add_instance('node1')
+node1 = cluster.add_instance('node1', main_configs=['configs/wide_parts_only.xml'])

 @pytest.fixture(scope="module")
 def started_cluster():