Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 0dda1f280b (parent e5d44befbe)

Update setting remote_filesystem_read_method

The commit changes the default value of remote_filesystem_read_method from "read" to "threadpool", deletes the async_read.xml test profiles that previously opted test nodes into the threadpool reader, and removes the now-redundant node_async_read instances and test parametrizations.

@@ -508,7 +508,7 @@ class IColumn;
     M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable' - use short-circuit function evaluation for functions that are suitable for it, 'disable' - disable short-circuit function evaluation, 'force_enable' - use short-circuit function evaluation for all functions.", 0) \
     \
     M(String, local_filesystem_read_method, "pread", "Method of reading data from local filesystem, one of: read, pread, mmap, pread_threadpool.", 0) \
-    M(String, remote_filesystem_read_method, "read", "Method of reading data from remote filesystem, one of: read, threadpool.", 0) \
+    M(String, remote_filesystem_read_method, "threadpool", "Method of reading data from remote filesystem, one of: read, threadpool.", 0) \
     M(Bool, local_filesystem_read_prefetch, false, "Should use prefetching when reading data from local filesystem.", 0) \
     M(Bool, remote_filesystem_read_prefetch, true, "Should use prefetching when reading data from remote filesystem.", 0) \
     M(Int64, read_priority, 0, "Priority to read data from local filesystem. Only supported for 'pread_threadpool' method.", 0) \
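
With the default flipped to 'threadpool', a query that needs the old synchronous path has to request it explicitly. A minimal sketch in the idiom of the integration tests below; the `node` handle (a test-cluster instance) and the `s3_test` table are assumptions for illustration, not part of this commit:

    # Sketch: override the new 'threadpool' default back to plain 'read'
    # for a single query via ClickHouse's SETTINGS clause.
    def read_synchronously(node):
        return node.query(
            "SELECT count() FROM s3_test "
            "SETTINGS remote_filesystem_read_method = 'read'"
        )
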
@@ -1,7 +0,0 @@
-<yandex>
-    <profiles>
-        <default>
-            <remote_filesystem_read_method>threadpool</remote_filesystem_read_method>
-        </default>
-    </profiles>
-</yandex>
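
The file deleted above existed only to opt a test node into the threadpool reader via a user profile; with 'threadpool' now the built-in default, that override is redundant. As a hedged sketch (again assuming a test instance `node`), the effective default could be checked through the standard system.settings table:

    # Sketch: read the server's effective value for the setting.
    def check_default_read_method(node):
        value = node.query(
            "SELECT value FROM system.settings "
            "WHERE name = 'remote_filesystem_read_method'"
        ).strip()
        assert value == "threadpool"
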
@@ -11,7 +11,6 @@ def cluster():
         cluster.add_instance("node1", main_configs=["configs/storage_conf.xml"], with_nginx=True)
         cluster.add_instance("node2", main_configs=["configs/storage_conf_web.xml"], with_nginx=True)
         cluster.add_instance("node3", main_configs=["configs/storage_conf_web.xml"], with_nginx=True)
-        cluster.add_instance("node_async_read", main_configs=["configs/storage_conf_web.xml"], user_configs=["configs/async_read.xml"], with_nginx=True)
         cluster.start()

         node1 = cluster.instances["node1"]
@@ -38,7 +37,7 @@ def cluster():
         cluster.shutdown()


-@pytest.mark.parametrize("node_name", ["node2", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node2"])
 def test_usage(cluster, node_name):
     node1 = cluster.instances["node1"]
     node2 = cluster.instances[node_name]
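
The change above trims pytest's parametrize fan-out: each entry in the node_name list produces one test invocation, and the body resolves the name to a running instance, so removing "node_async_read" from the list (together with its deleted instance) halves the runs. A standalone sketch of the pattern; the `cluster` fixture and its instances are assumed:

    import pytest

    # Sketch: one invocation per node_name entry, each resolved
    # to a cluster instance inside the test body.
    @pytest.mark.parametrize("node_name", ["node2"])
    def test_usage_pattern(cluster, node_name):
        node = cluster.instances[node_name]
        assert node.query("SELECT 1").strip() == "1"
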
@@ -1,7 +0,0 @@
-<yandex>
-    <profiles>
-        <default>
-            <remote_filesystem_read_method>threadpool</remote_filesystem_read_method>
-        </default>
-    </profiles>
-</yandex>
@@ -50,11 +50,6 @@ def cluster():
                              main_configs=["configs/config.d/storage_conf.xml",
                                            "configs/config.d/bg_processing_pool_conf.xml"],
                              with_minio=True)
-        cluster.add_instance("node_async_read",
-                             main_configs=["configs/config.d/storage_conf.xml",
-                                           "configs/config.d/bg_processing_pool_conf.xml"],
-                             user_configs=["configs/config.d/async_read.xml"],
-                             with_minio=True)
         logging.info("Starting cluster...")
         cluster.start()
         logging.info("Cluster started")
@@ -145,7 +140,7 @@ def wait_for_delete_s3_objects(cluster, expected, timeout=30):


 @pytest.fixture(autouse=True)
-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def drop_table(cluster, node_name):
     yield
     node = cluster.instances[node_name]
@@ -165,9 +160,7 @@ def drop_table(cluster, node_name):
     "min_rows_for_wide_part,files_per_part,node_name",
     [
         (0, FILES_OVERHEAD_PER_PART_WIDE, "node"),
-        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node"),
-        (0, FILES_OVERHEAD_PER_PART_WIDE, "node_async_read"),
-        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node_async_read")
+        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node")
     ]
 )
 def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part, node_name):
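
Multi-field parametrize entries like the tuples above bind positionally to the comma-separated argument names, so dropping the node_async_read tuples shrinks the test matrix without touching the test body. A hedged sketch of the mechanics; the file counts here are placeholders, not the real FILES_OVERHEAD_* values:

    import pytest

    # Sketch: each tuple unpacks into the three named parameters.
    @pytest.mark.parametrize(
        "min_rows_for_wide_part,files_per_part,node_name",
        [(0, 10, "node"), (8192, 4, "node")],  # placeholder file counts
    )
    def test_part_layout(cluster, min_rows_for_wide_part, files_per_part, node_name):
        assert files_per_part > 0
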
@@ -191,9 +184,7 @@ def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part, node_name):
 @pytest.mark.parametrize(
     "merge_vertical,node_name", [
         (True, "node"),
-        (False, "node"),
-        (True, "node_async_read"),
-        (False, "node_async_read")
+        (False, "node")
     ])
 def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     settings = {}
@@ -235,7 +226,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)


-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_alter_table_columns(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -264,7 +255,7 @@ def test_alter_table_columns(cluster, node_name):
     wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)


-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_attach_detach_partition(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -296,7 +287,7 @@ def test_attach_detach_partition(cluster, node_name):
     assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD


-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_move_partition_to_another_disk(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -346,7 +337,7 @@ def test_table_manipulations(cluster, node_name):
     assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD


-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_move_replace_partition_to_another_table(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -498,7 +489,7 @@ def test_s3_disk_restart_during_load(cluster, node_name):
     thread.join()


-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_s3_disk_reads_on_unstable_connection(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test", storage_policy='unstable_s3')