Move setting to merge-tree level

kssenii 2024-02-27 23:22:04 +08:00
parent 1eba06dc11
commit ffd69e0e12
5 changed files with 6 additions and 6 deletions
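
In short: the Bool setting force_read_through_cache_for_merges moves from the server-wide settings list to the MergeTree settings list, so merges read the flag from the table's own settings rather than from a global server setting, and the test config now sets it under <merge_tree> instead of at the top level.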


@@ -102,9 +102,6 @@ namespace DB
     M(UInt64, tables_loader_background_pool_size, 0, "The maximum number of threads that will be used for background async loading of tables. Zero means use all CPUs.", 0) \
     M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \
     M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \
-    \
-    M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \
-    \
     M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \
     M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \
     M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \
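
This hunk drops the setting (and its surrounding continuation rows) from the server settings X-macro; an identical M(type, name, default, description, flags) row is added to the MergeTree settings list further down.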


@@ -5079,7 +5079,6 @@ ReadSettings Context::getReadSettings() const
     res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache;
     res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log;
     res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size;
-    res.force_read_through_cache_merges = getServerSettings().force_read_through_cache_for_merges;
     res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size;
     res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache;
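
With the server setting gone, Context::getReadSettings() no longer pre-computes a merge-specific flag into the ReadSettings it hands to the IO layer; the decision is made instead at the merge read site, shown in the next hunk, from the table's MergeTreeSettings.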


@@ -151,7 +151,8 @@ MergeTreeSequentialSource::MergeTreeSequentialSource(
     const auto & context = storage.getContext();
     ReadSettings read_settings = context->getReadSettings();
-    read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = !read_settings.force_read_through_cache_merges;
+    read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache =
+        !storage.getSettings()->force_read_through_cache_for_merges;
     /// It does not make sense to use pthread_threadpool for background merges/mutations
     /// And also to preserve backward compatibility
     read_settings.local_fs_method = LocalFSReadMethod::pread;
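
The logic is unchanged, only the source of the flag differs: read_from_filesystem_cache_if_exists_otherwise_bypass_cache is still the negation of the force flag, now taken from storage.getSettings(). A minimal standalone C++ sketch of that mapping (illustrative names, not ClickHouse's API):

#include <iostream>

// When the MergeTree setting is false (the default), a merge read may bypass
// the filesystem cache on a miss; when true, every merge read goes through
// the cache and populates it. Mirrors the negation in the hunk above.
static bool bypassCacheOnMissForMerge(bool force_read_through_cache_for_merges)
{
    return !force_read_through_cache_for_merges;
}

int main()
{
    std::cout << bypassCacheOnMissForMerge(false) << '\n'; // 1: bypass allowed on miss
    std::cout << bypassCacheOnMissForMerge(true) << '\n';  // 0: read through the cache
}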


@@ -191,6 +191,7 @@ struct Settings;
     M(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", 0) \
     M(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \
     M(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \
+    M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \
     M(Bool, allow_experimental_block_number_column, false, "Enable persisting column _block_number for each row.", 0) \
     M(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \
     \
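
The added row follows the same M(type, name, default, description, flags) shape as its neighbours. For readers unfamiliar with the pattern, a self-contained sketch of such an X-macro settings list (simplified field types; ClickHouse's real SettingField wrappers and flag handling are omitted):

// Toy settings list in the same X-macro style; each row expands to one field.
#define APPLY_FOR_SKETCH_SETTINGS(M) \
    M(bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud") \
    M(bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges")

struct SketchMergeTreeSettings
{
#define DECLARE_FIELD(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
    APPLY_FOR_SKETCH_SETTINGS(DECLARE_FIELD)
#undef DECLARE_FIELD
};

int main()
{
    SketchMergeTreeSettings settings;
    return settings.force_read_through_cache_for_merges ? 1 : 0; // defaults to false (0)
}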


@@ -1,3 +1,5 @@
 <clickhouse>
-    <force_read_through_cache_for_merges>1</force_read_through_cache_for_merges>
+    <merge_tree>
+        <force_read_through_cache_for_merges>1</force_read_through_cache_for_merges>
+    </merge_tree>
 </clickhouse>
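
Accordingly, the test config nests the flag inside <merge_tree>, the config section that supplies default MergeTree settings for all tables, rather than setting it as a top-level server option.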