mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-09-20 08:40:50 +00:00
Merge pull request #54602 from fionera/issue-49028
Add setting http_make_head_request
This commit is contained in:
commit
252cb8a507
@@ -460,6 +460,12 @@ Possible values:
 
 Default value: 1048576.
 
+## http_make_head_request {#http-make-head-request}
+
+The `http_make_head_request` setting allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size. Since it's enabled by default, it may be desirable to disable this setting in cases where the server does not support `HEAD` requests.
+
+Default value: `true`.
+
 ## table_function_remote_max_addresses {#table_function_remote_max_addresses}
 
 Sets the maximum number of addresses generated from patterns for the [remote](../../sql-reference/table-functions/remote.md) function.
@@ -338,6 +338,7 @@ class IColumn;
     M(UInt64, http_max_field_value_size, 128 * 1024, "Maximum length of field value in HTTP header", 0) \
     M(UInt64, http_max_chunk_size, 100_GiB, "Maximum value of a chunk size in HTTP chunked transfer encoding", 0) \
     M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \
+    M(Bool, http_make_head_request, true, "Allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size", 0) \
     M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \
     M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \
     M(UInt64, use_index_for_in_with_subqueries_max_values, 0, "The maximum size of set in the right hand side of the IN operator to use table index for filtering. It allows to avoid performance degradation and higher memory usage due to preparation of additional data structures for large queries. Zero means no limit.", 0) \
|
@ -120,6 +120,7 @@ struct ReadSettings
|
||||
size_t http_retry_initial_backoff_ms = 100;
|
||||
size_t http_retry_max_backoff_ms = 1600;
|
||||
bool http_skip_not_found_url_for_globs = true;
|
||||
bool http_make_head_request = true;
|
||||
|
||||
/// Monitoring
|
||||
bool for_object_storage = false; // to choose which profile events should be incremented
|
||||
|
@@ -808,6 +808,11 @@ std::optional<time_t> ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::tryGetLa
 template <typename UpdatableSessionPtr>
 HTTPFileInfo ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::getFileInfo()
 {
+    /// May be disabled in case the user knows in advance that the server doesn't support HEAD requests.
+    /// Allows to avoid making unnecessary requests in such cases.
+    if (!settings.http_make_head_request)
+        return HTTPFileInfo{};
+
     Poco::Net::HTTPResponse response;
     try
     {
@@ -4973,6 +4973,7 @@ ReadSettings Context::getReadSettings() const
     res.http_retry_initial_backoff_ms = settings.http_retry_initial_backoff_ms;
     res.http_retry_max_backoff_ms = settings.http_retry_max_backoff_ms;
     res.http_skip_not_found_url_for_globs = settings.http_skip_not_found_url_for_globs;
+    res.http_make_head_request = settings.http_make_head_request;
 
     res.mmap_cache = getMMappedFileCache().get();
 
@@ -62,6 +62,9 @@ def get_options(i: int, upgrade_check: bool) -> str:
     if random.random() < 0.1:
         client_options.append("optimize_trivial_approximate_count_query=1")
 
+    if random.random() < 0.3:
+        client_options.append(f"http_make_head_request={random.randint(0, 1)}")
+
     if client_options:
         options.append(" --client-option " + " ".join(client_options))
 
Loading…
Reference in New Issue
Block a user