mirror of https://github.com/ClickHouse/ClickHouse.git
commit 894513f6cd (parent 592fa77987)

    Fix tests
@@ -784,10 +784,19 @@ template <typename UpdatableSessionPtr>
 const std::string & ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::getCompressionMethod() const { return content_encoding; }
 
 template <typename UpdatableSessionPtr>
-std::optional<time_t> ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::getLastModificationTime()
+std::optional<time_t> ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::tryGetLastModificationTime()
 {
     if (!file_info)
-        file_info = getFileInfo();
+    {
+        try
+        {
+            file_info = getFileInfo();
+        }
+        catch (...)
+        {
+            return std::nullopt;
+        }
+    }
 
     return file_info->last_modified;
 }
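The rename follows the try-prefix convention: tryGetLastModificationTime reports failure through its return value rather than by throwing, so a failed getFileInfo() now surfaces as std::nullopt. A minimal sketch of the caller-facing contract, with Buffer as a hypothetical stand-in (not the ClickHouse class):

    #include <ctime>
    #include <iostream>
    #include <optional>

    struct Buffer
    {
        // Stand-in for ReadWriteBufferFromHTTPBase::tryGetLastModificationTime():
        // a failed metadata fetch yields an empty optional instead of an exception.
        std::optional<time_t> tryGetLastModificationTime() { return std::nullopt; }
    };

    int main()
    {
        Buffer buf;
        if (auto t = buf.tryGetLastModificationTime())
            std::cout << "last modified: " << *t << '\n';
        else
            std::cout << "modification time unavailable\n"; // no try/catch at the call site
    }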
@@ -201,7 +201,7 @@ namespace detail
 
     const std::string & getCompressionMethod() const;
 
-    std::optional<time_t> getLastModificationTime();
+    std::optional<time_t> tryGetLastModificationTime();
 
     HTTPFileInfo getFileInfo();
 
@@ -208,7 +208,7 @@ namespace
            throw Exception(ErrorCodes::LOGICAL_ERROR, "file_info shouldn't be null");
        for (int i = 0; i < ls.length; ++i)
        {
-            const String full_path = String(ls.file_info[i].mName);
+            const String full_path = fs::path(ls.file_info[i].mName).lexically_normal();
            const size_t last_slash = full_path.rfind('/');
            const String file_name = full_path.substr(last_slash);
            const bool looking_for_directory = next_slash_after_glob_pos != std::string::npos;
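fs::path(...).lexically_normal() cleans the listing entry purely lexically, collapsing repeated separators and "." segments before the path is split below; this is what later removes the doubled slash from the HDFS test expectation. A self-contained illustration (the example paths are made up):

    #include <filesystem>
    #include <iostream>

    int main()
    {
        namespace fs = std::filesystem;
        // Normalization is lexical only; the filesystem is never consulted.
        std::cout << fs::path("/a//b/./file1").lexically_normal().string() << '\n'; // "/a/b/file1"
        std::cout << fs::path("a//b").lexically_normal().string() << '\n';          // "a/b"
    }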
@@ -218,7 +218,7 @@ namespace
        {
            if (re2::RE2::FullMatch(file_name, matcher))
                result.push_back(StorageHDFS::PathWithInfo{
-                    String(file_name),
+                    String(full_path),
                    StorageHDFS::PathInfo{ls.file_info[i].mLastMod, static_cast<size_t>(ls.file_info[i].mSize)}});
        }
        else if (is_directory && looking_for_directory)
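The matched entry is now stored with its full normalized path instead of only the trailing file name, while the glob matcher still runs against file_name, the substring starting at the last slash. How that substring falls out of rfind/substr (standalone, with a hypothetical path):

    #include <iostream>
    #include <string>

    int main()
    {
        const std::string full_path = "/dir/file1";
        const size_t last_slash = full_path.rfind('/');
        std::cout << full_path.substr(last_slash) << '\n'; // "/file1" (the slash is kept)
    }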
@@ -253,7 +253,8 @@ namespace
        HDFSBuilderWrapper builder = createHDFSBuilder(uri_without_path + "/", context->getGlobalContext()->getConfigRef());
        HDFSFSPtr fs = createHDFSFS(builder.get());
 
-        return LSWithRegexpMatching("/", fs, path_from_uri);
+        auto res = LSWithRegexpMatching("/", fs, path_from_uri);
+        return res;
    }
 }
 
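Splitting the tail call into auto res = ...; return res; is behavior-neutral; presumably it just makes the listing result easier to inspect in a debugger while these tests were being fixed.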
@@ -992,7 +992,7 @@ Chunk StorageAzureBlobSource::generate()
            if (const auto * input_format = reader.getInputFormat())
                chunk_size = input_format->getApproxBytesReadForChunk();
            progress(num_rows, chunk_size ? chunk_size : chunk.bytes());
-            VirtualColumnUtils::addRequestedPathAndFileVirtualsToChunk(chunk, requested_virtual_columns, reader.getPath());
+            VirtualColumnUtils::addRequestedPathAndFileVirtualsToChunk(chunk, requested_virtual_columns, reader.getRelativePath());
            return chunk;
        }
 
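Judging by the accessor names, the Azure source now fills the path-related virtual columns from the blob path relative to the container (getRelativePath) rather than the reader's full path (getPath); the row and byte progress accounting around it is unchanged.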
@@ -292,7 +292,7 @@ StorageURLSource::StorageURLSource(
        while (getContext()->getSettingsRef().engine_url_skip_empty_files && uri_and_buf.second->eof());
 
        curr_uri = uri_and_buf.first;
-        auto last_mod_time = uri_and_buf.second->getLastModificationTime();
+        auto last_mod_time = uri_and_buf.second->tryGetLastModificationTime();
        read_buf = std::move(uri_and_buf.second);
 
        if (auto file_progress_callback = getContext()->getFileProgressCallback())
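StorageURLSource picks up the renamed buffer method; the result was already consumed as an optional, so only the call site changes.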
@@ -998,7 +998,7 @@ std::optional<ColumnsDescription> IStorageURLBase::tryGetColumnsFromCache(
    {
        auto get_last_mod_time = [&]() -> std::optional<time_t>
        {
-            auto last_mod_time = getLastModificationTime(url, headers, credentials, context);
+            auto last_mod_time = tryGetLastModificationTime(url, headers, credentials, context);
            /// Some URLs could not have Last-Modified header, in this case we cannot be sure that
            /// data wasn't changed after adding it's schema to cache. Use schema from cache only if
            /// special setting for this case is enabled.
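The schema-cache lookup switches to the non-throwing variant as well: when the Last-Modified time cannot be obtained, the lambda now yields std::nullopt, and the existing comment describes how a missing modification time is treated when deciding whether the cached schema is still trustworthy.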
@@ -1028,7 +1028,7 @@ void IStorageURLBase::addColumnsToCache(
    schema_cache.addManyColumns(cache_keys, columns);
 }
 
-std::optional<time_t> IStorageURLBase::getLastModificationTime(
+std::optional<time_t> IStorageURLBase::tryGetLastModificationTime(
    const String & url,
    const HTTPHeaderEntries & headers,
    const Poco::Net::HTTPBasicCredentials & credentials,
@@ -1036,29 +1036,22 @@ std::optional<time_t> IStorageURLBase::getLastModificationTime(
 {
    auto settings = context->getSettingsRef();
 
-    try
-    {
-        ReadWriteBufferFromHTTP buf(
-            Poco::URI(url),
-            Poco::Net::HTTPRequest::HTTP_GET,
-            {},
-            getHTTPTimeouts(context),
-            credentials,
-            settings.max_http_get_redirects,
-            settings.max_read_buffer_size,
-            context->getReadSettings(),
-            headers,
-            &context->getRemoteHostFilter(),
-            true,
-            false,
-            false);
-
-        return buf.getLastModificationTime();
-    }
-    catch (...)
-    {
-        return std::nullopt;
-    }
+    ReadWriteBufferFromHTTP buf(
+        Poco::URI(url),
+        Poco::Net::HTTPRequest::HTTP_GET,
+        {},
+        getHTTPTimeouts(context),
+        credentials,
+        settings.max_http_get_redirects,
+        settings.max_read_buffer_size,
+        context->getReadSettings(),
+        headers,
+        &context->getRemoteHostFilter(),
+        true,
+        false,
+        false);
+
+    return buf.tryGetLastModificationTime();
 }
 
 StorageURL::StorageURL(
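With the exception handling moved into ReadWriteBufferFromHTTP::tryGetLastModificationTime (first hunk above), this wrapper no longer needs its own try/catch: it just builds the buffer and delegates, which accounts for the shrink from 29 to 22 lines in the hunk header.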
@@ -59,7 +59,7 @@ public:
 
    static SchemaCache & getSchemaCache(const ContextPtr & context);
 
-    static std::optional<time_t> getLastModificationTime(
+    static std::optional<time_t> tryGetLastModificationTime(
        const String & url,
        const HTTPHeaderEntries & headers,
        const Poco::Net::HTTPBasicCredentials & credentials,
@@ -44,10 +44,10 @@ def azure_query(node, query, expect_error="false", try_num=10, settings={}):
            return node.query(query, settings=settings)
        except Exception as ex:
            retriable_errors = [
-                "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response",
-                "DB::Exception: Azure::Core::Http::TransportException: Connection closed before getting full response or response is less than expected",
-                "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response",
-                "DB::Exception: Azure::Core::Http::TransportException: Error while polling for socket ready read",
+                "Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response",
+                "Azure::Core::Http::TransportException: Connection closed before getting full response or response is less than expected",
+                "Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response",
+                "Azure::Core::Http::TransportException: Error while polling for socket ready read",
            ]
            retry = False
            for error in retriable_errors:
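The retriable-error patterns in the azure_query test helper drop the "DB::Exception: " prefix; since the loop below presumably substring-matches them against the raised exception text, the shorter form also recognizes transport errors that surface without that prefix. The near-duplicate entries covering different phrasings of the same condition are kept deliberately.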
@@ -318,7 +318,7 @@ def test_virtual_columns(started_cluster):
    hdfs_api.write_data("/file1", "1\n")
    hdfs_api.write_data("/file2", "2\n")
    hdfs_api.write_data("/file3", "3\n")
-    expected = "1\tfile1\thdfs://hdfs1:9000//file1\n2\tfile2\thdfs://hdfs1:9000//file2\n3\tfile3\thdfs://hdfs1:9000//file3\n"
+    expected = "1\tfile1\thdfs://hdfs1:9000/file1\n2\tfile2\thdfs://hdfs1:9000/file2\n3\tfile3\thdfs://hdfs1:9000/file3\n"
    assert (
        node1.query(
            "select id, _file as file_name, _path as file_path from virtual_cols order by id"
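The expected _path values lose the doubled slash (hdfs://hdfs1:9000//file1 becomes hdfs://hdfs1:9000/file1), consistent with the lexically_normal() call introduced in the StorageHDFS listing code above.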