mirror of https://github.com/ClickHouse/ClickHouse.git
commit 979b68a488
parent e0abb251e5

    fix
@@ -120,7 +120,7 @@ SpanHolder::SpanHolder(std::string_view _operation_name, SpanKind _kind)
         this->start_time_us
             = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count();

         /// Add new initialization here
         this->addAttribute("clickhouse.thread_id", getThreadId());
     }
     catch (...)
     {
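For context, this constructor stamps the span with a wall-clock start time in microseconds since the Unix epoch and tags it with the id of the creating thread. A minimal standalone sketch of that pattern (MiniSpan and its attribute map are illustrative stand-ins, not ClickHouse's SpanHolder API):

    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>
    #include <thread>

    struct MiniSpan
    {
        uint64_t start_time_us = 0;
        std::map<std::string, std::string> attributes;

        MiniSpan()
        {
            // Same idea as the diff: microseconds since epoch from system_clock.
            start_time_us = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::system_clock::now().time_since_epoch()).count();

            // ClickHouse uses its own getThreadId(); std::this_thread is the
            // portable stand-in here.
            std::ostringstream tid;
            tid << std::this_thread::get_id();
            attributes["clickhouse.thread_id"] = tid.str();
        }
    };

    int main()
    {
        MiniSpan span;
        std::cout << "start_time_us=" << span.start_time_us
                  << " thread_id=" << span.attributes["clickhouse.thread_id"] << "\n";
    }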
@@ -152,11 +152,6 @@ CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segm
     if (use_external_buffer)
         local_read_settings.local_fs_buffer_size = 0;

-    // The buffer will unnecessarily allocate a Memory of size local_fs_buffer_size, which will then
-    // most likely be unused because we're swap()ping our own internal_buffer into
-    // implementation_buffer before each read. But we can't just set local_fs_buffer_size = 0 here
-    // because some buffer implementations actually use that memory (e.g. for prefetching).
-
     auto buf = createReadBufferFromFileBase(path, local_read_settings);

     if (getFileSizeFromReadBuffer(*buf) == 0)
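The deleted comment had gone stale: with use_external_buffer set, the surrounding code does set local_fs_buffer_size to 0, so the reader skips allocating internal memory that the caller's own buffer makes redundant. A simplified sketch of that allocation trade-off, with ReadSettings and FileReader as hypothetical types rather than the real ClickHouse classes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct ReadSettings
    {
        size_t local_fs_buffer_size = 1024 * 1024; // default internal buffer, 1 MiB
    };

    struct FileReader
    {
        std::vector<char> internal_buffer;

        explicit FileReader(const ReadSettings & settings)
            : internal_buffer(settings.local_fs_buffer_size) // size 0 => no allocation
        {
        }
    };

    int main()
    {
        ReadSettings settings;
        bool use_external_buffer = true; // caller will swap() in its own memory
        if (use_external_buffer)
            settings.local_fs_buffer_size = 0;

        FileReader reader(settings);
        std::cout << "internal buffer bytes: " << reader.internal_buffer.size() << "\n";
    }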
@@ -827,7 +822,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
         }
     }

-    if (use_external_buffer && initialized)
+    if (use_external_buffer && !internal_buffer.empty())
         internal_buffer.resize(original_buffer_size);

     chassert(!file_segment.isDownloader());
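The fix swaps the guard from initialized to !internal_buffer.empty(): restoring the original size is only meaningful when the internal buffer actually owns memory, and with an external buffer it can be empty even after initialization. A small sketch of the guarded restore, using a hypothetical BufferView in place of ClickHouse's buffer type:

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-in for the internal buffer abstraction.
    struct BufferView
    {
        char * begin = nullptr;
        size_t size = 0;

        bool empty() const { return size == 0; }

        void resize(size_t new_size)
        {
            assert(begin != nullptr); // resizing a view with no memory is a bug
            size = new_size;
        }
    };

    // Mirrors the fixed condition: only touch the internal buffer if it holds memory.
    void restoreInternalBuffer(BufferView & internal_buffer, bool use_external_buffer, size_t original_buffer_size)
    {
        if (use_external_buffer && !internal_buffer.empty())
            internal_buffer.resize(original_buffer_size);
    }

    int main()
    {
        char storage[64];
        BufferView buf{storage, sizeof(storage)};
        restoreInternalBuffer(buf, /*use_external_buffer=*/ true, 32); // resizes to 32
        BufferView unallocated;
        restoreInternalBuffer(unallocated, true, 32); // safely skipped: empty()
    }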
@@ -71,7 +71,8 @@ MergeTreeReadPool::MergeTreeReadPool(
     {
         const auto min_bytes_per_task = settings.merge_tree_min_bytes_per_task_for_remote_reading;
         const auto avg_mark_bytes = std::max<size_t>(total_compressed_bytes / total_marks, 1);
-        const auto heuristic_min_marks = std::min(total_marks / threads_, min_bytes_per_task / avg_mark_bytes);
+        /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible.
+        const auto heuristic_min_marks = std::min(total_marks / threads_ / 8, min_bytes_per_task / avg_mark_bytes);
         if (heuristic_min_marks > min_marks_for_concurrent_read)
         {
             min_marks_for_concurrent_read = heuristic_min_marks;
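To see what the extra division by 8 buys, here is a worked example. All input numbers (mark count, thread count, byte sizes, including the setting value) are invented for illustration; only the two formulas mirror the diff:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        size_t total_marks = 16384;
        size_t threads = 16;
        size_t total_compressed_bytes = 256ULL * 1024 * 1024; // 256 MiB
        size_t min_bytes_per_task = 16ULL * 1024 * 1024;      // assumed setting value, 16 MiB

        size_t avg_mark_bytes = std::max<size_t>(total_compressed_bytes / total_marks, 1); // 16 KiB

        // Old formula: min(16384 / 16, 1024) = 1024 marks per task,
        // i.e. exactly one task per thread and nothing left to steal.
        size_t old_min_marks = std::min(total_marks / threads, min_bytes_per_task / avg_mark_bytes);

        // New formula: min(16384 / 16 / 8, 1024) = 128 marks per task,
        // i.e. about 8 tasks per thread, so idle threads can steal work.
        size_t new_min_marks = std::min(total_marks / threads / 8, min_bytes_per_task / avg_mark_bytes);

        std::cout << "old: " << old_min_marks << " marks/task, "
                  << total_marks / old_min_marks / threads << " task(s) per thread\n";
        std::cout << "new: " << new_min_marks << " marks/task, "
                  << total_marks / new_min_marks / threads << " task(s) per thread\n";
    }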