PODArray: Avoid call to memcpy with (nullptr, 0) arguments (#18526)

* Avoid call to memcpy with nullptr, 0 arguments

* Add assert to PODArray

* Fix build

* Fix build

* Minor fixes for min/sim hash

* Fix build

* Fix build

* Fix build

* Fix error

* Fix "not actually an error"

* Fix build

* Fix build

* Fix build
commit aff724ea7d (parent 19e0e1a403)
Authored by alexey-milovidov on 2021-01-02 17:07:54 +03:00, committed by GitHub
2 changed files with 59 additions and 11 deletions
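
Editorial context, not part of the commit: the C standard requires every pointer passed to memcpy to be valid even when the byte count is zero, so memcpy(dst, nullptr, 0) is undefined behavior and is flagged by UBSan as a null pointer passed to a function declared with the nonnull attribute. The patch applies one simple pattern throughout: skip the copy when there is nothing to copy. A minimal standalone sketch of that pattern, using a hypothetical appendBytes helper rather than anything from the ClickHouse sources:

#include <cassert>
#include <cstring>
#include <vector>

/// Only call memcpy when the size is non-zero, so a null (or one-past-end)
/// source pointer describing an empty range never reaches it.
static void appendBytes(std::vector<char> & dst, const char * src, size_t n)
{
    if (n)
    {
        size_t old_size = dst.size();
        dst.resize(old_size + n);
        memcpy(dst.data() + old_size, src, n);   /// reached only when n > 0, so src must point at real data
    }
}

int main()
{
    std::vector<char> out;
    appendBytes(out, nullptr, 0);   /// no-op: the guard prevents memcpy(ptr, nullptr, 0)
    appendBytes(out, "abc", 3);
    assert(out.size() == 3);
    return 0;
}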

Changed file 1 of 2 (aggregate function merge):

@@ -188,13 +188,13 @@ public:
         if (!limit_num_elems)
         {
             if (rhs_elems.value.size())
-                cur_elems.value.insert(rhs_elems.value.begin(), rhs_elems.value.end(), arena);
+                cur_elems.value.insertByOffsets(rhs_elems.value, 0, rhs_elems.value.size(), arena);
         }
         else
         {
             UInt64 elems_to_insert = std::min(static_cast<size_t>(max_elems) - cur_elems.value.size(), rhs_elems.value.size());
             if (elems_to_insert)
-                cur_elems.value.insert(rhs_elems.value.begin(), rhs_elems.value.begin() + elems_to_insert, arena);
+                cur_elems.value.insertByOffsets(rhs_elems.value, 0, elems_to_insert, arena);
         }
     }

Changed file 2 of 2 (PODArray):

@@ -89,8 +89,8 @@ protected:
     static constexpr size_t pad_right = integerRoundUp(pad_right_, ELEMENT_SIZE);
     /// pad_left is also rounded up to 16 bytes to maintain alignment of allocated memory.
     static constexpr size_t pad_left = integerRoundUp(integerRoundUp(pad_left_, ELEMENT_SIZE), 16);
-    /// Empty array will point to this static memory as padding.
-    static constexpr char * null = pad_left ? const_cast<char *>(empty_pod_array) + empty_pod_array_size : nullptr;
+    /// Empty array will point to this static memory as padding and begin/end.
+    static constexpr char * null = const_cast<char *>(empty_pod_array) + pad_left;

     static_assert(pad_left <= empty_pod_array_size && "Left Padding exceeds empty_pod_array_size. Is the element size too large?");
@@ -268,8 +268,11 @@ public:
         reserve(required_capacity, std::forward<TAllocatorParams>(allocator_params)...);

         size_t items_byte_size = byte_size(number_of_items);
-        memcpy(c_end, ptr, items_byte_size);
-        c_end += items_byte_size;
+        if (items_byte_size)
+        {
+            memcpy(c_end, ptr, items_byte_size);
+            c_end += items_byte_size;
+        }
     }

     void protect()
@@ -289,6 +292,18 @@ public:
 #endif
     }

+    template <typename It1, typename It2>
+    inline void assertNotIntersects(It1 from_begin [[maybe_unused]], It2 from_end [[maybe_unused]])
+    {
+#if !defined(NDEBUG)
+        const char * ptr_begin = reinterpret_cast<const char *>(&*from_begin);
+        const char * ptr_end = reinterpret_cast<const char *>(&*from_end);
+
+        /// Also it's safe if the range is empty.
+        assert(!((ptr_begin >= c_start && ptr_begin <= c_end) || (ptr_end >= c_start && ptr_end <= c_end)) || (ptr_begin == ptr_end));
+#endif
+    }
+
     ~PODArrayBase()
     {
         dealloc();
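
A hedged illustration of what the new debug-only check catches (usage sketch assuming ClickHouse's Common/PODArray.h header and the DB::PaddedPODArray alias; not part of the diff): source iterators that point into the destination's own buffer can be invalidated by the reserve() that insert() performs, and with this patch a !NDEBUG build aborts on such a call via assertNotIntersects() in insertPrepare():

#include <Common/PODArray.h>   /// assumed include path, as in the ClickHouse source tree

void appendToItself(DB::PaddedPODArray<UInt64> & arr)
{
    /// For a non-empty arr this now fails assertNotIntersects() in a debug build:
    /// the source range aliases arr's own storage and may dangle after the
    /// internal reserve(). Empty ranges are explicitly allowed by the assert.
    arr.insert(arr.begin(), arr.end());
}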
@@ -444,6 +459,7 @@ public:
     template <typename It1, typename It2, typename ... TAllocatorParams>
     void insertPrepare(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
+        this->assertNotIntersects(from_begin, from_end);
         size_t required_capacity = this->size() + (from_end - from_begin);
         if (required_capacity > this->capacity())
             this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward<TAllocatorParams>(allocator_params)...);
@@ -457,6 +473,28 @@ public:
         insert_assume_reserved(from_begin, from_end);
     }

+    /// In contrast to 'insert' this method is Ok even for inserting from itself.
+    /// Because we obtain iterators after reserving memory.
+    template <typename Container, typename ... TAllocatorParams>
+    void insertByOffsets(Container && rhs, size_t from_begin, size_t from_end, TAllocatorParams &&... allocator_params)
+    {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(rhs.front())>>);
+
+        assert(from_end >= from_begin);
+        assert(from_end <= rhs.size());
+
+        size_t required_capacity = this->size() + (from_end - from_begin);
+        if (required_capacity > this->capacity())
+            this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward<TAllocatorParams>(allocator_params)...);
+
+        size_t bytes_to_copy = this->byte_size(from_end - from_begin);
+        if (bytes_to_copy)
+        {
+            memcpy(this->c_end, reinterpret_cast<const void *>(rhs.begin() + from_begin), bytes_to_copy);
+            this->c_end += bytes_to_copy;
+        }
+    }
+
     /// Works under assumption, that it's possible to read up to 15 excessive bytes after `from_end` and this PODArray is padded.
     template <typename It1, typename It2, typename ... TAllocatorParams>
     void insertSmallAllowReadWriteOverflow15(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
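
A possible usage sketch for the new method (same assumptions as above; not taken from the commit): insertByOffsets() takes positions instead of iterators and resolves them only after reserving, so appending a PODArray to itself is well defined; that is presumably also why the aggregate-function merge in the first changed file was switched from insert() to insertByOffsets():

#include <Common/PODArray.h>

void duplicateInPlace(DB::PaddedPODArray<UInt64> & arr)
{
    /// Safe self-append: the offsets stay valid across the internal reserve(),
    /// and the zero-size guard makes this a no-op for an empty arr.
    arr.insertByOffsets(arr, 0, arr.size());
}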
@@ -476,6 +514,9 @@ public:
         static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);

         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
+        if (!bytes_to_copy)
+            return;
+
         size_t bytes_to_move = this->byte_size(end() - it);

         insertPrepare(from_begin, from_end);
@@ -492,10 +533,14 @@ public:
     void insert_assume_reserved(It1 from_begin, It2 from_end)
     {
         static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
+        this->assertNotIntersects(from_begin, from_end);

         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
-        memcpy(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
-        this->c_end += bytes_to_copy;
+        if (bytes_to_copy)
+        {
+            memcpy(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
+            this->c_end += bytes_to_copy;
+        }
     }

     template <typename... TAllocatorParams>
@@ -626,15 +671,18 @@ public:
     void assign(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
         static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
+        this->assertNotIntersects(from_begin, from_end);

         size_t required_capacity = from_end - from_begin;
         if (required_capacity > this->capacity())
             this->reserve_exact(required_capacity, std::forward<TAllocatorParams>(allocator_params)...);

         size_t bytes_to_copy = this->byte_size(required_capacity);
-        memcpy(this->c_start, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
-        this->c_end = this->c_start + bytes_to_copy;
+        if (bytes_to_copy)
+        {
+            memcpy(this->c_start, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
+            this->c_end = this->c_start + bytes_to_copy;
+        }
     }

     // ISO C++ has strict ambiguity rules, thus we cannot apply TAllocatorParams here.
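
Finally, an end-to-end sketch of the behavior the patch fixes (again assuming the ClickHouse headers; not part of the commit): before the change, an unpadded empty PODArray had begin() == end() == nullptr, so inserting or assigning from it passed null pointers to memcpy together with a zero size, which UBSan reports. Now begin()/end() point into the static empty_pod_array and every zero-byte copy is skipped, so both calls below are clean no-ops:

#include <cassert>
#include <Common/PODArray.h>

int main()
{
    DB::PODArray<UInt64> source;        /// empty; begin()/end() now point at static padding, never nullptr
    DB::PODArray<UInt64> destination;

    destination.insert(source.begin(), source.end());   /// zero bytes to copy: memcpy is not called
    destination.assign(source.begin(), source.end());   /// likewise guarded

    assert(destination.empty());
    return 0;
}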