Require explicit type in unalignedStore
This is a follow-up to PR #5786, which fixed a segfault caused by an unexpectedly deduced type in unalignedStore. To prevent future errors of this kind, require the caller to specify the stored type explicitly.
This commit is contained in:
parent 4db84332fa
commit 0116c10e41
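For context, here is a minimal sketch (not part of the commit) of how the deduced-type bug behind PR #5786 arises: summing two uint16_t values promotes the result to int, so the old deduction-based unalignedStore silently picks a 4-byte store where the caller expects 2. The helper name unalignedStoreOld and the buffer are illustrative only.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

/// The pre-commit shape of the function: T is deduced from the argument.
template <typename T>
void unalignedStoreOld(void * address, const T & src)
{
    std::printf("deduced store size: %zu bytes\n", sizeof(T));
    memcpy(address, &src, sizeof(src));
}

int main()
{
    uint16_t a = 1, b = 2;
    char buf[sizeof(int)]; /// sized generously so this demo itself stays safe

    /// Integral promotion: a + b has type int, so T = int and 4 bytes are
    /// stored. A caller holding only a 2-byte slot would be overrun.
    unalignedStoreOld(buf, a + b); /// prints "deduced store size: 4 bytes"
}
```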
@@ -33,7 +33,7 @@ template <typename T>
 StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
 {
     auto pos = arena.allocContinue(sizeof(T), begin);
-    unalignedStore(pos, data[n]);
+    unalignedStore<T>(pos, data[n]);
     return StringRef(pos, sizeof(T));
 }
@@ -67,7 +67,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     while (source < source_end)
     {
         accumulator += unalignedLoad<T>(source);
-        unalignedStore(dest, accumulator);
+        unalignedStore<T>(dest, accumulator);

         source += sizeof(T);
         dest += sizeof(T);
@@ -90,7 +90,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
     const char * source_end = source + source_size;

     const UInt32 items_count = source_size / sizeof(T);
-    unalignedStore(dest, items_count);
+    unalignedStore<UInt32>(dest, items_count);
     dest += sizeof(items_count);

     T prev_value{};
@@ -99,7 +99,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
     if (source < source_end)
     {
         prev_value = unalignedLoad<T>(source);
-        unalignedStore(dest, prev_value);
+        unalignedStore<T>(dest, prev_value);

         source += sizeof(prev_value);
         dest += sizeof(prev_value);
@@ -109,7 +109,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
     {
         const T curr_value = unalignedLoad<T>(source);
         prev_delta = static_cast<DeltaType>(curr_value - prev_value);
-        unalignedStore(dest, prev_delta);
+        unalignedStore<T>(dest, prev_delta);

         source += sizeof(curr_value);
         dest += sizeof(prev_delta);
@@ -164,7 +164,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     if (source < source_end)
     {
         prev_value = unalignedLoad<T>(source);
-        unalignedStore(dest, prev_value);
+        unalignedStore<T>(dest, prev_value);

         source += sizeof(prev_value);
         dest += sizeof(prev_value);
@@ -174,7 +174,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     {
         prev_delta = unalignedLoad<DeltaType>(source);
         prev_value = static_cast<T>(prev_value + prev_delta);
-        unalignedStore(dest, prev_value);
+        unalignedStore<T>(dest, prev_value);

         source += sizeof(prev_delta);
         dest += sizeof(prev_value);
@@ -209,7 +209,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
         // else if first bit is zero, no need to read more data.

         const T curr_value = static_cast<T>(prev_value + prev_delta + double_delta);
-        unalignedStore(dest, curr_value);
+        unalignedStore<T>(dest, curr_value);
         dest += sizeof(curr_value);

         prev_delta = curr_value - prev_value;
@@ -94,7 +94,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)

     const UInt32 items_count = source_size / sizeof(T);

-    unalignedStore(dest, items_count);
+    unalignedStore<UInt32>(dest, items_count);
     dest += sizeof(items_count);

     T prev_value{};
@@ -104,7 +104,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
     if (source < source_end)
     {
         prev_value = unalignedLoad<T>(source);
-        unalignedStore(dest, prev_value);
+        unalignedStore<T>(dest, prev_value);

         source += sizeof(prev_value);
         dest += sizeof(prev_value);
@@ -166,7 +166,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     if (source < source_end)
     {
         prev_value = unalignedLoad<T>(source);
-        unalignedStore(dest, prev_value);
+        unalignedStore<T>(dest, prev_value);

         source += sizeof(prev_value);
         dest += sizeof(prev_value);
@@ -210,7 +210,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
         }
         // else: 0b0 prefix - use prev_value

-        unalignedStore(dest, curr_value);
+        unalignedStore<T>(dest, curr_value);
         dest += sizeof(curr_value);

         prev_xored_info = curr_xored_info;
@@ -390,7 +390,7 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco
     {
         _T min_value = min;
         for (UInt32 i = 0; i < num_elements; ++i, dst += sizeof(_T))
-            unalignedStore(dst, min_value);
+            unalignedStore<_T>(dst, min_value);
         return;
     }
@@ -200,7 +200,7 @@ inline void copyOverlap8Shuffle(UInt8 * op, const UInt8 *& match, const size_t o
         0, 1, 2, 3, 4, 5, 6, 0,
     };

-    unalignedStore(op, vtbl1_u8(unalignedLoad<uint8x8_t>(match), unalignedLoad<uint8x8_t>(masks + 8 * offset)));
+    unalignedStore<uint8x8_t>(op, vtbl1_u8(unalignedLoad<uint8x8_t>(match), unalignedLoad<uint8x8_t>(masks + 8 * offset)));
     match += masks[offset];
 }
@@ -328,10 +328,10 @@ inline void copyOverlap16Shuffle(UInt8 * op, const UInt8 *& match, const size_t
         0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0,
     };

-    unalignedStore(op,
+    unalignedStore<uint8x8_t>(op,
         vtbl2_u8(unalignedLoad<uint8x8x2_t>(match), unalignedLoad<uint8x8_t>(masks + 16 * offset)));

-    unalignedStore(op + 8,
+    unalignedStore<uint8x8_t>(op + 8,
         vtbl2_u8(unalignedLoad<uint8x8x2_t>(match), unalignedLoad<uint8x8_t>(masks + 16 * offset + 8)));

     match += masks[offset];
@@ -57,10 +57,10 @@ void RandImpl::execute(char * output, size_t size)

     for (const char * end = output + size; output < end; output += 16)
     {
-        unalignedStore(output, generator0.next());
-        unalignedStore(output + 4, generator1.next());
-        unalignedStore(output + 8, generator2.next());
-        unalignedStore(output + 12, generator3.next());
+        unalignedStore<UInt32>(output, generator0.next());
+        unalignedStore<UInt32>(output + 4, generator1.next());
+        unalignedStore<UInt32>(output + 8, generator2.next());
+        unalignedStore<UInt32>(output + 12, generator3.next());
     }

     /// It is guaranteed (by PaddedPODArray) that we can overwrite up to 15 bytes after end.
@@ -1,6 +1,7 @@
 #pragma once

 #include <string.h>
+#include <type_traits>


 template <typename T>
@@ -11,8 +12,14 @@ inline T unalignedLoad(const void * address)
     return res;
 }

+/// We've had troubles before with wrong store size due to integral promotions
+/// (e.g., unalignedStore(dest, uint16_t + uint16_t) stores an uint32_t).
+/// To prevent this, make the caller specify the stored type explicitly.
+/// To disable deduction of T, wrap the argument type with std::enable_if.
 template <typename T>
-inline void unalignedStore(void * address, const T & src)
+inline void unalignedStore(void * address,
+    const typename std::enable_if<true, T>::type & src)
 {
     static_assert(std::is_trivially_copyable_v<T>);
     memcpy(address, &src, sizeof(src));
 }
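The new signature works because T appears only inside a nested-name-specifier (std::enable_if<true, T>::type), which is a non-deduced context: the compiler will not infer T from the argument, so any call that omits the explicit template argument fails to compile. A sketch of the call-site behavior under the new header (buffer names are illustrative):

```cpp
#include <cstdint>
#include <cstring>
#include <type_traits>

template <typename T>
inline void unalignedStore(void * address,
    const typename std::enable_if<true, T>::type & src)
{
    static_assert(std::is_trivially_copyable_v<T>);
    memcpy(address, &src, sizeof(src));
}

int main()
{
    char buf[sizeof(uint16_t)];
    uint16_t a = 1, b = 2;

    /// OK: the stored type is spelled out, so exactly 2 bytes are written
    /// (the promoted int result of a + b converts back to uint16_t).
    unalignedStore<uint16_t>(buf, a + b);

    /// Does not compile: T sits in a non-deduced context, so the
    /// accident-prone implicit deduction is simply impossible now.
    // unalignedStore(buf, a + b);
}
```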