Fixed function if of FixedString arguments [#CLICKHOUSE-3202].
parent 0bb8b32cb9
commit dadf5ac400
@@ -140,263 +140,6 @@ public:
 };
 
 
-struct StringIfImpl
-{
-    static void vector_vector(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnString::Chars_t & a_data, const ColumnString::Offsets_t & a_offsets,
-        const ColumnString::Chars_t & b_data, const ColumnString::Offsets_t & b_offsets,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        size_t size = cond.size();
-        c_offsets.resize(size);
-        c_data.reserve(std::max(a_data.size(), b_data.size()));
-
-        ColumnString::Offset_t a_prev_offset = 0;
-        ColumnString::Offset_t b_prev_offset = 0;
-        ColumnString::Offset_t c_prev_offset = 0;
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (cond[i])
-            {
-                size_t size_to_write = a_offsets[i] - a_prev_offset;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &a_data[a_prev_offset], size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-            else
-            {
-                size_t size_to_write = b_offsets[i] - b_prev_offset;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &b_data[b_prev_offset], size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-
-            a_prev_offset = a_offsets[i];
-            b_prev_offset = b_offsets[i];
-        }
-    }
-
-    static void vector_fixed_vector_fixed(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnFixedString::Chars_t & a_data,
-        const ColumnFixedString::Chars_t & b_data,
-        const size_t N,
-        ColumnFixedString::Chars_t & c_data)
-    {
-        size_t size = cond.size();
-        c_data.resize(a_data.size());
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (cond[i])
-                memcpySmallAllowReadWriteOverflow15(&c_data[i * N], &a_data[i * N], N);
-            else
-                memcpySmallAllowReadWriteOverflow15(&c_data[i * N], &b_data[i * N], N);
-        }
-    }
-
-    template <bool negative>
-    static void vector_vector_fixed_impl(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnString::Chars_t & a_data, const ColumnString::Offsets_t & a_offsets,
-        const ColumnFixedString::Chars_t & b_data, const size_t b_N,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        size_t size = cond.size();
-        c_offsets.resize(size);
-        c_data.reserve(std::max(a_data.size(), b_data.size() + size));
-
-        ColumnString::Offset_t a_prev_offset = 0;
-        ColumnString::Offset_t c_prev_offset = 0;
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (negative != cond[i])
-            {
-                size_t size_to_write = a_offsets[i] - a_prev_offset;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &a_data[a_prev_offset], size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-            else
-            {
-                size_t size_to_write = b_N;
-                c_data.resize(c_data.size() + size_to_write + 1);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &b_data[i * b_N], size_to_write);
-                c_data.back() = 0;
-                c_prev_offset += size_to_write + 1;
-                c_offsets[i] = c_prev_offset;
-            }
-
-            a_prev_offset = a_offsets[i];
-        }
-    }
-
-    static void vector_vector_fixed(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnString::Chars_t & a_data, const ColumnString::Offsets_t & a_offsets,
-        const ColumnFixedString::Chars_t & b_data, const size_t b_N,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        vector_vector_fixed_impl<false>(cond, a_data, a_offsets, b_data, b_N, c_data, c_offsets);
-    }
-
-    static void vector_fixed_vector(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnFixedString::Chars_t & a_data, const size_t a_N,
-        const ColumnString::Chars_t & b_data, const ColumnString::Offsets_t & b_offsets,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        vector_vector_fixed_impl<true>(cond, b_data, b_offsets, a_data, a_N, c_data, c_offsets);
-    }
-
-    template <bool negative>
-    static void vector_constant_impl(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnString::Chars_t & a_data, const ColumnString::Offsets_t & a_offsets,
-        const String & b,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        size_t size = cond.size();
-        c_offsets.resize(size);
-        c_data.reserve(a_data.size());
-
-        ColumnString::Offset_t a_prev_offset = 0;
-        ColumnString::Offset_t c_prev_offset = 0;
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (negative != cond[i])
-            {
-                size_t size_to_write = a_offsets[i] - a_prev_offset;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &a_data[a_prev_offset], size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-            else
-            {
-                size_t size_to_write = b.size() + 1;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpy(&c_data[c_prev_offset], b.data(), size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-
-            a_prev_offset = a_offsets[i];
-        }
-    }
-
-    static void vector_constant(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnString::Chars_t & a_data, const ColumnString::Offsets_t & a_offsets,
-        const String & b,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        return vector_constant_impl<false>(cond, a_data, a_offsets, b, c_data, c_offsets);
-    }
-
-    static void constant_vector(
-        const PaddedPODArray<UInt8> & cond,
-        const String & a,
-        const ColumnString::Chars_t & b_data, const ColumnString::Offsets_t & b_offsets,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        return vector_constant_impl<true>(cond, b_data, b_offsets, a, c_data, c_offsets);
-    }
-
-    template <bool negative>
-    static void vector_fixed_constant_impl(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnFixedString::Chars_t & a_data, const size_t a_N,
-        const String & b,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        size_t size = cond.size();
-        c_offsets.resize(size);
-        c_data.reserve(a_data.size());
-
-        ColumnString::Offset_t c_prev_offset = 0;
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (negative != cond[i])
-            {
-                size_t size_to_write = a_N;
-                c_data.resize(c_data.size() + size_to_write + 1);
-                memcpySmallAllowReadWriteOverflow15(&c_data[c_prev_offset], &a_data[i * a_N], size_to_write);
-                c_data.back() = 0;
-                c_prev_offset += size_to_write + 1;
-                c_offsets[i] = c_prev_offset;
-            }
-            else
-            {
-                size_t size_to_write = b.size() + 1;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpy(&c_data[c_prev_offset], b.data(), size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-        }
-    }
-
-    static void vector_fixed_constant(
-        const PaddedPODArray<UInt8> & cond,
-        const ColumnFixedString::Chars_t & a_data, const size_t N,
-        const String & b,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        vector_fixed_constant_impl<false>(cond, a_data, N, b, c_data, c_offsets);
-    }
-
-    static void constant_vector_fixed(
-        const PaddedPODArray<UInt8> & cond,
-        const String & a,
-        const ColumnFixedString::Chars_t & b_data, const size_t N,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        vector_fixed_constant_impl<true>(cond, b_data, N, a, c_data, c_offsets);
-    }
-
-    static void constant_constant(
-        const PaddedPODArray<UInt8> & cond,
-        const String & a, const String & b,
-        ColumnString::Chars_t & c_data, ColumnString::Offsets_t & c_offsets)
-    {
-        size_t size = cond.size();
-        c_offsets.resize(size);
-        c_data.reserve((std::max(a.size(), b.size()) + 1) * size);
-
-        ColumnString::Offset_t c_prev_offset = 0;
-
-        for (size_t i = 0; i < size; ++i)
-        {
-            if (cond[i])
-            {
-                size_t size_to_write = a.size() + 1;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpy(&c_data[c_prev_offset], a.data(), size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-            else
-            {
-                size_t size_to_write = b.size() + 1;
-                c_data.resize(c_data.size() + size_to_write);
-                memcpy(&c_data[c_prev_offset], b.data(), size_to_write);
-                c_prev_offset += size_to_write;
-                c_offsets[i] = c_prev_offset;
-            }
-        }
-    }
-};
-
-
 template <typename A, typename B, typename ResultType>
 struct NumArrayIfImpl
 {
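For context, each method of the removed StringIfImpl is a hand-written per-row gather over one particular pair of branch representations. The fixed-width case (vector_fixed_vector_fixed above) boils down to the standalone sketch below; plain std::vector and std::memcpy stand in for the ClickHouse column types and for memcpySmallAllowReadWriteOverflow15, so this is an illustration of the pattern, not the actual implementation.

#include <cstdint>
#include <cstring>
#include <vector>

/// Sketch of the removed fixed/fixed case: for every row, copy the
/// N-byte value from branch a or branch b into the result, depending
/// on the condition.
static void select_fixed(
    const std::vector<uint8_t> & cond,
    const std::vector<char> & a_data,
    const std::vector<char> & b_data,
    size_t N,
    std::vector<char> & c_data)
{
    const size_t size = cond.size();
    c_data.resize(size * N);

    for (size_t i = 0; i < size; ++i)
        std::memcpy(c_data.data() + i * N,
                    (cond[i] ? a_data.data() : b_data.data()) + i * N,
                    N);
}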
@@ -424,7 +424,7 @@ struct IStringSource
 };
 
 template <typename Impl>
-struct DynamicStringSource : IStringSource
+struct DynamicStringSource final : IStringSource
 {
     Impl impl;
 
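The hunk above only adds final to DynamicStringSource. A minimal, hypothetical illustration of the motivation (unrelated names, not ClickHouse code): once the most-derived type is final, virtual calls made through that concrete type cannot be overridden any further, so the compiler is free to devirtualize and inline them.

#include <cstddef>

/// ISource plays the role of the abstract source interface.
struct ISource
{
    virtual ~ISource() = default;
    virtual size_t next() = 0;
};

/// 'final' tells the compiler that no further overrides of next() can
/// exist, so calls through a Counter& or Counter* may be devirtualized.
struct Counter final : ISource
{
    size_t value = 0;
    size_t next() override { return value++; }
};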
@@ -564,7 +564,7 @@ struct GenericArraySink
 /// Methods to copy Slice to Sink, overloaded for various combinations of types.
 
 template <typename T>
-void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<T> & sink)
+void ALWAYS_INLINE writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<T> & sink)
 {
     sink.elements.resize(sink.current_offset + slice.size);
     memcpySmallAllowReadWriteOverflow15(&sink.elements[sink.current_offset], slice.data, slice.size * sizeof(T));
@@ -572,7 +572,7 @@ void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<T> & sink)
 }
 
 template <typename T, typename U>
-void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<U> & sink)
+void ALWAYS_INLINE writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<U> & sink)
 {
     sink.elements.resize(sink.current_offset + slice.size);
     for (size_t i = 0; i < slice.size; ++i)
@@ -582,20 +582,20 @@ void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<U> & sink)
     }
 }
 
-inline void writeSlice(const StringSource::Slice & slice, StringSink & sink)
+inline ALWAYS_INLINE void writeSlice(const StringSource::Slice & slice, StringSink & sink)
 {
     sink.elements.resize(sink.current_offset + slice.size);
     memcpySmallAllowReadWriteOverflow15(&sink.elements[sink.current_offset], slice.data, slice.size);
     sink.current_offset += slice.size;
 }
 
-inline void writeSlice(const StringSource::Slice & slice, FixedStringSink & sink)
+inline ALWAYS_INLINE void writeSlice(const StringSource::Slice & slice, FixedStringSink & sink)
 {
     memcpySmallAllowReadWriteOverflow15(&sink.elements[sink.current_offset], slice.data, slice.size);
 }
 
 /// Assuming same types of underlying columns for slice and sink.
-inline void writeSlice(const GenericArraySlice & slice, GenericArraySink & sink)
+inline ALWAYS_INLINE void writeSlice(const GenericArraySlice & slice, GenericArraySink & sink)
 {
     sink.elements.insertRangeFrom(*slice.elements, slice.begin, slice.size);
     sink.current_offset += slice.size;
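The remaining hunks mark the writeSlice overloads ALWAYS_INLINE so that these small per-row copies are inlined into the gather loops that call them. As an assumption shown for context only (the macro's definition is not part of this diff), ALWAYS_INLINE is conventionally defined along these lines:

/// Assumed definition (not taken from this commit): on GCC/Clang the
/// macro expands to the always_inline attribute, forcing the compiler
/// to inline small, hot per-row helpers such as writeSlice.
#if defined(__GNUC__) || defined(__clang__)
    #define ALWAYS_INLINE __attribute__((__always_inline__))
#else
    #define ALWAYS_INLINE
#endif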