Dynamic dispatch for intDiv

This commit is contained in:
Alexey Milovidov 2021-04-12 23:02:42 +03:00
parent ff0d3860d4
commit d5580a8e71

View File

@ -1,12 +1,29 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionBinaryArithmetic.h>
#include <Functions/TargetSpecific.h>
#if defined(__SSE2__)
# define LIBDIVIDE_SSE2 1
#if defined(__x86_64__)
#define LIBDIVIDE_SSE2 1
#define LIBDIVIDE_AVX2 1
#if defined(__clang__)
#pragma clang attribute push(__attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2"))), apply_to=function)
#else
#pragma GCC push_options
#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native")
#endif
#endif
#include <libdivide.h>
#if defined(__x86_64__)
#if defined(__clang__)
#pragma clang attribute pop
#else
#pragma GCC pop_options
#endif
#endif
namespace DB
{
@ -20,6 +37,83 @@ namespace
/// Optimizations for integer division by a constant.
#if defined(__x86_64__)
DECLARE_DEFAULT_CODE (
/// Divide an array of integers by a single constant divisor (baseline x86-64 build, SSE2).
/// Full 16-byte registers are divided with libdivide's SIMD operator/,
/// the remaining tail elements are divided with scalar libdivide division.
/// NOTE(review): the SIMD store writes ResultType through __m128i — assumes
/// sizeof(ResultType) == sizeof(A) for this instantiation; confirm at call sites.
template <typename A, typename B, typename ResultType>
void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
{
    libdivide::divider<A> divider(b);

    static constexpr size_t values_per_simd_register = 16 / sizeof(A);
    const size_t simd_size = size - size % values_per_simd_register;

    size_t i = 0;
    for (; i < simd_size; i += values_per_simd_register)
    {
        __m128i values = _mm_loadu_si128(reinterpret_cast<const __m128i *>(a_pos + i));
        _mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos + i), values / divider);
    }

    /// Scalar tail: fewer than values_per_simd_register elements remain.
    for (; i < size; ++i)
        c_pos[i] = a_pos[i] / divider;
}
)
DECLARE_AVX2_SPECIFIC_CODE (
/// AVX2 variant of divideImpl, selected at runtime via dynamic dispatch.
/// Same scheme as the default code path but over 32-byte registers:
/// whole __m256i registers go through libdivide's SIMD operator/,
/// the tail is handled with scalar libdivide division.
/// NOTE(review): as in the SSE2 path, the vector store assumes
/// sizeof(ResultType) == sizeof(A); confirm at call sites.
template <typename A, typename B, typename ResultType>
void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
{
    libdivide::divider<A> divider(b);

    static constexpr size_t values_per_simd_register = 32 / sizeof(A);
    const size_t simd_size = size - size % values_per_simd_register;

    size_t i = 0;
    for (; i < simd_size; i += values_per_simd_register)
    {
        __m256i values = _mm256_loadu_si256(reinterpret_cast<const __m256i *>(a_pos + i));
        _mm256_storeu_si256(reinterpret_cast<__m256i *>(c_pos + i), values / divider);
    }

    /// Scalar tail: fewer than values_per_simd_register elements remain.
    for (; i < size; ++i)
        c_pos[i] = a_pos[i] / divider;
}
)
#else
/// Portable fallback for non-x86_64 targets: plain scalar loop,
/// still using libdivide to replace the hardware division with
/// cheaper multiply/shift sequences for the constant divisor.
template <typename A, typename B, typename ResultType>
void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
{
    libdivide::divider<A> divider(b);
    for (size_t i = 0; i < size; ++i)
        c_pos[i] = a_pos[i] / divider;
}
#endif
template <typename A, typename B>
struct DivideIntegralByConstantImpl
: BinaryOperation<A, B, DivideIntegralImpl<A, B>>
@ -70,29 +164,19 @@ struct DivideIntegralByConstantImpl
if (unlikely(static_cast<A>(b) == 0))
throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);
libdivide::divider<A> divider(b);
const A * a_end = a_pos + size;
#if defined(__SSE2__)
static constexpr size_t values_per_sse_register = 16 / sizeof(A);
const A * a_end_sse = a_pos + size / values_per_sse_register * values_per_sse_register;
while (a_pos < a_end_sse)
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
_mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos),
_mm_loadu_si128(reinterpret_cast<const __m128i *>(a_pos)) / divider);
a_pos += values_per_sse_register;
c_pos += values_per_sse_register;
TargetSpecific::AVX2::divideImpl(a_pos, b, c_pos, size);
}
else
#endif
while (a_pos < a_end)
{
*c_pos = *a_pos / divider;
++a_pos;
++c_pos;
#if __x86_64__
TargetSpecific::Default::divideImpl(a_pos, b, c_pos, size);
#else
divideImpl(a_pos, b, c_pos, size);
#endif
}
}
};