#pragma once

#include <limits>
#include <type_traits>

#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeFixedString.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeUUID.h>

#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnArray.h>

#include <Common/typeid_cast.h>
#include <Common/PODArray.h>
#include <common/unaligned.h>

#include <Core/Defines.h>
#include <Core/AccurateComparison.h>
#include <Core/DecimalComparison.h>

#include <Functions/IFunction.h>
#include <Functions/FunctionsLogical.h>
#include <Functions/FunctionHelpers.h>

#include <Interpreters/convertFieldToType.h>
#include <Interpreters/castColumn.h>

#include <IO/ReadBufferFromMemory.h>
#include <IO/ReadHelpers.h>

#if __SSE2__
    #include <emmintrin.h>
#endif


namespace DB
{

namespace ErrorCodes
{
    extern const int TOO_LARGE_STRING_SIZE;
    extern const int ILLEGAL_COLUMN;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
}

/** Comparison functions: ==, !=, <, >, <=, >=.
  * The comparison functions always return 0 or 1 (UInt8).
  *
  * You can compare the following types:
  * - numbers and decimals;
  * - strings and fixed strings;
  * - dates;
  * - datetimes;
  *   within each group, but not across different groups;
  * - tuples (lexicographic comparison).
  *
  * Exception: you can compare a date or datetime with a constant string. Example: EventDate = '2015-01-01'.
  */

template <typename A, typename B, typename Op>
struct NumComparisonImpl
{
    /// If you don't specify NO_INLINE, the compiler will inline this function, but we don't need that, as this function contains a tight loop inside.
    static void NO_INLINE vector_vector(const PaddedPODArray<A> & a, const PaddedPODArray<B> & b, PaddedPODArray<UInt8> & c)
    {
        /** GCC 4.8.2 vectorizes a loop only if it is written in this form.
          * In this case, if you loop through the array index (the code will look simpler),
          * the loop will not be vectorized.
          */

        size_t size = a.size();
        const A * a_pos = a.data();
        const B * b_pos = b.data();
        UInt8 * c_pos = c.data();
        const A * a_end = a_pos + size;

        while (a_pos < a_end)
        {
            *c_pos = Op::apply(*a_pos, *b_pos);
            ++a_pos;
            ++b_pos;
            ++c_pos;
        }
    }

    static void NO_INLINE vector_constant(const PaddedPODArray<A> & a, B b, PaddedPODArray<UInt8> & c)
    {
        size_t size = a.size();
        const A * a_pos = a.data();
        UInt8 * c_pos = c.data();
        const A * a_end = a_pos + size;

        while (a_pos < a_end)
        {
            *c_pos = Op::apply(*a_pos, b);
            ++a_pos;
            ++c_pos;
        }
    }

    static void constant_vector(A a, const PaddedPODArray<B> & b, PaddedPODArray<UInt8> & c)
    {
        NumComparisonImpl<B, A, typename Op::SymmetricOp>::vector_constant(b, a, c);
    }

    static void constant_constant(A a, B b, UInt8 & c)
    {
        c = Op::apply(a, b);
    }
};


inline int memcmp16(const void * a, const void * b)
{
    /// Assuming little endian.

    UInt64 a_hi = __builtin_bswap64(unalignedLoad<UInt64>(a));
    UInt64 b_hi = __builtin_bswap64(unalignedLoad<UInt64>(b));

    if (a_hi < b_hi)
        return -1;
    if (a_hi > b_hi)
        return 1;

    UInt64 a_lo = __builtin_bswap64(unalignedLoad<UInt64>(reinterpret_cast<const char *>(a) + 8));
    UInt64 b_lo = __builtin_bswap64(unalignedLoad<UInt64>(reinterpret_cast<const char *>(b) + 8));

    if (a_lo < b_lo)
        return -1;
    if (a_lo > b_lo)
        return 1;

    return 0;
}


template <typename Op>
struct StringComparisonImpl
{
    static void NO_INLINE string_vector_string_vector(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();

        for (size_t i = 0; i < size; ++i)
        {
            /// Trailing zero byte of the smaller string is included in the comparison.
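            /// Including the terminating zero in the compared prefix means that a string which is
            /// a proper prefix of another one (e.g. "abc" vs "abcd") already compares as smaller
            /// via memcmp ('\0' < 'd'); the explicit length comparison below resolves the remaining cases.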
            size_t a_size;
            size_t b_size;
            int res;

            if (i == 0)
            {
                a_size = a_offsets[0];
                b_size = b_offsets[0];
                res = memcmp(a_data.data(), b_data.data(), std::min(a_size, b_size));
            }
            else
            {
                a_size = a_offsets[i] - a_offsets[i - 1];
                b_size = b_offsets[i] - b_offsets[i - 1];
                res = memcmp(&a_data[a_offsets[i - 1]], &b_data[b_offsets[i - 1]], std::min(a_size, b_size));
            }

            c[i] = Op::apply(res, 0) || (res == 0 && Op::apply(a_size, b_size));
        }
    }

    static void NO_INLINE string_vector_fixed_string_vector(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();

        for (size_t i = 0; i < size; ++i)
        {
            if (i == 0)
            {
                int res = memcmp(a_data.data(), b_data.data(), std::min(a_offsets[0] - 1, b_n));
                c[i] = Op::apply(res, 0) || (res == 0 && Op::apply(a_offsets[0], b_n + 1));
            }
            else
            {
                int res = memcmp(&a_data[a_offsets[i - 1]], &b_data[i * b_n],
                    std::min(a_offsets[i] - a_offsets[i - 1] - 1, b_n));
                c[i] = Op::apply(res, 0) || (res == 0 && Op::apply(a_offsets[i] - a_offsets[i - 1], b_n + 1));
            }
        }
    }

    static void NO_INLINE string_vector_constant(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();
        ColumnString::Offset b_size = b.size() + 1;
        const UInt8 * b_data = reinterpret_cast<const UInt8 *>(b.data());

        for (size_t i = 0; i < size; ++i)
        {
            /// Trailing zero byte of the smaller string is included in the comparison.
            if (i == 0)
            {
                int res = memcmp(a_data.data(), b_data, std::min(a_offsets[0], b_size));
                c[i] = Op::apply(res, 0) || (res == 0 && Op::apply(a_offsets[0], b_size));
            }
            else
            {
                int res = memcmp(&a_data[a_offsets[i - 1]], b_data, std::min(a_offsets[i] - a_offsets[i - 1], b_size));
                c[i] = Op::apply(res, 0) || (res == 0 && Op::apply(a_offsets[i] - a_offsets[i - 1], b_size));
            }
        }
    }

    static void fixed_string_vector_string_vector(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        StringComparisonImpl<typename Op::SymmetricOp>::string_vector_fixed_string_vector(b_data, b_offsets, a_data, a_n, c);
    }

    static void NO_INLINE fixed_string_vector_fixed_string_vector_16(
        const ColumnString::Chars & a_data,
        const ColumnString::Chars & b_data,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_data.size();

        for (size_t i = 0, j = 0; i < size; i += 16, ++j)
            c[j] = Op::apply(memcmp16(&a_data[i], &b_data[i]), 0);
    }

    static void NO_INLINE fixed_string_vector_constant_16(
        const ColumnString::Chars & a_data,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_data.size();

        for (size_t i = 0, j = 0; i < size; i += 16, ++j)
            c[j] = Op::apply(memcmp16(&a_data[i], b.data()), 0);
    }

    static void NO_INLINE fixed_string_vector_fixed_string_vector(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        /** Specialization if both sizes are 16.
          * For more efficient comparison of IPv6 addresses stored in FixedString(16).
          */
        if (a_n == 16 && b_n == 16)
        {
            fixed_string_vector_fixed_string_vector_16(a_data, b_data, c);
        }
        else
        {
            /// Generic implementation, less efficient.
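            /// The expression below implements a lexicographic comparison: a non-zero memcmp result
            /// over the common prefix decides the outcome, and only when the prefixes are identical
            /// does the comparison fall back to the lengths, so a value that is a strict prefix of
            /// the other compares as smaller.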
            size_t size = a_data.size();

            for (size_t i = 0, j = 0; i < size; i += a_n, ++j)
            {
                int res = memcmp(&a_data[i], &b_data[i], std::min(a_n, b_n));
                c[j] = Op::apply(res, 0) || (res == 0 && Op::apply(a_n, b_n));
            }
        }
    }

    static void NO_INLINE fixed_string_vector_constant(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        ColumnString::Offset b_n = b.size();
        if (a_n == 16 && b_n == 16)
        {
            fixed_string_vector_constant_16(a_data, b, c);
        }
        else
        {
            size_t size = a_data.size();
            const UInt8 * b_data = reinterpret_cast<const UInt8 *>(b.data());

            for (size_t i = 0, j = 0; i < size; i += a_n, ++j)
            {
                int res = memcmp(&a_data[i], b_data, std::min(a_n, b_n));
                c[j] = Op::apply(res, 0) || (res == 0 && Op::apply(a_n, b_n));
            }
        }
    }

    static void constant_string_vector(
        const std::string & a,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        StringComparisonImpl<typename Op::SymmetricOp>::string_vector_constant(b_data, b_offsets, a, c);
    }

    static void constant_fixed_string_vector(
        const std::string & a,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        StringComparisonImpl<typename Op::SymmetricOp>::fixed_string_vector_constant(b_data, b_n, a, c);
    }

    static void constant_constant(
        const std::string & a,
        const std::string & b,
        UInt8 & c)
    {
        size_t a_n = a.size();
        size_t b_n = b.size();

        int res = memcmp(a.data(), b.data(), std::min(a_n, b_n));
        c = Op::apply(res, 0) || (res == 0 && Op::apply(a_n, b_n));
    }
};


/// Comparisons for equality/inequality are implemented slightly more efficiently.
template <bool positive>
struct StringEqualsImpl
{
    static void NO_INLINE string_vector_string_vector(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();
        for (size_t i = 0; i < size; ++i)
            c[i] = positive == ((i == 0)
                ? (a_offsets[0] == b_offsets[0] && !memcmp(a_data.data(), b_data.data(), a_offsets[0] - 1))
                : (a_offsets[i] - a_offsets[i - 1] == b_offsets[i] - b_offsets[i - 1]
                    && !memcmp(&a_data[a_offsets[i - 1]], &b_data[b_offsets[i - 1]], a_offsets[i] - a_offsets[i - 1] - 1)));
    }

    static void NO_INLINE string_vector_fixed_string_vector(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();
        for (size_t i = 0; i < size; ++i)
            c[i] = positive == ((i == 0)
                ? (a_offsets[0] == b_n + 1 && !memcmp(a_data.data(), b_data.data(), b_n))
                : (a_offsets[i] - a_offsets[i - 1] == b_n + 1
                    && !memcmp(&a_data[a_offsets[i - 1]], &b_data[b_n * i], b_n)));
    }

    static void NO_INLINE string_vector_constant(
        const ColumnString::Chars & a_data, const ColumnString::Offsets & a_offsets,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = a_offsets.size();
        ColumnString::Offset b_n = b.size();
        const UInt8 * b_data = reinterpret_cast<const UInt8 *>(b.data());
        for (size_t i = 0; i < size; ++i)
            c[i] = positive == ((i == 0) ?
                (a_offsets[0] == b_n + 1 && !memcmp(a_data.data(), b_data, b_n)) :
                (a_offsets[i] - a_offsets[i - 1] == b_n + 1 && !memcmp(&a_data[a_offsets[i - 1]], b_data, b_n)));
    }

#if __SSE2__
    static void NO_INLINE fixed_string_vector_fixed_string_vector_16(
        const ColumnString::Chars & a_data,
        const ColumnString::Chars & b_data,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = c.size();

        const __m128i * a_pos = reinterpret_cast<const __m128i *>(a_data.data());
        const __m128i * b_pos = reinterpret_cast<const __m128i *>(b_data.data());
        UInt8 * c_pos = c.data();
        UInt8 * c_end = c_pos + size;

        while (c_pos < c_end)
        {
            *c_pos = positive == (0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(a_pos),
                _mm_loadu_si128(b_pos))));

            ++a_pos;
            ++b_pos;
            ++c_pos;
        }
    }

    static void NO_INLINE fixed_string_vector_constant_16(
        const ColumnString::Chars & a_data,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        size_t size = c.size();

        const __m128i * a_pos = reinterpret_cast<const __m128i *>(a_data.data());
        const __m128i b_value = _mm_loadu_si128(reinterpret_cast<const __m128i *>(b.data()));
        UInt8 * c_pos = c.data();
        UInt8 * c_end = c_pos + size;

        while (c_pos < c_end)
        {
            *c_pos = positive == (0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(a_pos),
                b_value)));

            ++a_pos;
            ++c_pos;
        }
    }
#endif

    static void NO_INLINE fixed_string_vector_fixed_string_vector(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        /** Specialization if both sizes are 16.
          * For more efficient comparison of IPv6 addresses stored in FixedString(16).
          */
#if __SSE2__
        if (a_n == 16 && b_n == 16)
        {
            fixed_string_vector_fixed_string_vector_16(a_data, b_data, c);
        }
        else
#endif
        {
            size_t size = a_data.size();
            for (size_t i = 0, j = 0; i < size; i += a_n, ++j)
                c[j] = positive == (a_n == b_n && !memcmp(&a_data[i], &b_data[i], a_n));
        }
    }

    static void NO_INLINE fixed_string_vector_constant(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const std::string & b,
        PaddedPODArray<UInt8> & c)
    {
        ColumnString::Offset b_n = b.size();
#if __SSE2__
        if (a_n == 16 && b_n == 16)
        {
            fixed_string_vector_constant_16(a_data, b, c);
        }
        else
#endif
        {
            size_t size = a_data.size();
            const UInt8 * b_data = reinterpret_cast<const UInt8 *>(b.data());
            for (size_t i = 0, j = 0; i < size; i += a_n, ++j)
                c[j] = positive == (a_n == b_n && !memcmp(&a_data[i], b_data, a_n));
        }
    }

    static void fixed_string_vector_string_vector(
        const ColumnString::Chars & a_data, ColumnString::Offset a_n,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        string_vector_fixed_string_vector(b_data, b_offsets, a_data, a_n, c);
    }

    static void constant_string_vector(
        const std::string & a,
        const ColumnString::Chars & b_data, const ColumnString::Offsets & b_offsets,
        PaddedPODArray<UInt8> & c)
    {
        string_vector_constant(b_data, b_offsets, a, c);
    }

    static void constant_fixed_string_vector(
        const std::string & a,
        const ColumnString::Chars & b_data, ColumnString::Offset b_n,
        PaddedPODArray<UInt8> & c)
    {
        fixed_string_vector_constant(b_data, b_n, a, c);
    }

    static void constant_constant(
        const std::string & a,
        const std::string & b,
        UInt8 & c)
    {
        c = positive == (a == b);
    }
};


template <typename A, typename B>
struct StringComparisonImpl<EqualsOp<A, B>> : StringEqualsImpl<true> {};

template <typename A, typename B>
struct StringComparisonImpl<NotEqualsOp<A, B>> : StringEqualsImpl<false> {};


/// Generic version, implemented for columns of the same type.
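/// This path dispatches through the virtual IColumn::compareAt() call for every row, so it works
/// for any pair of columns of the same type, at the cost of being slower than the specialized
/// numeric and string implementations above.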
template <typename Op>
struct GenericComparisonImpl
{
    static void NO_INLINE vector_vector(const IColumn & a, const IColumn & b, PaddedPODArray<UInt8> & c)
    {
        for (size_t i = 0, size = a.size(); i < size; ++i)
            c[i] = Op::apply(a.compareAt(i, i, b, 1), 0);
    }

    static void NO_INLINE vector_constant(const IColumn & a, const IColumn & b, PaddedPODArray<UInt8> & c)
    {
        auto b_materialized = b.cloneResized(1)->convertToFullColumnIfConst();
        for (size_t i = 0, size = a.size(); i < size; ++i)
            c[i] = Op::apply(a.compareAt(i, 0, *b_materialized, 1), 0);
    }

    static void constant_vector(const IColumn & a, const IColumn & b, PaddedPODArray<UInt8> & c)
    {
        GenericComparisonImpl<typename Op::SymmetricOp>::vector_constant(b, a, c);
    }

    static void constant_constant(const IColumn & a, const IColumn & b, UInt8 & c)
    {
        c = Op::apply(a.compareAt(0, 0, b, 1), 0);
    }
};


#if USE_EMBEDDED_COMPILER

template