Merge pull request #5742 from 4ertus2/t64

T64 full bit-transpose variant
Commit 7ea33203d7 by alexey-milovidov, 2019-07-03 01:56:34 +03:00, committed by GitHub
4 changed files with 354 additions and 139 deletions


@@ -91,9 +91,10 @@ TypeIndex typeIdx(const DataTypePtr & data_type)
return TypeIndex::Nothing;
}
void transpose64x8(const UInt64 * src, UInt64 * dst, UInt32)
void transpose64x8(UInt64 * src_dst)
{
auto * src8 = reinterpret_cast<const UInt8 *>(src);
auto * src8 = reinterpret_cast<const UInt8 *>(src_dst);
UInt64 dst[8] = {};
for (UInt32 i = 0; i < 64; ++i)
{
@@ -107,142 +108,171 @@ void transpose64x8(const UInt64 * src, UInt64 * dst, UInt32)
dst[6] |= ((value >> 6) & 0x1) << i;
dst[7] |= ((value >> 7) & 0x1) << i;
}
memcpy(src_dst, dst, 8 * sizeof(UInt64));
}
void revTranspose64x8(const UInt64 * src, UInt64 * dst, UInt32)
void reverseTranspose64x8(UInt64 * src_dst)
{
auto * dst8 = reinterpret_cast<UInt8 *>(dst);
UInt8 dst8[64];
for (UInt32 i = 0; i < 64; ++i)
{
dst8[i] = ((src[0] >> i) & 0x1)
| (((src[1] >> i) & 0x1) << 1)
| (((src[2] >> i) & 0x1) << 2)
| (((src[3] >> i) & 0x1) << 3)
| (((src[4] >> i) & 0x1) << 4)
| (((src[5] >> i) & 0x1) << 5)
| (((src[6] >> i) & 0x1) << 6)
| (((src[7] >> i) & 0x1) << 7);
dst8[i] = ((src_dst[0] >> i) & 0x1)
| (((src_dst[1] >> i) & 0x1) << 1)
| (((src_dst[2] >> i) & 0x1) << 2)
| (((src_dst[3] >> i) & 0x1) << 3)
| (((src_dst[4] >> i) & 0x1) << 4)
| (((src_dst[5] >> i) & 0x1) << 5)
| (((src_dst[6] >> i) & 0x1) << 6)
| (((src_dst[7] >> i) & 0x1) << 7);
}
memcpy(src_dst, dst8, 8 * sizeof(UInt64));
}
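
Taken together, transpose64x8 and reverseTranspose64x8 are exact inverses on a 64x8-byte block: the forward pass sends bit b of byte i to bit i of word b, and the reverse pass sends it back. A minimal self-contained sketch of that round trip (the unrolled bit lanes above are compacted into inner loops; the stand-ins are otherwise equivalent):

#include <cassert>
#include <cstdint>
#include <cstring>

using UInt8 = uint8_t;
using UInt64 = uint64_t;

// Same mapping as transpose64x8 above: bit b of byte i -> bit i of word b.
void transpose64x8(UInt64 * src_dst)
{
    const auto * src8 = reinterpret_cast<const UInt8 *>(src_dst);
    UInt64 dst[8] = {};
    for (uint32_t i = 0; i < 64; ++i)
        for (uint32_t b = 0; b < 8; ++b)
            dst[b] |= UInt64((src8[i] >> b) & 0x1) << i;
    memcpy(src_dst, dst, sizeof(dst));
}

// Inverse mapping: bit i of word b -> bit b of byte i.
void reverseTranspose64x8(UInt64 * src_dst)
{
    UInt8 dst8[64];
    for (uint32_t i = 0; i < 64; ++i)
    {
        dst8[i] = 0;
        for (uint32_t b = 0; b < 8; ++b)
            dst8[i] |= UInt8(((src_dst[b] >> i) & 0x1) << b);
    }
    memcpy(src_dst, dst8, sizeof(dst8));
}

int main()
{
    UInt64 block[8];
    for (uint32_t i = 0; i < 8; ++i)
        block[i] = 0x0123456789ABCDEFull * (i + 1);

    UInt64 copy[8];
    memcpy(copy, block, sizeof(block));

    transpose64x8(block);
    reverseTranspose64x8(block);
    assert(memcmp(block, copy, sizeof(block)) == 0); // lossless round trip
}
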
template <typename _T>
void transposeBytes(_T value, UInt64 * mx, UInt32 col)
template <typename T>
void transposeBytes(T value, UInt64 * matrix, UInt32 col)
{
UInt8 * mx8 = reinterpret_cast<UInt8 *>(mx);
UInt8 * matrix8 = reinterpret_cast<UInt8 *>(matrix);
const UInt8 * value8 = reinterpret_cast<const UInt8 *>(&value);
if constexpr (sizeof(_T) > 4)
if constexpr (sizeof(T) > 4)
{
mx8[64 * 7 + col] = value8[7];
mx8[64 * 6 + col] = value8[6];
mx8[64 * 5 + col] = value8[5];
mx8[64 * 4 + col] = value8[4];
matrix8[64 * 7 + col] = value8[7];
matrix8[64 * 6 + col] = value8[6];
matrix8[64 * 5 + col] = value8[5];
matrix8[64 * 4 + col] = value8[4];
}
if constexpr (sizeof(_T) > 2)
if constexpr (sizeof(T) > 2)
{
mx8[64 * 3 + col] = value8[3];
mx8[64 * 2 + col] = value8[2];
matrix8[64 * 3 + col] = value8[3];
matrix8[64 * 2 + col] = value8[2];
}
if constexpr (sizeof(_T) > 1)
mx8[64 * 1 + col] = value8[1];
if constexpr (sizeof(T) > 1)
matrix8[64 * 1 + col] = value8[1];
mx8[64 * 0 + col] = value8[0];
matrix8[64 * 0 + col] = value8[0];
}
template <typename _T>
void revTransposeBytes(const UInt64 * mx, UInt32 col, _T & value)
template <typename T>
void reverseTransposeBytes(const UInt64 * matrix, UInt32 col, T & value)
{
auto * mx8 = reinterpret_cast<const UInt8 *>(mx);
auto * matrix8 = reinterpret_cast<const UInt8 *>(matrix);
if constexpr (sizeof(_T) > 4)
if constexpr (sizeof(T) > 4)
{
value |= UInt64(mx8[64 * 7 + col]) << (8 * 7);
value |= UInt64(mx8[64 * 6 + col]) << (8 * 6);
value |= UInt64(mx8[64 * 5 + col]) << (8 * 5);
value |= UInt64(mx8[64 * 4 + col]) << (8 * 4);
value |= UInt64(matrix8[64 * 7 + col]) << (8 * 7);
value |= UInt64(matrix8[64 * 6 + col]) << (8 * 6);
value |= UInt64(matrix8[64 * 5 + col]) << (8 * 5);
value |= UInt64(matrix8[64 * 4 + col]) << (8 * 4);
}
if constexpr (sizeof(_T) > 2)
if constexpr (sizeof(T) > 2)
{
value |= UInt32(mx8[64 * 3 + col]) << (8 * 3);
value |= UInt32(mx8[64 * 2 + col]) << (8 * 2);
value |= UInt32(matrix8[64 * 3 + col]) << (8 * 3);
value |= UInt32(matrix8[64 * 2 + col]) << (8 * 2);
}
if constexpr (sizeof(_T) > 1)
value |= UInt32(mx8[64 * 1 + col]) << (8 * 1);
if constexpr (sizeof(T) > 1)
value |= UInt32(matrix8[64 * 1 + col]) << (8 * 1);
value |= UInt32(mx8[col]);
value |= UInt32(matrix8[col]);
}
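
transposeBytes and reverseTransposeBytes handle the byte-level layout: byte k of the value in column col lives at matrix8[64 * k + col], one 64-byte matrix row apart per byte of T. A small sketch of that layout for a hypothetical UInt16 column (illustrative values, not the codec's API):

#include <cassert>
#include <cstdint>

int main()
{
    uint64_t matrix[64] = {};
    auto * matrix8 = reinterpret_cast<uint8_t *>(matrix);

    uint16_t value = 0xBEEF;
    uint32_t col = 3;

    // transposeBytes<UInt16> scatters the two bytes one matrix row apart:
    matrix8[64 * 0 + col] = value & 0xFF;        // low byte -> row 0
    matrix8[64 * 1 + col] = (value >> 8) & 0xFF; // high byte -> row 1

    // reverseTransposeBytes gathers them back into the value:
    uint16_t restored = matrix8[col] | (uint16_t(matrix8[64 * 1 + col]) << 8);
    assert(restored == 0xBEEF);
}
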
template <typename T>
void load(const char * src, T * buf, UInt32 tail = 64)
{
memcpy(buf, src, tail * sizeof(T));
}
template <typename T>
void store(const T * buf, char * dst, UInt32 tail = 64)
{
memcpy(dst, buf, tail * sizeof(T));
}
template <typename T>
void clear(T * buf)
{
for (UInt32 i = 0; i < 64; ++i)
buf[i] = 0;
}
/// UIntX[64] -> UInt64[N] transposed matrix, N <= X
template <typename _T>
void transpose(const char * src, char * dst, UInt32 num_bits, UInt32 tail = 64)
template <typename T, bool full = false>
void transpose(const T * src, char * dst, UInt32 num_bits, UInt32 tail = 64)
{
UInt32 full_bytes = num_bits / 8;
UInt32 part_bits = num_bits % 8;
UInt64 mx[64] = {};
const char * ptr = src;
for (UInt32 col = 0; col < tail; ++col, ptr += sizeof(_T))
UInt64 matrix[64] = {};
for (UInt32 col = 0; col < tail; ++col)
transposeBytes(src[col], matrix, col);
if constexpr (full)
{
_T value = unalignedLoad<_T>(ptr);
transposeBytes(value, mx, col);
UInt64 * matrix_line = matrix;
for (UInt32 byte = 0; byte < full_bytes; ++byte, matrix_line += 8)
transpose64x8(matrix_line);
}
UInt32 full_size = sizeof(UInt64) * (num_bits - part_bits);
memcpy(dst, mx, full_size);
memcpy(dst, matrix, full_size);
dst += full_size;
/// transpose only the partially filled last byte
if (part_bits)
{
UInt64 * partial = &mx[full_bytes * 8];
UInt64 res[8] = {};
transpose64x8(partial, res, part_bits);
memcpy(dst, res, part_bits * sizeof(UInt64));
UInt64 * matrix_line = &matrix[full_bytes * 8];
transpose64x8(matrix_line);
memcpy(dst, matrix_line, part_bits * sizeof(UInt64));
}
}
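
For both variants transpose() emits num_bits UInt64 words per block of 64 values: 8 words for each fully occupied byte row plus part_bits cropped words for the partial byte (the Bit variant additionally bit-transposes the full rows; the Byte variant copies them as-is). A sketch of the size arithmetic with illustrative numbers:

#include <cassert>
#include <cstdint>

int main()
{
    uint32_t num_bits = 11;             // valuable bits per value (illustrative)
    uint32_t full_bytes = num_bits / 8; // 1 fully occupied byte row
    uint32_t part_bits = num_bits % 8;  // 3 bits in the partial byte

    // full_size covers the full rows, then part_bits cropped words follow:
    uint32_t full_words = num_bits - part_bits;       // 8 words (full_bytes * 8)
    uint32_t words_out = full_words + part_bits;      // 11 words total
    assert(full_bytes == 1 && words_out == num_bits); // 64 values -> 11 UInt64 words
}
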
/// UInt64[N] transposed matrix -> UIntX[64]
template <typename _T, typename _MinMaxT = std::conditional_t<std::is_signed_v<_T>, Int64, UInt64>>
void revTranspose(const char * src, char * dst, UInt32 num_bits, _MinMaxT min, _MinMaxT max [[maybe_unused]], UInt32 tail = 64)
template <typename T, bool full = false>
void reverseTranspose(const char * src, T * buf, UInt32 num_bits, UInt32 tail = 64)
{
UInt64 mx[64] = {};
memcpy(mx, src, num_bits * sizeof(UInt64));
UInt64 matrix[64] = {};
memcpy(matrix, src, num_bits * sizeof(UInt64));
UInt32 full_bytes = num_bits / 8;
UInt32 part_bits = num_bits % 8;
UInt64 * partial = &mx[full_bytes * 8];
if constexpr (full)
{
UInt64 * matrix_line = matrix;
for (UInt32 byte = 0; byte < full_bytes; ++byte, matrix_line += 8)
reverseTranspose64x8(matrix_line);
}
if (part_bits)
{
UInt64 res[8] = {};
revTranspose64x8(partial, res, part_bits);
memcpy(partial, res, 8 * sizeof(UInt64));
UInt64 * matrix_line = &matrix[full_bytes * 8];
reverseTranspose64x8(matrix_line);
}
_T upper_min = 0;
if (num_bits < 64)
upper_min = UInt64(min) >> num_bits << num_bits;
clear(buf);
for (UInt32 col = 0; col < tail; ++col)
reverseTransposeBytes(matrix, col, buf[col]);
}
_T buf[64] = {};
if constexpr (std::is_signed_v<_T>)
template <typename T, typename MinMaxT = std::conditional_t<std::is_signed_v<T>, Int64, UInt64>>
void restoreUpperBits(T * buf, T upper_min, T upper_max [[maybe_unused]], T sign_bit [[maybe_unused]], UInt32 tail = 64)
{
if constexpr (std::is_signed_v<T>)
{
/// Restore some data as negatives and others as positives
if (min < 0 && max >= 0 && num_bits < 64)
if (sign_bit)
{
_T sign_bit = 1ull << (num_bits - 1);
_T upper_max = UInt64(max) >> num_bits << num_bits;
for (UInt32 col = 0; col < tail; ++col)
{
_T & value = buf[col];
revTransposeBytes(mx, col, value);
T & value = buf[col];
if (value & sign_bit)
value |= upper_min;
@@ -250,20 +280,12 @@ void revTranspose(const char * src, char * dst, UInt32 num_bits, _MinMaxT min, _
value |= upper_max;
}
memcpy(dst, buf, tail * sizeof(_T));
return;
}
}
for (UInt32 col = 0; col < tail; ++col)
{
_T & value = buf[col];
revTransposeBytes(mx, col, value);
value |= upper_min;
}
memcpy(dst, buf, tail * sizeof(_T));
buf[col] |= upper_min;
}
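
restoreUpperBits reattaches the bits that were cropped away: a stored value with its sign bit set gets min's upper bits, any other value gets max's upper bits (which are all zero whenever max >= 0). A worked sketch with illustrative numbers, assuming T = Int8, num_bits = 4, min = -3, max = 5:

#include <cassert>
#include <cstdint>

int main()
{
    using T = int8_t;
    const uint32_t num_bits = 4;
    T min = -3, max = 5;

    // Same derivation as in decompressData below:
    T sign_bit = T(1u << (num_bits - 1));                  // 0b00001000
    T upper_min = T(uint8_t(min) >> num_bits << num_bits); // 0b11110000 = -16
    T upper_max = T(uint8_t(max) >> num_bits << num_bits); // 0b00000000 = 0

    // Low nibbles as they come out of the transposed matrix:
    T stored_neg = T(-3) & T(0x0F); // 0b1101, sign bit set
    T stored_pos = T(5) & T(0x0F);  // 0b0101, sign bit clear

    T restored_neg = (stored_neg & sign_bit) ? T(stored_neg | upper_min) : T(stored_neg | upper_max);
    T restored_pos = (stored_pos & sign_bit) ? T(stored_pos | upper_min) : T(stored_pos | upper_max);
    assert(restored_neg == -3 && restored_pos == 5);
}
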
@@ -289,16 +311,16 @@ UInt32 getValuableBitsNumber(Int64 min, Int64 max)
}
template <typename _T>
void findMinMax(const char * src, UInt32 src_size, _T & min, _T & max)
template <typename T>
void findMinMax(const char * src, UInt32 src_size, T & min, T & max)
{
min = unalignedLoad<_T>(src);
max = unalignedLoad<_T>(src);
min = unalignedLoad<T>(src);
max = unalignedLoad<T>(src);
const char * end = src + src_size;
for (; src < end; src += sizeof(_T))
for (; src < end; src += sizeof(T))
{
auto current = unalignedLoad<_T>(src);
auto current = unalignedLoad<T>(src);
if (current < min)
min = current;
if (current > max)
@@ -306,24 +328,27 @@ void findMinMax(const char * src, UInt32 src_size, _T & min, _T & max)
}
}
template <typename _T>
using Variant = CompressionCodecT64::Variant;
template <typename T, bool full>
UInt32 compressData(const char * src, UInt32 bytes_size, char * dst)
{
using MinMaxType = std::conditional_t<std::is_signed_v<_T>, Int64, UInt64>;
using MinMaxType = std::conditional_t<std::is_signed_v<T>, Int64, UInt64>;
const UInt32 mx_size = 64;
const UInt32 header_size = 2 * sizeof(UInt64);
static constexpr const UInt32 matrix_size = 64;
static constexpr const UInt32 header_size = 2 * sizeof(UInt64);
if (bytes_size % sizeof(_T))
throw Exception("Cannot compress, data size " + toString(bytes_size) + " is not multiplier of " + toString(sizeof(_T)),
if (bytes_size % sizeof(T))
throw Exception("Cannot compress, data size " + toString(bytes_size) + " is not multiplier of " + toString(sizeof(T)),
ErrorCodes::CANNOT_COMPRESS);
UInt32 src_size = bytes_size / sizeof(_T);
UInt32 num_full = src_size / mx_size;
UInt32 tail = src_size % mx_size;
UInt32 src_size = bytes_size / sizeof(T);
UInt32 num_full = src_size / matrix_size;
UInt32 tail = src_size % matrix_size;
_T min, max;
findMinMax<_T>(src, bytes_size, min, max);
T min, max;
findMinMax<T>(src, bytes_size, min, max);
MinMaxType min64 = min;
MinMaxType max64 = max;
@@ -338,11 +363,13 @@ UInt32 compressData(const char * src, UInt32 bytes_size, char * dst)
if (!num_bits)
return header_size;
UInt32 src_shift = sizeof(_T) * mx_size;
T buf[matrix_size];
UInt32 src_shift = sizeof(T) * matrix_size;
UInt32 dst_shift = sizeof(UInt64) * num_bits;
for (UInt32 i = 0; i < num_full; ++i)
{
transpose<_T>(src, dst, num_bits);
load<T>(src, buf, matrix_size);
transpose<T, full>(buf, dst, num_bits);
src += src_shift;
dst += dst_shift;
}
@@ -351,29 +378,31 @@ UInt32 compressData(const char * src, UInt32 bytes_size, char * dst)
if (tail)
{
transpose<_T>(src, dst, num_bits, tail);
load<T>(src, buf, tail);
transpose<T, full>(buf, dst, num_bits, tail);
dst_bytes += dst_shift;
}
return header_size + dst_bytes;
}
template <typename _T>
template <typename T, bool full>
void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 uncompressed_size)
{
using MinMaxType = std::conditional_t<std::is_signed_v<_T>, Int64, UInt64>;
using MinMaxType = std::conditional_t<std::is_signed_v<T>, Int64, UInt64>;
const UInt32 header_size = 2 * sizeof(UInt64);
static constexpr const UInt32 matrix_size = 64;
static constexpr const UInt32 header_size = 2 * sizeof(UInt64);
if (bytes_size < header_size)
throw Exception("Cannot decompress, data size " + toString(bytes_size) + " is less then T64 header",
ErrorCodes::CANNOT_DECOMPRESS);
if (uncompressed_size % sizeof(_T))
if (uncompressed_size % sizeof(T))
throw Exception("Cannot decompress, unexpected uncompressed size " + toString(uncompressed_size),
ErrorCodes::CANNOT_DECOMPRESS);
UInt64 num_elements = uncompressed_size / sizeof(_T);
UInt64 num_elements = uncompressed_size / sizeof(T);
MinMaxType min;
MinMaxType max;
@@ -388,60 +417,101 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco
UInt32 num_bits = getValuableBitsNumber(min, max);
if (!num_bits)
{
_T min_value = min;
for (UInt32 i = 0; i < num_elements; ++i, dst += sizeof(_T))
unalignedStore<_T>(dst, min_value);
T min_value = min;
for (UInt32 i = 0; i < num_elements; ++i, dst += sizeof(T))
unalignedStore<T>(dst, min_value);
return;
}
UInt32 src_shift = sizeof(UInt64) * num_bits;
UInt32 dst_shift = sizeof(_T) * 64;
UInt32 dst_shift = sizeof(T) * matrix_size;
if (!bytes_size || bytes_size % src_shift)
throw Exception("Cannot decompress, data size " + toString(bytes_size) + " is not multiplier of " + toString(src_shift),
ErrorCodes::CANNOT_DECOMPRESS);
UInt32 num_full = bytes_size / src_shift;
UInt32 tail = num_elements % 64;
UInt32 tail = num_elements % matrix_size;
if (tail)
--num_full;
T upper_min = 0;
T upper_max [[maybe_unused]] = 0;
T sign_bit [[maybe_unused]] = 0;
if (num_bits < 64)
upper_min = UInt64(min) >> num_bits << num_bits;
if constexpr (std::is_signed_v<T>)
{
if (min < 0 && max >= 0 && num_bits < 64)
{
sign_bit = 1ull << (num_bits - 1);
upper_max = UInt64(max) >> num_bits << num_bits;
}
}
T buf[matrix_size];
for (UInt32 i = 0; i < num_full; ++i)
{
revTranspose<_T>(src, dst, num_bits, min, max);
reverseTranspose<T, full>(src, buf, num_bits);
restoreUpperBits(buf, upper_min, upper_max, sign_bit);
store<T>(buf, dst, matrix_size);
src += src_shift;
dst += dst_shift;
}
if (tail)
revTranspose<_T>(src, dst, num_bits, min, max, tail);
{
reverseTranspose<T, full>(src, buf, num_bits, tail);
restoreUpperBits(buf, upper_min, upper_max, sign_bit, tail);
store<T>(buf, dst, tail);
}
}
template <typename T>
UInt32 compressData(const char * src, UInt32 src_size, char * dst, Variant variant)
{
if (variant == Variant::Bit)
return compressData<T, true>(src, src_size, dst);
return compressData<T, false>(src, src_size, dst);
}
template <typename T>
void decompressData(const char * src, UInt32 src_size, char * dst, UInt32 uncompressed_size, Variant variant)
{
if (variant == Variant::Bit)
decompressData<T, true>(src, src_size, dst, uncompressed_size);
else
decompressData<T, false>(src, src_size, dst, uncompressed_size);
}
}
UInt32 CompressionCodecT64::doCompressData(const char * src, UInt32 src_size, char * dst) const
{
memcpy(dst, &type_idx, 1);
UInt8 cookie = static_cast<UInt8>(type_idx) | (static_cast<UInt8>(variant) << 7);
memcpy(dst, &cookie, 1);
dst += 1;
switch (baseType(type_idx))
{
case TypeIndex::Int8:
return 1 + compressData<Int8>(src, src_size, dst);
return 1 + compressData<Int8>(src, src_size, dst, variant);
case TypeIndex::Int16:
return 1 + compressData<Int16>(src, src_size, dst);
return 1 + compressData<Int16>(src, src_size, dst, variant);
case TypeIndex::Int32:
return 1 + compressData<Int32>(src, src_size, dst);
return 1 + compressData<Int32>(src, src_size, dst, variant);
case TypeIndex::Int64:
return 1 + compressData<Int64>(src, src_size, dst);
return 1 + compressData<Int64>(src, src_size, dst, variant);
case TypeIndex::UInt8:
return 1 + compressData<UInt8>(src, src_size, dst);
return 1 + compressData<UInt8>(src, src_size, dst, variant);
case TypeIndex::UInt16:
return 1 + compressData<UInt16>(src, src_size, dst);
return 1 + compressData<UInt16>(src, src_size, dst, variant);
case TypeIndex::UInt32:
return 1 + compressData<UInt32>(src, src_size, dst);
return 1 + compressData<UInt32>(src, src_size, dst, variant);
case TypeIndex::UInt64:
return 1 + compressData<UInt64>(src, src_size, dst);
return 1 + compressData<UInt64>(src, src_size, dst, variant);
default:
break;
}
@@ -454,32 +524,31 @@ void CompressionCodecT64::doDecompressData(const char * src, UInt32 src_size, ch
if (!src_size)
throw Exception("Connot decompress with T64", ErrorCodes::CANNOT_DECOMPRESS);
UInt8 saved_type_id = unalignedLoad<UInt8>(src);
UInt8 cookie = unalignedLoad<UInt8>(src);
src += 1;
src_size -= 1;
TypeIndex actual_type_id = type_idx;
if (actual_type_id == TypeIndex::Nothing)
actual_type_id = static_cast<TypeIndex>(saved_type_id);
auto saved_variant = static_cast<Variant>(cookie >> 7);
auto saved_type_id = static_cast<TypeIndex>(cookie & 0x7F);
switch (baseType(actual_type_id))
switch (baseType(saved_type_id))
{
case TypeIndex::Int8:
return decompressData<Int8>(src, src_size, dst, uncompressed_size);
return decompressData<Int8>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::Int16:
return decompressData<Int16>(src, src_size, dst, uncompressed_size);
return decompressData<Int16>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::Int32:
return decompressData<Int32>(src, src_size, dst, uncompressed_size);
return decompressData<Int32>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::Int64:
return decompressData<Int64>(src, src_size, dst, uncompressed_size);
return decompressData<Int64>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::UInt8:
return decompressData<UInt8>(src, src_size, dst, uncompressed_size);
return decompressData<UInt8>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::UInt16:
return decompressData<UInt16>(src, src_size, dst, uncompressed_size);
return decompressData<UInt16>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::UInt32:
return decompressData<UInt32>(src, src_size, dst, uncompressed_size);
return decompressData<UInt32>(src, src_size, dst, uncompressed_size, saved_variant);
case TypeIndex::UInt64:
return decompressData<UInt64>(src, src_size, dst, uncompressed_size);
return decompressData<UInt64>(src, src_size, dst, uncompressed_size, saved_variant);
default:
break;
}
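
The one-byte cookie introduced above packs the type id into the low 7 bits and the variant flag into the high bit; doDecompressData splits it back apart. A self-contained sketch of that packing (the numeric type id is illustrative):

#include <cassert>
#include <cstdint>

enum class Variant : uint8_t { Byte = 0, Bit = 1 };

int main()
{
    uint8_t type_idx = 21; // some TypeIndex value (illustrative; must fit in 7 bits)
    Variant variant = Variant::Bit;

    // Encoding, as in doCompressData:
    uint8_t cookie = type_idx | (uint8_t(variant) << 7);

    // Decoding, as in doDecompressData:
    auto saved_variant = Variant(cookie >> 7);
    auto saved_type_id = uint8_t(cookie & 0x7F);
    assert(saved_variant == Variant::Bit && saved_type_id == 21);
}
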
@@ -506,13 +575,33 @@ void registerCodecT64(CompressionCodecFactory & factory)
{
auto reg_func = [&](const ASTPtr & arguments, DataTypePtr type) -> CompressionCodecPtr
{
Variant variant = Variant::Byte;
if (arguments && !arguments->children.empty())
throw Exception("T64 codec should not have parameters", ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE);
{
if (arguments->children.size() > 1)
throw Exception("T64 support zero or one parameter, given " + std::to_string(arguments->children.size()),
ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE);
const auto children = arguments->children;
const auto * literal = children[0]->as<ASTLiteral>();
if (!literal)
throw Exception("Wrong modification for T64. Expected: 'bit', 'byte')",
ErrorCodes::ILLEGAL_CODEC_PARAMETER);
String name = literal->value.safeGet<String>();
if (name == "byte")
variant = Variant::Byte;
else if (name == "bit")
variant = Variant::Bit;
else
throw Exception("Wrong modification for T64: " + name, ErrorCodes::ILLEGAL_CODEC_PARAMETER);
}
auto type_idx = typeIdx(type);
if (type && type_idx == TypeIndex::Nothing)
throw Exception("T64 codec is not supported for specified type", ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE);
return std::make_shared<CompressionCodecT64>(type_idx);
return std::make_shared<CompressionCodecT64>(type_idx, variant);
};
factory.registerCompressionCodecWithType("T64", codecId(), reg_func);


@@ -6,18 +6,35 @@
namespace DB
{
/// Gets 64 integer values, makes a 64x64 bit matrix, transposes it and crops the unused bits (most significant zeroes).
/// For example, if we have UInt8 values containing only 0 and 1, then 64xUInt8 would be compressed into 1xUInt64.
/// It detects unused bits by calculating the min and max values of the data part, saving them in the header during the compression phase.
/// There is a special case for signed integer parts whose data crosses zero: one more bit is stored to preserve the sign of each value.
class CompressionCodecT64 : public ICompressionCodec
{
public:
static constexpr UInt32 HEADER_SIZE = 1 + 2 * sizeof(UInt64);
static constexpr UInt32 MAX_COMPRESSED_BLOCK_SIZE = sizeof(UInt64) * 64;
CompressionCodecT64(TypeIndex type_idx_)
/// There are two compression variants:
/// Byte - transpose the bit matrix by bytes (only the last, not full byte is transposed by bits). It is the default.
/// Bit - full bit-transpose of the bit matrix. It uses more resources and leads to better compression with ZSTD (but worse with LZ4).
enum class Variant
{
Byte,
Bit
};
CompressionCodecT64(TypeIndex type_idx_, Variant variant_)
: type_idx(type_idx_)
, variant(variant_)
{}
UInt8 getMethodByte() const override;
String getCodecDesc() const override { return "T64"; }
String getCodecDesc() const override
{
return String("T64") + ((variant == Variant::Byte) ? "" : "(\'bit\')");
}
void useInfoAboutType(DataTypePtr data_type) override;
@@ -33,6 +50,7 @@ protected:
private:
TypeIndex type_idx;
Variant variant;
};
}
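
The header comment's 64xUInt8 example is easy to check by hand: with min = 0 and max = 1 there is one valuable bit, so a block of 64 values collapses to a single UInt64 after transposition. A minimal sketch of that cropping idea (illustrative only, not the codec's API):

#include <cassert>
#include <cstdint>

int main()
{
    uint8_t values[64];
    for (int i = 0; i < 64; ++i)
        values[i] = i % 2; // only 0 and 1 -> min = 0, max = 1 -> num_bits = 1

    // After the 64x64 bit-transpose, bit b of value i lands at bit i of word b.
    // Only word 0 is non-zero here, so the block stores one UInt64 plus the header.
    uint64_t word0 = 0;
    for (int i = 0; i < 64; ++i)
        word0 |= uint64_t(values[i] & 0x1) << i;

    assert(word0 == 0xAAAAAAAAAAAAAAAAull); // alternating bits
}
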


@@ -0,0 +1,7 @@
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1
42 42 42 42 42 42 42 42
42 42 42 42 42 42 42 42
42 42 42 42 42 42 42 42
42 42 42 42 42 42 42 42


@@ -0,0 +1,101 @@
DROP TABLE IF EXISTS test.t64;
CREATE TABLE test.t64
(
u8 UInt8,
t_u8 UInt8 Codec(T64('bit'), LZ4),
u16 UInt16,
t_u16 UInt16 Codec(T64('bit'), LZ4),
u32 UInt32,
t_u32 UInt32 Codec(T64('bit'), LZ4),
u64 UInt64,
t_u64 UInt64 Codec(T64('bit'), LZ4)
) ENGINE MergeTree() ORDER BY tuple();
INSERT INTO test.t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(1);
INSERT INTO test.t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(2);
INSERT INTO test.t64 SELECT 42 AS x, x, x, x, x, x, x, x FROM numbers(4);
SELECT * FROM test.t64 ORDER BY u64;
INSERT INTO test.t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(8));
INSERT INTO test.t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(9));
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11);
INSERT INTO test.t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(64);
INSERT INTO test.t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(65);
INSERT INTO test.t64 SELECT (intExp2(16) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(65);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11);
INSERT INTO test.t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(128);
INSERT INTO test.t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(129);
INSERT INTO test.t64 SELECT (intExp2(24) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(129);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20);
INSERT INTO test.t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(256);
INSERT INTO test.t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(257);
INSERT INTO test.t64 SELECT (intExp2(32) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(257);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20);
INSERT INTO test.t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(512);
INSERT INTO test.t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(513);
INSERT INTO test.t64 SELECT (intExp2(40) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(513);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20);
INSERT INTO test.t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1024);
INSERT INTO test.t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025);
INSERT INTO test.t64 SELECT (intExp2(48) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10);
INSERT INTO test.t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20);
INSERT INTO test.t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2048);
INSERT INTO test.t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049);
INSERT INTO test.t64 SELECT (intExp2(56) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
INSERT INTO test.t64 SELECT (intExp2(63) + number * intExp2(62)) AS x, x, x, x, x, x, x, x FROM numbers(10);
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
OPTIMIZE TABLE test.t64 FINAL;
SELECT * FROM test.t64 WHERE u8 != t_u8;
SELECT * FROM test.t64 WHERE u16 != t_u16;
SELECT * FROM test.t64 WHERE u32 != t_u32;
SELECT * FROM test.t64 WHERE u64 != t_u64;
DROP TABLE test.t64;