Repository: https://github.com/ClickHouse/ClickHouse.git
Commit a419267dc6 ("minor fixes")
Parent: 30a867e387
@@ -39,7 +39,6 @@ ASTPtr CompressionCodecDelta::getCodecDesc() const
 void CompressionCodecDelta::updateHash(SipHash & hash) const
 {
     getCodecDesc()->updateTreeHash(hash);
-    hash.update(delta_bytes_size);
 }
 
 namespace
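The removed line reads like a redundancy fix: getCodecDesc() already produces a description of the form Delta(N), and updateTreeHash() hashes that whole description, so feeding delta_bytes_size into the hash a second time presumably adds no information. A minimal standalone sketch of that reasoning (std::hash stands in for SipHash; names are illustrative, not ClickHouse's API):

#include <functional>
#include <iostream>
#include <string>

// Illustrative only: the codec description already carries the delta byte
// size, so hashing the description alone distinguishes Delta(4) from
// Delta(8) without a second hash.update(delta_bytes_size).
int main()
{
    std::string delta4 = "Delta(4)";
    std::string delta8 = "Delta(8)";
    std::cout << std::hash<std::string>{}(delta4) << '\n'
              << std::hash<std::string>{}(delta8) << '\n';  // different hashes
}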
@@ -123,6 +123,11 @@ public:
         return bytes_ignored;
     }
 
+    void ignoreAll()
+    {
+        tryIgnore(std::numeric_limits<size_t>::max());
+    }
+
     /** Reads a single byte. */
     bool ALWAYS_INLINE read(char & c)
     {
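The new ignoreAll() is a thin convenience wrapper: tryIgnore(n) skips up to n bytes and reports how many were actually skipped, so passing std::numeric_limits<size_t>::max() drains whatever is left. A self-contained sketch of the same pattern (a toy buffer, not ClickHouse's ReadBuffer):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <limits>
#include <string_view>

// Simplified stand-in for a read buffer: tryIgnore() skips up to n bytes and
// returns how many were actually skipped; ignoreAll() drains the rest.
struct ToyReadBuffer
{
    std::string_view data;
    size_t pos = 0;

    size_t tryIgnore(size_t n)
    {
        size_t skipped = std::min(n, data.size() - pos);
        pos += skipped;
        return skipped;
    }

    void ignoreAll()
    {
        tryIgnore(std::numeric_limits<size_t>::max());
    }
};

int main()
{
    ToyReadBuffer buf{"0123456789"};
    buf.tryIgnore(3);   // skip a prefix
    buf.ignoreAll();    // drain the rest
    std::cout << buf.pos << '\n';  // prints 10
}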
@@ -112,8 +112,11 @@ void MergeTreeDataPartWriterCompact::writeBlock(const Block & block)
     {
         auto & stream = compressed_streams[i];
 
+        /// Offset should be 0, because compressed block is written for every granule.
+        assert(stream->hashing_buf.offset() == 0);
+
         writeIntBinary(plain_hashing.count(), marks);
-        writeIntBinary(stream->hashing_buf.offset(), marks);
+        writeIntBinary(UInt64(0), marks);
 
         writeColumnSingleGranule(block.getByName(name_and_type->name), stream, current_row, rows_to_write);
 
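The writer finishes a compressed block for every granule, so by the time a mark is written the offset inside the current decompressed block is always 0; the change makes that explicit with an assert and writes a literal 0 instead of re-reading hashing_buf.offset(). A rough sketch of writing such a two-number mark (illustrative names and file layout, not the exact ClickHouse mark format):

#include <cassert>
#include <cstdint>
#include <fstream>

// Illustrative only: a mark here is a pair (offset in the compressed file,
// offset inside the decompressed block). Because a compressed block is
// finished for every granule, the second component is always 0.
void writeMark(std::ofstream & marks, uint64_t offset_in_compressed_file, uint64_t offset_in_decompressed_block)
{
    assert(offset_in_decompressed_block == 0);  // mirrors the new assert in writeBlock
    marks.write(reinterpret_cast<const char *>(&offset_in_compressed_file), sizeof(offset_in_compressed_file));
    marks.write(reinterpret_cast<const char *>(&offset_in_decompressed_block), sizeof(offset_in_decompressed_block));
}

int main()
{
    std::ofstream marks("example.mrk", std::ios::binary);  // hypothetical file name
    writeMark(marks, 0, 0);
    writeMark(marks, 4096, 0);
}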
@@ -162,6 +165,12 @@ void MergeTreeDataPartWriterCompact::finishDataSerialization(IMergeTreeDataPart:
     if (columns_buffer.size() != 0)
         writeBlock(header.cloneWithColumns(columns_buffer.releaseColumns()));
 
+#ifndef NDEBUG
+    /// Offsets should be 0, because compressed block is written for every granule.
+    for (const auto & [_, stream] : streams_by_codec)
+        assert(stream->hashing_buf.offset() == 0);
+#endif
+
     if (with_final_mark && data_written)
     {
         for (size_t i = 0; i < columns_list.size(); ++i)
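The same invariant is re-checked once more at the end of serialization. The loop is fenced with #ifndef NDEBUG because assert() itself compiles to nothing in release builds, which would otherwise leave behind a loop that does no work (and possibly unused-variable warnings). A tiny reminder of that behaviour:

#include <cassert>
#include <cstdio>

int main()
{
    // assert() is a no-op when NDEBUG is defined, so debug-only checks that
    // need surrounding code (like a loop over streams) are usually wrapped
    // in #ifndef NDEBUG as well, to avoid dead work in release builds.
#ifndef NDEBUG
    std::puts("debug build: asserts active");
    assert(1 + 1 == 2);
#else
    std::puts("release build: asserts compiled out");
#endif
    return 0;
}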
@@ -89,7 +89,7 @@ IMergeTreeDataPart::Checksums checkDataPart(
         CompressedReadBuffer uncompressing_buf(compressed_hashing_buf);
         HashingReadBuffer uncompressed_hashing_buf(uncompressing_buf);
 
-        uncompressed_hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
+        uncompressed_hashing_buf.ignoreAll();
         return IMergeTreeDataPart::Checksums::Checksum
         {
             compressed_hashing_buf.count(), compressed_hashing_buf.getHash(),
@@ -102,7 +102,7 @@ IMergeTreeDataPart::Checksums checkDataPart(
     {
         auto file_buf = disk_->readFile(file_path);
         HashingReadBuffer hashing_buf(*file_buf);
-        hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
+        hashing_buf.ignoreAll();
        return IMergeTreeDataPart::Checksums::Checksum{hashing_buf.count(), hashing_buf.getHash()};
     };
 
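Both call sites drain a HashingReadBuffer to the end so that count() and getHash() cover the whole stream; ignoreAll() just states that intent directly. A standalone sketch of the same "read everything through a counting wrapper, then take the totals" pattern (simplified FNV-1a hashing, not ClickHouse's HashingReadBuffer):

#include <cstdint>
#include <iostream>
#include <sstream>

// Illustrative stand-in for HashingReadBuffer: everything pulled through it
// is counted and folded into a running hash.
struct HashingReader
{
    std::istream & in;
    uint64_t bytes = 0;
    uint64_t hash = 14695981039346656037ULL;  // FNV-1a offset basis

    void ignoreAll()  // drain to EOF, as checkDataPart now does
    {
        char c;
        while (in.get(c))
        {
            ++bytes;
            hash = (hash ^ static_cast<unsigned char>(c)) * 1099511628211ULL;  // FNV prime
        }
    }
};

int main()
{
    std::istringstream file("column data to be checksummed");
    HashingReader reader{file};
    reader.ignoreAll();
    std::cout << reader.bytes << ' ' << reader.hash << '\n';  // size and digest of the whole stream
}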
@@ -1,6 +1,9 @@
 12000 11890
 499500 499500 999
+499500 499500 999
 11965 11890
 499500 499500 999
+499500 499500 999
 5858 11890
 499500 499500 999
+499500 499500 999
@@ -10,6 +10,11 @@ SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes)
 
 SELECT sum(id), sum(val), max(s) FROM codecs;
 
+DETACH TABLE codecs;
+ATTACH table codecs;
+
+SELECT sum(id), sum(val), max(s) FROM codecs;
+
 DROP TABLE codecs;
 
 CREATE TABLE codecs (id UInt32 CODEC(NONE), val UInt32 CODEC(NONE), s String CODEC(NONE))
@@ -22,6 +27,11 @@ SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes)
 
 SELECT sum(id), sum(val), max(s) FROM codecs;
 
+DETACH TABLE codecs;
+ATTACH table codecs;
+
+SELECT sum(id), sum(val), max(s) FROM codecs;
+
 DROP TABLE codecs;
 
 CREATE TABLE codecs (id UInt32, val UInt32 CODEC(Delta, ZSTD), s String CODEC(ZSTD))
@@ -34,4 +44,9 @@ SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes)
 
 SELECT sum(id), sum(val), max(s) FROM codecs;
 
+DETACH TABLE codecs;
+ATTACH table codecs;
+
+SELECT sum(id), sum(val), max(s) FROM codecs;
+
 DROP TABLE codecs;