2017-09-11 17:55:41 +00:00
|
|
|
#include <Storages/MergeTree/MergeTreePartition.h>
|
|
|
|
#include <Storages/MergeTree/MergeTreeData.h>
|
|
|
|
#include <Storages/MergeTree/MergeTreeDataPart.h>
|
|
|
|
#include <IO/ReadBufferFromFile.h>
|
|
|
|
#include <IO/HashingWriteBuffer.h>
|
2017-11-24 13:55:31 +00:00
|
|
|
#include <Common/FieldVisitors.h>
|
2017-09-11 17:55:41 +00:00
|
|
|
#include <DataTypes/DataTypeDate.h>
|
|
|
|
#include <Common/SipHash.h>
|
|
|
|
#include <Common/typeid_cast.h>
|
|
|
|
#include <Common/hex.h>
|
|
|
|
|
|
|
|
#include <Poco/File.h>
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
|
|
|
/// Open a file for reading with a buffer no larger than the file itself
/// (partition.dat files are tiny, so a full-size default buffer would be wasteful).
static ReadBufferFromFile openForReading(const String & path)
{
    const auto file_size = Poco::File(path).getSize();
    const auto buffer_size = std::min(static_cast<Poco::File::FileSize>(DBMS_DEFAULT_BUFFER_SIZE), file_size);
    return ReadBufferFromFile(path, buffer_size);
}
|
|
|
|
|
2017-09-13 16:22:04 +00:00
|
|
|
/// NOTE: This ID is used to create part names which are then persisted in ZK and as directory names on the file system.
|
|
|
|
/// So if you want to change this method, be sure to guarantee compatibility with existing table data.
|
2017-09-11 17:55:41 +00:00
|
|
|
String MergeTreePartition::getID(const MergeTreeData & storage) const
|
|
|
|
{
|
2018-02-21 17:05:21 +00:00
|
|
|
if (value.size() != storage.partition_key_sample.columns())
|
2017-09-11 17:55:41 +00:00
|
|
|
throw Exception("Invalid partition key size: " + toString(value.size()), ErrorCodes::LOGICAL_ERROR);
|
|
|
|
|
|
|
|
if (value.empty())
|
2017-09-13 16:22:04 +00:00
|
|
|
return "all"; /// It is tempting to use an empty string here. But that would break directory structure in ZK.
|
2017-09-11 17:55:41 +00:00
|
|
|
|
2017-09-13 16:22:04 +00:00
|
|
|
/// In case all partition fields are represented by integral types, try to produce a human-readable ID.
|
2017-09-11 17:55:41 +00:00
|
|
|
/// Otherwise use a hex-encoded hash.
|
|
|
|
bool are_all_integral = true;
|
|
|
|
for (const Field & field : value)
|
|
|
|
{
|
|
|
|
if (field.getType() != Field::Types::UInt64 && field.getType() != Field::Types::Int64)
|
|
|
|
{
|
|
|
|
are_all_integral = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
String result;
|
|
|
|
|
|
|
|
if (are_all_integral)
|
|
|
|
{
|
|
|
|
FieldVisitorToString to_string_visitor;
|
|
|
|
for (size_t i = 0; i < value.size(); ++i)
|
|
|
|
{
|
|
|
|
if (i > 0)
|
|
|
|
result += '-';
|
|
|
|
|
2018-02-21 17:05:21 +00:00
|
|
|
if (typeid_cast<const DataTypeDate *>(storage.partition_key_sample.getByPosition(i).type.get()))
|
2018-05-25 13:29:15 +00:00
|
|
|
result += toString(DateLUT::instance().toNumYYYYMMDD(DayNum(value[i].safeGet<UInt64>())));
|
2017-09-11 17:55:41 +00:00
|
|
|
else
|
|
|
|
result += applyVisitor(to_string_visitor, value[i]);
|
2017-09-13 16:22:04 +00:00
|
|
|
|
|
|
|
/// It is tempting to output DateTime as YYYYMMDDhhmmss, but that would make partition ID
|
|
|
|
/// timezone-dependent.
|
2017-09-11 17:55:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
SipHash hash;
|
|
|
|
FieldVisitorHash hashing_visitor(hash);
|
|
|
|
for (const Field & field : value)
|
|
|
|
applyVisitor(hashing_visitor, field);
|
|
|
|
|
|
|
|
char hash_data[16];
|
|
|
|
hash.get128(hash_data);
|
|
|
|
result.resize(32);
|
|
|
|
for (size_t i = 0; i < 16; ++i)
|
|
|
|
writeHexByteLowercase(hash_data[i], &result[2 * i]);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2018-06-08 01:51:55 +00:00
|
|
|
void MergeTreePartition::serializeTextQuoted(const MergeTreeData & storage, WriteBuffer & out, const FormatSettings & format_settings) const
|
2017-09-11 17:55:41 +00:00
|
|
|
{
|
2018-02-21 17:05:21 +00:00
|
|
|
size_t key_size = storage.partition_key_sample.columns();
|
2017-09-11 17:55:41 +00:00
|
|
|
|
|
|
|
if (key_size == 0)
|
|
|
|
{
|
|
|
|
writeCString("tuple()", out);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (key_size > 1)
|
|
|
|
writeChar('(', out);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < key_size; ++i)
|
|
|
|
{
|
|
|
|
if (i > 0)
|
|
|
|
writeCString(", ", out);
|
|
|
|
|
2018-02-21 17:05:21 +00:00
|
|
|
const DataTypePtr & type = storage.partition_key_sample.getByPosition(i).type;
|
2017-12-15 20:48:46 +00:00
|
|
|
auto column = type->createColumn();
|
2017-09-11 17:55:41 +00:00
|
|
|
column->insert(value[i]);
|
2018-06-08 01:51:55 +00:00
|
|
|
type->serializeTextQuoted(*column, 0, out, format_settings);
|
2017-09-11 17:55:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (key_size > 1)
|
|
|
|
writeChar(')', out);
|
|
|
|
}
|
|
|
|
|
|
|
|
void MergeTreePartition::load(const MergeTreeData & storage, const String & part_path)
|
|
|
|
{
|
|
|
|
if (!storage.partition_expr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ReadBufferFromFile file = openForReading(part_path + "partition.dat");
|
2018-02-21 17:05:21 +00:00
|
|
|
value.resize(storage.partition_key_sample.columns());
|
|
|
|
for (size_t i = 0; i < storage.partition_key_sample.columns(); ++i)
|
|
|
|
storage.partition_key_sample.getByPosition(i).type->deserializeBinary(value[i], file);
|
2017-09-11 17:55:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void MergeTreePartition::store(const MergeTreeData & storage, const String & part_path, MergeTreeDataPartChecksums & checksums) const
|
|
|
|
{
|
|
|
|
if (!storage.partition_expr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
WriteBufferFromFile out(part_path + "partition.dat");
|
|
|
|
HashingWriteBuffer out_hashing(out);
|
|
|
|
for (size_t i = 0; i < value.size(); ++i)
|
2018-02-21 17:05:21 +00:00
|
|
|
storage.partition_key_sample.getByPosition(i).type->serializeBinary(value[i], out_hashing);
|
2017-10-24 14:11:53 +00:00
|
|
|
out_hashing.next();
|
2017-09-11 17:55:41 +00:00
|
|
|
checksums.files["partition.dat"].file_size = out_hashing.count();
|
|
|
|
checksums.files["partition.dat"].file_hash = out_hashing.getHash();
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|