#include <DB/Storages/MergeTree/MergeTreePartChecker.h>
#include <DB/DataStreams/MarkInCompressedFile.h>
#include <DB/DataTypes/DataTypeString.h>
#include <DB/DataTypes/DataTypeDate.h>
#include <DB/DataTypes/DataTypeDateTime.h>
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataTypes/DataTypeFixedString.h>
#include <DB/DataTypes/DataTypeAggregateFunction.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/DataTypes/DataTypeNullable.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/IO/CompressedReadBuffer.h>
#include <DB/IO/HashingReadBuffer.h>
#include <DB/Columns/ColumnsNumber.h>
#include <DB/Common/CurrentMetrics.h>
#include <DB/Common/escapeForFileName.h>
#include <Poco/File.h>

namespace CurrentMetrics
{
    extern const Metric ReplicatedChecks;
}

namespace DB
{

namespace ErrorCodes
{
    extern const int CORRUPTED_DATA;
    extern const int INCORRECT_MARK;
    extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
    extern const int NOT_IMPLEMENTED;
    extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
}

namespace
{

constexpr auto DATA_FILE_EXTENSION = ".bin";
constexpr auto NULL_MAP_FILE_EXTENSION = ".null.bin";
constexpr auto MARKS_FILE_EXTENSION = ".mrk";
constexpr auto NULL_MARKS_FILE_EXTENSION = ".null.mrk";
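
/// Reads one column's data (.bin) and marks (.mrk) files through hashing buffers,
/// so that marks can be validated against actual stream positions and the checksums
/// of both files accumulate as a side effect of reading.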
struct Stream
{
public:
    Stream(const String & path, const String & name, const DataTypePtr & type,
        const std::string & extension_, const std::string & mrk_extension_)
        : path(path), name(name), type(type),
        extension{extension_}, mrk_extension{mrk_extension_},
        file_buf(path + name + extension), compressed_hashing_buf(file_buf),
        uncompressing_buf(compressed_hashing_buf),
        uncompressed_hashing_buf(uncompressing_buf),
        mrk_file_buf(path + name + mrk_extension),
        mrk_hashing_buf(mrk_file_buf)
    {
        /// A Stream is created for the element type of an array. The case where the element type
        /// is itself an array is not supported.
        if (typeid_cast<const DataTypeArray *>(type.get()))
            throw Exception("Multidimensional arrays are not supported", ErrorCodes::NOT_IMPLEMENTED);
    }

    bool marksEOF()
    {
        return mrk_hashing_buf.eof();
    }

    void ignore()
    {
        uncompressed_hashing_buf.ignore(std::numeric_limits<size_t>::max());
        mrk_hashing_buf.ignore(std::numeric_limits<size_t>::max());
    }

    size_t read(size_t rows)
    {
        ColumnPtr column = type->createColumn();
        type->deserializeBinaryBulk(*column, uncompressed_hashing_buf, rows, 0);
        return column->size();
    }
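
    /// Reads up to `rows` UInt64 values from the data stream into `data` and returns
    /// how many were actually read (fewer at end of file). Used for array size columns.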
    size_t readUInt64(size_t rows, ColumnUInt64::Container_t & data)
    {
        if (data.size() < rows)
            data.resize(rows);
        size_t size = uncompressed_hashing_buf.readBig(reinterpret_cast<char *>(&data[0]), sizeof(UInt64) * rows);
        if (size % sizeof(UInt64))
            throw Exception("Read " + toString(size) + " bytes, which is not divisible by " + toString(sizeof(UInt64)),
                ErrorCodes::CORRUPTED_DATA);
        return size / sizeof(UInt64);
    }
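
    /// Reads the next mark from the .mrk stream and checks that it matches the current
    /// position in the data stream (allowing either side of a block boundary).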
    void assertMark()
    {
        MarkInCompressedFile mrk_mark;
        readIntBinary(mrk_mark.offset_in_compressed_file, mrk_hashing_buf);
        readIntBinary(mrk_mark.offset_in_decompressed_block, mrk_hashing_buf);

        bool has_alternative_mark = false;
        MarkInCompressedFile alternative_data_mark;
        MarkInCompressedFile data_mark;

        /// If the mark must fall exactly on a block boundary, a mark pointing to the end
        /// of the previous block is acceptable as well as one pointing to the beginning of the next.
        if (!uncompressed_hashing_buf.hasPendingData())
        {
            /// Obtain the mark pointing to the end of the previous block.
            has_alternative_mark = true;
            alternative_data_mark.offset_in_compressed_file = compressed_hashing_buf.count() - uncompressing_buf.getSizeCompressed();
            alternative_data_mark.offset_in_decompressed_block = uncompressed_hashing_buf.offset();

            if (mrk_mark == alternative_data_mark)
                return;

            uncompressed_hashing_buf.next();

            /// At the end of the file, compressed_hashing_buf.count() points to the end of the file
            /// even before next() is called, so the check just performed works incorrectly.
            /// For simplicity, we don't check the last mark.
            if (uncompressed_hashing_buf.eof())
                return;
        }

        data_mark.offset_in_compressed_file = compressed_hashing_buf.count() - uncompressing_buf.getSizeCompressed();
        data_mark.offset_in_decompressed_block = uncompressed_hashing_buf.offset();

        if (mrk_mark != data_mark)
            throw Exception("Incorrect mark: " + data_mark.toString() +
                (has_alternative_mark ? " or " + alternative_data_mark.toString() : "") + " in data, " +
                mrk_mark.toString() + " in " + mrk_extension + " file", ErrorCodes::INCORRECT_MARK);
    }
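
    /// Checks that both streams have been fully consumed and records the accumulated
    /// checksums of the data and marks files.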
    void assertEnd(MergeTreeData::DataPart::Checksums & checksums)
    {
        if (!uncompressed_hashing_buf.eof())
            throw Exception("EOF expected in column data", ErrorCodes::CORRUPTED_DATA);
        if (!mrk_hashing_buf.eof())
            throw Exception("EOF expected in .mrk file", ErrorCodes::CORRUPTED_DATA);

        checksums.files[name + extension] = MergeTreeData::DataPart::Checksums::Checksum(
            compressed_hashing_buf.count(), compressed_hashing_buf.getHash(),
            uncompressed_hashing_buf.count(), uncompressed_hashing_buf.getHash());
        checksums.files[name + mrk_extension] = MergeTreeData::DataPart::Checksums::Checksum(
            mrk_hashing_buf.count(), mrk_hashing_buf.getHash());
    }

public:
    String path;
    String name;
    DataTypePtr type;
    std::string extension;
    std::string mrk_extension;

    ReadBufferFromFile file_buf;
    HashingReadBuffer compressed_hashing_buf;
    CompressedReadBuffer uncompressing_buf;
    HashingReadBuffer uncompressed_hashing_buf;

    ReadBufferFromFile mrk_file_buf;
    HashingReadBuffer mrk_hashing_buf;
};


/// Updates the checksum value for the null map information of the
/// specified column. Returns the number of read rows.
size_t checkNullMap(const String & path,
    const String & name,
    const MergeTreePartChecker::Settings & settings,
    MergeTreeData::DataPart::Checksums & checksums,
    std::atomic<bool> * is_cancelled)
{
    size_t rows = 0;

    DataTypePtr type = std::make_shared<DataTypeUInt8>();
    Stream data_stream(path, escapeForFileName(name), type,
        NULL_MAP_FILE_EXTENSION, NULL_MARKS_FILE_EXTENSION);

    while (true)
    {
        if (is_cancelled && *is_cancelled)
            return 0;

        if (data_stream.marksEOF())
            break;

        data_stream.assertMark();

        size_t cur_rows = data_stream.read(settings.index_granularity);

        rows += cur_rows;
        if (cur_rows < settings.index_granularity)
            break;
    }

    data_stream.assertEnd(checksums);

    return rows;
}


/// Updates the checksum value for the specified column.
/// Returns the number of read rows.
size_t checkColumn(
    const String & path,
    const String & name,
    DataTypePtr type,
    const MergeTreePartChecker::Settings & settings,
    MergeTreeData::DataPart::Checksums & checksums,
    std::atomic<bool> * is_cancelled)
{
    size_t rows = 0;

    try
    {
        if (auto array = typeid_cast<const DataTypeArray *>(type.get()))
        {
            String sizes_name = DataTypeNested::extractNestedTableName(name);
            Stream sizes_stream(path, escapeForFileName(sizes_name) + ".size0", std::make_shared<DataTypeUInt64>(),
                DATA_FILE_EXTENSION, MARKS_FILE_EXTENSION);
            Stream data_stream(path, escapeForFileName(name), array->getNestedType(),
                DATA_FILE_EXTENSION, MARKS_FILE_EXTENSION);

            ColumnUInt64::Container_t sizes;
            while (true)
            {
                if (is_cancelled && *is_cancelled)
                    return 0;

                if (sizes_stream.marksEOF())
                    break;

                sizes_stream.assertMark();
                data_stream.assertMark();

                size_t cur_rows = sizes_stream.readUInt64(settings.index_granularity, sizes);

                size_t sum = 0;
                for (size_t i = 0; i < cur_rows; ++i)
                {
                    size_t new_sum = sum + sizes[i];
                    if (sizes[i] > (1ul << 31) || new_sum < sum)
                        throw Exception("Array size " + toString(sizes[i]) + " is too long.", ErrorCodes::CORRUPTED_DATA);
                    sum = new_sum;
                }

                data_stream.read(sum);

                rows += cur_rows;
                if (cur_rows < settings.index_granularity)
                    break;
            }

            sizes_stream.assertEnd(checksums);
            data_stream.assertEnd(checksums);

            return rows;
        }
        else
        {
            Stream data_stream(path, escapeForFileName(name), type,
                DATA_FILE_EXTENSION, MARKS_FILE_EXTENSION);

            while (true)
            {
                if (is_cancelled && *is_cancelled)
                    return 0;

                if (data_stream.marksEOF())
                    break;

                data_stream.assertMark();

                size_t cur_rows = data_stream.read(settings.index_granularity);

                rows += cur_rows;
                if (cur_rows < settings.index_granularity)
                    break;
            }

            data_stream.assertEnd(checksums);

            return rows;
        }
    }
    catch (Exception & e)
    {
        e.addMessage(" (column: " + path + name + ", last mark at " + toString(rows) + " rows)");
        throw;
    }
}

}

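
/// Fully reads every file of the part at `path`, verifying that marks, row counts and
/// the primary index are mutually consistent and that the recomputed checksums match
/// checksums.txt (when present or required). A minimal usage sketch (hypothetical
/// caller; only the signature below is assumed):
///
///     MergeTreePartChecker::Settings settings;
///     MergeTreeData::DataPart::Checksums checksums;
///     MergeTreePartChecker::checkDataPart(part_path, settings, primary_key_data_types, &checksums, nullptr);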
void MergeTreePartChecker::checkDataPart(
    String path,
    const Settings & settings,
    const DataTypes & primary_key_data_types,
    MergeTreeData::DataPart::Checksums * out_checksums,
    std::atomic<bool> * is_cancelled)
{
    CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedChecks};

    if (!path.empty() && path.back() != '/')
        path += "/";

    NamesAndTypesList columns;

    /// Checksums from the checksums.txt file. They may be absent. If present, they are
    /// later compared with the actual checksums of the data.
    MergeTreeData::DataPart::Checksums checksums_txt;

    {
        ReadBufferFromFile buf(path + "columns.txt");
        columns.readText(buf);
        assertEOF(buf);
    }

    if (settings.require_checksums || Poco::File(path + "checksums.txt").exists())
    {
        ReadBufferFromFile buf(path + "checksums.txt");
        checksums_txt.read(buf);
        assertEOF(buf);
    }

    /// Actual checksums computed from the data contents. A mismatch with checksums_txt
    /// indicates corrupted data.
    MergeTreeData::DataPart::Checksums checksums_data;

    size_t marks_in_primary_key = 0;
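    /// Read primary.idx through a hashing buffer: deserializing each key value both
    /// counts the marks and feeds the hash recorded as the primary.idx checksum.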
    {
        ReadBufferFromFile file_buf(path + "primary.idx");
        HashingReadBuffer hashing_buf(file_buf);

        if (!primary_key_data_types.empty())
        {
            size_t key_size = primary_key_data_types.size();
            Columns tmp_columns(key_size);

            for (size_t j = 0; j < key_size; ++j)
                tmp_columns[j] = primary_key_data_types[j].get()->createColumn();

            while (!hashing_buf.eof())
            {
                if (is_cancelled && *is_cancelled)
                    return;

                ++marks_in_primary_key;
                for (size_t j = 0; j < key_size; ++j)
                    primary_key_data_types[j].get()->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
            }
        }
        else
        {
            hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
        }

        size_t primary_idx_size = hashing_buf.count();

        checksums_data.files["primary.idx"] = MergeTreeData::DataPart::Checksums::Checksum(primary_idx_size, hashing_buf.getHash());
    }

    if (is_cancelled && *is_cancelled)
        return;

    String any_column_name;

    static constexpr size_t UNKNOWN = std::numeric_limits<size_t>::max();

    size_t rows = UNKNOWN;
    std::exception_ptr first_exception;

    /// Verify that the number of rows is consistent between all the columns.
    auto check_row_count = [&rows, &any_column_name](size_t cur_rows, const std::string & col_name)
    {
        if (rows == UNKNOWN)
        {
            rows = cur_rows;
            any_column_name = col_name;
        }
        else if (rows != cur_rows)
            throw Exception{"Different number of rows in columns " + any_column_name + " and " + col_name,
                ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH};
    };
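
    /// Check every column listed in columns.txt, collecting per-file checksums.
    /// In verbose mode, errors are reported and remembered rather than thrown immediately.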
    for (const NameAndTypePair & column : columns)
    {
        if (settings.verbose)
        {
            std::cerr << column.name << ":";
            std::cerr.flush();
        }

        bool ok = false;
        try
        {
            if (!settings.require_column_files && !Poco::File(path + escapeForFileName(column.name) + DATA_FILE_EXTENSION).exists())
            {
                if (settings.verbose)
                    std::cerr << " no files" << std::endl;
                continue;
            }

            const DataTypePtr * observed_type;

            /// If the current column is nullable, first we process its null map and the
            /// corresponding marks.
            if (column.type->isNullable())
            {
                const auto & nullable_type = static_cast<const DataTypeNullable &>(*column.type);
                observed_type = &nullable_type.getNestedType();

                size_t cur_rows = checkNullMap(path, column.name, settings, checksums_data, is_cancelled);

                if (is_cancelled && *is_cancelled)
                    return;

                check_row_count(cur_rows, column.name);
            }
            else
                observed_type = &column.type;

            /// Update the checksum from the data of the column.
            size_t cur_rows = checkColumn(path, column.name, *observed_type, settings, checksums_data, is_cancelled);

            if (is_cancelled && *is_cancelled)
                return;

            check_row_count(cur_rows, column.name);

            ok = true;
        }
        catch (...)
        {
            if (!settings.verbose)
                throw;

            std::exception_ptr e = std::current_exception();
            if (!first_exception)
                first_exception = e;

            std::cerr << getCurrentExceptionMessage(true) << std::endl;
        }

        if (settings.verbose && ok)
            std::cerr << " ok" << std::endl;
    }
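
    /// Cross-checks after all columns have been read: the part must contain at least one
    /// column, and the primary index must have one mark per index_granularity rows.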
    if (rows == UNKNOWN)
        throw Exception("No columns", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

    if (!primary_key_data_types.empty())
    {
        const size_t expected_marks = (rows - 1) / settings.index_granularity + 1;
        if (expected_marks != marks_in_primary_key)
            throw Exception("Size of primary key doesn't match expected number of marks."
                " Number of rows in columns: " + toString(rows)
                + ", index_granularity: " + toString(settings.index_granularity)
                + ", expected number of marks: " + toString(expected_marks)
                + ", size of primary key: " + toString(marks_in_primary_key),
                ErrorCodes::CORRUPTED_DATA);
    }

    if (settings.require_checksums || !checksums_txt.files.empty())
        checksums_txt.checkEqual(checksums_data, true);

    if (first_exception)
        std::rethrow_exception(first_exception);

    if (out_checksums)
        *out_checksums = checksums_data;
}

}