#include <optional>

#include <Poco/File.h>
#include <Poco/DirectoryIterator.h>

#include <Storages/MergeTree/checkDataPart.h>
#include <DataStreams/MarkInCompressedFile.h>
#include <IO/CompressedReadBuffer.h>
#include <IO/HashingReadBuffer.h>
#include <Common/CurrentMetrics.h>


namespace CurrentMetrics
{
    extern const Metric ReplicatedChecks;
}

namespace DB
{

namespace ErrorCodes
{
    extern const int CORRUPTED_DATA;
    extern const int LOGICAL_ERROR;
    extern const int INCORRECT_MARK;
    extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
    extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
}


namespace
{

/** Reads and checksums a single stream (a pair of .bin and .mrk files) of a single column.
  */
class Stream
{
public:
    String base_name;
    String bin_file_path;
    String mrk_file_path;
private:
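    /// Reading pipeline for the .bin file: the raw (compressed) bytes are hashed first,
    /// then decompressed, and the decompressed bytes are hashed again, so checksums
    /// of both representations are accumulated while reading.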
    ReadBufferFromFile file_buf;
    HashingReadBuffer compressed_hashing_buf;
    CompressedReadBuffer uncompressing_buf;
public:
    HashingReadBuffer uncompressed_hashing_buf;

private:
    ReadBufferFromFile mrk_file_buf;
public:
    HashingReadBuffer mrk_hashing_buf;

    Stream(const String & path, const String & base_name)
        :
        base_name(base_name),
        bin_file_path(path + base_name + ".bin"),
        mrk_file_path(path + base_name + ".mrk"),
        file_buf(bin_file_path),
        compressed_hashing_buf(file_buf),
        uncompressing_buf(compressed_hashing_buf),
        uncompressed_hashing_buf(uncompressing_buf),
        mrk_file_buf(mrk_file_path),
        mrk_hashing_buf(mrk_file_buf)
    {}

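    /// Reads the next mark from the .mrk file and checks that it points to the current
    /// position in the decompressed .bin data.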
    void assertMark()
    {
        MarkInCompressedFile mrk_mark;
        readIntBinary(mrk_mark.offset_in_compressed_file, mrk_hashing_buf);
        readIntBinary(mrk_mark.offset_in_decompressed_block, mrk_hashing_buf);

        bool has_alternative_mark = false;
        MarkInCompressedFile alternative_data_mark = {};
        MarkInCompressedFile data_mark = {};

        /// If the mark lands exactly on a block boundary, a mark pointing to the end of the previous block
        /// and the beginning of the next one is also acceptable.
        if (!uncompressed_hashing_buf.hasPendingData())
        {
            /// Get a mark pointing to the end of the previous block.
            has_alternative_mark = true;
            alternative_data_mark.offset_in_compressed_file = compressed_hashing_buf.count() - uncompressing_buf.getSizeCompressed();
            alternative_data_mark.offset_in_decompressed_block = uncompressed_hashing_buf.offset();

            if (mrk_mark == alternative_data_mark)
                return;

            uncompressed_hashing_buf.next();

            /// At the end of the file, `compressed_hashing_buf.count()` points to the end of the file even before calling `next()`,
            /// and the check below would not work correctly. For simplicity, the last mark is not checked.
            if (uncompressed_hashing_buf.eof())
                return;
        }

        data_mark.offset_in_compressed_file = compressed_hashing_buf.count() - uncompressing_buf.getSizeCompressed();
        data_mark.offset_in_decompressed_block = uncompressed_hashing_buf.offset();

        if (mrk_mark != data_mark)
            throw Exception("Incorrect mark: " + data_mark.toString() +
                (has_alternative_mark ? " or " + alternative_data_mark.toString() : "") + " in data, " +
                mrk_mark.toString() + " in " + mrk_file_path + " file", ErrorCodes::INCORRECT_MARK);
    }

    void assertEnd()
    {
        if (!uncompressed_hashing_buf.eof())
            throw Exception("EOF expected in " + bin_file_path + " file"
                + " at position "
                + toString(compressed_hashing_buf.count()) + " (compressed), "
                + toString(uncompressed_hashing_buf.count()) + " (uncompressed)", ErrorCodes::CORRUPTED_DATA);

        if (!mrk_hashing_buf.eof())
            throw Exception("EOF expected in " + mrk_file_path + " file"
                + " at position "
                + toString(mrk_hashing_buf.count()), ErrorCodes::CORRUPTED_DATA);
    }

    void saveChecksums(MergeTreeData::DataPart::Checksums & checksums)
    {
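        /// Record checksums of the compressed and uncompressed contents of the .bin file,
        /// and of the raw contents of the .mrk file.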
        checksums.files[base_name + ".bin"] = MergeTreeData::DataPart::Checksums::Checksum(
            compressed_hashing_buf.count(), compressed_hashing_buf.getHash(),
            uncompressed_hashing_buf.count(), uncompressed_hashing_buf.getHash());

        checksums.files[base_name + ".mrk"] = MergeTreeData::DataPart::Checksums::Checksum(
            mrk_hashing_buf.count(), mrk_hashing_buf.getHash());
    }
};

}


MergeTreeData::DataPart::Checksums checkDataPart(
    const String & path_,
    size_t index_granularity,
    bool require_checksums,
    const DataTypes & primary_key_data_types,
    std::function<bool()> is_cancelled)
{
    Logger * log = &Logger::get("checkDataPart");

    /** Responsibility:
      * - read the list of columns from columns.txt;
      * - read checksums.txt if it exists;
      * - read primary.idx and validate its checksum; obtain the number of marks;
      * - read the data files and marks of every stream of every column; calculate and validate checksums;
      * - check that all columns have the same number of rows;
      * - check that all marks files have the same size.
      */

    CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedChecks};

    String path = path_;
    if (!path.empty() && path.back() != '/')
        path += "/";

    NamesAndTypesList columns;

    {
        ReadBufferFromFile buf(path + "columns.txt");
        columns.readText(buf);
        assertEOF(buf);
    }

    /// Checksums from the checksums.txt file. May be absent. If present, they are later compared with the actual checksums of the data.
    MergeTreeData::DataPart::Checksums checksums_txt;

    if (require_checksums || Poco::File(path + "checksums.txt").exists())
    {
        ReadBufferFromFile buf(path + "checksums.txt");
        checksums_txt.read(buf);
        assertEOF(buf);
    }

    /// Actual checksums calculated from the data. They must match checksums.txt; if they don't, the data is broken.
    MergeTreeData::DataPart::Checksums checksums_data;

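    /// primary.idx stores one serialized primary key tuple per mark, so reading it to the end
    /// also yields the number of marks in the part.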
    size_t marks_in_primary_key = 0;
    {
        ReadBufferFromFile file_buf(path + "primary.idx");
        HashingReadBuffer hashing_buf(file_buf);

        if (!primary_key_data_types.empty())
        {
            size_t key_size = primary_key_data_types.size();
            MutableColumns tmp_columns(key_size);

            for (size_t j = 0; j < key_size; ++j)
                tmp_columns[j] = primary_key_data_types[j]->createColumn();

            while (!hashing_buf.eof())
            {
                if (is_cancelled())
                    return {};

                ++marks_in_primary_key;
                for (size_t j = 0; j < key_size; ++j)
                    primary_key_data_types[j]->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
            }
        }
        else
        {
            hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
        }

        size_t primary_idx_size = hashing_buf.count();

        checksums_data.files["primary.idx"] = MergeTreeData::DataPart::Checksums::Checksum(primary_idx_size, hashing_buf.getHash());
    }

    /// Optional files: count.txt, partition.dat, minmax_*.idx. Just calculate checksums of the files that exist.
    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(path); dir_it != dir_end; ++dir_it)
    {
        const String & file_name = dir_it.name();
        if (file_name == "count.txt"
            || file_name == "partition.dat"
            || (startsWith(file_name, "minmax_") && endsWith(file_name, ".idx")))
        {
            ReadBufferFromFile file_buf(dir_it->path());
            HashingReadBuffer hashing_buf(file_buf);
            hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
            checksums_data.files[file_name] = MergeTreeData::DataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash());
        }
    }

    if (is_cancelled())
        return {};

    /// If a count.txt file exists, use it as the source of truth for the number of rows. Otherwise just check that all columns have an equal number of rows.
    std::optional<size_t> rows;

    if (Poco::File(path + "count.txt").exists())
    {
        ReadBufferFromFile buf(path + "count.txt");
        size_t count = 0;
        readText(count, buf);
        assertEOF(buf);
        rows = count;
    }

    /// Read all columns, calculate checksums and validate marks.
    for (const NameAndTypePair & name_type : columns)
    {
        LOG_DEBUG(log, "Checking column " + name_type.name + " in " + path);

        std::map<String, Stream> streams;
        size_t column_size = 0;
        size_t mark_num = 0;

        while (true)
        {
            IDataType::DeserializeBinaryBulkSettings settings;

            /// Check that every mark points to the current position in its file.
            bool marks_eof = false;
            name_type.type->enumerateStreams([&](const IDataType::SubstreamPath & substream_path)
            {
                String file_name = IDataType::getFileNameForStream(name_type.name, substream_path);
                auto & stream = streams.try_emplace(file_name, path, file_name).first->second;

                try
                {
                    if (!stream.mrk_hashing_buf.eof())
                        stream.assertMark();
                    else
                        marks_eof = true;
                }
                catch (Exception & e)
                {
                    e.addMessage("Cannot read mark " + toString(mark_num) + " at row " + toString(column_size)
                        + " in file " + stream.mrk_file_path
                        + ", mrk file offset: " + toString(stream.mrk_hashing_buf.count()));
                    throw;
                }
            }, settings.path);

            ++mark_num;

            /// Read index_granularity rows from the column.
            /// NOTE Shared array sizes of Nested columns are read more than once. That's OK.

            MutableColumnPtr tmp_column = name_type.type->createColumn();
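            /// Route every substream read through the corresponding uncompressed hashing buffer,
            /// so checksums keep accumulating while the column data is deserialized.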
            settings.getter = [&](const IDataType::SubstreamPath & substream_path)
            {
                String file_name = IDataType::getFileNameForStream(name_type.name, substream_path);
                auto stream_it = streams.find(file_name);
                if (stream_it == streams.end())
                    throw Exception("Logical error: cannot find stream " + file_name, ErrorCodes::LOGICAL_ERROR);
                return &stream_it->second.uncompressed_hashing_buf;
            };

            IDataType::DeserializeBinaryBulkStatePtr state;
            name_type.type->deserializeBinaryBulkStatePrefix(settings, state);
            name_type.type->deserializeBinaryBulkWithMultipleStreams(*tmp_column, index_granularity, settings, state);

            size_t read_size = tmp_column->size();
            column_size += read_size;

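            /// A short read means the last (possibly incomplete) granule of the column has been reached.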
            if (read_size < index_granularity)
                break;
            else if (marks_eof)
                throw Exception("Unexpected end of mrk file while reading column " + name_type.name, ErrorCodes::CORRUPTED_DATA);

            if (is_cancelled())
                return {};
        }

        /// Check that the number of rows is the same in every column.
        if (!rows)
            rows = column_size;
        else if (*rows != column_size)
            throw Exception{"Unexpected number of rows in column "
                + name_type.name + " (" + toString(column_size) + ", expected: " + toString(*rows) + ")",
                ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH};

        /// Save checksums for the column.
        name_type.type->enumerateStreams([&](const IDataType::SubstreamPath & substream_path)
        {
            String file_name = IDataType::getFileNameForStream(name_type.name, substream_path);
            auto stream_it = streams.find(file_name);
            if (stream_it == streams.end())
                throw Exception("Logical error: cannot find stream " + file_name, ErrorCodes::LOGICAL_ERROR);

            stream_it->second.assertEnd();
            stream_it->second.saveChecksums(checksums_data);
        }, {});

        if (is_cancelled())
            return {};
    }

    if (!rows)
        throw Exception("No columns in data part", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

    if (!primary_key_data_types.empty())
    {
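        /// Ceiling division: every started group of index_granularity rows gets its own mark.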
        size_t expected_marks = (*rows - 1) / index_granularity + 1;
        if (expected_marks != marks_in_primary_key)
            throw Exception("Size of primary key doesn't match expected number of marks."
                " Number of rows in columns: " + toString(*rows)
                + ", index_granularity: " + toString(index_granularity)
                + ", expected number of marks: " + toString(expected_marks)
                + ", size of primary key: " + toString(marks_in_primary_key),
                ErrorCodes::CORRUPTED_DATA);
    }

    if (require_checksums || !checksums_txt.files.empty())
        checksums_txt.checkEqual(checksums_data, true);

    return checksums_data;
}

}