fix building

This commit is contained in:
taiyang-li 2021-12-28 19:29:01 +08:00
parent 2a1fe52b9f
commit 98f37afc80
14 changed files with 42 additions and 46 deletions

View File

@ -820,7 +820,7 @@ if (ThreadFuzzer::instance().isEffective())
#if USE_ROCKSDB
/// initialize meta file cache
/// Initialize merge tree metadata cache
{
size_t size = config().getUInt64("meta_file_cache_size", 256 << 20);
global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);

View File

@ -99,7 +99,7 @@ public:
if (!data || !data->getSettings()->use_metadata_cache)
throw Exception("The table in function " + getName() + " must be in MergeTree Family", ErrorCodes::ILLEGAL_COLUMN);
/// Fill in result
/// Fill in checking results.
auto col_result = result_type->createColumn();
auto & col_arr = assert_cast<ColumnArray &>(*col_result);
auto & col_tuple = assert_cast<ColumnTuple &>(col_arr.getData());

View File

@ -283,7 +283,7 @@ struct ContextSharedPart
Context::ConfigReloadCallback config_reload_callback;
#if USE_ROCKSDB
/// MergeTree metadata cache stored in rocksdb.
/// Global merge tree metadata cache, stored in rocksdb.
MergeTreeMetadataCachePtr merge_tree_metadata_cache;
#endif
@ -399,7 +399,7 @@ struct ContextSharedPart
zookeeper.reset();
#if USE_ROCKSDB
/// Shutdown meta file cache
/// Shutdown merge tree metadata cache
if (merge_tree_metadata_cache)
{
merge_tree_metadata_cache->shutdown();

View File

@ -708,7 +708,6 @@ public:
MergeTreeMetadataCachePtr getMergeTreeMetadataCache() const;
#endif
#if USE_NURAFT
std::shared_ptr<KeeperDispatcher> & getKeeperDispatcher() const;
#endif

View File

@ -90,11 +90,11 @@ void IMergeTreeDataPart::MinMaxIndex::load(
{
std::unique_ptr<SeekableReadBuffer> file;
#if USE_ROCKSDB
String _;
if (cache)
{
String file_name = "minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx";
String value;
file = cache->readOrSetMeta(disk, file_name, value);
file = cache->readOrSet(disk, file_name, _);
}
else
{
@ -325,7 +325,7 @@ IMergeTreeDataPart::IMergeTreeDataPart(
, index_granularity_info(storage_, part_type_)
, part_type(part_type_)
, parent_part(parent_part_)
, use_metadata_cache(storage.getSettings()->use_metadata_cache)
, use_metadata_cache(storage.use_metadata_cache)
{
if (parent_part)
state = State::Committed;
@ -357,7 +357,7 @@ IMergeTreeDataPart::IMergeTreeDataPart(
, index_granularity_info(storage_, part_type_)
, part_type(part_type_)
, parent_part(parent_part_)
, use_metadata_cache(storage.getSettings()->use_metadata_cache)
, use_metadata_cache(storage.use_metadata_cache)
{
if (parent_part)
state = State::Committed;
@ -716,7 +716,6 @@ void IMergeTreeDataPart::appendFilesOfColumnsChecksumsIndexes(Strings & files, b
size_t IMergeTreeDataPart::fileNumberOfColumnsChecksumsIndexes() const
{
Strings files;
files.reserve(16);
appendFilesOfColumnsChecksumsIndexes(files, true);
return files.size();
}
@ -773,10 +772,10 @@ void IMergeTreeDataPart::loadIndex()
std::unique_ptr<SeekableReadBuffer> index_file;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String value;
index_file = metadata_cache->readOrSetMeta(volume->getDisk(), "primary.idx", value);
index_file = metadata_cache->readOrSet(volume->getDisk(), "primary.idx", _);
}
else
{
@ -854,10 +853,10 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec()
bool exists = false;
std::unique_ptr<SeekableReadBuffer> file_buf;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String v;
file_buf = metadata_cache->readOrSetMeta(volume->getDisk(), DEFAULT_COMPRESSION_CODEC_FILE_NAME, v);
file_buf = metadata_cache->readOrSet(volume->getDisk(), DEFAULT_COMPRESSION_CODEC_FILE_NAME, _);
exists = file_buf != nullptr;
}
else
@ -1027,10 +1026,10 @@ void IMergeTreeDataPart::loadChecksums(bool require)
std::unique_ptr<SeekableReadBuffer> buf;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String value;
buf = metadata_cache->readOrSetMeta(volume->getDisk(), "checksums.txt", value);
buf = metadata_cache->readOrSet(volume->getDisk(), "checksums.txt", _);
exists = buf != nullptr;
}
else
@ -1100,10 +1099,10 @@ void IMergeTreeDataPart::loadRowsCount()
else if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || part_type == Type::COMPACT || parent_part)
{
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String v;
auto buf = metadata_cache->readOrSetMeta(volume->getDisk(), "count.txt", v);
auto buf = metadata_cache->readOrSet(volume->getDisk(), "count.txt", _);
if (!buf)
throw Exception("No count.txt in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART);
@ -1229,10 +1228,10 @@ void IMergeTreeDataPart::loadTTLInfos()
std::unique_ptr<SeekableReadBuffer> in;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String v;
in = metadata_cache->readOrSetMeta(volume->getDisk(), "ttl.txt", v);
in = metadata_cache->readOrSet(volume->getDisk(), "ttl.txt", _);
exists = in != nullptr;
}
else
@ -1283,10 +1282,10 @@ void IMergeTreeDataPart::loadUUID()
std::unique_ptr<SeekableReadBuffer> in;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String v;
in = metadata_cache->readOrSetMeta(volume->getDisk(), UUID_FILE_NAME, v);
in = metadata_cache->readOrSet(volume->getDisk(), UUID_FILE_NAME, _);
exists = in != nullptr;
}
else
@ -1325,10 +1324,10 @@ void IMergeTreeDataPart::loadColumns(bool require)
bool exists = false;
std::unique_ptr<SeekableReadBuffer> in;
#if USE_ROCKSDB
String _;
if (use_metadata_cache)
{
String v;
in = metadata_cache->readOrSetMeta(volume->getDisk(), "columns.txt", v);
in = metadata_cache->readOrSet(volume->getDisk(), "columns.txt", _);
exists = in != nullptr;
}
else
@ -1490,10 +1489,10 @@ void IMergeTreeDataPart::modifyAllMetadataCaches(ModifyCacheType type, bool incl
switch (type)
{
case ModifyCacheType::PUT:
metadata_cache->setMetas(volume->getDisk(), files);
metadata_cache->batchSet(volume->getDisk(), files);
break;
case ModifyCacheType::DROP:
metadata_cache->dropMetas(files);
metadata_cache->batchDelete(files);
break;
}
}

View File

@ -206,6 +206,7 @@ MergeTreeData::MergeTreeData(
, parts_mover(this)
, background_operations_assignee(*this, BackgroundJobsAssignee::Type::DataProcessing, getContext())
, background_moves_assignee(*this, BackgroundJobsAssignee::Type::Moving, getContext())
, use_metadata_cache(getSettings()->use_metadata_cache)
{
context_->getGlobalContext()->initializeBackgroundExecutorsIfNeeded();
@ -318,13 +319,13 @@ MergeTreeData::MergeTreeData(
"'min_rows_for_compact_part' and 'min_bytes_for_compact_part' will be ignored.", reason);
#if !USE_ROCKSDB
if (settings->use_metadata_cache)
if (use_metadata_cache)
{
LOG_WARNING(
log,
"Can't use merge tree metadata cache if clickhouse was compiled without rocksdb."
"set use_metadata_cache to false forcely");
settings->use_metadata_cache = false;
use_metadata_cache = false;
}
#endif

View File

@ -972,6 +972,7 @@ protected:
/// And for ReplicatedMergeTree we don't have LogEntry type for this operation.
BackgroundJobsAssignee background_operations_assignee;
BackgroundJobsAssignee background_moves_assignee;
bool use_metadata_cache;
/// Strongly connected with two fields above.
/// Every task that is finished will ask to assign a new one into an executor.

View File

@ -192,12 +192,11 @@ MergeTreeDataPartCompact::~MergeTreeDataPartCompact()
removeIfNeeded();
}
/// Do not cache mark file, because cache other meta files is enough to speed up loading.
/// We don't cache the mark file, because caching the other metadata files is enough to speed up loading.
void MergeTreeDataPartCompact::appendFilesOfIndexGranularity(Strings& /* files */) const
{
}
/// Find all related metadata files and collect them for modification.
Strings MergeTreeDataPartCompact::getIndexGranularityFiles() const
{
auto marks_file = index_granularity_info.getMarksFilePath("data");

View File

@ -268,7 +268,7 @@ void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_col
}
}
// Do not cache mark files of part, because cache other meta files is enough to speed up loading.
/// We don't cache the part's mark files, because caching the other metadata files is enough to speed up loading.
void MergeTreeDataPartWide::appendFilesOfIndexGranularity(Strings& /* files */) const
{
}

View File

@ -373,7 +373,7 @@ void MergeTreePartition::load(const MergeTreeData & storage, const DiskPtr & dis
if (metadata_cache)
{
String v;
file = metadata_cache->readOrSetMeta(disk, "partition.dat", v);
file = metadata_cache->readOrSet(disk, "partition.dat", v);
}
else
{

View File

@ -24,7 +24,7 @@ namespace DB
{
std::unique_ptr<SeekableReadBuffer>
PartMetadataCache::readOrSetMeta(const DiskPtr & disk, const String & file_name, String & value)
PartMetadataCache::readOrSet(const DiskPtr & disk, const String & file_name, String & value)
{
String file_path = fs::path(getFullRelativePath()) / file_name;
auto status = cache->get(file_path, value);
@ -50,7 +50,7 @@ PartMetadataCache::readOrSetMeta(const DiskPtr & disk, const String & file_name,
return std::make_unique<ReadBufferFromString>(value);
}
void PartMetadataCache::setMetas(const DiskPtr & disk, const Strings & file_names)
void PartMetadataCache::batchSet(const DiskPtr & disk, const Strings & file_names)
{
String text;
String read_value;
@ -76,7 +76,7 @@ void PartMetadataCache::setMetas(const DiskPtr & disk, const Strings & file_name
}
}
void PartMetadataCache::dropMetas(const Strings & file_names)
void PartMetadataCache::batchDelete(const Strings & file_names)
{
for (const auto & file_name : file_names)
{
@ -93,7 +93,7 @@ void PartMetadataCache::dropMetas(const Strings & file_names)
}
}
void PartMetadataCache::setMeta(const String & file_name, const String & value)
void PartMetadataCache::set(const String & file_name, const String & value)
{
String file_path = fs::path(getFullRelativePath()) / file_name;
String read_value;
@ -116,7 +116,6 @@ void PartMetadataCache::getFilesAndCheckSums(Strings & files, std::vector<uint12
{
String prefix = fs::path(getFullRelativePath()) / "";
Strings values;
values.reserve(files.capacity());
cache->getByPrefix(prefix, files, values);
size_t size = files.size();
for (size_t i = 0; i < size; ++i)

View File

@ -30,11 +30,11 @@ public:
}
std::unique_ptr<SeekableReadBuffer>
readOrSetMeta(const DiskPtr & disk, const String & file_name, String & value);
void setMetas(const DiskPtr & disk, const Strings & file_names);
void dropMetas(const Strings & file_names);
void setMeta(const String & file_name, const String & value);
void getFilesAndCheckSums(Strings & file_names, std::vector<uint128> & checksums) const;
readOrSet(const DiskPtr & disk, const String & file_name, String & value);
void batchSet(const DiskPtr & disk, const Strings & file_names);
void batchDelete(const Strings & file_names);
void set(const String & file_name, const String & value);
void getFilesAndCheckSums(Strings & files, std::vector<uint128> & checksums) const;
private:
std::string getFullRelativePath() const;

View File

@ -82,8 +82,7 @@ static bool extractKeyImpl(const IAST & elem, String & res, bool & precise)
}
/** Retrieve from the query a condition of the form `key= 'key'`, from conjunctions in the WHERE clause.
*/
/// Retrieve from the query a condition of the form `key= 'key'`, from conjunctions in the WHERE clause.
static String extractKey(const ASTPtr & query, bool& precise)
{
const auto & select = query->as<ASTSelectQuery &>();

View File

@ -12,8 +12,7 @@ namespace DB
class Context;
/** Implements `merge_tree_metadata_cache` system table, which allows you to view the metadata cache data in rocksdb for debugging purposes.
*/
/// Implements `merge_tree_metadata_cache` system table, which allows you to view the metadata cache data in rocksdb for testing purposes.
class StorageSystemMergeTreeMetadataCache : public shared_ptr_helper<StorageSystemMergeTreeMetadataCache>, public IStorageSystemOneBlock<StorageSystemMergeTreeMetadataCache>
{
friend struct shared_ptr_helper<StorageSystemMergeTreeMetadataCache>;