Merge pull request #34935 from xlwh/fix_comments_style

Fix some code comments style
Kseniia Sumarokova, 2022-02-28 12:12:47 +01:00, committed by GitHub
commit 7edef29f78
18 changed files with 40 additions and 40 deletions

View File

@@ -16,7 +16,7 @@ namespace ErrorCodes
 }
-/// For possible values for template parameters, see AggregateFunctionMinMaxAny.h
+/// For possible values for template parameters, see 'AggregateFunctionMinMaxAny.h'.
 template <typename ResultData, typename ValueData>
 struct AggregateFunctionArgMinMaxData
 {

View File

@@ -144,7 +144,7 @@ void ExternalDataSourceCache::initOnce(ContextPtr context, const String & root_d
 local_cache_bytes_read_before_flush = bytes_read_before_flush_;
 lru_caches = std::make_unique<RemoteFileCacheType>(limit_size_);
-/// create if root_dir not exists
+/// Create if root_dir not exists.
 if (!fs::exists(fs::path(root_dir)))
 {
 fs::create_directories(fs::path(root_dir));
@@ -156,7 +156,7 @@ void ExternalDataSourceCache::initOnce(ContextPtr context, const String & root_d
 String ExternalDataSourceCache::calculateLocalPath(IRemoteFileMetadataPtr metadata) const
 {
-// add version into the full_path, and not block to read the new version
+// Add version into the full_path, and not block to read the new version.
 String full_path = metadata->getName() + ":" + metadata->remote_path + ":" + metadata->getVersion();
 UInt128 hashcode = sipHash128(full_path.c_str(), full_path.size());
 String hashcode_str = getHexUIntLowercase(hashcode);
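
Reviewer's note on what this hunk's comment describes: the cache key is the concatenated name, remote path, and version, hashed to a stable directory name, so a version bump lands in a fresh path and readers of the old version are not blocked. A minimal standalone sketch of that idea (std::hash stands in for sipHash128, and the single-level layout is illustrative only, not ClickHouse's actual one):

```cpp
#include <filesystem>
#include <functional>
#include <iomanip>
#include <sstream>
#include <string>

/// Derive a stable on-disk location for a cached remote file by hashing its
/// identity (name, remote path, version). std::hash stands in for sipHash128.
std::string calculateLocalPathSketch(
    const std::string & root_dir,
    const std::string & name,
    const std::string & remote_path,
    const std::string & version)
{
    std::string full_path = name + ":" + remote_path + ":" + version;
    size_t hashcode = std::hash<std::string>{}(full_path);

    std::ostringstream hex;
    hex << std::hex << std::setfill('0') << std::setw(16) << hashcode;

    /// The version participates in the hash, so a new remote version maps to
    /// a new directory and readers of the old version are never blocked.
    return (std::filesystem::path(root_dir) / hex.str()).string();
}
```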
@@ -166,7 +166,7 @@ String ExternalDataSourceCache::calculateLocalPath(IRemoteFileMetadataPtr metada
 std::pair<std::unique_ptr<LocalFileHolder>, std::unique_ptr<ReadBuffer>> ExternalDataSourceCache::createReader(
 ContextPtr context, IRemoteFileMetadataPtr remote_file_metadata, std::unique_ptr<ReadBuffer> & read_buffer)
 {
-// If something is wrong on startup, rollback to read from the original ReadBuffer
+// If something is wrong on startup, rollback to read from the original ReadBuffer.
 if (!isInitialized())
 {
 LOG_ERROR(log, "ExternalDataSourceCache has not been initialized");
@@ -180,7 +180,7 @@ std::pair<std::unique_ptr<LocalFileHolder>, std::unique_ptr<ReadBuffer>> Externa
 auto cache = lru_caches->get(local_path);
 if (cache)
 {
-// the remote file has been updated, need to redownload
+// The remote file has been updated, need to redownload.
 if (!cache->value().isValid() || cache->value().isModified(remote_file_metadata))
 {
 LOG_TRACE(
@@ -201,7 +201,7 @@ std::pair<std::unique_ptr<LocalFileHolder>, std::unique_ptr<ReadBuffer>> Externa
 if (!fs::exists(local_path))
 fs::create_directories(local_path);
-// cache is not found or is invalid, try to remove it at first
+// Cache is not found or is invalid, try to remove it at first.
 lru_caches->tryRemove(local_path);
 auto new_cache_controller

View File

@@ -78,7 +78,7 @@ protected:
 ExternalDataSourceCache();
 private:
-// root directory of local cache for remote filesystem
+// Root directory of local cache for remote filesystem.
 String root_dir;
 size_t local_cache_bytes_read_before_flush = 0;

View File

@@ -18,7 +18,7 @@ public:
 // serialize
 virtual String toString() const = 0;
-// used for comparing two file metadatas are the same or not.
+// Used for comparing two file metadatas are the same or not.
 virtual String getVersion() const = 0;
 String remote_path;

View File

@@ -31,9 +31,9 @@ std::shared_ptr<RemoteCacheController> RemoteCacheController::recover(const std:
 auto cache_controller = std::make_shared<RemoteCacheController>(nullptr, local_path_, 0);
 if (cache_controller->file_status != DOWNLOADED)
 {
-// do not load this invalid cached file and clear it. the clear action is in
+// Do not load this invalid cached file and clear it. the clear action is in
 // ExternalDataSourceCache::recoverTask(), because deleting directories during iteration will
-// cause unexpected behaviors
+// cause unexpected behaviors.
 LOG_INFO(log, "Recover cached file failed. local path:{}", local_path_.string());
 return nullptr;
 }
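
The comment being polished here carries a real constraint: the invalid cache directory cannot be deleted inside recover(), because ExternalDataSourceCache::recoverTask() is still iterating over those directories. A hedged sketch of the usual collect-then-delete pattern (the metadata.txt check is a stand-in, not the real recovery logic):

```cpp
#include <filesystem>
#include <vector>

namespace fs = std::filesystem;

/// Erasing entries while a directory_iterator is live can invalidate the
/// iteration, so invalid cache directories are recorded first and removed
/// only after the scan completes.
void recoverTaskSketch(const fs::path & root_dir)
{
    std::vector<fs::path> invalid_paths;
    for (const auto & entry : fs::directory_iterator(root_dir))
    {
        if (!entry.is_directory())
            continue;
        /// Stand-in for RemoteCacheController::recover() returning nullptr.
        bool recovered = fs::exists(entry.path() / "metadata.txt");
        if (!recovered)
            invalid_paths.push_back(entry.path());
    }
    /// Safe now: the iteration above has finished.
    for (const auto & path : invalid_paths)
        fs::remove_all(path);
}
```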
@@ -76,7 +76,7 @@ RemoteCacheController::RemoteCacheController(
 , local_cache_bytes_read_before_flush(cache_bytes_before_flush_)
 , current_offset(0)
 {
-// on recover, file_metadata_ptr is null, but it will be allocated after loading from metadata.txt
+// On recover, file_metadata_ptr is null, but it will be allocated after loading from metadata.txt
 // when we allocate a whole new file cachefile_metadata_ptr must not be null.
 if (file_metadata_ptr)
 {
@@ -106,14 +106,14 @@ void RemoteCacheController::waitMoreData(size_t start_offset_, size_t end_offset
 std::unique_lock lock{mutex};
 if (file_status == DOWNLOADED)
 {
-// finish reading
+// Finish reading.
 if (start_offset_ >= current_offset)
 {
 lock.unlock();
 return;
 }
 }
-else // block until more data is ready
+else // Block until more data is ready.
 {
 if (current_offset >= end_offset_)
 {
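
For readers skimming the diff, the control flow around these comments is a classic bounded wait: the reader blocks until the background download has advanced current_offset past the range it needs, or the whole file is downloaded. A compressed sketch of that protocol (not the real class; the field names follow the header shown later in this commit):

```cpp
#include <condition_variable>
#include <mutex>

/// A reader sleeps on more_data_signal until enough data has been flushed by
/// the background download task, or the download has finished entirely.
struct DownloadStateSketch
{
    std::mutex mutex;
    std::condition_variable more_data_signal;
    size_t current_offset = 0;
    bool downloaded = false;

    void waitMoreData(size_t end_offset)
    {
        std::unique_lock lock{mutex};
        more_data_signal.wait(lock, [&] { return downloaded || current_offset >= end_offset; });
    }

    /// Called by the background download task after flushing a chunk.
    void appendData(size_t bytes, bool finished)
    {
        {
            std::lock_guard lock{mutex};
            current_offset += bytes;
            downloaded = finished;
        }
        more_data_signal.notify_all();
    }
};
```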

View File

@@ -29,7 +29,7 @@ public:
 IRemoteFileMetadataPtr file_metadata_, const std::filesystem::path & local_path_, size_t cache_bytes_before_flush_);
 ~RemoteCacheController();
-// recover from local disk
+// Recover from local disk.
 static std::shared_ptr<RemoteCacheController> recover(const std::filesystem::path & local_path);
 /**
@@ -40,9 +40,9 @@ public:
 void close();
 /**
-* called in LocalCachedFileReader read(), the reading process would be blocked until
+* Called in LocalCachedFileReader read(), the reading process would be blocked until
 * enough data be downloaded.
-* If the file has finished download, the process would unblocked
+* If the file has finished download, the process would unblocked.
 */
 void waitMoreData(size_t start_offset_, size_t end_offset_);
@@ -69,7 +69,7 @@ public:
 void startBackgroundDownload(std::unique_ptr<ReadBuffer> in_readbuffer_, BackgroundSchedulePool & thread_pool);
 private:
-// flush file and status information
+// Flush file and status information.
 void flush(bool need_flush_status = false);
 BackgroundSchedulePool::TaskHolder download_task_holder;
@@ -79,7 +79,7 @@ private:
 std::condition_variable more_data_signal;
 String metadata_class;
-LocalFileStatus file_status = TO_DOWNLOAD; // for tracking download process
+LocalFileStatus file_status = TO_DOWNLOAD; // For tracking download process.
 IRemoteFileMetadataPtr file_metadata_ptr;
 std::filesystem::path local_path;

View File

@@ -219,7 +219,7 @@ namespace
 return distributed_header;
 }
-/// remote_error argument is used to decide whether some errors should be
+/// 'remote_error' argument is used to decide whether some errors should be
 /// ignored or not, in particular:
 ///
 /// - ATTEMPT_TO_READ_AFTER_EOF should not be ignored
@@ -399,7 +399,7 @@ void StorageDistributedDirectoryMonitor::flushAllData()
 {
 processFiles(files);
-/// Update counters
+/// Update counters.
 getFiles();
 }
 }
@@ -475,7 +475,7 @@ void StorageDistributedDirectoryMonitor::run()
 break;
 }
-/// Update counters
+/// Update counters.
 getFiles();
 if (!quit && do_sleep)
@@ -491,8 +491,8 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri
 const auto & shards_info = cluster->getShardsInfo();
 const auto & shards_addresses = cluster->getShardsAddresses();
-/// check new format shard{shard_index}_replica{replica_index}
-/// (shard_index and replica_index starts from 1)
+/// Check new format shard{shard_index}_replica{replica_index}
+/// (shard_index and replica_index starts from 1).
 if (address.shard_index != 0)
 {
 if (!address.replica_index)
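
The reworded comment above refers to directory names of the form shard{shard_index}_replica{replica_index}. As an illustration only (ClickHouse parses these elsewhere), a tiny parser for that naming scheme:

```cpp
#include <cstdio>
#include <string>

/// Parse "shard{shard_index}_replica{replica_index}", both indexes starting
/// from 1. Returns false for names that do not match (for example, the older
/// address-based directory format).
bool parseShardReplica(const std::string & name, size_t & shard_index, size_t & replica_index)
{
    shard_index = 0;
    replica_index = 0;
    /// sscanf reports how many placeholders matched; the replica suffix is
    /// optional, so a shard-only match also counts.
    int matched = std::sscanf(name.c_str(), "shard%zu_replica%zu", &shard_index, &replica_index);
    return matched >= 1 && shard_index != 0;
}
```

With this sketch, "shard1_replica2" yields shard_index=1, replica_index=2, while a bare "shard1" leaves replica_index at 0, mirroring the `!address.replica_index` check in the hunk above.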
@@ -511,7 +511,7 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri
 return shard_info.per_replica_pools[address.replica_index - 1];
 }
-/// existing connections pool have a higher priority
+/// Existing connections pool have a higher priority.
 for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index)
 {
 const Cluster::Addresses & replicas_addresses = shards_addresses[shard_index];
@@ -1152,7 +1152,7 @@ void StorageDistributedDirectoryMonitor::markAsSend(const std::string & file_pat
 bool StorageDistributedDirectoryMonitor::maybeMarkAsBroken(const std::string & file_path, const Exception & e)
 {
-/// mark file as broken if necessary
+/// Mark file as broken if necessary.
 if (isFileBrokenErrorCode(e.code(), e.isRemoteException()))
 {
 markAsBroken(file_path);
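
The 'remote_error' comment earlier in this file pairs with this hunk: maybeMarkAsBroken() delegates to isFileBrokenErrorCode(e.code(), e.isRemoteException()). A hedged sketch of such a predicate; the numeric codes are illustrative stand-ins, and only the ATTEMPT_TO_READ_AFTER_EOF behaviour is taken from the comment above:

```cpp
/// Not the real isFileBrokenErrorCode() from DirectoryMonitor.cpp; placeholder
/// error-code numbers for illustration.
bool isFileBrokenErrorCodeSketch(int code, bool remote_error)
{
    const int ATTEMPT_TO_READ_AFTER_EOF = 32; /// placeholder, not a real ErrorCodes value
    const int CHECKSUM_DOESNT_MATCH = 40;     /// placeholder, not a real ErrorCodes value

    /// A local EOF means the batch file itself is truncated and thus broken;
    /// the same code raised by the remote side says nothing about the file.
    if (code == ATTEMPT_TO_READ_AFTER_EOF)
        return !remote_error;
    return code == CHECKSUM_DOESNT_MATCH;
}
```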

View File

@@ -52,7 +52,7 @@ public:
 static std::shared_ptr<ISource> createSourceFromFile(const String & file_name);
-/// For scheduling via DistributedBlockOutputStream
+/// For scheduling via DistributedBlockOutputStream.
 bool addAndSchedule(size_t file_size, size_t ms);
 struct InternalStatus

View File

@@ -45,7 +45,7 @@ public:
 private:
 friend class DirectoryWatcherBase;
-/// Here must pass by value, otherwise will lead to stack-use-of-scope
+/// Here must pass by value, otherwise will lead to stack-use-of-scope.
 void onItemAdded(DirectoryWatcherBase::DirectoryEvent ev);
 void onItemRemoved(DirectoryWatcherBase::DirectoryEvent ev);
 void onItemModified(DirectoryWatcherBase::DirectoryEvent ev);
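
The "stack-use-of-scope" this comment warns about is AddressSanitizer's stack-use-after-scope: these handlers can run after the watcher-side scope that produced the event has exited, so a reference parameter could point at a dead stack frame. A toy sketch of the hazard being avoided (hypothetical types, not the real DirectoryWatcherBase):

```cpp
#include <functional>
#include <string>
#include <vector>

struct DirectoryEventSketch { std::string path; int type; };

std::vector<std::function<void()>> pending_callbacks;

/// Work is queued here and runs after this scope has exited. A by-value
/// capture plus a by-value handler parameter copy the event, so the queued
/// call never touches the dead stack frame.
void produceEventSketch(void (*onItemAdded)(DirectoryEventSketch))
{
    DirectoryEventSketch ev{"/tmp/data.csv", 1}; /// stack-local, dies when this function returns
    pending_callbacks.push_back([onItemAdded, ev] { onItemAdded(ev); }); /// safe: ev is copied
}
```

Had the handler signature been void(const DirectoryEventSketch &), the deferred call would dereference the destroyed local, which is exactly what the fixed comment is guarding against.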

View File

@@ -52,7 +52,7 @@ private:
 Block virtual_header;
 /// The start pos and end pos of files responsible by this stream,
-/// does not include end
+/// does not include end.
 size_t start;
 size_t end;
 };

View File

@@ -56,7 +56,7 @@ public:
 enum class FileStatus
 {
-OPEN, /// first time open file after table start up
+OPEN, /// First time open file after table start up.
 NO_CHANGE,
 UPDATED,
 REMOVED,
@@ -83,7 +83,7 @@ public:
 {
 InodeToFileMeta meta_by_inode;
 FileNameToContext context_by_name;
-/// file names without path
+/// File names without path.
 Names file_names;
 };
@@ -199,7 +199,7 @@ private:
 /// Used in shutdown()
 void serialize() const;
-/// Used in FileSource closeFileAndStoreMeta(file_name);
+/// Used in FileSource closeFileAndStoreMeta(file_name).
 void serialize(UInt64 inode, const FileMeta & file_meta) const;
 void deserialize();

View File

@@ -150,7 +150,7 @@ private:
 std::unique_ptr<ReadBuffer> read_buf;
 std::unique_ptr<QueryPipeline> pipeline;
 std::unique_ptr<PullingPipelineExecutor> reader;
-/// onCancel and generate can be called concurrently
+/// onCancel and generate can be called concurrently.
 std::mutex reader_mutex;
 String current_path;

View File

@@ -12,13 +12,13 @@ void attach(ContextPtr context, IDatabase & system_database, const String & tabl
 assert(system_database.getDatabaseName() == DatabaseCatalog::SYSTEM_DATABASE);
 if (system_database.getUUID() == UUIDHelpers::Nil)
 {
-/// Attach to Ordinary database
+/// Attach to Ordinary database.
 auto table_id = StorageID(DatabaseCatalog::SYSTEM_DATABASE, table_name);
 system_database.attachTable(context, table_name, StorageT::create(table_id, std::forward<StorageArgs>(args)...));
 }
 else
 {
-/// Attach to Atomic database
+/// Attach to Atomic database.
 /// NOTE: UUIDs are not persistent, but it's ok since no data are stored on disk for these storages
 /// and path is actually not used
 auto table_id = StorageID(DatabaseCatalog::SYSTEM_DATABASE, table_name, UUIDHelpers::generateV4());

View File

@@ -10,10 +10,10 @@ namespace DB
 /* remote ('address', db, table) - creates a temporary StorageDistributed.
 * To get the table structure, a DESC TABLE request is made to the remote server.
-* For example
+* For example:
 * SELECT count() FROM remote('example01-01-1', merge, hits) - go to `example01-01-1`, in the merge database, the hits table.
 * An expression that generates a set of shards and replicas can also be specified as the host name - see below.
-* Also, there is a cluster version of the function: cluster('existing_cluster_name', 'db', 'table')
+* Also, there is a cluster version of the function: cluster('existing_cluster_name', 'db', 'table').
 */
 class TableFunctionRemote : public ITableFunction
 {

View File

@@ -13,7 +13,7 @@ namespace DB
 class Context;
-/* s3(source, [access_key_id, secret_access_key,] format, structure[, compression]) - creates a temporary storage for a file in S3
+/* s3(source, [access_key_id, secret_access_key,] format, structure[, compression]) - creates a temporary storage for a file in S3.
 */
 class TableFunctionS3 : public ITableFunction
 {

View File

@@ -13,7 +13,7 @@ namespace DB
 class Context;
 /**
-* s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure)
+* s3cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure)
 * A table function, which allows to process many files from S3 on a specific cluster
 * On initiator it creates a connection to _all_ nodes in cluster, discloses asterics
 * in S3 file path and dispatch each file dynamically.

View File

@@ -10,7 +10,7 @@ namespace DB
 class Context;
-/* url(source, format[, structure, compression]) - creates a temporary storage from url
+/* url(source, format[, structure, compression]) - creates a temporary storage from url.
 */
 class TableFunctionURL : public ITableFunctionFileLike
 {

View File

@@ -5,7 +5,7 @@
 namespace DB
 {
 /* values(structure, values...) - creates a temporary storage filling columns with values
-* values is case-insensitive table function
+* values is case-insensitive table function.
 */
 class TableFunctionValues : public ITableFunction
 {