Fix typos with new codespell

Antonio Andelic 2022-09-02 08:54:48 +00:00
parent 319d8b00a7
commit e64436fef3
42 changed files with 46 additions and 41 deletions

@@ -15,7 +15,7 @@
*
* Allow to search for next character from the set of 'symbols...' in a string.
* It is similar to 'strpbrk', 'strcspn' (and 'strchr', 'memchr' in the case of one symbol and '\0'),
- * but with the following differencies:
+ * but with the following differences:
* - works with any memory ranges, including containing zero bytes;
* - doesn't require terminating zero byte: end of memory range is passed explicitly;
* - if not found, returns pointer to end instead of nullptr;
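
For illustration, a minimal scalar sketch of the contract this comment describes (hypothetical helper name and signature; the real function is SIMD-optimized):

#include <cstddef>
#include <cstring>

// Scan the explicit range [begin, end) for any byte from the set `symbols`
// (passed with its length, so zero bytes in the haystack are ordinary data),
// returning `end` rather than nullptr when nothing matches.
inline const char * find_first_symbols_sketch(
    const char * begin, const char * end,
    const char * symbols, size_t num_symbols)
{
    for (const char * pos = begin; pos != end; ++pos)
        if (std::memchr(symbols, *pos, num_symbols) != nullptr)
            return pos;
    return end;
}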

@@ -63,7 +63,7 @@
* Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
* so we don't pay attention to using non-temporary stores.
*
- * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most benefitial,
+ * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
* even comparing to non-temporary aligned unrolled stores even with the most wide registers.
*
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
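
For reference, the "rep movsb" pattern discussed here, as x86-64 inline asm; an illustrative sketch for GCC/Clang, not the variant this file actually ships:

#include <cstddef>

// Copy n bytes with a single "rep movsb"; on CPUs advertising ERMS the
// microcode makes this competitive with wide unrolled vector copies.
static inline void * rep_movsb_copy(void * dst, const void * src, size_t n)
{
    void * ret = dst;
    asm volatile("rep movsb"
                 : "+D" (dst), "+S" (src), "+c" (n)
                 :
                 : "memory");
    return ret;
}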

@@ -101,7 +101,7 @@
#endif
/*
- * The pcg_extras namespace contains some support code that is likley to
+ * The pcg_extras namespace contains some support code that is likely to
* be useful for a variety of RNGs, including:
* - 128-bit int support for platforms where it isn't available natively
* - bit twiddling operations

@@ -22,7 +22,7 @@
/*
* This code provides a a C++ class that can provide 128-bit (or higher)
* integers. To produce 2K-bit integers, it uses two K-bit integers,
- * placed in a union that allowes the code to also see them as four K/2 bit
+ * placed in a union that allows the code to also see them as four K/2 bit
* integers (and access them either directly name, or by index).
*
* It may seem like we're reinventing the wheel here, because several
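
A rough sketch of the layout that description refers to, assuming K = 64 (hypothetical type, not pcg_extras' actual template):

#include <cstdint>

// A 2K-bit value stored as two K-bit halves, which the union also exposes as
// four K/2-bit quarters; which index holds the low word depends on endianness.
union UInt128Pieces
{
    uint64_t halves[2];
    uint32_t quarters[4];
};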

@@ -723,7 +723,7 @@ bool Client::processWithFuzzing(const String & full_query)
// queries, for lack of a better solution.
// There is also a problem that fuzzer substitutes positive Int64
// literals or Decimal literals, which are then parsed back as
- // UInt64, and suddenly duplicate alias substitition starts or stops
+ // UInt64, and suddenly duplicate alias substitution starts or stops
// working (ASTWithAlias::formatImpl) or something like that.
// So we compare not even the first and second formatting of the
// query, but second and third.

@@ -67,7 +67,7 @@ Run this tool inside your git repository. It will create .tsv files that can be
The tool can process large enough repositories in a reasonable time.
It has been tested on:
- ClickHouse: 31 seconds; 3 million rows;
- - LLVM: 8 minues; 62 million rows;
+ - LLVM: 8 minutes; 62 million rows;
- Linux - 12 minutes; 85 million rows;
- Chromium - 67 minutes; 343 million rows;
(the numbers as of Sep 2020)

@@ -557,7 +557,7 @@ void Connection::sendQuery(
/// Send correct hash only for !INITIAL_QUERY, due to:
/// - this will avoid extra protocol complexity for simplest cases
/// - there is no need in hash for the INITIAL_QUERY anyway
- /// (since there is no secure/unsecure changes)
+ /// (since there is no secure/non-secure changes)
if (client_info && !cluster_secret.empty() && client_info->query_kind != ClientInfo::QueryKind::INITIAL_QUERY)
{
#if USE_SSL

@@ -41,7 +41,7 @@ HedgedConnectionsFactory::HedgedConnectionsFactory(
HedgedConnectionsFactory::~HedgedConnectionsFactory()
{
/// Stop anything that maybe in progress,
- /// to avoid interfer with the subsequent connections.
+ /// to avoid interference with the subsequent connections.
///
/// I.e. some replcas may be in the establishing state,
/// this means that hedged connection is waiting for TablesStatusResponse,

@@ -64,7 +64,7 @@ struct IntervalKind
const char * toNameOfFunctionExtractTimePart() const;
/// Converts the string representation of an interval kind to its IntervalKind equivalent.
- /// Returns false if the conversion unsucceeded.
+ /// Returns false if the conversion did not succeed.
/// For example, `IntervalKind::tryParseString('second', result)` returns `result` equals `IntervalKind::Kind::Second`.
static bool tryParseString(const std::string & kind, IntervalKind::Kind & result);
};

@@ -33,7 +33,7 @@ public:
* max_protected_size shows how many of the most frequently used entries will not be evicted after a sequential scan.
* max_protected_size == 0 means that the default protected size is equal to half of the total max size.
*/
- /// TODO: construct from special struct with cache policy parametrs (also with max_protected_size).
+ /// TODO: construct from special struct with cache policy parameters (also with max_protected_size).
SLRUCachePolicy(size_t max_size_, size_t max_elements_size_ = 0, double size_ratio = 0.5, OnWeightLossFunction on_weight_loss_function_ = {})
: max_protected_size(max_size_ * std::min(1.0, size_ratio))
, max_size(max_size_)

@@ -31,7 +31,7 @@ inline UInt64 clock_gettime_ns_adjusted(UInt64 prev_time, clockid_t clock_type =
}
/** Differs from Poco::Stopwatch only by using 'clock_gettime' instead of 'gettimeofday',
- * returns nanoseconds instead of microseconds, and also by other minor differencies.
+ * returns nanoseconds instead of microseconds, and also by other minor differences.
*/
class Stopwatch
{
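
A minimal sketch of the clock_gettime pattern the comment describes (assumed shape; judging by the prev_time parameter above, the real clock_gettime_ns_adjusted additionally clamps the result so time never appears to step backwards):

#include <cstdint>
#include <ctime>

// Read a POSIX clock and fold the result into a single nanosecond count,
// instead of gettimeofday's microseconds.
inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
{
    timespec ts{};
    clock_gettime(clock_type, &ts);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000ULL + static_cast<uint64_t>(ts.tv_nsec);
}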

@@ -497,7 +497,7 @@ private:
/// last index of offsets that was not processed
size_t last;
- /// limit for adding to hashtable. In worst case with case insentive search, the table will be filled at most as half
+ /// limit for adding to hashtable. In worst case with case insensitive search, the table will be filled at most as half
static constexpr size_t small_limit = VolnitskyTraits::hash_size / 8;
public:

@@ -58,7 +58,7 @@ Fuzzing data consists of:
else:
read_key()
if (7):
- read_nonce (simillar to read_key)
+ read_nonce (similar to read_key)
if (8):
set current_key

@@ -27,7 +27,7 @@ enum SnapshotVersion : uint8_t
static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V5;
- /// What is stored in binary shapsnot
+ /// What is stored in binary snapshot
struct SnapshotDeserializationResult
{
/// Storage

@@ -2192,7 +2192,7 @@ void KeeperStorage::rollbackRequest(int64_t rollback_zxid, bool allow_missing)
}
catch (...)
{
- LOG_FATAL(&Poco::Logger::get("KeeperStorage"), "Failed to rollback log. Terminating to avoid incosistencies");
+ LOG_FATAL(&Poco::Logger::get("KeeperStorage"), "Failed to rollback log. Terminating to avoid inconsistencies");
std::terminate();
}
}

@@ -53,7 +53,7 @@ public:
/// Session was actually removed
bool remove(int64_t session_id);
- /// Update session expiry time (must be called on hearbeats)
+ /// Update session expiry time (must be called on heartbeats)
void addNewSessionOrUpdate(int64_t session_id, int64_t timeout_ms);
/// Get all expired sessions

@@ -1339,7 +1339,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
nuraft::async_result<bool>::handler_type when_done = [&snapshot_created] (bool & ret, nuraft::ptr<std::exception> &/*exception*/)
{
snapshot_created = ret;
- std::cerr << "Snapshot finised\n";
+ std::cerr << "Snapshot finished\n";
};
state_machine->create_snapshot(s, when_done);

@@ -149,7 +149,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
\
M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard; if set to 1 - SELECT is executed on each shard; if set to 2 - SELECT and INSERT are executed on each shard", 0) \
M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed queries (shards will process query up to the Complete stage, initiator just proxies the data from the shards). If 2 the initiator will apply ORDER BY and LIMIT stages (it is not in case when shard process query up to the Complete stage)", 0) \
- M(UInt64, distributed_push_down_limit, 1, "If 1, LIMIT will be applied on each shard separatelly. Usually you don't need to use it, since this will be done automatically if it is possible, i.e. for simple query SELECT FROM LIMIT.", 0) \
+ M(UInt64, distributed_push_down_limit, 1, "If 1, LIMIT will be applied on each shard separately. Usually you don't need to use it, since this will be done automatically if it is possible, i.e. for simple query SELECT FROM LIMIT.", 0) \
M(Bool, optimize_distributed_group_by_sharding_key, true, "Optimize GROUP BY sharding_key queries (by avoiding costly aggregation on the initiator server).", 0) \
M(UInt64, optimize_skip_unused_shards_limit, 1000, "Limit for number of sharding key values, turns off optimize_skip_unused_shards if the limit is reached", 0) \
M(Bool, optimize_skip_unused_shards, false, "Assumes that data is distributed by sharding_key. Optimization to skip unused shards if SELECT query filters by sharding_key.", 0) \

@@ -89,7 +89,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}},
{"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}},
{"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"},
- {"use_hedged_requests", false, true, "Enable Hedged Requests feature bu default"}}},
+ {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}},
{"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}},
{"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}},
{"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"},

@@ -153,7 +153,7 @@ enum class HandleKafkaErrorMode
{
DEFAULT = 0, // Ignore errors with threshold.
STREAM, // Put errors to stream in the virtual column named ``_error.
- /*FIXED_SYSTEM_TABLE, Put errors to in a fixed system table likey system.kafka_errors. This is not implemented now. */
+ /*FIXED_SYSTEM_TABLE, Put errors to in a fixed system table likely system.kafka_errors. This is not implemented now. */
/*CUSTOM_SYSTEM_TABLE, Put errors to in a custom system table. This is not implemented now. */
};

@@ -180,7 +180,7 @@ namespace detail
/** Returns array with UInt8 represent if key from in_keys array is in hierarchy of key from keys column.
* If value in result array is 1 that means key from in_keys array is in hierarchy of key from
- * keys array with same index, 0 therwise.
+ * keys array with same index, 0 otherwise.
* For getting hierarchy implementation uses getKeysHierarchy function.
*
* Not: keys size must be equal to in_keys_size.

@@ -118,7 +118,7 @@ MongoDBDictionarySource::MongoDBDictionarySource(
Poco::URI poco_uri(uri);
// Parse database from URI. This is required for correctness -- the
- // cursor is created using database name and colleciton name, so we have
+ // cursor is created using database name and collection name, so we have
// to specify them properly.
db = poco_uri.getPath();
// getPath() may return a leading slash, remove it.

@@ -244,7 +244,7 @@ void buildAttributeExpressionIfNeeded(
root->appendChild(expression_element);
}
- /** Transofrms single dictionary attribute to configuration
+ /** Transforms single dictionary attribute to configuration
* third_column UInt8 DEFAULT 2 EXPRESSION rand() % 100 * 77
* to
* <attribute>

@@ -124,7 +124,7 @@ public:
virtual ~IMetadataStorage() = default;
- /// ==== More specefic methods. Previous were almost general purpose. ====
+ /// ==== More specific methods. Previous were almost general purpose. ====
/// Read multiple metadata files into strings and return mapping from file_path -> metadata
virtual std::unordered_map<std::string, std::string> getSerializedMetadata(const std::vector<String> & file_paths) const = 0;

@@ -134,7 +134,7 @@ using FunctionArgumentDescriptors = std::vector<FunctionArgumentDescriptor>;
* (e.g. depending on result type or other trait).
* First, checks that number of arguments is as expected (including optional arguments).
* Second, checks that mandatory args present and have valid type.
- * Third, checks optional arguents types, skipping ones that are missing.
+ * Third, checks optional arguments types, skipping ones that are missing.
*
* Please note that if you have several optional arguments, like f([a, b, c]),
* only these calls are considered valid:

@@ -453,7 +453,7 @@ void optimizeMonotonousFunctionsInOrderBy(ASTSelectQuery * select_query, Context
return;
/// Do not apply optimization for Distributed and Merge storages,
- /// because we can't get the sorting key of their undelying tables
+ /// because we can't get the sorting key of their underlying tables
/// and we can break the matching of the sorting key for `read_in_order`
/// optimization by removing monotonous functions from the prefix of key.
if (result.is_remote_storage || (result.storage && result.storage->getName() == "Merge"))

@@ -55,7 +55,7 @@ void InsertQuerySettingsPushDownMatcher::visit(ASTSelectQuery & select_query, AS
insert_settings.push_back(setting);
else
{
- /// Do not ovewrite setting that was passed for INSERT
+ /// Do not overwrite setting that was passed for INSERT
/// by settings that was passed for SELECT
}
}

@@ -11,7 +11,7 @@ struct SettingChange;
class SettingsChanges;
/// Pushdown SETTINGS clause that goes after FORMAT to the SELECT query:
- /// (since settings after FORMAT parsed separatelly not in the ParserSelectQuery but in ParserQueryWithOutput)
+ /// (since settings after FORMAT parsed separately not in the ParserSelectQuery but in ParserQueryWithOutput)
///
/// SELECT 1 FORMAT Null SETTINGS max_block_size = 1 ->
/// SELECT 1 SETTINGS max_block_size = 1 FORMAT Null SETTINGS max_block_size = 1

@@ -39,7 +39,7 @@ SQLiteSource::SQLiteSource(
if (status != SQLITE_OK)
throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR,
- "Cannot prepate sqlite statement. Status: {}. Message: {}",
+ "Cannot prepare sqlite statement. Status: {}. Message: {}",
status, sqlite3_errstr(status));
compiled_statement = std::unique_ptr<sqlite3_stmt, StatementDeleter>(compiled_stmt, StatementDeleter());

@@ -513,7 +513,7 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::allJoin(JoinKind kind)
Columns lcols;
if (!left_to_right_key_remap.empty())
{
- /// If we have remapped columns, then we need to get values from right columns insead of defaults
+ /// If we have remapped columns, then we need to get values from right columns instead of defaults
const auto & indices = idx_map[0];
const auto & left_src = cursors[0]->getCurrent().getColumns();

@@ -1274,7 +1274,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
throw Exception{"Table doesn't have SAMPLE BY, cannot remove", ErrorCodes::BAD_ARGUMENTS};
}
- /// Collect default expressions for MODIFY and ADD comands
+ /// Collect default expressions for MODIFY and ADD commands
if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN)
{
if (command.default_expression)

@@ -810,7 +810,7 @@ void registerStorageKafka(StorageFactory & factory)
/** Arguments of engine is following:
* - Kafka broker list
* - List of topics
- * - Group ID (may be a constaint expression with a string result)
+ * - Group ID (may be a constraint expression with a string result)
* - Message format (string)
* - Row delimiter
* - Schema (optional, if the format supports it)

@@ -382,7 +382,7 @@ bool StorageLiveView::getNewBlocks()
BlocksMetadataPtr new_blocks_metadata = std::make_shared<BlocksMetadata>();
/// can't set mergeable_blocks here or anywhere else outside the writeIntoLiveView function
- /// as there could be a race codition when the new block has been inserted into
+ /// as there could be a race condition when the new block has been inserted into
/// the source table by the PushingToViews chain and this method
/// called before writeIntoLiveView function is called which can lead to
/// the same block added twice to the mergeable_blocks leading to

@@ -155,7 +155,7 @@ private:
* We use boost::circular_buffer as a container for queues not to do any allocations.
*
* Another nuisance that we faces with is than background operations always interact with an associated Storage.
- * So, when a Storage want to shutdown, it must wait until all its background operaions are finished.
+ * So, when a Storage want to shutdown, it must wait until all its background operations are finished.
*/
template <class Queue>
class MergeTreeBackgroundExecutor final : boost::noncopyable

@@ -16,7 +16,7 @@ struct MergeTreeDataPartTTLInfo
time_t max = 0;
/// This TTL was computed on completely expired part. It doesn't make sense
- /// to select such parts for TTL again. But make sense to recalcuate TTL
+ /// to select such parts for TTL again. But make sense to recalculate TTL
/// again for merge with multiple parts.
std::optional<bool> ttl_finished;
bool finished() const { return ttl_finished.value_or(false); }

@@ -279,7 +279,7 @@ private:
/// Very large queue entries may appear occasionally.
/// We cannot process MAX_MULTI_OPS at once because it will fail.
/// But we have to process more than one entry at once because otherwise lagged replicas keep up slowly.
- /// Let's start with one entry per transaction and icrease it exponentially towards MAX_MULTI_OPS.
+ /// Let's start with one entry per transaction and increase it exponentially towards MAX_MULTI_OPS.
/// It will allow to make some progress before failing and remain operational even in extreme cases.
size_t current_multi_batch_size = 1;

@@ -104,7 +104,7 @@ struct PartitionCommandResultInfo
using PartitionCommandsResultInfo = std::vector<PartitionCommandResultInfo>;
- /// Convert partition comands result to Source from single Chunk, which will be
+ /// Convert partition commands result to Source from single Chunk, which will be
/// used to print info to the user. Tries to create narrowest table for given
/// results. For example, if all commands were FREEZE commands, than
/// old_part_name column will be absent.

@@ -126,7 +126,7 @@ private:
static void assertCorrectInsertion(StorageData::Buffer & buffer, size_t column_idx);
- /// lsn - log sequnce nuumber, like wal offset (64 bit).
+ /// lsn - log sequence number, like wal offset (64 bit).
static Int64 getLSNValue(const std::string & lsn)
{
UInt32 upper_half, lower_half;
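
A hedged sketch of the conversion this comment describes, assuming the usual PostgreSQL "X/Y" text form of an LSN (illustrative helper name; error handling omitted):

#include <cstdint>
#include <cstdio>
#include <string>

// Parse the two 32-bit hex halves of "X/Y" and pack them into a single
// 64-bit WAL offset.
inline int64_t lsn_to_int64(const std::string & lsn)
{
    unsigned int upper_half = 0;
    unsigned int lower_half = 0;
    std::sscanf(lsn.c_str(), "%X/%X", &upper_half, &lower_half);
    return (static_cast<int64_t>(upper_half) << 32) + lower_half;
}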

@@ -963,7 +963,7 @@ bool StorageMergeTree::merge(
if (!merge_mutate_entry)
return false;
- /// Copying a vector of columns `deduplicate bu columns.
+ /// Copying a vector of columns `deduplicate by columns.
IExecutableTask::TaskResultCallback f = [](bool) {};
auto task = std::make_shared<MergePlainMergeTreeTask>(
*this, metadata_snapshot, deduplicate, deduplicate_by_columns, merge_mutate_entry, table_lock_holder, f);

@@ -57,7 +57,7 @@ std::string maskDataPath(const std::string & path)
size_t user_pw_end = masked_path.find('@', node_pos);
if (user_pw_end == std::string::npos)
{
- /// Likey new format (use_compact_format_in_distributed_parts_names=1)
+ /// Likely new format (use_compact_format_in_distributed_parts_names=1)
return path;
}

@@ -5,7 +5,7 @@
ROOT_PATH=$(git rev-parse --show-toplevel)
codespell \
- --skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,${ROOT_PATH}/utils/check-style/aspell-ignore" \
+ --skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,*.sum,${ROOT_PATH}/utils/check-style/aspell-ignore" \
--ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \
--exclude-file "${ROOT_PATH}/utils/check-style/codespell-ignore-lines.list" \
--quiet-level 2 \

@@ -16,3 +16,8 @@ ot
te
fo
ba
+ ro
+ rightt
+ iiterator
+ hastable
+ nam