Fix typos

Alexey Milovidov 2020-06-27 22:05:00 +03:00
parent 1d7d5e74f6
commit 1462a66d1e
41 changed files with 51 additions and 47 deletions

@ -9,7 +9,7 @@
// Include this header last, because it is an auto-generated dump of questionable
// garbage that breaks the build (e.g. it changes _POSIX_C_SOURCE).
// TODO: find out what it is. On github, they have proper inteface headers like
// TODO: find out what it is. On github, they have proper interface headers like
// this one: https://github.com/RoaringBitmap/CRoaring/blob/master/include/roaring/roaring.h
#include <roaring/roaring.h>

@ -52,7 +52,7 @@ namespace CurrentMetrics
add(metric, -value);
}
/// For lifetime of object, add amout for specified metric. Then subtract.
/// For lifetime of object, add amount for specified metric. Then subtract.
class Increment
{
private:

@ -14,13 +14,13 @@ namespace Util
/// SensitiveDataMasker allows to remove sensitive data from queries using set of regexp-based rules
/// It's used as a singelton via getInstance method
/// It's used as a singleton via getInstance method
/// Initially it's empty (nullptr) and after manual initialization
/// (one-time, done by setInstance call) it takes the proper value which
/// is stored in unique_ptr.
/// It looks like the singelton is the best option here, as
/// It looks like the singleton is the best option here, as
/// two users of that object (OwnSplitChannel & Interpreters/executeQuery)
/// can't own/share that Masker properly without syncronization & locks,
/// and we can't afford setting global locks for each logged line.

@ -95,7 +95,7 @@ namespace
enum ComputeWidthMode
{
Width, /// Calcualte and return visible width
Width, /// Calculate and return visible width
BytesBeforLimit /// Calculate and return the maximum number of bytes when substring fits in visible width.
};

@ -22,7 +22,7 @@ using TemporaryFile = Poco::TemporaryFile;
bool enoughSpaceInDirectory(const std::string & path, size_t data_size);
std::unique_ptr<TemporaryFile> createTemporaryFile(const std::string & path);
/// Returns mount point of filesystem where absoulte_path (must exist) is located
/// Returns mount point of filesystem where absolute_path (must exist) is located
std::filesystem::path getMountPoint(std::filesystem::path absolute_path);
/// Returns name of filesystem mounted to mount_point

@ -26,7 +26,7 @@ struct InitializeJemallocZoneAllocatorForOSX
/// and even will be optimized out:
///
/// It is ok to call it twice (i.e. in case of shared libraries)
/// Since zone_register() is a no-op if the defualt zone is already replaced with something.
/// Since zone_register() is a no-op if the default zone is already replaced with something.
///
/// https://github.com/jemalloc/jemalloc/issues/708
zone_register();

@ -152,7 +152,7 @@ UInt32 getCompressedDataSize(UInt8 data_bytes_size, UInt32 uncompressed_size)
template <typename ValueType>
UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
{
// Since only unsinged int has granted 2-complement overflow handling,
// Since only unsigned int has granted 2-complement overflow handling,
// we are doing math here only on unsigned types.
// To simplify and booletproof code, we enforce ValueType to be unsigned too.
static_assert(is_unsigned_v<ValueType>, "ValueType must be unsigned.");
@ -218,7 +218,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
const SignedDeltaType signed_dd = static_cast<SignedDeltaType>(double_delta);
const auto sign = signed_dd < 0;
// -1 shirnks dd down to fit into number of bits, and there can't be 0, so it is OK.
// -1 shrinks dd down to fit into number of bits, and there can't be 0, so it is OK.
const auto abs_value = static_cast<UnsignedDeltaType>(std::abs(signed_dd) - 1);
const auto write_spec = getDeltaWriteSpec(signed_dd);

@ -11,7 +11,7 @@ namespace DB
* to support 64bit types. The drawback is 1 extra bit for 32-byte wide deltas: 5-bit prefix
* instead of 4-bit prefix.
*
* This codec is best used against monotonic integer sequences with constant (or almost contant)
* This codec is best used against monotonic integer sequences with constant (or almost constant)
* stride, like event timestamp for some monitoring application.
*
* Given input sequence a: [a0, a1, ... an]:
@ -45,7 +45,7 @@ namespace DB
* write sign bit (1 if signed): x
* write 64-1 bits of abs(double_delta - 1): xxxxxxxxxxx...
*
* @example sequence of UInt8 values [1, 2, 3, 4, 5, 6, 7, 8, 9 10] is encoded as (codec header is ommited):
* @example sequence of UInt8 values [1, 2, 3, 4, 5, 6, 7, 8, 9 10] is encoded as (codec header is omitted):
*
* .- 4-byte little-endian sequence length (10 == 0xa)
* | .- 1 byte (sizeof(UInt8) a[0] : 0x01

@ -546,7 +546,7 @@ TEST_P(CodecTest, TranscodingWithoutDataType)
class CodecTestCompatibility : public ::testing::TestWithParam<std::tuple<Codec, std::tuple<CodecTestSequence, std::string>>>
{};
// Check that iput sequence when encoded matches the encoded string binary.
// Check that input sequence when encoded matches the encoded string binary.
TEST_P(CodecTestCompatibility, Encoding)
{
const auto & codec_spec = std::get<0>(GetParam());
@ -1275,7 +1275,7 @@ INSTANTIATE_TEST_SUITE_P(Gorilla,
);
// These 'tests' try to measure performance of encoding and decoding and hence only make sence to be run locally,
// also they require pretty big data to run agains and generating this data slows down startup of unit test process.
// also they require pretty big data to run against and generating this data slows down startup of unit test process.
// So un-comment only at your discretion.
// Just as if all sequences from generatePyramidOfSequences were appended to one-by-one to the first one.

@ -49,7 +49,7 @@ namespace DB
///
/// If a query returns data, the server sends an empty header block containing
/// the description of resulting columns before executing the query.
/// Using this block the client can initialise the output formatter and display the prefix of resulting table
/// Using this block the client can initialize the output formatter and display the prefix of resulting table
/// beforehand.
namespace Protocol

@ -56,7 +56,7 @@ void testGetFractional(const DecimalUtilsSplitAndCombineTestParam & param)
DecimalUtils::getFractionalPart(DecimalType{param.decimal_value}, param.scale));
}
// unfortunatelly typed parametrized tests () are not supported in this version of gtest, so I have to emulate by hand.
// Unfortunately typed parametrized tests () are not supported in this version of gtest, so I have to emulate by hand.
TEST_P(DecimalUtilsSplitAndCombineTest, splitDecimal32)
{
testSplit<Decimal32>(GetParam());

@ -112,6 +112,10 @@ void registerDataTypeDomainIPv4AndIPv6(DataTypeFactory & factory)
return std::make_pair(DataTypeFactory::instance().get("FixedString(16)"),
std::make_unique<DataTypeCustomDesc>(std::make_unique<DataTypeCustomFixedName>("IPv6"), std::make_unique<DataTypeCustomIPv6Serialization>()));
});
/// MySQL, MariaDB
factory.registerAlias("INET4", "IPv4", DataTypeFactory::CaseInsensitive);
factory.registerAlias("INET6", "IPv6", DataTypeFactory::CaseInsensitive);
}
}

@ -62,7 +62,7 @@ public:
* OR
* R execute(DateTime64 value, Int64 scale_factor, ... , const TimeZoneImpl &)
*
* Wehere R and T could be arbitrary types.
* Where R and T could be arbitrary types.
*/
template <typename Transform>
class TransformDateTime64 : public Transform

@ -397,7 +397,7 @@ void registerDataTypeString(DataTypeFactory & factory)
{
factory.registerDataType("String", create);
/// These synonyms are added for compatibility.
/// These synonims are added for compatibility.
factory.registerAlias("CHAR", "String", DataTypeFactory::CaseInsensitive);
factory.registerAlias("NCHAR", "String", DataTypeFactory::CaseInsensitive);

@ -48,7 +48,7 @@ void registerDataTypeNumbers(DataTypeFactory & factory)
factory.registerDataType("Float32", createNumericDataType<Float32>);
factory.registerDataType("Float64", createNumericDataType<Float64>);
/// These synonyms are added for compatibility.
/// These synonims are added for compatibility.
factory.registerAlias("TINYINT", "Int8", DataTypeFactory::CaseInsensitive);
factory.registerAlias("BOOL", "Int8", DataTypeFactory::CaseInsensitive);

@ -423,7 +423,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
}
}
ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * loger, const Context & context, const String & metadata_file_path, bool throw_on_error /*= true*/, bool remove_empty /*= false*/)
ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * logger, const Context & context, const String & metadata_file_path, bool throw_on_error /*= true*/, bool remove_empty /*= false*/)
{
String query;
@ -445,7 +445,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * loger, const Contex
*/
if (remove_empty && query.empty())
{
LOG_ERROR(loger, "File {} is empty. Removing.", metadata_file_path);
LOG_ERROR(logger, "File {} is empty. Removing.", metadata_file_path);
Poco::File(metadata_file_path).remove();
return nullptr;
}
@ -469,7 +469,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(Poco::Logger * loger, const Contex
table_name = unescapeForFileName(table_name);
if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER)
LOG_WARNING(loger, "File {} contains both UUID and table name. Will use name `{}` instead of `{}`", metadata_file_path, table_name, create.table);
LOG_WARNING(logger, "File {} contains both UUID and table name. Will use name `{}` instead of `{}`", metadata_file_path, table_name, create.table);
create.table = table_name;
}

@ -73,7 +73,7 @@ namespace DB
ErrorCodes::INVALID_CONFIG_PARAMETER};
if (dict_struct.key->size() != 2)
throw Exception{"Redis source with storage type \'hash_map\' requiers 2 keys",
throw Exception{"Redis source with storage type \'hash_map\' requires 2 keys",
ErrorCodes::INVALID_CONFIG_PARAMETER};
// suppose key[0] is primary key, key[1] is secondary key
}

@ -7,7 +7,7 @@ VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, Volume
{
if (other_volume->getType() == VolumeType::JBOD || other_volume->getType() == VolumeType::SINGLE_DISK)
{
/// Since reservation on JBOD chosies one of disks and makes reservation there, volume
/// Since reservation on JBOD choices one of disks and makes reservation there, volume
/// for such type of reservation will be with one disk.
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk());
}

@ -1084,7 +1084,7 @@ public:
bool both_represented_by_number = arguments[0]->isValueRepresentedByNumber() && arguments[1]->isValueRepresentedByNumber();
bool has_date = left.isDate() || right.isDate();
if (!((both_represented_by_number && !has_date) /// Do not allow compare date and number.
if (!((both_represented_by_number && !has_date) /// Do not allow to compare date and number.
|| (left.isStringOrFixedString() || right.isStringOrFixedString()) /// Everything can be compared with string by conversion.
/// You can compare the date, datetime, or datatime64 and an enumeration with a constant string.
|| (left.isDateOrDateTime() && right.isDateOrDateTime() && left.idx == right.idx) /// only date vs date, or datetime vs datetime

@ -931,7 +931,7 @@ public:
// toUnixTimestamp(value[, timezone : String])
|| std::is_same_v<Name, NameToUnixTimestamp>
// toDate(value[, timezone : String])
|| std::is_same_v<ToDataType, DataTypeDate> // TODO: shall we allow timestamp argument for toDate? DateTime knows nothing about timezones and this arument is ignored below.
|| std::is_same_v<ToDataType, DataTypeDate> // TODO: shall we allow timestamp argument for toDate? DateTime knows nothing about timezones and this argument is ignored below.
// toDateTime(value[, timezone: String])
|| std::is_same_v<ToDataType, DataTypeDateTime>
// toDateTime64(value, scale : Integer[, timezone: String])

@ -349,7 +349,7 @@ struct MurmurHash3Impl128
/// http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452
/// Care should be taken to do all calculation in unsigned integers (to avoid undefined behaviour on overflow)
/// but obtain the same result as it is done in singed integers with two's complement arithmetic.
/// but obtain the same result as it is done in signed integers with two's complement arithmetic.
struct JavaHashImpl
{
static constexpr auto name = "javaHash";

@ -138,7 +138,7 @@ struct NgramDistanceImpl
/// This is not a really true case insensitive utf8. We zero the 5-th bit of every byte.
/// And first bit of first byte if there are two bytes.
/// For ASCII it works https://catonmat.net/ascii-case-conversion-trick. For most cyrrilic letters also does.
/// For ASCII it works https://catonmat.net/ascii-case-conversion-trick. For most cyrillic letters also does.
/// For others, we don't care now. Lowering UTF is not a cheap operation.
if constexpr (case_insensitive)
{

@ -177,7 +177,7 @@ namespace detail
* /// There could be as many implementation for every target as you want.
* selector.registerImplementation<TargetArch::Default, MyDefaultImpl>();
* #if USE_MULTITARGET_CODE
* selector.registreImplementation<TargetArch::AVX2, TargetSpecific::AVX2::MyAVX2Impl>();
* selector.registerImplementation<TargetArch::AVX2, TargetSpecific::AVX2::MyAVX2Impl>();
* #endif
* }
*

@ -143,7 +143,7 @@ namespace MultiRegexps
patterns.push_back(ref.data);
/* Flags below are the pattern matching flags.
* HS_FLAG_DOTALL is a compile flag where matching a . will not exclude newlines. This is a good
* performance practice accrording to Hyperscan API. https://intel.github.io/hyperscan/dev-reference/performance.html#dot-all-mode
* performance practice according to Hyperscan API. https://intel.github.io/hyperscan/dev-reference/performance.html#dot-all-mode
* HS_FLAG_ALLOWEMPTY is a compile flag where empty strings are allowed to match.
* HS_FLAG_UTF8 is a flag where UTF8 literals are matched.
* HS_FLAG_SINGLEMATCH is a compile flag where each pattern match will be returned only once. it is a good performance practice

@ -85,7 +85,7 @@ public:
/// We do not sleep if the block is empty.
if (size > 0)
{
/// When sleeping, the query cannot be cancelled. For abitily to cancel query, we limit sleep time.
/// When sleeping, the query cannot be cancelled. For ability to cancel query, we limit sleep time.
if (seconds > 3.0) /// The choice is arbitrary
throw Exception("The maximum sleep time is 3 seconds. Requested: " + toString(seconds), ErrorCodes::TOO_SLOW);

@ -1935,7 +1935,7 @@ void Context::reloadConfig() const
{
/// Use mutex if callback may be changed after startup.
if (!shared->config_reload_callback)
throw Exception("Can't reload config beacuse config_reload_callback is not set.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Can't reload config because config_reload_callback is not set.", ErrorCodes::LOGICAL_ERROR);
shared->config_reload_callback();
}

@ -151,7 +151,7 @@ struct ColumnAliasesMatcher
void replaceIdentifiersWithAliases()
{
String hide_prefix = "--"; /// @note restriction: user should not use alises like `--table.column`
String hide_prefix = "--"; /// @note restriction: user should not use aliases like `--table.column`
for (auto & [identifier, is_public] : compound_identifiers)
{

@ -11,7 +11,7 @@ class ASTSelectQuery;
class Context;
/// AST transformer. It replaces multiple joins to (subselect + join) track.
/// 'select * from t1 join t2 on ... join t3 on ... join t4 on ...' would be rewriten with
/// 'select * from t1 join t2 on ... join t3 on ... join t4 on ...' would be rewritten with
/// 'select * from (select * from t1 join t2 on ...) join t3 on ...) join t4 on ...'
class JoinToSubqueryTransformMatcher
{

@ -22,7 +22,7 @@ using StorageMetadataPtr = std::shared_ptr<const StorageInMemoryMetadata>;
class JoinedTables
{
public:
JoinedTables(Context && contex, const ASTSelectQuery & select_query);
JoinedTables(Context && context, const ASTSelectQuery & select_query);
void reset(const ASTSelectQuery & select_query)
{

@ -17,7 +17,7 @@ void ASTLiteral::updateTreeHashImpl(SipHash & hash_state) const
}
/// Writes 'tuple' word before tuple literals for backward compatibility reasons.
/// TODO: remove, when versions lower than 20.3 will be rearely used.
/// TODO: remove, when versions lower than 20.3 will be rarely used.
class FieldVisitorToColumnName : public StaticVisitor<String>
{
public:

@ -10,7 +10,7 @@ namespace DB
{
/// If output stream set dumps node with indents and some additional info. Do nothing otherwise.
/// Allow to print kay-value pairs inside of tree dump.
/// Allow to print key-value pairs inside of tree dump.
class DumpASTNode
{
public:

@ -1097,7 +1097,7 @@ const char * ParserAlias::restricted_keywords[] =
"ASOF",
"SEMI",
"ANTI",
"ONLY", /// YQL synonym for ANTI
"ONLY", /// YQL synonim for ANTI. Note: YQL is the name of one of Yandex proprietary languages, completely unrelated to ClickHouse.
"ON",
"USING",
"PREWHERE",

@ -35,7 +35,7 @@ protected:
private:
std::unique_ptr<QueryPipeline> pipeline;
/// One of executors is used.
std::unique_ptr<PullingPipelineExecutor> executor; /// for singe thread.
std::unique_ptr<PullingPipelineExecutor> executor; /// for single thread.
std::unique_ptr<PullingAsyncPipelineExecutor> async_executor; /// for many threads.
bool is_execution_started = false;

@ -51,7 +51,7 @@ public:
total_merged_rows += num_rows;
merged_rows = num_rows;
/// We don't cate about granularity here. Because, for fast-forward optimization, chunk will be moved as-is.
/// We don't care about granularity here. Because, for fast-forward optimization, chunk will be moved as-is.
/// sum_blocks_granularity += block_size * num_rows;
}

@ -89,7 +89,7 @@ IMergingAlgorithm::Status VersionedCollapsingAlgorithm::merge()
num_rows_to_insert = 1;
}
/// Insert ready roes if any.
/// Insert ready rows if any.
while (num_rows_to_insert)
{
const auto & row = current_keys.front();

@ -91,7 +91,7 @@ void QueryPlan::addStep(QueryPlanStepPtr step)
{
if (isInitialized())
throw Exception("Cannot add step " + step->getName() + " to QueryPlan because "
"step has no inputs, but QueryPlan is already initialised", ErrorCodes::LOGICAL_ERROR);
"step has no inputs, but QueryPlan is already initialized", ErrorCodes::LOGICAL_ERROR);
nodes.emplace_back(Node{.step = std::move(step)});
root = &nodes.back();
@ -102,7 +102,7 @@ void QueryPlan::addStep(QueryPlanStepPtr step)
{
if (!isInitialized())
throw Exception("Cannot add step " + step->getName() + " to QueryPlan because "
"step has input, but QueryPlan is not initialised", ErrorCodes::LOGICAL_ERROR);
"step has input, but QueryPlan is not initialized", ErrorCodes::LOGICAL_ERROR);
const auto & root_header = root->step->getOutputStream().header;
const auto & step_header = step->getInputStreams().front().header;

@ -852,7 +852,7 @@ protected:
private:
/// RAII Wrapper for atomic work with currently moving parts
/// Acuire them in constructor and remove them in destructor
/// Acquire them in constructor and remove them in destructor
/// Uses data.currently_moving_parts_mutex
struct CurrentlyMovingPartsTagger
{

@ -55,7 +55,7 @@ public:
/// Replaces cloned part from detached directory into active data parts set.
/// Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of
///IMergeTreeDataPart called. If replacing part doesn't exists or not active (commited) than
/// cloned part will be removed and loge message will be reported. It may happen in case of concurrent
/// cloned part will be removed and log message will be reported. It may happen in case of concurrent
/// merge or mutation.
void swapClonedPart(const std::shared_ptr<const IMergeTreeDataPart> & cloned_parts) const;

@ -35,7 +35,7 @@ struct ReplicatedMergeTreeMutationEntry
/// Replica which initiated mutation
String source_replica;
/// Accuired numbers of blocks
/// Accured numbers of blocks
/// partition_id -> block_number
std::map<String, Int64> block_numbers;

@ -80,7 +80,7 @@ struct ReplicatedMergeTreeQuorumAddedParts
return parts_in_quorum;
}
/// Read blocks when node in ZooKeeper suppors multiple partitions.
/// Read blocks when node in ZooKeeper supports multiple partitions.
PartitionIdToPartName readV2(ReadBuffer & in)
{
assertString("parts count: ", in);

@ -3726,7 +3726,7 @@ void StorageReplicatedMergeTree::alter(
DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(query_context, table_id, metadata_copy);
}
/// We can be sure, that in case of successfull commit in zookeeper our
/// We can be sure, that in case of successful commit in zookeeper our
/// version will increments by 1. Because we update with version check.
int new_metadata_version = metadata_version + 1;