diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 7945834be50..97301fb1bee 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -404,7 +404,7 @@ public: a date at start of january) In this case one can get 53 for the first week of next year. This flag ensures that the week is relevant for the given year. Note that this flag is only - releveant if WeekModeFlag::JANUARY is not set. + relevant if WeekModeFlag::JANUARY is not set. If set Week is in range 1-53. diff --git a/base/common/sleep.cpp b/base/common/sleep.cpp index 85bbc8edfcc..4974d65a39d 100644 --- a/base/common/sleep.cpp +++ b/base/common/sleep.cpp @@ -26,7 +26,7 @@ void sleepForNanoseconds(uint64_t nanoseconds) if (timebase_info.denom == 0) mach_timebase_info(&timebase_info); - uint64_t time_to_wait = nanoseconds * timebase_info.denom / timebase_info.numer; + uint64_t time_to_wait = nanoseconds * timebase_info.denom / timebase_info.numer; uint64_t now = mach_absolute_time(); while (mach_wait_until(now + time_to_wait) != KERN_SUCCESS); diff --git a/base/daemon/BaseDaemon.h b/base/daemon/BaseDaemon.h index 41d4ad58869..08bbfa291c4 100644 --- a/base/daemon/BaseDaemon.h +++ b/base/daemon/BaseDaemon.h @@ -29,7 +29,7 @@ namespace Poco { class TaskManager; } -/// \brief Base class for applications that can run as deamons. +/// \brief Base class for applications that can run as daemons. 
/// /// \code /// # Some possible command line options: diff --git a/base/ext/chrono_io.h b/base/ext/chrono_io.h index 0b1c47d3874..967b7c5e475 100644 --- a/base/ext/chrono_io.h +++ b/base/ext/chrono_io.h @@ -26,12 +26,12 @@ namespace ext } template > - std::string to_string(const std::chrono::duration & dur) + std::string to_string(const std::chrono::duration & duration) { - auto seconds_as_int = std::chrono::duration_cast(dur); - if (seconds_as_int == dur) + auto seconds_as_int = std::chrono::duration_cast(duration); + if (seconds_as_int == duration) return std::to_string(seconds_as_int.count()) + "s"; - auto seconds_as_double = std::chrono::duration_cast>(dur); + auto seconds_as_double = std::chrono::duration_cast>(duration); return std::to_string(seconds_as_double.count()) + "s"; } @@ -42,8 +42,8 @@ namespace ext } template > - std::ostream & operator<<(std::ostream & o, const std::chrono::duration & dur) + std::ostream & operator<<(std::ostream & o, const std::chrono::duration & duration) { - return o << to_string(dur); + return o << to_string(duration); } } diff --git a/base/glibc-compatibility/musl/lgamma.c b/base/glibc-compatibility/musl/lgamma.c index b0e4f3aa537..fbbe1fedc92 100644 --- a/base/glibc-compatibility/musl/lgamma.c +++ b/base/glibc-compatibility/musl/lgamma.c @@ -24,7 +24,7 @@ * = log(6.3*5.3) + lgamma(5.3) * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3) * 2. Polynomial approximation of lgamma around its - * minimun ymin=1.461632144968362245 to maintain monotonicity. + * minimum ymin=1.461632144968362245 to maintain monotonicity. * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use * Let z = x-ymin; * lgamma(x) = -1.214862905358496078218 + z^2*poly(z) diff --git a/base/loggers/Loggers.h b/base/loggers/Loggers.h index 0095516a738..264bf23a1f7 100644 --- a/base/loggers/Loggers.h +++ b/base/loggers/Loggers.h @@ -21,7 +21,7 @@ public: std::optional getLayer() const { - return layer; /// layer setted in inheritor class BaseDaemonApplication. 
+ return layer; /// layer set in inheritor class BaseDaemonApplication. } void setTextLog(std::shared_ptr log, int max_priority); diff --git a/base/mysqlxx/mysqlxx.h b/base/mysqlxx/mysqlxx.h index 179d550519e..d64dadfc367 100644 --- a/base/mysqlxx/mysqlxx.h +++ b/base/mysqlxx/mysqlxx.h @@ -53,8 +53,6 @@ * These assumptions are specific for Yandex.Metrica. Your mileage may vary. * * mysqlxx could not be considered as separate full-featured library, - * because it is developed from the principle - "everything that we don't need is not implemented", - * and also the library depends on some other libraries from Yandex.Metrica code. - * (dependencied could be easily removed if necessary). + * because it is developed from the principle - "everything that we don't need is not implemented". * It is assumed that the user will add all missing functionality that is needed. */ diff --git a/base/pcg-random/pcg_extras.hpp b/base/pcg-random/pcg_extras.hpp index 118b726dd57..b71e859a25f 100644 --- a/base/pcg-random/pcg_extras.hpp +++ b/base/pcg-random/pcg_extras.hpp @@ -110,7 +110,7 @@ namespace pcg_extras { /* * C++ requires us to be able to serialize RNG state by printing or reading * it from a stream. Because we use 128-bit ints, we also need to be able - * ot print them, so here is code to do so. + * to print them, so here is code to do so. * * This code provides enough functionality to print 128-bit ints in decimal * and zero-padded in hex. It's not a full-featured implementation. @@ -253,7 +253,7 @@ inline std::istream& operator>>(std::istream& in, uint8_t& value) */ /* - * XorShifts are invertable, but they are someting of a pain to invert. + * XorShifts are invertable, but they are something of a pain to invert. * This function backs them out. It's used by the whacky "inside out" * generator defined later. 
*/ diff --git a/base/pcg-random/pcg_random.hpp b/base/pcg-random/pcg_random.hpp index ea441013d18..d96d5895b31 100644 --- a/base/pcg-random/pcg_random.hpp +++ b/base/pcg-random/pcg_random.hpp @@ -174,7 +174,7 @@ PCG_DEFINE_CONSTANT(pcg128_t, default, increment, * period * specific stream - the constant can be changed at any time, selecting * a different random sequence - * unique stream - the constant is based on the memory addresss of the + * unique stream - the constant is based on the memory address of the * object, thus every RNG has its own unique sequence * * This variation is provided though mixin classes which define a function @@ -352,7 +352,7 @@ protected: * (reducing register pressure). * * Given the high level of parameterization, the code has to use some - * template-metaprogramming tricks to handle some of the suble variations + * template-metaprogramming tricks to handle some of the subtle variations * involved. */ diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 440d080637f..0ffe1552f15 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -789,7 +789,7 @@ private: // in particular, it can't distinguish the end of partial input buffer // and the final end of input file. This means we have to try to split // the input into separate queries here. Two patterns of input are - // especially interesing: + // especially interesting: // 1) multiline query: // select 1 // from system.numbers; diff --git a/programs/client/QueryFuzzer.h b/programs/client/QueryFuzzer.h index db1102a94d8..0c7cec8dc84 100644 --- a/programs/client/QueryFuzzer.h +++ b/programs/client/QueryFuzzer.h @@ -40,7 +40,7 @@ struct QueryFuzzer // ASTPtr to point to new AST with some random changes. void fuzzMain(ASTPtr & ast); - // Variuos helper functions follow, normally you shouldn't have to call them. + // Various helper functions follow, normally you shouldn't have to call them. 
Field getRandomField(int type); Field fuzzField(Field field); ASTPtr getRandomColumnLike(); diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index d5544703aa2..b3d1ca7bcec 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -328,7 +328,7 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts) /* * Creates task worker node and checks maximum number of workers not to exceed the limit. - * To achive this we have to check version of workers_version_path node and create current_worker_path + * To achieve this we have to check version of workers_version_path node and create current_worker_path * node atomically. * */ @@ -529,7 +529,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t inject_fault = value < move_fault_probability; } - LOG_DEBUG(log, "Try to move {} to destionation table", partition_name); + LOG_DEBUG(log, "Try to move {} to destination table", partition_name); auto zookeeper = context.getZooKeeper(); @@ -1001,7 +1001,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab } catch (...) 
{ - tryLogCurrentException(log, "Some error occured while moving pieces to destination table for partition " + partition_name); + tryLogCurrentException(log, "Some error occurred while moving pieces to destination table for partition " + partition_name); } } } @@ -1649,7 +1649,7 @@ void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeout dropAndCreateLocalTable(create_table_split_piece_ast); - /// Create auxilary split tables for each piece + /// Create auxiliary split tables for each piece for (const auto & piece_number : ext::range(0, task_table.number_of_splits)) { const auto & storage_piece_split_ast = task_table.auxiliary_engine_split_asts[piece_number]; diff --git a/programs/copier/ClusterCopierApp.h b/programs/copier/ClusterCopierApp.h index 173aacc4361..257b10cf196 100644 --- a/programs/copier/ClusterCopierApp.h +++ b/programs/copier/ClusterCopierApp.h @@ -13,7 +13,7 @@ * Implementation details: * * cluster-copier workers pull each partition of each shard of the source cluster and push it to the destination cluster through - * Distributed table (to preform data resharding). So, worker job is a partition of a source shard. + * Distributed table (to perform data resharding). So, worker job is a partition of a source shard. * A job has three states: Active, Finished and Abandoned. Abandoned means that worker died and did not finish the job. * * If an error occurred during the copying (a worker failed or a worker did not finish the INSERT), then the whole partition (on diff --git a/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h index 0ac533d9209..11ceffd12cd 100644 --- a/programs/copier/TaskTableAndShard.h +++ b/programs/copier/TaskTableAndShard.h @@ -50,7 +50,7 @@ struct TaskTable bool isReplicatedTable() const { return engine_push_zk_path != ""; } - /// Partitions will be splitted into number-of-splits pieces. + /// Partitions will be split into number-of-splits pieces. 
/// Each piece will be copied independently. (10 by default) size_t number_of_splits; @@ -91,8 +91,8 @@ struct TaskTable ASTPtr main_engine_split_ast; /* - * To copy partiton piece form one cluster to another we have to use Distributed table. - * In case of usage separate table (engine_push) for each partiton piece, + * To copy partition piece from one cluster to another we have to use Distributed table. + * In case of usage separate table (engine_push) for each partition piece, * we have to use many Distributed tables. * */ ASTs auxiliary_engine_split_asts; @@ -113,7 +113,7 @@ struct TaskTable /** * Prioritized list of shards * all_shards contains information about all shards in the table. - * So we have to check whether particular shard have current partiton or not while processing. + * So we have to check whether particular shard have current partition or not while processing. */ TasksShard all_shards; TasksShard local_shards; @@ -122,7 +122,7 @@ struct TaskTable ClusterPartitions cluster_partitions; NameSet finished_cluster_partitions; - /// Parition names to process in user-specified order + /// Partition names to process in user-specified order Strings ordered_partition_names; ClusterPartition & getClusterPartition(const String & partition_name) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index e3a5306a6f1..ca348382281 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -114,7 +114,7 @@ void LocalServer::tryInitPath() if (path.empty()) { throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot work with emtpy storage path that is explicitly specified" + "Cannot work with empty storage path that is explicitly specified" " by the --path option. 
Please check the program options and" " correct the --path."); } diff --git a/programs/server/config.xml b/programs/server/config.xml index e494a539a98..506e7d0b006 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -1,7 +1,7 @@ @@ -103,7 +103,7 @@ 9009 - + @@ -455,7 +455,7 @@ --> - + + N/A diff --git a/utils/release/push_packages b/utils/release/push_packages index 68d72bb39fe..43e75a723da 100755 --- a/utils/release/push_packages +++ b/utils/release/push_packages @@ -188,7 +188,7 @@ def _fix_args(args): if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="Programm to push clickhouse packages to repository") + parser = argparse.ArgumentParser(description="Program to push clickhouse packages to repository") parser.add_argument('--deb-directory') parser.add_argument('--rpm-directory') parser.add_argument('--tgz-directory') diff --git a/utils/s3tools/s3uploader b/utils/s3tools/s3uploader index 66322be623f..91fb60ed962 100755 --- a/utils/s3tools/s3uploader +++ b/utils/s3tools/s3uploader @@ -90,7 +90,7 @@ if __name__ == "__main__": help='Path to clickhouse database on filesystem') parser.add_argument('--s3-path', help='Path in s3, where to upload file') parser.add_argument('--tmp-prefix', default='/tmp', - help='Prefix to store temporay downloaded file') + help='Prefix to store temporary downloaded file') data_group = parser.add_mutually_exclusive_group(required=True) table_name_argument = data_group.add_argument('--table-name', help='Name of table with database, if you are uploading partitions') diff --git a/utils/test_history/README.md b/utils/test_history/README.md index 1de9bf0a4ab..2d4af866012 100644 --- a/utils/test_history/README.md +++ b/utils/test_history/README.md @@ -10,11 +10,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance' +---------------------|---------|--------------------+ | Date | SHA | Performance 
test | +=====================+=========+====================+ -| 2020-01-22 12:54:59 | 47ffa40 | succes | +| 2020-01-22 12:54:59 | 47ffa40 | success | +---------------------|---------|--------------------+ | 2020-01-22 13:06:16 | 0d484be | failure | +---------------------|---------|--------------------+ -| 2020-01-22 14:18:34 | 289f169 | succes | +| 2020-01-22 14:18:34 | 289f169 | success | +---------------------|---------|--------------------+ | 2020-01-22 14:27:27 | e357c6f | not run | +---------------------|---------|--------------------+ @@ -26,11 +26,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance' +---------------------|---------|--------------------+ | 2020-01-22 16:58:36 | d68f8d1 | pending | +---------------------|---------|--------------------+ -| 2020-01-22 17:59:43 | ba7ab32 | succes | +| 2020-01-22 17:59:43 | ba7ab32 | success | +---------------------|---------|--------------------+ | 2020-01-22 18:32:38 | eadb902 | failure | +---------------------|---------|--------------------+ -| 2020-01-22 19:11:34 | 8f241ea | succes | +| 2020-01-22 19:11:34 | 8f241ea | success | +---------------------|---------|--------------------+ | 2020-01-22 19:56:49 | f0b7422 | failure | +---------------------|---------|--------------------+ @@ -40,11 +40,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance' +---------------------|---------|--------------------+ | 2020-01-22 23:09:23 | 8cfe9a4 | failure | +---------------------|---------|--------------------+ -| 2020-01-23 00:10:33 | a02b59f | succes | +| 2020-01-23 00:10:33 | a02b59f | success | +---------------------|---------|--------------------+ | 2020-01-23 05:56:11 | 48b3f33 | failure | +---------------------|---------|--------------------+ -| 2020-01-23 05:56:54 | d807088 | succes | +| 2020-01-23 05:56:54 | d807088 | success | +---------------------|---------|--------------------+ | 2020-01-23 06:01:48 | 2e84949 | failure | 
+---------------------|---------|--------------------+ diff --git a/utils/test_history/test-history b/utils/test_history/test-history index 783f25ff822..dca62625c9f 100755 --- a/utils/test_history/test-history +++ b/utils/test_history/test-history @@ -11,7 +11,7 @@ from termcolor import colored import sys COLORMAP = { - "success": colored("succes", 'green'), + "success": colored("success", 'green'), "failure": colored("failure", 'red'), "error": colored("error", 'red'), "pending": colored("pending", 'yellow'),