Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-12-03 13:02:00 +00:00
Fix typos, the last 1%
This commit is contained in:
parent 12f66fa82c
commit 0cbbe153cd
@@ -404,7 +404,7 @@ public:
       a date at start of january) In this case one can get 53 for the
       first week of next year. This flag ensures that the week is
       relevant for the given year. Note that this flag is only
-      releveant if WeekModeFlag::JANUARY is not set.
+      relevant if WeekModeFlag::JANUARY is not set.

       If set Week is in range 1-53.

@@ -26,7 +26,7 @@ void sleepForNanoseconds(uint64_t nanoseconds)
     if (timebase_info.denom == 0)
         mach_timebase_info(&timebase_info);

-    uint64_t time_to_wait = nanoseconds * timebase_info.denom / timebase_info.numer;
+    uint64_t time_to_wait = nanoseconds * timebase_info.denom / timebase_info.number;
     uint64_t now = mach_absolute_time();

     while (mach_wait_until(now + time_to_wait) != KERN_SUCCESS);

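A side note on this hunk: in Apple's <mach/mach_time.h> the numerator field of mach_timebase_info_data_t is spelled "numer", not "number", so the renamed line would not compile on macOS. For reference, a minimal sketch of the whole routine as it stood (the static timebase_info declaration is assumed from context):

    #include <mach/mach_time.h>
    #include <cstdint>

    void sleepForNanoseconds(uint64_t nanoseconds)
    {
        static mach_timebase_info_data_t timebase_info;  /// assumed declaration, not shown in the hunk
        if (timebase_info.denom == 0)
            mach_timebase_info(&timebase_info);

        /// ns = ticks * numer / denom, hence ticks = ns * denom / numer.
        /// (The multiplication can overflow for very long sleeps, as in the original.)
        uint64_t time_to_wait = nanoseconds * timebase_info.denom / timebase_info.numer;
        uint64_t now = mach_absolute_time();

        /// mach_wait_until may return early (e.g. if interrupted), so retry until it succeeds.
        while (mach_wait_until(now + time_to_wait) != KERN_SUCCESS);
    }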
@@ -29,7 +29,7 @@
 namespace Poco { class TaskManager; }


-/// \brief Base class for applications that can run as deamons.
+/// \brief Base class for applications that can run as daemons.
 ///
 /// \code
 /// # Some possible command line options:

@@ -26,12 +26,12 @@ namespace ext
 }

 template <typename Rep, typename Period = std::ratio<1>>
-std::string to_string(const std::chrono::duration<Rep, Period> & dur)
+std::string to_string(const std::chrono::duration<Rep, Period> & duration)
 {
-    auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(dur);
-    if (seconds_as_int == dur)
+    auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
+    if (seconds_as_int == duration)
         return std::to_string(seconds_as_int.count()) + "s";
-    auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(dur);
+    auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
     return std::to_string(seconds_as_double.count()) + "s";
 }

@@ -42,8 +42,8 @@ namespace ext
 }

 template <typename Rep, typename Period = std::ratio<1>>
-std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & dur)
+std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
 {
-    return o << to_string(dur);
+    return o << to_string(duration);
 }
 }

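As a usage note, here is a self-contained sketch of how these two helpers behave (assuming C++17; the ext namespace body is copied from the hunks above, main is illustrative):

    #include <chrono>
    #include <iostream>
    #include <ratio>
    #include <string>

    /// The two helpers, copied from the hunks above (post-rename).
    namespace ext
    {
        template <typename Rep, typename Period = std::ratio<1>>
        std::string to_string(const std::chrono::duration<Rep, Period> & duration)
        {
            auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
            if (seconds_as_int == duration)
                return std::to_string(seconds_as_int.count()) + "s";
            auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
            return std::to_string(seconds_as_double.count()) + "s";
        }

        template <typename Rep, typename Period = std::ratio<1>>
        std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
        {
            return o << to_string(duration);
        }
    }

    int main()
    {
        using std::chrono::milliseconds;
        using std::chrono::seconds;
        using ext::operator<<;  /// not found via ADL (duration lives in std::chrono), so bring it into scope

        std::cout << ext::to_string(seconds(2)) << '\n';          /// prints "2s" (exact in whole seconds)
        std::cout << ext::to_string(milliseconds(1500)) << '\n';  /// prints "1.500000s" (std::to_string on double)
        std::cout << std::chrono::minutes(1) << '\n';             /// prints "60s" via the integer path
    }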
@@ -24,7 +24,7 @@
  * = log(6.3*5.3) + lgamma(5.3)
  * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
  * 2. Polynomial approximation of lgamma around its
- * minimun ymin=1.461632144968362245 to maintain monotonicity.
+ * minimum ymin=1.461632144968362245 to maintain monotonicity.
  * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
  * Let z = x-ymin;
  * lgamma(x) = -1.214862905358496078218 + z^2*poly(z)

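Restated in LaTeX, the two steps this comment describes (argument reduction through the Gamma recurrence, then a polynomial fit around the interior minimum of log-gamma) are:

    % 1. Reduction via Gamma(x) = (x - 1) Gamma(x - 1):
    %    lgamma(x) = log(x - 1) + lgamma(x - 1), applied repeatedly, e.g.
    \log\Gamma(7.3) = \log(6.3 \cdot 5.3 \cdot 4.3 \cdot 3.3 \cdot 2.3) + \log\Gamma(2.3)

    % 2. Near the minimum y_min = 1.461632144968362245, with z = x - y_min,
    %    on [y_min - 0.23, y_min + 0.27] = [1.23164, 1.73163]:
    \log\Gamma(x) = -1.214862905358496078218 + z^2 \,\mathrm{poly}(z)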
@@ -21,7 +21,7 @@ public:

     std::optional<size_t> getLayer() const
     {
-        return layer; /// layer setted in inheritor class BaseDaemonApplication.
+        return layer; /// layer set in inheritor class BaseDaemonApplication.
     }

     void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);

@@ -53,8 +53,6 @@
  * These assumptions are specific for Yandex.Metrica. Your mileage may vary.
  *
  * mysqlxx could not be considered as separate full-featured library,
- * because it is developed from the principle - "everything that we don't need is not implemented",
- * and also the library depends on some other libraries from Yandex.Metrica code.
- * (dependencied could be easily removed if necessary).
+ * because it is developed from the principle - "everything that we don't need is not implemented".
  * It is assumed that the user will add all missing functionality that is needed.
  */

@@ -110,7 +110,7 @@ namespace pcg_extras {
 /*
  * C++ requires us to be able to serialize RNG state by printing or reading
  * it from a stream. Because we use 128-bit ints, we also need to be able
- * ot print them, so here is code to do so.
+ * or print them, so here is code to do so.
  *
  * This code provides enough functionality to print 128-bit ints in decimal
  * and zero-padded in hex. It's not a full-featured implementation.

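For context: the special-purpose code is needed because iostreams cannot print a 128-bit integer directly. A hedged sketch of the decimal case (pcg_extras' real operator<< is templated and also handles zero-padded hex; unsigned __int128 assumes a GCC/Clang-style compiler):

    #include <algorithm>
    #include <ostream>
    #include <string>

    /// Sketch only: peel off low-order decimal digits, then reverse.
    std::ostream & print_u128(std::ostream & out, unsigned __int128 value)
    {
        if (value == 0)
            return out << '0';
        std::string digits;
        while (value != 0)
        {
            digits += static_cast<char>('0' + static_cast<int>(value % 10));
            value /= 10;
        }
        std::reverse(digits.begin(), digits.end());
        return out << digits;
    }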
@@ -253,7 +253,7 @@ inline std::istream& operator>>(std::istream& in, uint8_t& value)
  */

 /*
- * XorShifts are invertable, but they are someting of a pain to invert.
+ * XorShifts are invertable, but they are something of a pain to invert.
  * This function backs them out. It's used by the whacky "inside out"
  * generator defined later.
  */

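To make "backs them out" concrete: for y = x ^ (x >> shift), the top "shift" bits of y already equal those of x, so iterating the substitution recovers the remaining bits. A sketch for the 64-bit case (pcg's actual unxorshift is templated over the integer type and bit count):

    #include <cstdint>

    /// Invert y = x ^ (x >> shift); requires 1 <= shift < 64.
    uint64_t invert_xorshift_right(uint64_t y, unsigned shift)
    {
        uint64_t x = y;  /// the top `shift` bits of y are already those of x
        for (unsigned correct_bits = shift; correct_bits < 64; correct_bits += shift)
            x = y ^ (x >> shift);  /// each pass fixes `shift` more bits
        return x;
    }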
@@ -174,7 +174,7 @@ PCG_DEFINE_CONSTANT(pcg128_t, default, increment,
  * period
  * specific stream - the constant can be changed at any time, selecting
  * a different random sequence
- * unique stream - the constant is based on the memory addresss of the
+ * unique stream - the constant is based on the memory address of the
  * object, thus every RNG has its own unique sequence
  *
 * This variation is provided though mixin classes which define a function

@@ -352,7 +352,7 @@ protected:
  * (reducing register pressure).
  *
  * Given the high level of parameterization, the code has to use some
- * template-metaprogramming tricks to handle some of the suble variations
+ * template-metaprogramming tricks to handle some of the subtle variations
  * involved.
  */

@@ -789,7 +789,7 @@ private:
     // in particular, it can't distinguish the end of partial input buffer
     // and the final end of input file. This means we have to try to split
     // the input into separate queries here. Two patterns of input are
-    // especially interesing:
+    // especially interesting:
     // 1) multiline query:
     //    select 1
     //    from system.numbers;

@@ -40,7 +40,7 @@ struct QueryFuzzer
     // ASTPtr to point to new AST with some random changes.
     void fuzzMain(ASTPtr & ast);

-    // Variuos helper functions follow, normally you shouldn't have to call them.
+    // Various helper functions follow, normally you shouldn't have to call them.
     Field getRandomField(int type);
     Field fuzzField(Field field);
     ASTPtr getRandomColumnLike();

@@ -328,7 +328,7 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)

 /*
  * Creates task worker node and checks maximum number of workers not to exceed the limit.
- * To achive this we have to check version of workers_version_path node and create current_worker_path
+ * To achieve this we have to check version of workers_version_path node and create current_worker_path
  * node atomically.
  * */

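The atomicity trick described here, sketched with zkutil-style helpers (helper names follow ClickHouse's ZooKeeper wrapper; the paths, data, and error handling are assumptions, not the actual ClusterCopier code):

    /// Hedged sketch of check-and-create as a single ZooKeeper transaction.
    Coordination::Stat stat;
    zookeeper->get(workers_version_path, &stat);  /// remember the version we observed

    Coordination::Requests ops;
    /// Fails if anyone bumped workers_version_path since we read it...
    ops.emplace_back(zkutil::makeCheckRequest(workers_version_path, stat.version));
    /// ...so the worker node is only created against that exact version.
    ops.emplace_back(zkutil::makeCreateRequest(current_worker_path, description, zkutil::CreateMode::Ephemeral));

    Coordination::Responses responses;
    if (zookeeper->tryMulti(ops, responses) != Coordination::Error::ZOK)
    {
        /// Lost the race (version changed or node already exists): re-read and retry.
    }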
@@ -529,7 +529,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
         inject_fault = value < move_fault_probability;
     }

-    LOG_DEBUG(log, "Try to move {} to destionation table", partition_name);
+    LOG_DEBUG(log, "Try to move {} to destination table", partition_name);

     auto zookeeper = context.getZooKeeper();

@@ -1001,7 +1001,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
             }
             catch (...)
             {
-                tryLogCurrentException(log, "Some error occured while moving pieces to destination table for partition " + partition_name);
+                tryLogCurrentException(log, "Some error occurred while moving pieces to destination table for partition " + partition_name);
             }
         }
     }

@@ -1649,7 +1649,7 @@ void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeout

     dropAndCreateLocalTable(create_table_split_piece_ast);

-    /// Create auxilary split tables for each piece
+    /// Create auxiliary split tables for each piece
     for (const auto & piece_number : ext::range(0, task_table.number_of_splits))
     {
         const auto & storage_piece_split_ast = task_table.auxiliary_engine_split_asts[piece_number];

@@ -13,7 +13,7 @@
  * Implementation details:
  *
  * cluster-copier workers pull each partition of each shard of the source cluster and push it to the destination cluster through
- * Distributed table (to preform data resharding). So, worker job is a partition of a source shard.
+ * Distributed table (to perform data resharding). So, worker job is a partition of a source shard.
  * A job has three states: Active, Finished and Abandoned. Abandoned means that worker died and did not finish the job.
  *
  * If an error occurred during the copying (a worker failed or a worker did not finish the INSERT), then the whole partition (on

@@ -50,7 +50,7 @@ struct TaskTable

     bool isReplicatedTable() const { return engine_push_zk_path != ""; }

-    /// Partitions will be splitted into number-of-splits pieces.
+    /// Partitions will be split into number-of-splits pieces.
     /// Each piece will be copied independently. (10 by default)
     size_t number_of_splits;

@@ -91,8 +91,8 @@ struct TaskTable
     ASTPtr main_engine_split_ast;

     /*
-     * To copy partiton piece form one cluster to another we have to use Distributed table.
-     * In case of usage separate table (engine_push) for each partiton piece,
+     * To copy partition piece form one cluster to another we have to use Distributed table.
+     * In case of usage separate table (engine_push) for each partition piece,
      * we have to use many Distributed tables.
      * */
     ASTs auxiliary_engine_split_asts;

@@ -113,7 +113,7 @@ struct TaskTable
     /**
      * Prioritized list of shards
      * all_shards contains information about all shards in the table.
-     * So we have to check whether particular shard have current partiton or not while processing.
+     * So we have to check whether particular shard have current partition or not while processing.
      */
     TasksShard all_shards;
     TasksShard local_shards;

@@ -122,7 +122,7 @@ struct TaskTable
     ClusterPartitions cluster_partitions;
     NameSet finished_cluster_partitions;

-    /// Parition names to process in user-specified order
+    /// Partition names to process in user-specified order
     Strings ordered_partition_names;

     ClusterPartition & getClusterPartition(const String & partition_name)

@@ -114,7 +114,7 @@ void LocalServer::tryInitPath()
     if (path.empty())
     {
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
-            "Cannot work with emtpy storage path that is explicitly specified"
+            "Cannot work with empty storage path that is explicitly specified"
             " by the --path option. Please check the program options and"
             " correct the --path.");
     }

@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <!--
   NOTE: User and query level settings are set up in "users.xml" file.
-  If you have accidentially specified user-level settings here, server won't start.
+  If you have accidentally specified user-level settings here, server won't start.
   You can either move the settings to the right place inside "users.xml" file
   or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
 -->

@@ -103,7 +103,7 @@
 <interserver_http_port>9009</interserver_http_port>

 <!-- Hostname that is used by other replicas to request this server.
-     If not specified, than it is determined analoguous to 'hostname -f' command.
+     If not specified, than it is determined analogous to 'hostname -f' command.
      This setting could be used to switch replication to another network interface.
   -->
 <!--

@@ -265,7 +265,7 @@
 <!-- Comma-separated list of prefixes for user-defined settings. -->
 <custom_settings_prefixes></custom_settings_prefixes>

-<!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distibuted DDL worker and so on). -->
+<!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
 <!-- <system_profile>default</system_profile> -->

 <!-- Default database. -->

@@ -455,7 +455,7 @@
 </graphite>
 -->

-<!-- Serve endpoint fot Prometheus monitoring. -->
+<!-- Serve endpoint for Prometheus monitoring. -->
 <!--
     endpoint - mertics path (relative to root, statring with "/")
     port - port to setup server. If not defined or 0 than http_port used

|
@ -694,7 +694,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
|
||||
assertOrSetUUID(create, database);
|
||||
|
||||
/** If the request specifies IF NOT EXISTS, we allow concurrent CREATE queries (which do nothing).
|
||||
* If table doesnt exist, one thread is creating table, while others wait in DDLGuard.
|
||||
* If table doesn't exist, one thread is creating table, while others wait in DDLGuard.
|
||||
*/
|
||||
guard = DatabaseCatalog::instance().getDDLGuard(create.database, table_name);
|
||||
|
||||
|
@@ -1135,7 +1135,7 @@ bool ParserAlias::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     /** In this case, the alias can not match the keyword -
       * so that in the query "SELECT x FROM t", the word FROM was not considered an alias,
-      * and in the query "SELECT x FRO FROM t", the word FRO was considered an alias.
+      * and in the query "SELECT x FR FROM t", the word FR was considered an alias.
       */

     const String name = getIdentifierName(node);

@@ -5,7 +5,7 @@
 #include <Common/hex.h>


-/** A tool to easily proove if "Checksum doesn't match: corrupted data"
+/** A tool to easily prove if "Checksum doesn't match: corrupted data"
   * errors are caused by random bit flips due to hardware issues.
   * It frequently happens due to bad memory on network switches
   * (at least about a few times a year in a fleet of ~1200 ClickHouse servers).

@@ -1,4 +1,4 @@
-## Build and test ClickHouse on various plaforms
+## Build and test ClickHouse on various platforms

 Quick and dirty scripts.

@@ -57,7 +57,7 @@ function die {

 ./install-os-packages.sh prepare

-# Configuration parameters may be overriden with CONFIG environment variable pointing to config file.
+# Configuration parameters may be overridden with CONFIG environment variable pointing to config file.
 [[ -n "$CONFIG" ]] && source $CONFIG

 mkdir -p $WORKSPACE

@@ -13,7 +13,7 @@ EXAMPLE:
 - start version 20.1:
     clickhouse-docker 20.1

-- list avaliable versions:
+- list available versions:
     clickhouse-docker list
 HELP
 exit

@@ -284,7 +284,7 @@ class ClickHouseInserter(object):
             except Exception as ex:
                 print("Cannot insert with exception %s", str(ex))
                 if response:
-                    print("Reponse text %s", response.text)
+                    print("Response text %s", response.text)
                 time.sleep(0.1)
             else:
                 raise Exception("Cannot insert data into clickhouse")

@@ -44,7 +44,7 @@ class Backport:
         # pull-requests are sorted by ancestry from the least recent.
         for pr in prs:
             while repo.comparator(branches[-1][1]) >= repo.comparator(pr['mergeCommit']['oid']):
-                logging.info("PR #{} is already inside {}. Dropping this branch for futher PRs".format(pr['number'], branches[-1][0]))
+                logging.info("PR #{} is already inside {}. Dropping this branch for further PRs".format(pr['number'], branches[-1][0]))
                 branches.pop()

             logging.info("Processing PR #{}".format(pr['number']))

@@ -512,7 +512,7 @@ class Query:
         if request.status_code == 200:
             result = request.json()
             if 'errors' in result:
-                raise Exception('Errors occured: {}\nOriginal query: {}'.format(result["errors"], query))
+                raise Exception('Errors occurred: {}\nOriginal query: {}'.format(result["errors"], query))

             if not is_mutation:
                 import inspect

@@ -68,7 +68,7 @@ K_LAST : L A S T;
 K_LEFT : L E F T;
 K_LIKE : L I K E;
 K_LIMIT : L I M I T;
-K_MAIN : M A I N; // not a clickhouse reverved word
+K_MAIN : M A I N; // not a clickhouse reserved word
 K_MATERIALIZED : M A T E R I A L I Z E D;
 K_MINUTE : M I N U T E;
 K_MODIFY : M O D I F Y;

@@ -88,7 +88,7 @@ K_PREWHERE : P R E W H E R E;
 K_PROCESSLIST : P R O C E S S L I S T;
 K_QUERY : Q U E R Y;
 K_RENAME : R E N A M E;
-K_RETURN : R E T U R N; // not a clickhouse reverved word
+K_RETURN : R E T U R N; // not a clickhouse reserved word
 K_RIGHT : R I G H T;
 K_SAMPLE : S A M P L E;
 K_SECOND : S E C O N D;

@@ -317,7 +317,7 @@ under the License.
     </xsl:call-template>
     </code>
 </xsl:template>
-<!-- Style for the error and failure in the tescase template -->
+<!-- Style for the error and failure in the testcase template -->
 <xsl:template name="display-failures">
     <xsl:choose>
         <xsl:when test="not(@message)">N/A</xsl:when>

@@ -188,7 +188,7 @@ def _fix_args(args):

 if __name__ == "__main__":
     logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
-    parser = argparse.ArgumentParser(description="Programm to push clickhouse packages to repository")
+    parser = argparse.ArgumentParser(description="Program to push clickhouse packages to repository")
     parser.add_argument('--deb-directory')
     parser.add_argument('--rpm-directory')
     parser.add_argument('--tgz-directory')

@@ -90,7 +90,7 @@ if __name__ == "__main__":
                         help='Path to clickhouse database on filesystem')
     parser.add_argument('--s3-path', help='Path in s3, where to upload file')
     parser.add_argument('--tmp-prefix', default='/tmp',
-                        help='Prefix to store temporay downloaded file')
+                        help='Prefix to store temporary downloaded file')
     data_group = parser.add_mutually_exclusive_group(required=True)
     table_name_argument = data_group.add_argument('--table-name',
                         help='Name of table with database, if you are uploading partitions')

@@ -10,11 +10,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance'
 +---------------------|---------|--------------------+
 | Date                | SHA     | Performance test   |
 +=====================+=========+====================+
-| 2020-01-22 12:54:59 | 47ffa40 | succes             |
+| 2020-01-22 12:54:59 | 47ffa40 | success            |
 +---------------------|---------|--------------------+
 | 2020-01-22 13:06:16 | 0d484be | failure            |
 +---------------------|---------|--------------------+
-| 2020-01-22 14:18:34 | 289f169 | succes             |
+| 2020-01-22 14:18:34 | 289f169 | success            |
 +---------------------|---------|--------------------+
 | 2020-01-22 14:27:27 | e357c6f | not run            |
 +---------------------|---------|--------------------+

@@ -26,11 +26,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance'
 +---------------------|---------|--------------------+
 | 2020-01-22 16:58:36 | d68f8d1 | pending            |
 +---------------------|---------|--------------------+
-| 2020-01-22 17:59:43 | ba7ab32 | succes             |
+| 2020-01-22 17:59:43 | ba7ab32 | success            |
 +---------------------|---------|--------------------+
 | 2020-01-22 18:32:38 | eadb902 | failure            |
 +---------------------|---------|--------------------+
-| 2020-01-22 19:11:34 | 8f241ea | succes             |
+| 2020-01-22 19:11:34 | 8f241ea | success            |
 +---------------------|---------|--------------------+
 | 2020-01-22 19:56:49 | f0b7422 | failure            |
 +---------------------|---------|--------------------+

@@ -40,11 +40,11 @@ $ test-history --token XXX --since '2020-01-22 00:00:00' --substr Performance'
 +---------------------|---------|--------------------+
 | 2020-01-22 23:09:23 | 8cfe9a4 | failure            |
 +---------------------|---------|--------------------+
-| 2020-01-23 00:10:33 | a02b59f | succes             |
+| 2020-01-23 00:10:33 | a02b59f | success            |
 +---------------------|---------|--------------------+
 | 2020-01-23 05:56:11 | 48b3f33 | failure            |
 +---------------------|---------|--------------------+
-| 2020-01-23 05:56:54 | d807088 | succes             |
+| 2020-01-23 05:56:54 | d807088 | success            |
 +---------------------|---------|--------------------+
 | 2020-01-23 06:01:48 | 2e84949 | failure            |
 +---------------------|---------|--------------------+

@@ -11,7 +11,7 @@ from termcolor import colored
 import sys

 COLORMAP = {
-    "success": colored("succes", 'green'),
+    "success": colored("success", 'green'),
     "failure": colored("failure", 'red'),
     "error": colored("error", 'red'),
     "pending": colored("pending", 'yellow'),