Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)
Merge pull request #4122 from maiha/dbms-fix-misspells
dbms: Fixed misspells in comments
Commit: cd874be05a
@@ -860,7 +860,7 @@ private:
     }


-    /// Process the query that doesn't require transfering data blocks to the server.
+    /// Process the query that doesn't require transferring data blocks to the server.
     void processOrdinaryQuery()
     {
         connection->sendQuery(query, query_id, QueryProcessingStage::Complete, &context.getSettingsRef(), nullptr, true);
@@ -869,7 +869,7 @@ private:
     }


-    /// Process the query that requires transfering data blocks to the server.
+    /// Process the query that requires transferring data blocks to the server.
    void processInsertQuery()
    {
        /// Send part of query without data, because data will be sent separately.
@@ -1136,7 +1136,7 @@ private:
     }


-    /// Process Log packets, exit when recieve Exception or EndOfStream
+    /// Process Log packets, exit when receive Exception or EndOfStream
     bool receiveEndOfQuery()
     {
         while (true)
@@ -137,7 +137,7 @@ try
     static KillingErrorHandler error_handler;
     Poco::ErrorHandler::set(&error_handler);

-    /// Don't initilaize DateLUT
+    /// Don't initialize DateLUT

     registerFunctions();
     registerAggregateFunctions();
@@ -51,7 +51,7 @@ It is designed to retain the following properties of data:
 - probability distributions of length of strings;
 - probability of zero values of numbers; empty strings and arrays, NULLs;
 - data compression ratio when compressed with LZ77 and entropy family of codecs;
-- continuouty (magnitude of difference) of time values across table; continuouty of floating point values.
+- continuity (magnitude of difference) of time values across table; continuity of floating point values.
 - date component of DateTime values;
 - UTF-8 validity of string values;
 - string values continue to look somewhat natural.
@@ -246,7 +246,7 @@ Float transformFloatMantissa(Float x, UInt64 seed)


 /// Transform difference from previous number by applying pseudorandom permutation to mantissa part of it.
-/// It allows to retain some continuouty property of source data.
+/// It allows to retain some continuity property of source data.
 template <typename Float>
 class FloatModel : public IModel
 {
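Note: the idea in this hunk can be made concrete with a minimal sketch (hypothetical illustration, not code from this commit). Permuting only the 52 mantissa bits of an IEEE 754 double leaves sign and exponent intact, so the magnitude, and hence the continuity of differences, is approximately preserved:

    #include <cstdint>
    #include <cstring>

    /// Toy "permutation": xor the mantissa bits with a seed-derived mask.
    /// Sign and exponent are untouched, so the result stays near the input.
    double permuteMantissa(double x, uint64_t seed)
    {
        uint64_t bits;
        std::memcpy(&bits, &x, sizeof(bits));
        const uint64_t mantissa_mask = (1ULL << 52) - 1;
        bits ^= seed & mantissa_mask;   /// xor by a constant is invertible
        std::memcpy(&x, &bits, sizeof(x));
        return x;
    }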
@@ -22,7 +22,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
     /// Connection string is a list of name, value pairs.
     /// name and value are separated by '='.
     /// names are case insensitive.
-    /// name=value pairs are sepated by ';'.
+    /// name=value pairs are separated by ';'.
     /// ASCII whitespace characters are skipped before and after delimiters.
     /// value may be optionally enclosed by {}
     /// in enclosed value, } is escaped as }}.
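Note: a concrete connection string in this format might look as follows (hypothetical values; only the validateODBCConnectionString signature from the hunk header is assumed):

    /// Pairs are separated by ';'; a braced value may itself contain ';',
    /// and a literal '}' inside braces is doubled as '}}'.
    std::string connection_string = "DSN={my;dsn};UID=user;PWD={pa}}ss}";
    std::string validated = validateODBCConnectionString(connection_string);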
@@ -13,9 +13,9 @@
         <!-- How to choose between replicas during distributed query processing.
              random - choose random replica from set of replicas with minimum number of errors
              nearest_hostname - from set of replicas with minimum number of errors, choose replica
-                with minumum number of different symbols between replica's hostname and local hostname
+                with minimum number of different symbols between replica's hostname and local hostname
                 (Hamming distance).
-             in_order - first live replica is choosen in specified order.
+             in_order - first live replica is chosen in specified order.
         -->
         <load_balancing>random</load_balancing>
     </default>
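Note: the "number of different symbols" comparison behind nearest_hostname can be pictured with a small sketch (an illustrative assumption, not the server's actual implementation):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    /// Count positions where the hostnames differ; extra trailing length counts too.
    size_t hostnameDifference(const std::string & local, const std::string & replica)
    {
        size_t common = std::min(local.size(), replica.size());
        size_t diff = std::max(local.size(), replica.size()) - common;
        for (size_t i = 0; i < common; ++i)
            if (local[i] != replica[i])
                ++diff;
        return diff;
    }

    /// e.g. hostnameDifference("clickhouse-host-01", "clickhouse-host-07") == 1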
@@ -109,7 +109,7 @@ struct AggreagteFunctionGroupUniqArrayGenericData
 };

 /** Template parameter with true value should be used for columns that store their elements in memory continuously.
-  * For such columns groupUniqArray() can be implemented more efficently (especially for small numeric arrays).
+  * For such columns groupUniqArray() can be implemented more efficiently (especially for small numeric arrays).
   */
 template <bool is_plain_column = false>
 class AggreagteFunctionGroupUniqArrayGeneric
@@ -123,7 +123,7 @@ struct AggregateFunctionTopKGenericData
 };

 /** Template parameter with true value should be used for columns that store their elements in memory continuously.
-  * For such columns topK() can be implemented more efficently (especially for small numeric arrays).
+  * For such columns topK() can be implemented more efficiently (especially for small numeric arrays).
   */
 template <bool is_plain_column = false>
 class AggregateFunctionTopKGeneric : public IAggregateFunctionDataHelper<AggregateFunctionTopKGenericData, AggregateFunctionTopKGeneric<is_plain_column>>
@@ -12,7 +12,7 @@ namespace ErrorCodes
     extern const int NOT_IMPLEMENTED;
 }

-/** Calculates quantile by counting number of occurences for each value in a hash map.
+/** Calculates quantile by counting number of occurrences for each value in a hash map.
   *
   * It use O(distinct(N)) memory. Can be naturally applied for values with weight.
   * In case of many identical values, it can be more efficient than QuantileExact even when weight is not used.
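Note: the scheme this comment describes — one counter per distinct value, then a pass over the values in sorted order — can be sketched as follows (a simplified stand-in, not the actual aggregate function):

    #include <algorithm>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    /// O(distinct(N)) memory: a counter per distinct value. Assumes non-empty data.
    int64_t quantileByCounting(const std::vector<int64_t> & data, double level)
    {
        std::unordered_map<int64_t, uint64_t> counts;
        for (int64_t x : data)
            ++counts[x];

        std::vector<std::pair<int64_t, uint64_t>> sorted(counts.begin(), counts.end());
        std::sort(sorted.begin(), sorted.end());

        uint64_t threshold = static_cast<uint64_t>(level * data.size());
        uint64_t accumulated = 0;
        for (const auto & [value, count] : sorted)
        {
            accumulated += count;
            if (accumulated > threshold)
                return value;
        }
        return sorted.back().first;
    }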
@@ -43,7 +43,7 @@ public:
     virtual const char * getFamilyName() const = 0;

     /** If column isn't constant, returns nullptr (or itself).
-      * If column is constant, transforms constant to full column (if column type allows such tranform) and return it.
+      * If column is constant, transforms constant to full column (if column type allows such transform) and return it.
       */
     virtual Ptr convertToFullColumnIfConst() const { return getPtr(); }

@@ -149,7 +149,7 @@ public:
     virtual void insertDefault() = 0;

     /** Removes last n elements.
-      * Is used to support exeption-safety of several operations.
+      * Is used to support exception-safety of several operations.
       * For example, sometimes insertion should be reverted if we catch an exception during operation processing.
       * If column has less than n elements or n == 0 - undefined behavior.
       */
@@ -234,8 +234,8 @@ public:
     virtual void gather(ColumnGathererStream & gatherer_stream) = 0;

     /** Computes minimum and maximum element of the column.
-      * In addition to numeric types, the funtion is completely implemented for Date and DateTime.
-      * For strings and arrays function should retrurn default value.
+      * In addition to numeric types, the function is completely implemented for Date and DateTime.
+      * For strings and arrays function should return default value.
       * (except for constant columns; they should return value of the constant).
       * If column is empty function should return default value.
       */
@@ -64,7 +64,7 @@ namespace DB
   * During insertion, each key is locked - to avoid parallel initialization of regions for same key.
   *
   * On insertion, we search for free region of at least requested size.
-  * If nothing was found, we evict oldest unused region; if not enogh size, we evict it neighbours; and try again.
+  * If nothing was found, we evict oldest unused region; if not enough size, we evict it neighbours; and try again.
   *
   * Metadata is allocated by usual allocator and its memory usage is not accounted.
   *
@@ -23,7 +23,7 @@ using namespace Poco::XML;
 namespace DB
 {

-/// For cutting prerpocessed path to this base
+/// For cutting preprocessed path to this base
 static std::string main_config_path;

 /// Extracts from a string the first encountered number consisting of at least two digits.
@@ -12,7 +12,7 @@
   * - in typical implementation of standard library, hash function for integers is trivial and just use lower bits;
   * - traffic is non-uniformly distributed across a day;
   * - we are using open-addressing linear probing hash tables that are most critical to hash function quality,
-  *   and trivial hash function gives disasterous results.
+  *   and trivial hash function gives disastrous results.
   */

 /** Taken from MurmurHash. This is Murmur finalizer.
@@ -160,7 +160,7 @@ struct TrivialHash
   * NOTE Salting is far from perfect, because it commutes with first steps of calculation.
   *
   * NOTE As mentioned, this function is slower than intHash64.
-  * But occasionaly, it is faster, when written in a loop and loop is vectorized.
+  * But occasionally, it is faster, when written in a loop and loop is vectorized.
   */
 template <DB::UInt64 salt>
 inline DB::UInt32 intHash32(DB::UInt64 key)
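Note: the "Murmur finalizer" referenced above is the standard 64-bit fmix step of MurmurHash3; a salted wrapper in the spirit of this comment could look like the sketch below (the salting detail is an assumption for illustration):

    #include <cstdint>

    /// MurmurHash3 64-bit finalizer: alternating xor-shifts and odd-constant multiplies.
    inline uint64_t murmurFinalizer(uint64_t k)
    {
        k ^= k >> 33;
        k *= 0xff51afd7ed558ccdULL;
        k ^= k >> 33;
        k *= 0xc4ceb9fe1a85ec53ULL;
        k ^= k >> 33;
        return k;
    }

    /// Hypothetical salted variant; the salt is mixed in up front, which the first
    /// steps only partially diffuse - hence the note that salting is far from perfect.
    template <uint64_t salt>
    inline uint32_t saltedIntHash32(uint64_t key)
    {
        return static_cast<uint32_t>(murmurFinalizer(key ^ salt));
    }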
@@ -165,7 +165,7 @@ void OptimizedRegularExpressionImpl<thread_safe>::analyze(
                 ++pos;
                 break;

-            /// Quantifiers that allow a zero number of occurences.
+            /// Quantifiers that allow a zero number of occurrences.
             case '{':
                 in_curly_braces = true;
                 [[fallthrough]];
@@ -40,7 +40,7 @@ struct SpaceSavingArena

 /*
  * Specialized storage for StringRef with a freelist arena.
- * Keys of this type that are retained on insertion must be serialised into local storage,
+ * Keys of this type that are retained on insertion must be serialized into local storage,
  * otherwise the reference would be invalid after the processed block is released.
 */
 template <>
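Note: "serialized into local storage" here essentially means copying the referenced bytes into memory the container owns; a hedged sketch of the idea (StringRef modeled with a stand-in struct, not the real arena):

    #include <cstddef>
    #include <vector>

    struct StringRefLike { const char * data; size_t size; };

    /// Copy the key's bytes into owned storage so the reference
    /// remains valid after the source block is released.
    StringRefLike retainKey(StringRefLike key, std::vector<std::vector<char>> & arena)
    {
        arena.emplace_back(key.data, key.data + key.size);
        return { arena.back().data(), arena.back().size() };
    }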
@@ -41,7 +41,7 @@
   * - extremely creepy code for implementation of "chroot" feature.
   *
   * As of 2018, there are no active maintainers of libzookeeper:
-  * - bugs in JIRA are fixed only occasionaly with ad-hoc patches by library users.
+  * - bugs in JIRA are fixed only occasionally with ad-hoc patches by library users.
   *
   * libzookeeper is a classical example of bad code written in C.
   *
@@ -5,7 +5,7 @@

 /// A tool for reproducing https://issues.apache.org/jira/browse/ZOOKEEPER-706
 /// Original libzookeeper can't reconnect the session if the length of SET_WATCHES message
-/// exceeeds jute.maxbuffer (0xfffff by default).
+/// exceeds jute.maxbuffer (0xfffff by default).
 /// This happens when the number of watches exceeds ~29000.
 ///
 /// Session reconnect can be caused by forbidding packets to the current zookeeper server, e.g.
@@ -51,7 +51,7 @@ using MergedRowSources = PODArray<RowSourcePart>;

 /** Gather single stream from multiple streams according to streams mask.
   * Stream mask maps row number to index of source stream.
-  * Streams should conatin exactly one column.
+  * Streams should contain exactly one column.
   */
 class ColumnGathererStream : public IProfilingBlockInputStream
 {
@@ -85,7 +85,7 @@ Block FinishSortingBlockInputStream::readImpl()
     {
         Block block = children.back()->read();

-        /// End of input stream, but we can`t return immediatly, we need to merge already read blocks.
+        /// End of input stream, but we can`t return immediately, we need to merge already read blocks.
         /// Check it later, when get end of stream from impl.
         if (!block)
         {
@@ -102,7 +102,7 @@ Block FinishSortingBlockInputStream::readImpl()
         if (size == 0)
             continue;

-        /// We need to sort each block separatly before merging.
+        /// We need to sort each block separately before merging.
         sortBlock(block, description_to_sort);

         removeConstantsFromBlock(block);
@@ -17,7 +17,7 @@ namespace DB
 /** Intended for implementation of "rollup" - aggregation (rounding) of older data
   * for a table with Graphite data (Graphite is the system for time series monitoring).
   *
-  * Table with graphite data has at least the folowing columns (accurate to the name):
+  * Table with graphite data has at least the following columns (accurate to the name):
   * Path, Time, Value, Version
   *
   * Path - name of metric (sensor);
@@ -322,7 +322,7 @@ void registerDataTypeString(DataTypeFactory & factory)

     factory.registerSimpleDataType("String", creator);

-    /// These synonims are added for compatibility.
+    /// These synonyms are added for compatibility.

     factory.registerAlias("CHAR", "String", DataTypeFactory::CaseInsensitive);
     factory.registerAlias("VARCHAR", "String", DataTypeFactory::CaseInsensitive);
@@ -20,7 +20,7 @@ void registerDataTypeNumbers(DataTypeFactory & factory)
     factory.registerSimpleDataType("Float32", [] { return DataTypePtr(std::make_shared<DataTypeFloat32>()); });
     factory.registerSimpleDataType("Float64", [] { return DataTypePtr(std::make_shared<DataTypeFloat64>()); });

-    /// These synonims are added for compatibility.
+    /// These synonyms are added for compatibility.

     factory.registerAlias("TINYINT", "Int8", DataTypeFactory::CaseInsensitive);
     factory.registerAlias("SMALLINT", "Int16", DataTypeFactory::CaseInsensitive);
@@ -188,7 +188,7 @@ template <typename A> struct ToInteger


 // CLICKHOUSE-29. The same depth, different signs
-// NOTE: This case is applied for 64-bit integers only (for backward compability), but could be used for any-bit integers
+// NOTE: This case is applied for 64-bit integers only (for backward compatibility), but could be used for any-bit integers
 template <typename A, typename B>
 constexpr bool LeastGreatestSpecialCase =
     std::is_integral_v<A> && std::is_integral_v<B>
@@ -11,7 +11,7 @@
 #include <Common/config.h>

 /** More efficient implementations of mathematical functions are possible when using a separate library.
-  * Disabled due to licence compatibility limitations.
+  * Disabled due to license compatibility limitations.
   * To enable: download http://www.agner.org/optimize/vectorclass.zip and unpack to contrib/vectorclass
   * Then rebuild with -DENABLE_VECTORCLASS=1
   */
@@ -10,7 +10,7 @@
 #include <Common/config.h>

 /** More efficient implementations of mathematical functions are possible when using a separate library.
-  * Disabled due to licence compatibility limitations.
+  * Disabled due to license compatibility limitations.
   * To enable: download http://www.agner.org/optimize/vectorclass.zip and unpack to contrib/vectorclass
   * Then rebuild with -DENABLE_VECTORCLASS=1
   */
@@ -24,7 +24,7 @@ namespace ErrorCodes
   *
   * Non-cryptographic generators:
   *
-  * rand - linear congruental generator 0 .. 2^32 - 1.
+  * rand - linear congruential generator 0 .. 2^32 - 1.
   * rand64 - combines several rand values to get values from the range 0 .. 2^64 - 1.
   *
   * randConstant - service function, produces a constant column with a random value.
@@ -39,7 +39,7 @@ namespace DB
   * replaceRegexpOne(haystack, pattern, replacement) - replaces the pattern with the specified regexp, only the first occurrence.
   * replaceRegexpAll(haystack, pattern, replacement) - replaces the pattern with the specified type, all occurrences.
   *
-  * multiPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurences (positions) of all the const patterns inside haystack
+  * multiPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurrences (positions) of all the const patterns inside haystack
   * multiPositionUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
   * multiPositionCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
   * multiPositionCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
@@ -23,7 +23,7 @@ namespace DB
   * queryStringAndFragment
   *
   * Functions, removing parts from URL.
-  * If URL has nothing like, then it is retured unchanged.
+  * If URL has nothing like, then it is returned unchanged.
   *
   * cutWWW
   * cutFragment
@@ -163,7 +163,7 @@ public:
       * Function could be injective with some arguments fixed to some constant values.
       * Examples:
       *  plus(const, x);
-      *  multiply(const, x) where x is an integer and constant is not divisable by two;
+      *  multiply(const, x) where x is an integer and constant is not divisible by two;
       *  concat(x, 'const');
       *  concat(x, 'const', y) where const contain at least one non-numeric character;
       *  concat with FixedString
@@ -36,7 +36,7 @@ namespace ErrorCodes
   *
   * It is implemented in two steps.
   * At first step, it creates a pattern of zeros, literal characters, whitespaces, etc.
-  * and quickly fills resulting charater array (string column) with this pattern.
+  * and quickly fills resulting character array (string column) with this pattern.
   * At second step, it walks across the resulting character array and modifies/replaces specific charaters,
   * by calling some functions by pointers and shifting cursor by specified amount.
   *
@@ -17,7 +17,7 @@ struct MinusImpl
         return static_cast<Result>(a) - b;
     }

-    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false othervise.
+    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise.
     template <typename Result = ResultType>
     static inline bool apply(A a, B b, Result & c)
     {
@@ -17,7 +17,7 @@ struct MultiplyImpl
         return static_cast<Result>(a) * b;
     }

-    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false othervise.
+    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise.
     template <typename Result = ResultType>
     static inline bool apply(A a, B b, Result & c)
     {
@@ -18,7 +18,7 @@ struct PlusImpl
         return static_cast<Result>(a) + b;
     }

-    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false othervise.
+    /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise.
     template <typename Result = ResultType>
     static inline bool apply(A a, B b, Result & c)
     {
@@ -991,7 +991,7 @@ void skipToUnescapedNextLineOrEOF(ReadBuffer & buf)
             if (buf.eof())
                 return;

-            /// Skip escaped character. We do not consider escape sequences with more than one charater after backslash (\x01).
+            /// Skip escaped character. We do not consider escape sequences with more than one character after backslash (\x01).
             /// It's ok for the purpose of this function, because we are interested only in \n and \\.
             ++buf.position();
             continue;
@@ -581,7 +581,7 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons
 {
     /** Read 10 characters, that could represent unix timestamp.
       * Only unix timestamp of 5-10 characters is supported.
-      * Then look at 5th charater. If it is a number - treat whole as unix timestamp.
+      * Then look at 5th character. If it is a number - treat whole as unix timestamp.
       * If it is not a number - then parse datetime in YYYY-MM-DD hh:mm:ss format.
       */

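Note: the 5th-character test works because in 'YYYY-MM-DD hh:mm:ss' the fifth character is always '-', while a 5-to-10 digit unix timestamp has a digit there. A minimal illustration (hypothetical helper, not the real parser):

    #include <cctype>
    #include <string>

    /// "1546300800"          -> s[4] == '3', a digit -> unix timestamp
    /// "2019-01-01 00:00:00" -> s[4] == '-'          -> calendar format
    bool looksLikeUnixTimestamp(const std::string & s)
    {
        return s.size() >= 5 && std::isdigit(static_cast<unsigned char>(s[4]));
    }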
@@ -124,7 +124,7 @@ struct ContextShared
     ConfigurationPtr config;    /// Global configuration settings.

     Databases databases;    /// List of databases and tables in them.
-    mutable std::shared_ptr<EmbeddedDictionaries> embedded_dictionaries;    /// Metrica's dictionaeis. Have lazy initialization.
+    mutable std::shared_ptr<EmbeddedDictionaries> embedded_dictionaries;    /// Metrica's dictionaries. Have lazy initialization.
     mutable std::shared_ptr<ExternalDictionaries> external_dictionaries;
     mutable std::shared_ptr<ExternalModels> external_models;
     String default_profile_name;    /// Default profile name used for default values.
@@ -276,7 +276,7 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason)
     catch (...)
     {
         /// What should we do if we even cannot parse host name and therefore cannot properly submit execution status?
-        /// We can try to create fail node using FQDN if it equal to host name in cluster config attempt will be sucessfull.
+        /// We can try to create fail node using FQDN if it equal to host name in cluster config attempt will be successful.
         /// Otherwise, that node will be ignored by DDLQueryStatusInputSream.

         tryLogCurrentException(log, "Cannot parse DDL task " + entry_name + ", will try to send error status");
@@ -1153,7 +1153,7 @@ private:
     Strings current_active_hosts;    /// Hosts that were in active state at the last check
     size_t num_hosts_finished = 0;

-    /// Save the first detected error and throw it at the end of excecution
+    /// Save the first detected error and throw it at the end of execution
     std::unique_ptr<Exception> first_exception;

     Int64 timeout_seconds = 120;
@@ -77,7 +77,7 @@ BlockIO InterpreterRenameQuery::execute()

     std::set<UniqueTableName> unique_tables_from;

-    /// Don't allow to drop tables (that we are renaming); do't allow to create tables in places where tables will be renamed.
+    /// Don't allow to drop tables (that we are renaming); don't allow to create tables in places where tables will be renamed.
     std::map<UniqueTableName, std::unique_ptr<DDLGuard>> table_guards;

     for (const auto & elem : rename.elements)
@@ -586,7 +586,7 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt
             executePreLimit(pipeline);
     }

-    // If there is no global subqueries, we can run subqueries only when recieve them on server.
+    // If there is no global subqueries, we can run subqueries only when receive them on server.
     if (!query_analyzer->hasGlobalSubqueries() && !expressions.subqueries_for_sets.empty())
         executeSubqueriesInSetsAndJoins(pipeline, expressions.subqueries_for_sets);
 }
@@ -53,7 +53,7 @@ ExecutionStatus getOverallExecutionStatusOfCommands()
     return ExecutionStatus(0);
 }

-/// Consequently tries to execute all commands and genreates final exception message for failed commands
+/// Consequently tries to execute all commands and generates final exception message for failed commands
 template <typename Callable, typename ... Callables>
 ExecutionStatus getOverallExecutionStatusOfCommands(Callable && command, Callables && ... commands)
 {
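Note: the declaration suggests the usual variadic-template recursion — run the first command, recurse on the rest, concatenate failure messages. A simplified sketch under that assumption (stand-in Status type, not the real ExecutionStatus):

    #include <exception>
    #include <string>
    #include <utility>

    struct Status { int code = 0; std::string message; };

    /// Base case: no commands left, overall success.
    Status runAll() { return {}; }

    /// Execute the first command, then the rest; merge failure messages.
    template <typename Callable, typename... Callables>
    Status runAll(Callable && command, Callables &&... rest)
    {
        Status first;
        try { std::forward<Callable>(command)(); }
        catch (const std::exception & e) { first = {1, e.what()}; }

        Status others = runAll(std::forward<Callables>(rest)...);
        if (first.code == 0)
            return others;
        if (others.code != 0)
            first.message += "\n" + others.message;
        return first;
    }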
@@ -278,7 +278,7 @@ struct Settings
     M(SettingBool, log_profile_events, true, "Log query performance statistics into the query_log and query_thread_log.") \
     M(SettingBool, log_query_settings, true, "Log query settings into the query_log.") \
     M(SettingBool, log_query_threads, true, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.") \
-    M(SettingLogsLevel, send_logs_level, "none", "Send server text logs with specified minumum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'none'") \
+    M(SettingLogsLevel, send_logs_level, "none", "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'none'") \
     M(SettingBool, enable_optimize_predicate_expression, 0, "If it is set to true, optimize predicates to subqueries.") \
     \
     M(SettingUInt64, low_cardinality_max_dictionary_size, 8192, "Maximum size (in rows) of shared global dictionary for LowCardinality type.") \
@@ -766,7 +766,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
     /// Executing scalar subqueries - replacing them with constant values.
     executeScalarSubqueries(query, context, subquery_depth);

-    /// Optimize if with constant condition after constants was substituted instead of sclalar subqueries.
+    /// Optimize if with constant condition after constants was substituted instead of scalar subqueries.
     OptimizeIfWithConstantConditionVisitor(result.aliases).visit(query);

     if (select_query)
@@ -115,7 +115,7 @@ std::vector<ASTPtr *> TranslateQualifiedNamesMatcher::visit(ASTTableJoin & join,

 std::vector<ASTPtr *> TranslateQualifiedNamesMatcher::visit(ASTSelectQuery & select, const ASTPtr & , Data &)
 {
-    /// If the WHERE clause or HAVING consists of a single quailified column, the reference must be translated not only in children,
+    /// If the WHERE clause or HAVING consists of a single qualified column, the reference must be translated not only in children,
     /// but also in where_expression and having_expression.
     std::vector<ASTPtr *> out;
     if (select.prewhere_expression)
@@ -330,7 +330,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
                 {
                     if (auto counting_stream = dynamic_cast<const CountingBlockOutputStream *>(stream_out))
                     {
-                        /// NOTE: Redundancy. The same values coulld be extracted from process_list_elem->progress_out.query_settings = process_list_elem->progress_in
+                        /// NOTE: Redundancy. The same values could be extracted from process_list_elem->progress_out.query_settings = process_list_elem->progress_in
                         elem.result_rows = counting_stream->getProgress().rows;
                         elem.result_bytes = counting_stream->getProgress().bytes;
                     }
@@ -15,7 +15,7 @@ namespace DB
   * or
   * (subquery)
   *
-  * Optionally with alias (correllation name):
+  * Optionally with alias (correlation name):
   * [AS] alias
   *
   * Table may contain FINAL and SAMPLE modifiers:
@@ -132,7 +132,7 @@ Token Lexer::nextTokenImpl()
                     ++pos;
             }

-            /// exponentation (base 10 or base 2)
+            /// exponentiation (base 10 or base 2)
             if (pos + 1 < end && (hex ? (*pos == 'p' || *pos == 'P') : (*pos == 'e' || *pos == 'E')))
             {
                 ++pos;
@@ -195,7 +195,7 @@ Token Lexer::nextTokenImpl()
                 while (pos < end && isNumericASCII(*pos))
                     ++pos;

-                /// exponentation
+                /// exponentiation
                 if (pos + 1 < end && (*pos == 'e' || *pos == 'E'))
                 {
                     ++pos;
@@ -49,7 +49,7 @@ ASTPtr parseQuery(


 /** Split queries separated by ; on to list of single queries
-  * Returns pointer to the end of last sucessfuly parsed query (first), and true if all queries are sucessfuly parsed (second)
+  * Returns pointer to the end of last successfully parsed query (first), and true if all queries are successfully parsed (second)
   * NOTE: INSERT's data should be placed in single line.
   */
 std::pair<const char *, bool> splitMultipartQuery(const std::string & queries, std::vector<std::string> & queries_list);
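Note: going only by the signature declared above, usage would look roughly like this (hypothetical call site):

    std::vector<std::string> queries_list;
    auto [pos, all_parsed] = splitMultipartQuery("SELECT 1; SELECT 2", queries_list);
    /// all_parsed is true when every query parsed successfully;
    /// pos points just past the end of the last successfully parsed query.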
@@ -155,7 +155,7 @@ public:
             return;

         // An error was thrown during the stream or it did not finish successfully
-        // The read offsets weren't comitted, so consumer must rejoin the group from the original starting point
+        // The read offsets weren't committed, so consumer must rejoin the group from the original starting point
         if (!finalized)
         {
             LOG_TRACE(storage.log, "KafkaBlockInputStream did not finish successfully, unsubscribing from assignments and rejoining");
@@ -6,7 +6,7 @@
 namespace DB
 {

-/// Select all parts within partition (having at least two parts) with minumum total size.
+/// Select all parts within partition (having at least two parts) with minimum total size.
 class AllMergeSelector : public IMergeSelector
 {
 public:
@@ -339,7 +339,7 @@ private:

     /** Is node the key column
       *  or expression in which column of key is wrapped by chain of functions,
-      *  that can be monotomic on certain ranges?
+      *  that can be monotonic on certain ranges?
       *  If these conditions are true, then returns number of column in key, type of resulting expression
       *  and fills chain of possibly-monotonic functions.
       */
@@ -84,7 +84,7 @@ MergeTreeBlockSizePredictor::MergeTreeBlockSizePredictor(
     : data_part(data_part_)
 {
     number_of_rows_in_part = data_part->rows_count;
-    /// Initialize with sample block untill update won't called.
+    /// Initialize with sample block until update won't called.
     initialize(sample_block, columns);
 }

@@ -173,7 +173,7 @@ void MergeTreeBlockSizePredictor::update(const Block & block, double decay)
     block_size_rows = new_rows;

     /// Make recursive updates for each read row: v_{i+1} = (1 - decay) v_{i} + decay v_{target}
-    /// Use sum of gemetric sequence formula to update multiple rows: v{n} = (1 - decay)^n v_{0} + (1 - (1 - decay)^n) v_{target}
+    /// Use sum of geometric sequence formula to update multiple rows: v{n} = (1 - decay)^n v_{0} + (1 - (1 - decay)^n) v_{target}
     /// NOTE: DEFAULT and MATERIALIZED columns without data has inaccurate estimation of v_{target}
     double alpha = std::pow(1. - decay, diff_rows);

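Note: the closed form comes from unrolling the moving-average recurrence: applying v_{i+1} = (1 - decay) v_{i} + decay v_{target} a total of n times yields v_{n} = (1 - decay)^n v_{0} + (1 - (1 - decay)^n) v_{target}. A quick self-contained numeric check of that equivalence:

    #include <cassert>
    #include <cmath>

    int main()
    {
        double v0 = 100.0, v_target = 40.0, decay = 0.1;
        int n = 7;

        /// n single-step exponential moving average updates.
        double stepwise = v0;
        for (int i = 0; i < n; ++i)
            stepwise = (1.0 - decay) * stepwise + decay * v_target;

        /// Closed form; alpha plays the same role as in the hunk above.
        double alpha = std::pow(1.0 - decay, n);
        double closed_form = alpha * v0 + (1.0 - alpha) * v_target;

        assert(std::fabs(stepwise - closed_form) < 1e-9);
        return 0;
    }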
@@ -714,7 +714,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
     if (disk_reservation && sum_input_rows_upper_bound)
     {
         /// The same progress from merge_entry could be used for both algorithms (it should be more accurate)
-        /// But now we are using inaccurate row-based estimation in Horizontal case for backward compability
+        /// But now we are using inaccurate row-based estimation in Horizontal case for backward compatibility
         Float64 progress = (merge_alg == MergeAlgorithm::Horizontal)
             ? std::min(1., 1. * rows_written / sum_input_rows_upper_bound)
             : std::min(1., merge_entry->progress.load(std::memory_order_relaxed));
@@ -16,7 +16,7 @@ public:

     /** Minimum ratio of size of one part to all parts in set of parts to merge (for usual cases).
       * For example, if all parts have equal size, it means, that at least 'base' number of parts should be merged.
-      * If parts has non-uniform sizes, then minumum number of parts to merge is effectively increased.
+      * If parts has non-uniform sizes, then minimum number of parts to merge is effectively increased.
       * This behaviour balances merge-tree workload.
       * It called 'base', because merge-tree depth could be estimated as logarithm with that base.
       *
@@ -131,7 +131,7 @@ public:
         if (storage.table_fd_init_offset < 0)
             throw Exception("File descriptor isn't seekable, inside " + storage.getName(), ErrorCodes::CANNOT_SEEK_THROUGH_FILE);

-        /// ReadBuffer's seek() doesn't make sence, since cache is empty
+        /// ReadBuffer's seek() doesn't make sense, since cache is empty
         if (lseek(storage.table_fd, storage.table_fd_init_offset, SEEK_SET) < 0)
             throwFromErrno("Cannot seek file descriptor, inside " + storage.getName(), ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
     }
@@ -1592,7 +1592,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
     MergeTreePartInfo new_part_info;
     String checksum_hex;

-    /// Part which will be comitted
+    /// Part which will be committed
     MergeTreeData::MutableDataPartPtr res_part;

     /// We could find a covering part
@@ -1624,7 +1624,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
             data.format_version));
     }

-    /// What parts we should add? Or we have already added all required parts (we an replica-intializer)
+    /// What parts we should add? Or we have already added all required parts (we an replica-initializer)
     {
         auto data_parts_lock = data.lockParts();

@@ -3427,7 +3427,7 @@ bool StorageReplicatedMergeTree::getFakePartCoveringAllPartsInPartition(const St

         --right;

-        /// Artificial high level is choosen, to make this part "covering" all parts inside.
+        /// Artificial high level is chosen, to make this part "covering" all parts inside.
         part_info = MergeTreePartInfo(partition_id, left, right, MergeTreePartInfo::MAX_LEVEL, mutation_version);
         return true;
     }
@@ -3692,7 +3692,7 @@ std::optional<EphemeralLockInZooKeeper>
 StorageReplicatedMergeTree::allocateBlockNumber(
     const String & partition_id, zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_block_id_path)
 {
-    /// Lets check for duplicates in advance, to avoid superflous block numbers allocation
+    /// Lets check for duplicates in advance, to avoid superfluous block numbers allocation
     Coordination::Requests deduplication_check_ops;
     if (!zookeeper_block_id_path.empty())
     {
@@ -4742,7 +4742,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_
     for (size_t i = 0; i < src_all_parts.size(); ++i)
     {
         /// We also make some kind of deduplication to avoid duplicated parts in case of ATTACH PARTITION
-        /// Assume that merges in the partiton are quite rare
+        /// Assume that merges in the partition are quite rare
         /// Save deduplication block ids with special prefix replace_partition

         auto & src_part = src_all_parts[i];
@@ -33,5 +33,5 @@ SELECT roundToExp2(number) AS k, length(groupArray(1)([hex(number)] AS i)), leng

 DROP TABLE test.numbers_mt;

--- Check binary compability:
+-- Check binary compatibility:
 -- clickhouse-client -h old -q "SELECT arrayReduce('groupArrayState', [['1'], ['22'], ['333']]) FORMAT RowBinary" | clickhouse-local -s --input-format RowBinary --structure "d AggregateFunction(groupArray2, Array(String))" -q "SELECT groupArray2Merge(d) FROM table"