Merge pull request #18463 from qoega/codespell-changes

Fix codespell warnings. Split style checks. Update style checks docker
commit 608b9a28ba
Ilya Yatsishin, 2020-12-24 16:53:11 +03:00, committed by GitHub
35 changed files with 71 additions and 55 deletions


@@ -38,7 +38,7 @@
 * = log(6.3*5.3) + lgamma(5.3)
 * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
 * 2. Polynomial approximation of lgamma around its
-* minimun ymin=1.461632144968362245 to maintain monotonicity.
+* minimum ymin=1.461632144968362245 to maintain monotonicity.
 * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
 * Let z = x-ymin;
 * lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
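
For context, the reduction these quoted lines document is the logarithm of the Gamma recurrence Gamma(x) = (x-1) * Gamma(x-1); a short worked sketch in LaTeX (poly(z) stands for the approximation polynomial whose coefficients are not shown in the quoted lines):

    % taking logs of Gamma(x) = (x-1) Gamma(x-1):
    \ln\Gamma(x) = \ln(x-1) + \ln\Gamma(x-1)
    % applied repeatedly until the argument lands near the minimum, e.g.:
    \ln\Gamma(7.3) = \ln(6.3 \cdot 5.3 \cdot 4.3 \cdot 3.3 \cdot 2.3) + \ln\Gamma(2.3)
    % near the minimum y_min = 1.461632144968362245, with z = x - y_min:
    \ln\Gamma(x) \approx \ln\Gamma(y_{\min}) + z^2 \, \mathrm{poly}(z)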


@@ -202,7 +202,7 @@ long double powl(long double x, long double y)
 volatile long double z=0;
 long double w=0, W=0, Wa=0, Wb=0, ya=0, yb=0, u=0;
-/* make sure no invalid exception is raised by nan comparision */
+/* make sure no invalid exception is raised by nan comparison */
 if (isnan(x)) {
 if (!isnan(y) && y == 0.0)
 return 1.0;


@@ -129,7 +129,7 @@ using namespace pcg_extras;
 *
 * default_multiplier<uint32_t>::multiplier()
 *
-* gives you the default multipler for 32-bit integers. We use the name
+* gives you the default multiplier for 32-bit integers. We use the name
 * of the constant and not a generic word like value to allow these classes
 * to be used as mixins.
 */


@@ -4,5 +4,8 @@ FROM ubuntu:20.04
 RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes shellcheck libxml2-utils git python3-pip && pip3 install codespell
-CMD cd /ClickHouse/utils/check-style && ./check-style -n | tee /test_output/style_output.txt && \
+CMD cd /ClickHouse/utils/check-style && \
+    ./check-style -n | tee /test_output/style_output.txt && \
+    ./check-typos | tee /test_output/typos_output.txt && \
+    ./check-whitespaces -n | tee /test_output/whitespaces_output.txt && \
     ./check-duplicate-includes.sh | tee /test_output/duplicate_output.txt
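
A minimal sketch of running this image locally; the image tag and host paths below are illustrative assumptions, not taken from this commit:

    # Mount a ClickHouse checkout plus an output directory and let the CMD
    # above run all four checks; each check tees its report into /test_output.
    mkdir -p test_output
    docker run --rm \
        --volume="$PWD:/ClickHouse" \
        --volume="$PWD/test_output:/test_output" \
        clickhouse/style-test    # hypothetical tag for an image built from this Dockerfile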


@@ -107,7 +107,7 @@ public:
 /// TODO Do positions need to be 1-based for this function?
 size_t position = columns[1]->getUInt(row_num);
-/// If position is larger than size to which array will be cutted - simply ignore value.
+/// If position is larger than size to which array will be cut - simply ignore value.
 if (length_to_resize && position >= length_to_resize)
 return;


@@ -97,7 +97,7 @@ public:
 /// If preprocessed_dir is empty - calculate from loaded_config.path + /preprocessed_configs/
 void savePreprocessedConfig(const LoadedConfig & loaded_config, std::string preprocessed_dir);
-/// Set path of main config.xml. It will be cutted from all configs placed to preprocessed_configs/
+/// Set path of main config.xml. It will be cut from all configs placed to preprocessed_configs/
 static void setConfigPath(const std::string & config_path);
 public:


@@ -98,7 +98,7 @@ private:
 /// Special class of exceptions, used mostly in ParallelParsingInputFormat for
-/// more convinient calculation of problem line number.
+/// more convenient calculation of problem line number.
 class ParsingException : public Exception
 {
 public:


@@ -1008,7 +1008,7 @@ public:
 * then deleting a erased_key_position will break search for it, so we need to move next_element
 * to erased_key_position. Now we have empty place at next_element, so we apply the identical
 * procedure for it.
-* If an empty element is encoutered then means that there is no more next elements for which we can
+* If an empty element is encountered then means that there is no more next elements for which we can
 * break the search so we can exit.
 */


@@ -44,7 +44,7 @@ namespace QueryProcessingStage
 : "Unknown stage";
 }
-/// This methid is used for the program options,
+/// This method is used for the program options,
 /// hence it accept under_score notation for stage:
 /// - complete
 /// - fetch_columns


@@ -185,7 +185,7 @@ using Decimal64 = Decimal<Int64>;
 using Decimal128 = Decimal<Int128>;
 using Decimal256 = Decimal<Int256>;
-// Distinguishable type to allow function resultion/deduction based on value type,
+// Distinguishable type to allow function resolution/deduction based on value type,
 // but also relatively easy to convert to/from Decimal64.
 class DateTime64 : public Decimal64
 {


@@ -204,7 +204,7 @@ void DataTypeMap::serializeText(const IColumn & column, size_t row_num, WriteBuf
 void DataTypeMap::deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-// need_safe_get_int_key is set for Interger to prevent to readIntTextUnsafe
+// need_safe_get_int_key is set for Integer to prevent to readIntTextUnsafe
 bool need_safe_get_int_key = isInteger(key_type);
 deserializeTextImpl(column, istr, need_safe_get_int_key,
@@ -226,7 +226,7 @@ void DataTypeMap::serializeTextJSON(const IColumn & column, size_t row_num, Writ
 void DataTypeMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-// need_safe_get_int_key is set for Interger to prevent to readIntTextUnsafe
+// need_safe_get_int_key is set for Integer to prevent to readIntTextUnsafe
 bool need_safe_get_int_key = isInteger(key_type);
 deserializeTextImpl(column, istr, need_safe_get_int_key,


@@ -67,7 +67,7 @@ public:
 * If the data type require single stream (it's true for most of data types), the stream will have empty path.
 * Otherwise, the path can have components like "array elements", "array sizes", etc.
 *
-* For multidimensional arrays, path can have arbiraty length.
+* For multidimensional arrays, path can have arbitrary length.
 * As an example, for 2-dimensional arrays of numbers we have at least three streams:
 * - array sizes; (sizes of top level arrays)
 * - array elements / array sizes; (sizes of second level (nested) arrays)


@@ -229,7 +229,7 @@ void DatabaseOnDisk::removeDetachedPermanentlyFlag(const String & table_name, co
 }
 catch (Exception & e)
 {
-e.addMessage("while trying to remove permanenty detached flag. Table {}.{} may still be marked as permanently detached, and will not be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
+e.addMessage("while trying to remove permanently detached flag. Table {}.{} may still be marked as permanently detached, and will not be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
 throw;
 }
 }
@@ -264,7 +264,7 @@ void DatabaseOnDisk::detachTablePermanently(const String & table_name)
 }
 catch (Exception & e)
 {
-e.addMessage("while trying to set permanenty detached flag. Table {}.{} may be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
+e.addMessage("while trying to set permanently detached flag. Table {}.{} may be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
 throw;
 }
 }


@@ -147,7 +147,7 @@ void CacheDictionary::getItemsNumberImpl(
 tryPushToUpdateQueueOrThrow(update_unit_ptr);
 waitForCurrentUpdateFinish(update_unit_ptr);
-/// Add updated keys to asnwer.
+/// Add updated keys to answer.
 const size_t attribute_index = getAttributeIndex(attribute.name);


@@ -61,7 +61,7 @@ struct ExtractStringImpl
 // read a ASCII word
 static ALWAYS_INLINE inline size_t readOneASCIIWord(PaddedPODArray<UInt8> & word_buf, const char *& pos, const char * end)
 {
-// jump seperators
+// jump separators
 while (pos < end && !isAlphaNumericASCII(*pos))
 ++pos;
@@ -93,7 +93,7 @@ struct ExtractStringImpl
 // read one UTF8 word from pos to word
 static ALWAYS_INLINE inline size_t readOneUTF8Word(PaddedPODArray<UInt32> & word_buf, const char *& pos, const char * end)
 {
-// jump UTF8 seperator
+// jump UTF8 separator
 while (pos < end && isUTF8Sep(*pos))
 ++pos;
 word_buf.clear();
@@ -112,7 +112,7 @@ private:
 ((cont[Offset + I] = std::tolower(cont[Offset + I])), ...);
 }
-// we use ASCII non-alphanum character as UTF8 seperator
+// we use ASCII non-alphanum character as UTF8 separator
 static ALWAYS_INLINE inline bool isUTF8Sep(const UInt8 c) { return c < 128 && !isAlphaNumericASCII(c); }
 // read one UTF8 character and return it


@@ -151,7 +151,7 @@ template <size_t N, typename CodePoint, bool UTF8, bool Ngram, bool CaseInsensit
 struct SimhashImpl
 {
 using StrOp = ExtractStringImpl<N, CaseInsensitive>;
-// we made an assumption that the size of one word cann't exceed 128, which may not true
+// we made an assumption that the size of one word can't exceed 128, which may not true
 // if some word's size exceed 128, it would be cut up to several word
 static constexpr size_t max_string_size = 1u << 15;
 static constexpr size_t simultaneously_codepoints_num = StrOp::buffer_size;
@@ -203,7 +203,7 @@ struct SimhashImpl
 return res_bit.to_ullong();
 }
-// Simhash word shingle calculate funtion: String -> UInt64
+// Simhash word shingle calculate function: String -> UInt64
 // this function extracting n word shingle from input string, and maintain a 64-dimensions vector as well
 // for each word shingle, calculate a 64 bit hash value, and update the vector according the hash value
 // finally return a 64 bit value(UInt64), i'th bit is 1 means vector[i] > 0, otherwise, vector[i] < 0
@@ -211,8 +211,8 @@ struct SimhashImpl
 // word shingle hash value calculate:
 // 1. at the first, extracts N word shingles and calculate N hash values, store into an array, use this N hash values
 // to calculate the first word shingle hash value
-// 2. next, we extrac one word each time, and calculate a new hash value of the new word,then use the latest N hash
-// values to caculate the next word shingle hash value
+// 2. next, we extract one word each time, and calculate a new hash value of the new word,then use the latest N hash
+// values to calculate the next word shingle hash value
 static ALWAYS_INLINE inline UInt64 wordShinglesCalculateHashValue(
 const char * data,
 size_t size,
@@ -254,12 +254,12 @@ struct SimhashImpl
 // for example, N = 5, array |a0|a1|a2|a3|a4|, now , a0 is the oldest location,
 // so we need to store new word hash into location of a0, then ,this array become
 // |a5|a1|a2|a3|a4|, next time, a1 become the oldest location, we need to store new
-// word hash value into locaion of a1, then array become |a5|a6|a2|a3|a4|
+// word hash value into location of a1, then array become |a5|a6|a2|a3|a4|
 nword_hashes[offset] = Hash::hashSum(word_buf.data(), word_buf.size());
 offset = (offset + 1) % N;
 // according to the word hash storation way, in order to not lose the word shingle's
 // sequence information, when calculation word shingle hash value, we need provide the offset
-// inforation, which is the offset of the first word's hash value of the word shingle
+// information, which is the offset of the first word's hash value of the word shingle
 hash_value = hash_functor(nword_hashes, N, offset);
 std::bitset<64> bits(hash_value);
 for (size_t i = 0; i < 64; ++i)
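
Spelled out, the scheme these comments describe looks as follows (the +1/-1 weighting is the standard simhash update, an assumption not stated verbatim in the quoted lines):

    % for each word shingle hash h, update the 64-dimensional vector v:
    v_i \mathrel{+}= \begin{cases} +1 & \text{if bit } i \text{ of } h \text{ is set} \\ -1 & \text{otherwise} \end{cases}
    % finalization: the i-th result bit is 1 iff v_i > 0
    \mathrm{simhash} = \sum_{i=0}^{63} [v_i > 0] \cdot 2^i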


@@ -29,7 +29,7 @@ namespace DB
 * multiMatchAnyIndex(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- search by re2 regular expressions pattern_i; Returns index of any match or zero if none;
 * multiMatchAllIndices(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- search by re2 regular expressions pattern_i; Returns an array of matched indices in any order;
 *
-* countSubstrings(haystack, needle) -- count number of occurences of needle in haystack.
+* countSubstrings(haystack, needle) -- count number of occurrences of needle in haystack.
 * countSubstringsCaseInsensitive(haystack, needle)
 *
 * Applies regexp re2 and pulls:


@@ -236,7 +236,7 @@ public:
 if (const ColumnConst * col_higher_is_better = checkAndGetColumnConst<ColumnUInt8>(arguments[1].column.get()))
 higher_is_better = col_higher_is_better->getBool(0);
 else
-throw Exception("Second argument for function " + getName() + " must be Constatnt boolean", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+throw Exception("Second argument for function " + getName() + " must be Constant boolean", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
 if (const ColumnConst * col_const_arr = checkAndGetColumnConst<ColumnArray>(arguments[2].column.get()))
 {


@@ -86,7 +86,7 @@ private:
 ColumnPtr executeTuple(const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const;
 /** For a map the function finds the matched value for a key.
-* Currently implemented just as linear seach in array.
+* Currently implemented just as linear search in array.
 * However, optimizations are possible.
 */
 ColumnPtr executeMap(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const;


@@ -15,7 +15,7 @@ namespace ErrorCodes
 /** Writes data to existing std::vector or similar type. When not enough space, it doubles vector size.
 *
-* In destructor, vector is cutted to the size of written data.
+* In destructor, vector is cut to the size of written data.
 * You can call 'finalize' to resize earlier.
 *
 * The vector should live until this object is destroyed or until the 'finish' method is called.


@@ -268,7 +268,7 @@ BlockIO InterpreterInsertQuery::execute()
 const auto & selects = select_query.list_of_selects->children;
 const auto & union_modes = select_query.list_of_modes;
-/// ASTSelectWithUnionQuery is not normalized now, so it may pass some querys which can be Trivial select querys
+/// ASTSelectWithUnionQuery is not normalized now, so it may pass some queries which can be Trivial select queries
 is_trivial_insert_select
 = std::all_of(
 union_modes.begin(),


@@ -57,7 +57,7 @@ BlockIO InterpreterOptimizeQuery::execute()
 {
 // Deduplication is performed only for adjacent rows in a block,
 // and all rows in block are in the sorting key order within a single partition,
-// hence deduplication always implicitly takes sorting keys and parition keys in account.
+// hence deduplication always implicitly takes sorting keys and partition keys in account.
 // So we just explicitly state that limitation in order to avoid confusion.
 if (std::find(column_names.begin(), column_names.end(), required_col) == column_names.end())
 throw Exception(ErrorCodes::THERE_IS_NO_COLUMN,


@@ -132,7 +132,7 @@ struct CustomizeAggregateFunctionsSuffixData
 }
 };
-// Used to rewrite aggregate functions with -OrNull suffix in some cases, such as sumIfOrNull, we shoule rewrite to sumOrNullIf
+// Used to rewrite aggregate functions with -OrNull suffix in some cases, such as sumIfOrNull, we should rewrite to sumOrNullIf
 struct CustomizeAggregateFunctionsMoveSuffixData
 {
 using TypeToVisit = ASTFunction;


@@ -57,7 +57,7 @@ JoinExpr::JoinExpr(JoinExpr::ExprType type, JoinExpr::JoinOpType op, JoinExpr::J
 ASTPtr JoinExpr::convertToOld() const
 {
-/** The sole convertable chain of Join's may look like:
+/** The sole convertible chain of Join's may look like:
 *
 * FROM table1 JOIN table2 ON SMTH JOIN table3 ON SMTH JOIN
 *


@@ -13,7 +13,7 @@ Basic principles in code
 **predetermined order** and with **predetermined type**: some elements may be `nullptr` to preserve positions of other elements.
 - The order may be defined as a position in vector from the start, the last element, and some pattern of variable number of elements
 in between. It's convenient to define `enum ChildIndex : Uint8 {…}` with index numbers for each class.
-- If there is more than one variable pack of elements or the order can't be determenistic, then wrap elements into the lists and store the
+- If there is more than one variable pack of elements or the order can't be deterministic, then wrap elements into the lists and store the
 multi-level structure (see `ColumnExpr::ExprType::FUNCTION` for example).
 - Don't do multi-level structure just for nothing or to mimic the parse tree: the less is depth the better.
 - The whole grammar separates expressions for databases, tables and columns. That way we already assess the semantics on the parser level.


@@ -14,7 +14,7 @@ bool ParserOptimizeQueryColumnsSpecification::parseImpl(Pos & pos, ASTPtr & node
 {
 // Do not allow APPLY and REPLACE transformers.
 // Since we use Columns Transformers only to get list of columns,
-// ad we can't actuall modify content of the columns for deduplication.
+// we can't actually modify content of the columns for deduplication.
 const auto allowed_transformers = ParserColumnsTransformers::ColumnTransformers{ParserColumnsTransformers::ColumnTransformer::EXCEPT};
 return ParserColumnsMatcher(allowed_transformers).parse(pos, node, expected)


@@ -21,19 +21,19 @@ message NameAndType {
 string type = 2;
 }
-// Desribes an external table - a table which will exists only while a query is executing.
+// Describes an external table - a table which will exists only while a query is executing.
 message ExternalTable {
 // Name of the table. If omitted, "_data" is used.
 string name = 1;
 // Columns of the table. Types are required, names can be omitted. If the names are omitted, "_1", "_2", ... is used.
 repeated NameAndType columns = 2;
 // Data to insert to the external table.
 // If a method with streaming input (i.e. ExecuteQueryWithStreamInput() or ExecuteQueryWithStreamIO()) is used,
-// then data for insertion to the same external table can be splitted between multiple QueryInfos.
+// then data for insertion to the same external table can be split between multiple QueryInfos.
 string data = 3;
 // Format of the data to insert to the external table.
 string format = 4;
@@ -57,7 +57,7 @@ message QueryInfo {
 // Delimiter for input_data, inserted between input_data from adjacent QueryInfos.
 string input_data_delimiter = 6;
 // Default output format. If not specified, 'TabSeparated' is used.
 string output_format = 7;
@@ -71,7 +71,7 @@ message QueryInfo {
 string session_id = 12;
 bool session_check = 13;
 uint32 session_timeout = 14;
 // Set `cancel` to true to stop executing the query.
 bool cancel = 15;


@@ -637,7 +637,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar
 if (!columns.empty())
 {
-/// If some columns absent in part, then evaulate default values
+/// If some columns absent in part, then evaluate default values
 if (should_evaluate_missing_defaults)
 {
 auto block = prev_reader->sample_block.cloneWithColumns(read_result.columns);
@@ -681,7 +681,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar
 merge_tree_reader->fillMissingColumns(read_result.columns, should_evaluate_missing_defaults,
 read_result.num_rows);
-/// If some columns absent in part, then evaulate default values
+/// If some columns absent in part, then evaluate default values
 if (should_evaluate_missing_defaults)
 merge_tree_reader->evaluateMissingDefaults({}, read_result.columns);


@@ -31,7 +31,7 @@ public:
 */
 block_with_constants = KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context);
-/// Trasform WHERE section to Reverse Polish notation
+/// Transform WHERE section to Reverse Polish notation
 const ASTSelectQuery & select = typeid_cast<const ASTSelectQuery &>(*query_info.query);
 if (select.where())
 {


@@ -213,7 +213,7 @@ TEST_P(ReplicatedMergeTreeLogEntryDataTest, transcode)
 // Enabling this warning would ruin test brievity without adding anything else in return,
 // since most of the fields have default constructors or be will be zero-initialized as by standard,
-// so values are predicatable and stable accross runs.
+// so values are predicatable and stable across runs.
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wmissing-field-initializers"
@@ -301,7 +301,7 @@ INSTANTIATE_TEST_SUITE_P(Merge, ReplicatedMergeTreeLogEntryDataTest,
 // This is just an example of how to set all fields. Can't be used as is since depending on type,
 // only some fields are serialized/deserialized, and even if everything works perfectly,
-// some fileds in deserialized object would be unset (hence differ from expected).
+// some fields in deserialized object would be unset (hence differ from expected).
 // INSTANTIATE_TEST_SUITE_P(Full, ReplicatedMergeTreeLogEntryDataTest,
 // ::testing::ValuesIn(std::initializer_list<ReplicatedMergeTreeLogEntryData>{
 // {


@@ -56,7 +56,7 @@ Block EmbeddedRocksDBBlockInputStream::readImpl()
 finished = !iterator->Valid();
 if (!iterator->status().ok())
 {
-throw Exception("Engine " + getName() + " got error while seeking key value datas: " + iterator->status().ToString(),
+throw Exception("Engine " + getName() + " got error while seeking key value data: " + iterator->status().ToString(),
 ErrorCodes::ROCKSDB_ERROR);
 }
 return sample_block.cloneWithColumns(std::move(columns));


@@ -61,13 +61,13 @@ struct StorageInMemoryMetadata
 /// Sets constraints
 void setConstraints(ConstraintsDescription constraints_);
-/// Set partition key for storage (methods bellow, are just wrappers for this struct).
+/// Set partition key for storage (methods below, are just wrappers for this struct).
 void setPartitionKey(const KeyDescription & partition_key_);
-/// Set sorting key for storage (methods bellow, are just wrappers for this struct).
+/// Set sorting key for storage (methods below, are just wrappers for this struct).
 void setSortingKey(const KeyDescription & sorting_key_);
-/// Set primary key for storage (methods bellow, are just wrappers for this struct).
+/// Set primary key for storage (methods below, are just wrappers for this struct).
 void setPrimaryKey(const KeyDescription & primary_key_);
-/// Set sampling key for storage (methods bellow, are just wrappers for this struct).
+/// Set sampling key for storage (methods below, are just wrappers for this struct).
 void setSamplingKey(const KeyDescription & sampling_key_);
 /// Set common table TTLs


@@ -3,12 +3,11 @@
 # Check for typos in code.
 ROOT_PATH=$(git rev-parse --show-toplevel)
-CURDIR=$(dirname "${BASH_SOURCE[0]}")
 codespell \
-    --skip '*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp' \
-    --ignore-words "${CURDIR}/codespell-ignore-words.list" \
-    --exclude-file "${CURDIR}/codespell-ignore-lines.list" \
+    --skip '*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp' \
+    --ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \
+    --exclude-file "${ROOT_PATH}/utils/check-style/codespell-ignore-lines.list" \
     --quiet-level 2 \
     "$ROOT_PATH"/{src,base,programs,utils} \
     $@ | grep -P '.' \
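
Because the word lists are now resolved through ${ROOT_PATH} rather than the script's own directory, check-typos can be invoked from anywhere inside a checkout; a minimal sketch (assuming codespell is installed, as in the Dockerfile above):

    # Run the typo check; extra arguments are forwarded to codespell,
    # e.g. -w to write the suggested fixes in place.
    ./utils/check-style/check-typos
    ./utils/check-style/check-typos -w

False positives can be suppressed by appending them to utils/check-style/codespell-ignore-words.list, as the last hunk of this commit does.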


@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+ROOT_PATH=$(git rev-parse --show-toplevel)
+EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing/|Parsers/New'
+# Double whitespaces
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
+    grep -vP $EXCLUDE_DIRS |
+    while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done
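
For reference, a sketch of a standalone run of the new helper from a checkout; the tee target simply mirrors what the Dockerfile CMD above collects:

    # Report every .h/.cpp file (outside EXCLUDE_DIRS) that contains
    # double whitespaces, saving the report the way the CI job does.
    ./utils/check-style/check-whitespaces | tee whitespaces_output.txt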


@@ -5,3 +5,8 @@ parsering
 nd
 ect
 pullrequest
+pullrequests
+thenn
+ths
+offsett
+numer