Fix some spelling mistakes

Pradeep Chhetri 2020-01-11 17:50:41 +08:00 committed by Pradeep Chhetri
parent b29fa1b143
commit 4941fdfe4b
49 changed files with 86 additions and 86 deletions


@ -254,7 +254,7 @@ private:
if (interrupt_listener.check())
{
std::cout << "Stopping launch of queries. SIGINT recieved.\n";
std::cout << "Stopping launch of queries. SIGINT received.\n";
return false;
}


@ -70,7 +70,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", boost::program_options::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", boost::program_options::value<int>(), "compression level for codecs spicified via flags")
("level", boost::program_options::value<int>(), "compression level for codecs specified via flags")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
;


@ -2430,7 +2430,7 @@ void ClusterCopierApp::defineOptions(Poco::Util::OptionSet & options)
.argument("copy-fault-probability").binding("copy-fault-probability"));
options.addOption(Poco::Util::Option("log-level", "", "sets log level")
.argument("log-level").binding("log-level"));
options.addOption(Poco::Util::Option("base-dir", "", "base directory for copiers, consequitive copier launches will populate /base-dir/launch_id/* directories")
options.addOption(Poco::Util::Option("base-dir", "", "base directory for copiers, consecutive copier launches will populate /base-dir/launch_id/* directories")
.argument("base-dir").binding("base-dir"));
using Me = std::decay_t<decltype(*this)>;


@ -164,7 +164,7 @@ try
setupUsers();
/// Limit on total number of concurrently executing queries.
/// Threre are no need for concurrent threads, override max_concurrent_queries.
/// There is no need for concurrent threads, override max_concurrent_queries.
context->getProcessList().setMaxSize(0);
/// Size of cache for uncompressed blocks. Zero means disabled.
@ -182,7 +182,7 @@ try
context->setDefaultProfiles(config());
/** Init dummy default DB
* NOTE: We force using isolated default database to avoid conflicts with default database from server enviroment
* NOTE: We force using isolated default database to avoid conflicts with default database from server environment
* Otherwise, metadata of temporary File(format, EXPLICIT_PATH) tables will pollute metadata/ directory;
* if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
*/


@ -40,7 +40,7 @@
#include <Common/TerminalSize.h>
static const char * documantation = R"(
static const char * documentation = R"(
Simple tool for table data obfuscation.
It reads input table and produces output table, that retain some properties of input, but contains different data.
@ -979,7 +979,7 @@ try
|| !options.count("input-format")
|| !options.count("output-format"))
{
std::cout << documantation << "\n"
std::cout << documentation << "\n"
<< "\nUsage: " << argv[0] << " [options] < in > out\n"
<< "\nInput must be seekable file (it will be read twice).\n"
<< "\n" << description << "\n"


@ -138,7 +138,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
{
auto message = getCurrentExceptionMessage(true);
response.setStatusAndReason(
Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); // can't call process_error, bacause of too soon response sending
Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); // can't call process_error, because of too soon response sending
writeStringBinary(message, out);
tryLogCurrentException(log);
}


@ -88,7 +88,7 @@ void ODBCBridge::defineOptions(Poco::Util::OptionSet & options)
options.addOption(
Poco::Util::Option("listen-host", "", "hostname to listen, default localhost").argument("listen-host").binding("listen-host"));
options.addOption(
Poco::Util::Option("http-timeout", "", "http timout for socket, default 1800").argument("http-timeout").binding("http-timeout"));
Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout"));
options.addOption(Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024")
.argument("max-server-connections")


@ -315,7 +315,7 @@ void PerformanceTest::runQueries(
stop_conditions.reportIterations(iteration);
if (stop_conditions.areFulfilled())
{
LOG_INFO(log, "Stop conditions fullfilled");
LOG_INFO(log, "Stop conditions fulfilled");
break;
}


@ -200,7 +200,7 @@ private:
if (current.checkPreconditions())
{
LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are fullfilled");
LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are fulfilled");
LOG_INFO(
log,
"Preparing for run, have " << info.create_and_fill_queries.size() << " create and fill queries");
@ -219,7 +219,7 @@ private:
return {report_builder->buildFullReport(info, result, query_indexes[info.path]), current.checkSIGINT()};
}
else
LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are not fullfilled, skip run");
LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are not fulfilled, skip run");
return {"", current.checkSIGINT()};
}
@ -361,8 +361,8 @@ try
po::notify(options);
Poco::AutoPtr<Poco::PatternFormatter> formatter(new Poco::PatternFormatter("%Y.%m.%d %H:%M:%S.%F <%p> %s: %t"));
Poco::AutoPtr<Poco::ConsoleChannel> console_chanel(new Poco::ConsoleChannel);
Poco::AutoPtr<Poco::FormattingChannel> channel(new Poco::FormattingChannel(formatter, console_chanel));
Poco::AutoPtr<Poco::ConsoleChannel> console_channel(new Poco::ConsoleChannel);
Poco::AutoPtr<Poco::FormattingChannel> channel(new Poco::FormattingChannel(formatter, console_channel));
Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
Poco::Logger::root().setChannel(channel);


@ -117,7 +117,7 @@ std::string ReportBuilder::buildFullReport(
if (isASCIIString(statistics.exception))
runJSON.set("exception", jsonString(statistics.exception, settings), false);
else
runJSON.set("exception", "Some exception occured with non ASCII message. This may produce invalid JSON. Try reproduce locally.");
runJSON.set("exception", "Some exception occurred with non ASCII message. This may produce invalid JSON. Try reproduce locally.");
}
if (test_info.exec_type == ExecutionType::Loop)


@ -28,23 +28,23 @@ std::pair<String, bool> InterserverIOHTTPHandler::checkAuthentication(Poco::Net:
if (config.has("interserver_http_credentials.user"))
{
if (!request.hasCredentials())
return {"Server requires HTTP Basic authentification, but client doesn't provide it", false};
return {"Server requires HTTP Basic authentication, but client doesn't provide it", false};
String scheme, info;
request.getCredentials(scheme, info);
if (scheme != "Basic")
return {"Server requires HTTP Basic authentification but client provides another method", false};
return {"Server requires HTTP Basic authentication but client provides another method", false};
String user = config.getString("interserver_http_credentials.user");
String password = config.getString("interserver_http_credentials.password", "");
Poco::Net::HTTPBasicCredentials credentials(info);
if (std::make_pair(user, password) != std::make_pair(credentials.getUsername(), credentials.getPassword()))
return {"Incorrect user or password in HTTP Basic authentification", false};
return {"Incorrect user or password in HTTP Basic authentication", false};
}
else if (request.hasCredentials())
{
return {"Client requires HTTP Basic authentification, but server doesn't provide it", false};
return {"Client requires HTTP Basic authentication, but server doesn't provide it", false};
}
return {"", true};
}
@ -99,7 +99,7 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
response.setStatusAndReason(Poco::Net::HTTPServerResponse::HTTP_UNAUTHORIZED);
if (!response.sent())
writeString(message, *used_output.out);
LOG_WARNING(log, "Query processing failed request: '" << request.getURI() << "' authentification failed");
LOG_WARNING(log, "Query processing failed request: '" << request.getURI() << "' authentication failed");
}
}
catch (Exception & e)


@ -31,7 +31,7 @@ public:
template <typename HandlerType>
class PrometeusRequestHandlerFactory : public Poco::Net::HTTPRequestHandlerFactory
class PrometheusRequestHandlerFactory : public Poco::Net::HTTPRequestHandlerFactory
{
private:
IServer & server;
@ -39,7 +39,7 @@ private:
PrometheusMetricsWriter metrics_writer;
public:
PrometeusRequestHandlerFactory(IServer & server_, const AsynchronousMetrics & async_metrics_)
PrometheusRequestHandlerFactory(IServer & server_, const AsynchronousMetrics & async_metrics_)
: server(server_)
, endpoint_path(server_.config().getString("prometheus.endpoint", "/metrics"))
, metrics_writer(server_.config(), "prometheus", async_metrics_)
@ -56,6 +56,6 @@ public:
}
};
using PrometeusHandlerFactory = PrometeusRequestHandlerFactory<PrometheusRequestHandler>;
using PrometheusHandlerFactory = PrometheusRequestHandlerFactory<PrometheusRequestHandler>;
}


@ -554,8 +554,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
///
/// It also cannot work with sanitizers.
/// Sanitizers are using quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer)
/// And they do unwiding frequently (on every malloc/free, thread/mutex operations, etc).
/// They change %rbp during unwinding and it confuses libunwind if signal comes during sanitizer unwiding
/// And they do unwinding frequently (on every malloc/free, thread/mutex operations, etc).
/// They change %rbp during unwinding and it confuses libunwind if signal comes during sanitizer unwinding
/// and query profiler decide to unwind stack with libunwind at this moment.
///
/// Symptoms: you'll get silent Segmentation Fault - without sanitizer message and without usual ClickHouse diagnostics.
@ -724,7 +724,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
socket.setSendTimeout(settings.http_send_timeout);
auto handler_factory = createDefaultHandlerFatory<HTTPHandler>(*this, "HTTPHandler-factory");
if (config().has("prometheus") && config().getInt("prometheus.port", 0) == 0)
handler_factory->addHandler<PrometeusHandlerFactory>(async_metrics);
handler_factory->addHandler<PrometheusHandlerFactory>(async_metrics);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
handler_factory,
@ -854,7 +854,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);
auto handler_factory = new HTTPRequestHandlerFactoryMain(*this, "PrometheusHandler-factory");
handler_factory->addHandler<PrometeusHandlerFactory>(async_metrics);
handler_factory->addHandler<PrometheusHandlerFactory>(async_metrics);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
handler_factory,
server_pool,


@ -54,7 +54,7 @@ private:
{
AggregateFunctionForEachData & state = data(place);
/// Ensure we have aggreate states for new_size elements, allocate
/// Ensure we have aggregate states for new_size elements, allocate
/// from arena if needed. When reallocating, we can't copy the
/// states to new buffer with memcpy, because they may contain pointers
/// to themselves. In particular, this happens when a state contains


@ -15,7 +15,7 @@
private:
friend class COW<Column>;
/// Leave all constructors in private section. They will be avaliable through 'create' method.
/// Leave all constructors in private section. They will be available through 'create' method.
Column();
/// Provide 'clone' method. It can be virtual if you want polymorphic behaviour.


@ -516,13 +516,13 @@ UInt32 CompressionCodecT64::doCompressData(const char * src, UInt32 src_size, ch
break;
}
throw Exception("Connot compress with T64", ErrorCodes::CANNOT_COMPRESS);
throw Exception("Cannot compress with T64", ErrorCodes::CANNOT_COMPRESS);
}
void CompressionCodecT64::doDecompressData(const char * src, UInt32 src_size, char * dst, UInt32 uncompressed_size) const
{
if (!src_size)
throw Exception("Connot decompress with T64", ErrorCodes::CANNOT_DECOMPRESS);
throw Exception("Cannot decompress with T64", ErrorCodes::CANNOT_DECOMPRESS);
UInt8 cookie = unalignedLoad<UInt8>(src);
src += 1;
@ -553,7 +553,7 @@ void CompressionCodecT64::doDecompressData(const char * src, UInt32 src_size, ch
break;
}
throw Exception("Connot decompress with T64", ErrorCodes::CANNOT_DECOMPRESS);
throw Exception("Cannot decompress with T64", ErrorCodes::CANNOT_DECOMPRESS);
}
void CompressionCodecT64::useInfoAboutType(DataTypePtr data_type)


@ -35,7 +35,7 @@ DataTypePtr DataTypeFactory::get(const ASTPtr & ast) const
if (const auto * func = ast->as<ASTFunction>())
{
if (func->parameters)
throw Exception("Data type cannot have multiple parenthesed parameters.", ErrorCodes::ILLEGAL_SYNTAX_FOR_DATA_TYPE);
throw Exception("Data type cannot have multiple parenthesized parameters.", ErrorCodes::ILLEGAL_SYNTAX_FOR_DATA_TYPE);
return get(func->name, func->arguments);
}


@ -43,7 +43,7 @@ public:
using GetColumnsFunction = std::function<ColumnsWithTypeAndName(const Columns &, const std::vector<DictionaryAttribute> & attributes)>;
// Used to separate key columns format for storage and view.
// Calls get_key_columns_function to get key column for dictionary get fuction call
// Calls get_key_columns_function to get key column for dictionary get function call
// and get_view_columns_function to get key representation.
// Now used in trie dictionary, where columns are stored as ip and mask, and are showed as string
DictionaryBlockInputStream(


@ -17,7 +17,7 @@ namespace DB
{
/// Visitors consist of functions with unified interface 'void visit(Casted & x, ASTPtr & y)', there x is y, successfully casted to Casted.
/// Both types and fuction could have const specifiers. The second argument is used by visitor to replaces AST node (y) if needed.
/// Both types and function could have const specifiers. The second argument is used by visitor to replaces AST node (y) if needed.
/// Visits AST nodes, add default database to tables if not set. There's different logic for DDLs and selects.
class AddDefaultDatabaseVisitor


@ -157,7 +157,7 @@ Aggregator::Aggregator(const Params & params_)
total_size_of_aggregate_states = 0;
all_aggregates_has_trivial_destructor = true;
// aggreate_states will be aligned as below:
// aggregate_states will be aligned as below:
// |<-- state_1 -->|<-- pad_1 -->|<-- state_2 -->|<-- pad_2 -->| .....
//
// pad_N will be used to match alignment requirement for each next state.
@ -168,7 +168,7 @@ Aggregator::Aggregator(const Params & params_)
total_size_of_aggregate_states += params.aggregates[i].function->sizeOfData();
// aggreate states are aligned based on maximum requirement
// aggregate states are aligned based on maximum requirement
align_aggregate_states = std::max(align_aggregate_states, params.aggregates[i].function->alignOfData());
// If not the last aggregate_state, we need pad it so that next aggregate_state will be aligned.


@ -111,7 +111,7 @@ struct ContextShared
mutable std::mutex embedded_dictionaries_mutex;
mutable std::mutex external_dictionaries_mutex;
mutable std::mutex external_models_mutex;
/// Separate mutex for re-initialization of zookeer session. This operation could take a long time and must not interfere with another operations.
/// Separate mutex for re-initialization of zookeeper session. This operation could take a long time and must not interfere with another operations.
mutable std::mutex zookeeper_mutex;
mutable zkutil::ZooKeeperPtr zookeeper; /// Client for ZooKeeper.
@ -191,7 +191,7 @@ struct ContextShared
/// Clusters for distributed tables
/// Initialized on demand (on distributed storages initialization) since Settings should be initialized
std::unique_ptr<Clusters> clusters;
ConfigurationPtr clusters_config; /// Soteres updated configs
ConfigurationPtr clusters_config; /// Stores updated configs
mutable std::mutex clusters_mutex; /// Guards clusters and clusters_config
#if USE_EMBEDDED_COMPILER


@ -105,7 +105,7 @@ public:
if (node.name == NameAnd::name)
{
if (!node.arguments || node.arguments->children.empty())
throw Exception("Logical error: function requires argiment", ErrorCodes::LOGICAL_ERROR);
throw Exception("Logical error: function requires argument", ErrorCodes::LOGICAL_ERROR);
for (auto & child : node.arguments->children)
{


@ -238,7 +238,7 @@ DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const
if (context.getSettingsRef().readonly)
{
LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries"
<< " Set apropriate system_profile or distributed_ddl.profile to fix this.");
<< " Set appropriate system_profile or distributed_ddl.profile to fix this.");
}
host_fqdn = getFQDNOrHostName();
@ -825,7 +825,7 @@ void DDLWorker::cleanupQueue(Int64 current_time_seconds, const ZooKeeperPtr & zo
if (!zookeeper->exists(node_path, &stat))
continue;
/// Delete node if its lifetmie is expired (according to task_max_lifetime parameter)
/// Delete node if its lifetime is expired (according to task_max_lifetime parameter)
constexpr UInt64 zookeeper_time_resolution = 1000;
Int64 zookeeper_time_seconds = stat.ctime / zookeeper_time_resolution;
bool node_lifetime_is_expired = zookeeper_time_seconds + task_max_lifetime < current_time_seconds;


@ -954,7 +954,7 @@ void ExpressionActions::finalize(const Names & output_columns)
/// remote table (doesn't know anything about it).
///
/// If we have combination of two previous cases, our heuristic from (1) can choose absolutely different columns,
/// so generated streams with these actions will have different headers. To avoid this we addionaly rename our "redundant" column
/// so generated streams with these actions will have different headers. To avoid this we additionally rename our "redundant" column
/// to DUMMY_COLUMN_NAME with help of COPY_COLUMN action and consequent remove of original column.
/// It doesn't affect any logic, but all streams will have same "redundant" column in header called "_dummy".


@ -286,7 +286,7 @@ SetPtr SelectQueryExpressionAnalyzer::isPlainStorageSetInSubquery(const ASTPtr &
}
/// Perfomance optimisation for IN() if storage supports it.
/// Performance optimisation for IN() if storage supports it.
void SelectQueryExpressionAnalyzer::makeSetsForIndex(const ASTPtr & node)
{
if (!node || !storage() || !storage()->supportsIndexForIn())


@ -19,7 +19,7 @@ ExternalLoader::LoadablePtr ExternalDictionariesLoader::create(
const std::string & name, const Poco::Util::AbstractConfiguration & config,
const std::string & key_in_config, const std::string & repository_name) const
{
/// For dictionaries from databases (created with DDL qureies) we have to perform
/// For dictionaries from databases (created with DDL queries) we have to perform
/// additional checks, so we identify them here.
bool dictionary_from_database = !repository_name.empty();
return DictionaryFactory::instance().create(name, config, key_in_config, context, dictionary_from_database);


@ -609,7 +609,7 @@ public:
{
try
{
/// Maybe alredy true, if we have an exception
/// Maybe already true, if we have an exception
if (!should_update_flag)
should_update_flag = object->isModified();
}


@ -306,7 +306,7 @@ size_t Join::getTotalByteCount() const
void Join::setSampleBlock(const Block & block)
{
/// You have to restore this lock if you call the fuction outside of ctor.
/// You have to restore this lock if you call the function outside of ctor.
//std::unique_lock lock(rwlock);
LOG_DEBUG(log, "setSampleBlock: " << block.dumpStructure());
@ -778,7 +778,7 @@ NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added
}
else if constexpr ((is_any_join || is_semi_join) && right)
{
/// Use first appered left key + it needs left columns replication
/// Use first appeared left key + it needs left columns replication
if (mapped.setUsedOnce())
{
setUsed<need_filter>(filter, i);
@ -787,7 +787,7 @@ NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added
}
else if constexpr (is_any_join && KIND == ASTTableJoin::Kind::Inner)
{
/// Use first appered left key only
/// Use first appeared left key only
if (mapped.setUsedOnce())
{
setUsed<need_filter>(filter, i);


@ -527,7 +527,7 @@ void MergeJoin::mergeFlushedRightBlocks()
lsm->merge(callback);
flushed_right_blocks.swap(lsm->sorted_files.front());
/// Get memory limit or aproximate it from row limit and bytes per row factor
/// Get memory limit or approximate it from row limit and bytes per row factor
UInt64 memory_limit = size_limits.max_bytes;
UInt64 rows_limit = size_limits.max_rows;
if (!memory_limit && rows_limit)


@ -56,7 +56,7 @@ public:
private:
/// There're two size limits for right-hand table: max_rows_in_join, max_bytes_in_join.
/// max_bytes is prefered. If it isn't set we aproximate it as (max_rows * bytes/row).
/// max_bytes is prefered. If it isn't set we approximate it as (max_rows * bytes/row).
struct BlockByteWeight
{
size_t operator()(const Block & block) const { return block.bytes(); }


@ -68,7 +68,7 @@ ASTs OptimizeIfChainsVisitor::ifChain(const ASTPtr & child)
const auto * else_arg = function_args->children[2]->as<ASTFunction>();
/// Recursively collect arguments from the innermost if ("head-resursion").
/// Recursively collect arguments from the innermost if ("head-recursion").
/// Arguments will be returned in reverse order.
if (else_arg && else_arg->name == "if")


@ -181,7 +181,7 @@ void renameDuplicatedColumns(const ASTSelectQuery * select_query)
/// Sometimes we have to calculate more columns in SELECT clause than will be returned from query.
/// This is the case when we have DISTINCT or arrayJoin: we require more columns in SELECT even if we need less columns in result.
/// Also we have to remove duplicates in case of GLOBAL subqueries. Their results are placed into tables so duplicates are inpossible.
/// Also we have to remove duplicates in case of GLOBAL subqueries. Their results are placed into tables so duplicates are impossible.
void removeUnneededColumnsFromSelectClause(const ASTSelectQuery * select_query, const Names & required_result_columns, bool remove_dups)
{
ASTs & elements = select_query->select()->children;
@ -632,7 +632,7 @@ std::vector<const ASTFunction *> getAggregates(const ASTPtr & query)
/// After execution, columns will only contain the list of columns needed to read from the table.
void SyntaxAnalyzerResult::collectUsedColumns(const ASTPtr & query, const NamesAndTypesList & additional_source_columns)
{
/// We caclulate required_source_columns with source_columns modifications and swap them on exit
/// We calculate required_source_columns with source_columns modifications and swap them on exit
required_source_columns = source_columns;
if (!additional_source_columns.empty())
@ -652,15 +652,15 @@ void SyntaxAnalyzerResult::collectUsedColumns(const ASTPtr & query, const NamesA
if (columns_context.has_table_join)
{
NameSet avaliable_columns;
NameSet available_columns;
for (const auto & name : source_columns)
avaliable_columns.insert(name.name);
available_columns.insert(name.name);
/// Add columns obtained by JOIN (if needed).
for (const auto & joined_column : analyzed_join->columnsFromJoinedTable())
{
auto & name = joined_column.name;
if (avaliable_columns.count(name))
if (available_columns.count(name))
continue;
if (required.count(name))
@ -845,12 +845,12 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
{
if (storage)
{
const ColumnsDescription & starage_columns = storage->getColumns();
tables_with_columns.emplace_back(DatabaseAndTableWithAlias{}, starage_columns.getOrdinary().getNames());
const ColumnsDescription & storage_columns = storage->getColumns();
tables_with_columns.emplace_back(DatabaseAndTableWithAlias{}, storage_columns.getOrdinary().getNames());
auto & table = tables_with_columns.back();
table.addHiddenColumns(starage_columns.getMaterialized());
table.addHiddenColumns(starage_columns.getAliases());
table.addHiddenColumns(starage_columns.getVirtuals());
table.addHiddenColumns(storage_columns.getMaterialized());
table.addHiddenColumns(storage_columns.getAliases());
table.addHiddenColumns(storage_columns.getVirtuals());
}
else
{


@ -167,7 +167,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
{
which_from_type = WhichDataType(*from_type_hint);
// This was added to mitigate converting DateTime64-Field (a typedef to a Decimal64) to DataTypeDate64-compatitable type.
// This was added to mitigate converting DateTime64-Field (a typedef to a Decimal64) to DataTypeDate64-compatible type.
if (from_type_hint && from_type_hint->equals(type))
{
return src;


@ -118,7 +118,7 @@ void loadMetadata(Context & context)
}
catch (...)
{
tryLogCurrentException("Load metadata", "Can't remove force restore file to enable data santity checks");
tryLogCurrentException("Load metadata", "Can't remove force restore file to enable data sanity checks");
}
}
}


@ -182,7 +182,7 @@ void DistributedBlockOutputStream::initWritingJobs(const Block & first_block)
}
if (num_shards > 1)
shard_jobs.shard_current_block_permuation.reserve(first_block.rows());
shard_jobs.shard_current_block_permutation.reserve(first_block.rows());
}
}
@ -235,7 +235,7 @@ ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutp
/// Generate current shard block
if (num_shards > 1)
{
auto & shard_permutation = shard_job.shard_current_block_permuation;
auto & shard_permutation = shard_job.shard_current_block_permutation;
size_t num_shard_rows = shard_permutation.size();
for (size_t j = 0; j < current_block.columns(); ++j)
@ -348,10 +348,10 @@ void DistributedBlockOutputStream::writeSync(const Block & block)
/// Prepare row numbers for each shard
for (size_t shard_index : ext::range(0, num_shards))
per_shard_jobs[shard_index].shard_current_block_permuation.resize(0);
per_shard_jobs[shard_index].shard_current_block_permutation.resize(0);
for (size_t i = 0; i < block.rows(); ++i)
per_shard_jobs[current_selector[i]].shard_current_block_permuation.push_back(i);
per_shard_jobs[current_selector[i]].shard_current_block_permutation.push_back(i);
}
try


@ -123,7 +123,7 @@ private:
struct JobShard
{
std::list<JobReplica> replicas_jobs;
IColumn::Permutation shard_current_block_permuation;
IColumn::Permutation shard_current_block_permutation;
};
std::vector<JobShard> per_shard_jobs;


@ -49,7 +49,7 @@ public:
const AllowedMovingPredicate & can_move,
const std::lock_guard<std::mutex> & moving_parts_lock);
/// Copies part to selected reservation in detached folder. Throws exception if part alredy exists.
/// Copies part to selected reservation in detached folder. Throws exception if part already exists.
std::shared_ptr<const MergeTreeDataPart> clonePart(const MergeTreeMoveEntry & moving_part) const;
/// Replaces cloned part from detached directory into active data parts set.


@ -23,7 +23,7 @@ ReadInOrderOptimizer::ReadInOrderOptimizer(
throw Exception("Sizes of sort description and actions are mismatched", ErrorCodes::LOGICAL_ERROR);
/// Do not analyze joined columns.
/// They may have aliases and come to descriprion as is.
/// They may have aliases and come to description as is.
/// We can mismatch them with order key columns at stage of fetching columns.
for (const auto & elem : syntax_result->array_join_result_to_source)
forbidden_columns.insert(elem.first);


@ -438,7 +438,7 @@ void StorageBuffer::startup()
if (global_context.getSettingsRef().readonly)
{
LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data."
<< " Set apropriate system_profile to fix this.");
<< " Set appropriate system_profile to fix this.");
}
flush_thread = ThreadFromGlobalPool(&StorageBuffer::flushThread, this);


@ -109,7 +109,7 @@ bool isCompatible(const IAST & node)
return false;
/// A tuple with zero or one elements is represented by a function tuple(x) and is not compatible,
/// but a normal tuple with more than one element is represented as a parenthesed expression (x, y) and is perfectly compatible.
/// but a normal tuple with more than one element is represented as a parenthesized expression (x, y) and is perfectly compatible.
if (name == "tuple" && function->arguments->children.size() <= 1)
return false;


@ -91,7 +91,7 @@
NOTE: In spite of this section is optional (if it is not specified, all partitions will be copied),
it is strictly recommended to specify them explicitly.
If you already have some ready paritions on destination cluster they
If you already have some ready partitions on destination cluster they
will be removed at the start of the copying since they will be interpeted
as unfinished data from the previous copying!!!
-->


@ -6,7 +6,7 @@ import ssl
import csv
# Decorator used to see if authentification works for external dictionary who use a HTTP source.
# Decorator used to see if authentication works for external dictionary who use a HTTP source.
def check_auth(fn):
def wrapper(req):
auth_header = req.headers.get('authorization', None)


@ -144,7 +144,7 @@ Parameters:
NOTE: In spite of this section is optional (if it is not specified, all partitions will be copied),
it is strictly recommended to specify them explicitly.
If you already have some ready paritions on destination cluster they
If you already have some ready partitions on destination cluster they
will be removed at the start of the copying since they will be interpeted
as unfinished data from the previous copying!!!
-->


@ -137,9 +137,9 @@ Setting fields:
- `url` The source URL.
- `format` The file format. All the formats described in "[Formats](../../interfaces/formats.md#formats)" are supported.
- `credentials` Basic HTTP authentification. Optional parameter.
- `user` Username required for the authentification.
- `password` Password required for the authentification.
- `credentials` Basic HTTP authentication. Optional parameter.
- `user` Username required for the authentication.
- `password` Password required for the authentication.
- `headers` All custom HTTP headers entries used for the HTTP request. Optional parameter.
- `header` Single HTTP header entry.
- `name` Identifiant name used for the header send on the request.


@ -175,7 +175,7 @@ $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?data
Имя пользователя и пароль могут быть указаны в одном из двух вариантов:
1. С использованием HTTP Basic Authentification. Пример:
1. С использованием HTTP Basic Authentication. Пример:
```bash
$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-


@ -143,7 +143,7 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat
NOTE: In spite of this section is optional (if it is not specified, all partitions will be copied),
it is strictly recommended to specify them explicitly.
If you already have some ready paritions on destination cluster they
If you already have some ready partitions on destination cluster they
will be removed at the start of the copying since they will be interpeted
as unfinished data from the previous copying!!!
-->


@ -1470,7 +1470,7 @@ function defaultClearTimeout () {
} ())
function runTimeout(fun) {
if (cachedSetTimeout === setTimeout) {
//normal enviroments in sane situations
//normal environments in sane situations
return setTimeout(fun, 0);
}
// if setTimeout wasn't available but was latter defined
@ -1495,7 +1495,7 @@ function runTimeout(fun) {
}
function runClearTimeout(marker) {
if (cachedClearTimeout === clearTimeout) {
//normal enviroments in sane situations
//normal environments in sane situations
return clearTimeout(marker);
}
// if clearTimeout wasn't available but was latter defined
@ -8028,7 +8028,7 @@ lunr.QueryParser.parseBoost = function (parser) {
} else if (typeof exports === 'object') {
/**
* Node. Does not work with strict CommonJS, but
* only CommonJS-like enviroments that support module.exports,
* only CommonJS-like environments that support module.exports,
* like Node.
*/
module.exports = factory()


@ -2968,7 +2968,7 @@ lunr.QueryParser.parseBoost = function (parser) {
} else if (typeof exports === 'object') {
/**
* Node. Does not work with strict CommonJS, but
* only CommonJS-like enviroments that support module.exports,
* only CommonJS-like environments that support module.exports,
* like Node.
*/
module.exports = factory()


@ -142,7 +142,7 @@ Parameters:
NOTE: In spite of this section is optional (if it is not specified, all partitions will be copied),
it is strictly recommended to specify them explicitly.
If you already have some ready paritions on destination cluster they
If you already have some ready partitions on destination cluster they
will be removed at the start of the copying since they will be interpeted
as unfinished data from the previous copying!!!
-->