Fix half of typos

Repository: https://github.com/ClickHouse/ClickHouse.git
Commit: edd89a8610 (parent: bbbc52362b)
@@ -156,7 +156,7 @@ namespace
         subnet = IPSubnet{pattern};
     }

-    /// Extracts a subnet, a host name or a host name regular expession from a like pattern.
+    /// Extracts a subnet, a host name or a host name regular expression from a like pattern.
     void parseLikePattern(
         const String & pattern, std::optional<IPSubnet> & subnet, std::optional<String> & name, std::optional<String> & name_regexp)
     {
@@ -34,7 +34,7 @@ public:
     template <typename EntityClassT>
     std::vector<UUID> findAll() const { return findAll(EntityClassT::TYPE); }

-    /// Searchs for an entity with specified type and name. Returns std::nullopt if not found.
+    /// Searches for an entity with specified type and name. Returns std::nullopt if not found.
     std::optional<UUID> find(EntityType type, const String & name) const;

     template <typename EntityClassT>
@@ -45,7 +45,7 @@ public:
     template <typename EntityClassT>
     std::vector<UUID> find(const Strings & names) const { return find(EntityClassT::TYPE, names); }

-    /// Searchs for an entity with specified name and type. Throws an exception if not found.
+    /// Searches for an entity with specified name and type. Throws an exception if not found.
     UUID getID(EntityType type, const String & name) const;

     template <typename EntityClassT>
@@ -509,20 +509,20 @@ public:
         UInt64 count = 0;
         if (isSmall())
         {
-            std::vector<T> ans;
+            std::vector<T> answer;
             for (const auto & x : small)
             {
                 T val = x.getValue();
                 if (UInt32(val) >= range_start)
                 {
-                    ans.push_back(val);
+                    answer.push_back(val);
                 }
             }
-            sort(ans.begin(), ans.end());
-            if (limit > ans.size())
-                limit = ans.size();
+            sort(answer.begin(), answer.end());
+            if (limit > answer.size())
+                limit = answer.size();
             for (size_t i = 0; i < limit; ++i)
-                r1.add(ans[i]);
+                r1.add(answer[i]);
             count = UInt64(limit);
         }
         else
@@ -200,7 +200,7 @@ void LinearModelData::merge(const DB::LinearModelData & rhs)
     updateState();
     /// can't update rhs state because it's constant

-    /// squared mean is more stable (in sence of quality of prediction) when two states with quietly different number of learning steps are merged
+    /// squared mean is more stable (in sense of quality of prediction) when two states with quietly different number of learning steps are merged
     Float64 frac = (static_cast<Float64>(iter_num) * iter_num) / (iter_num * iter_num + rhs.iter_num * rhs.iter_num);

     for (size_t i = 0; i < weights.size(); ++i)
@@ -28,7 +28,7 @@ namespace ErrorCodes
 /// If all rows had NULL, the behaviour is determined by "result_is_nullable" template parameter.
 /// true - return NULL; false - return value from empty aggregation state of nested function.

-/// When serialize_flag is set to true, the flag about presense of values is serialized
+/// When serialize_flag is set to true, the flag about presence of values is serialized
 /// regardless to the "result_is_nullable" even if it's unneeded - for protocol compatibility.

 template <bool result_is_nullable, bool serialize_flag, typename Derived>
@@ -261,7 +261,7 @@ private:
         if (actions.back().type != PatternActionType::SpecificEvent &&
             actions.back().type != PatternActionType::AnyEvent &&
             actions.back().type != PatternActionType::KleeneStar)
-            throw Exception{"Temporal condition should be preceeded by an event condition", ErrorCodes::BAD_ARGUMENTS};
+            throw Exception{"Temporal condition should be preceded by an event condition", ErrorCodes::BAD_ARGUMENTS};

         pattern_has_time = true;
         actions.emplace_back(type, duration);
@@ -124,7 +124,7 @@ struct SumMapVariants
 };

 // This template gives an aggregate function template that is narrowed
-// to accept either tuple argumen or normal argumens.
+// to accept either tuple argumen or normal arguments.
 template <bool tuple_argument>
 struct MinMapDispatchOnTupleArgument
 {
@@ -133,7 +133,7 @@ struct MinMapDispatchOnTupleArgument
 };

 // This template gives an aggregate function template that is narrowed
-// to accept either tuple argumen or normal argumens.
+// to accept either tuple argumen or normal arguments.
 template <bool tuple_argument>
 struct MaxMapDispatchOnTupleArgument
 {
@@ -148,7 +148,7 @@ private:


     // Loop through the entire events_list, update the event timestamp value
-    // The level path must be 1---2---3---...---check_events_size, find the max event level that statisfied the path in the sliding window.
+    // The level path must be 1---2---3---...---check_events_size, find the max event level that satisfied the path in the sliding window.
     // If found, returns the max event level, else return 0.
     // The Algorithm complexity is O(n).
     UInt8 getEventLevel(Data & data) const
@@ -160,7 +160,7 @@ private:

         data.sort();

-        /// events_timestamp stores the timestamp that latest i-th level event happen withing time window after previous level event.
+        /// events_timestamp stores the timestamp that latest i-th level event happen within time window after previous level event.
         /// timestamp defaults to -1, which unsigned timestamp value never meet
         /// there may be some bugs when UInt64 type timstamp overflows Int64, but it works on most cases.
         std::vector<Int64> events_timestamp(events_size, -1);
@@ -54,7 +54,7 @@ public:

     /** Create combined aggregate function (ex: sumIf)
       * from nested function (ex: sum)
-      * and arguments for combined agggregate function (ex: UInt64, UInt8 for sumIf).
+      * and arguments for combined aggregate function (ex: UInt64, UInt8 for sumIf).
       * It's assumed that function transformArguments was called before this function and 'arguments' are validated.
       */
     virtual AggregateFunctionPtr transformAggregateFunction(
@@ -34,7 +34,7 @@ TimeoutSetter::~TimeoutSetter()
     }
     catch (std::exception & e)
     {
-        // Sometimes catched on macos
+        // Sometimes caught on macos
         LOG_ERROR(&Poco::Logger::get("Client"), "TimeoutSetter: Can't reset timeouts: {}", e.what());
     }
 }
@@ -228,7 +228,7 @@ public:
     using Filter = PaddedPODArray<UInt8>;
     virtual Ptr filter(const Filter & filt, ssize_t result_size_hint) const = 0;

-    /// Permutes elements using specified permutation. Is used in sortings.
+    /// Permutes elements using specified permutation. Is used in sorting.
     /// limit - if it isn't 0, puts only first limit elements in the result.
     using Permutation = PaddedPODArray<size_t>;
     virtual Ptr permute(const Permutation & perm, size_t limit) const = 0;
@@ -239,7 +239,7 @@ public:

     /** Compares (*this)[n] and rhs[m]. Column rhs should have the same type.
       * Returns negative number, 0, or positive number (*this)[n] is less, equal, greater than rhs[m] respectively.
-      * Is used in sortings.
+      * Is used in sorting.
       *
       * If one of element's value is NaN or NULLs, then:
       * - if nan_direction_hint == -1, NaN and NULLs are considered as least than everything other;
@@ -557,7 +557,7 @@ public:
     /// Only one of several concurrent threads calling this method will call get_size or initialize,
     /// others will wait for that call to complete and will use its result (this helps prevent cache stampede).
     ///
-    /// Exceptions occuring in callbacks will be propagated to the caller.
+    /// Exceptions occurring in callbacks will be propagated to the caller.
     /// Another thread from the set of concurrent threads will then try to call its callbacks etc.
     ///
     /// Returns cached value wrapped by holder, preventing cache entry from eviction.
@@ -15,7 +15,7 @@ namespace ErrorCodes
     extern const int CANNOT_ALLOCATE_MEMORY;
 }

-/** An array of (almost) unchangable size:
+/** An array of (almost) unchangeable size:
   * the size is specified in the constructor;
   * `resize` method removes old data, and necessary only for
   * so that you can first create an empty object using the default constructor,
@@ -116,7 +116,7 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
     }
     config_processor.savePreprocessedConfig(loaded_config, preprocessed_dir);

-    /** We should remember last modification time if and only if config was sucessfully loaded
+    /** We should remember last modification time if and only if config was successfully loaded
      * Otherwise a race condition could occur during config files update:
      * File is contain raw (and non-valid) data, therefore config is not applied.
      * When file has been written (and contain valid data), we don't load new data since modification time remains the same.
@@ -42,7 +42,7 @@ public:

     ~ConfigReloader();

-    /// Call this method to run the backround thread.
+    /// Call this method to run the background thread.
     void start();

     /// Reload immediately. For SYSTEM RELOAD CONFIG query.
@@ -63,7 +63,7 @@ public:
         if (file_doesnt_exists && !create_if_need)
         {
             throw Poco::Exception("File " + path + " does not exist. "
-                "You must create it manulally with appropriate value or 0 for first start.");
+                "You must create it manually with appropriate value or 0 for first start.");
         }

         int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
@@ -33,8 +33,8 @@
     M(QueryThread, "Number of query processing threads") \
     M(ReadonlyReplica, "Number of Replicated tables that are currently in readonly state due to re-initialization after ZooKeeper session loss or due to startup without ZooKeeper configured.") \
     M(MemoryTracking, "Total amount of memory (bytes) allocated by the server.") \
-    M(MemoryTrackingInBackgroundProcessingPool, "Total amount of memory (bytes) allocated in background processing pool (that is dedicated for backround merges, mutations and fetches). Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for tables indexes and doesn't indicate memory leaks.") \
-    M(MemoryTrackingInBackgroundMoveProcessingPool, "Total amount of memory (bytes) allocated in background processing pool (that is dedicated for backround moves). Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for tables indexes and doesn't indicate memory leaks.") \
+    M(MemoryTrackingInBackgroundProcessingPool, "Total amount of memory (bytes) allocated in background processing pool (that is dedicated for background merges, mutations and fetches). Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for tables indexes and doesn't indicate memory leaks.") \
+    M(MemoryTrackingInBackgroundMoveProcessingPool, "Total amount of memory (bytes) allocated in background processing pool (that is dedicated for background moves). Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for tables indexes and doesn't indicate memory leaks.") \
     M(MemoryTrackingInBackgroundSchedulePool, "Total amount of memory (bytes) allocated in background schedule pool (that is dedicated for bookkeeping tasks of Replicated tables).") \
     M(MemoryTrackingInBackgroundBufferFlushSchedulePool, "Total amount of memory (bytes) allocated in background buffer flushes pool (that is dedicated for background buffer flushes).") \
     M(MemoryTrackingInBackgroundDistributedSchedulePool, "Total amount of memory (bytes) allocated in background distributed schedule pool (that is dedicated for distributed sends).") \
@@ -32,7 +32,7 @@ class InternalTextLogsQueue;
 class CurrentThread
 {
 public:
-    /// Return true in case of successful initializaiton
+    /// Return true in case of successful initialization
     static bool isInitialized();

     /// Handler to current thread
@@ -284,7 +284,7 @@ bool DNSResolver::updateCache()
         impl->host_name.emplace(updated_host_name);
     }

-    /// FIXME Updating may take a long time becouse we cannot manage timeouts of getaddrinfo(...) and getnameinfo(...).
+    /// FIXME Updating may take a long time because we cannot manage timeouts of getaddrinfo(...) and getnameinfo(...).
     /// DROP DNS CACHE will wait on update_mutex (possibly while holding drop_mutex)
     std::lock_guard lock(impl->update_mutex);

@@ -99,7 +99,7 @@ public:
 };


-/** Converts numberic value of any type to specified type. */
+/** Converts numeric value of any type to specified type. */
 template <typename T>
 class FieldVisitorConvertToNumber : public StaticVisitor<T>
 {
@@ -29,7 +29,7 @@ struct FixedHashTableCell
     void setZero() { full = false; }
     static constexpr bool need_zero_value_storage = false;

-    /// This Cell is only stored inside an iterator. It's used to accomodate the fact
+    /// This Cell is only stored inside an iterator. It's used to accommodate the fact
     /// that the iterator based API always provide a reference to a continuous memory
     /// containing the Key. As a result, we have to instantiate a real Key field.
     /// All methods that return a mutable reference to the Key field are named with
@@ -92,7 +92,7 @@ struct FixedHashTableCalculatedSize
   * than a HashTable in that keys are not stored in the Cell buf, but inferred
   * inside each iterator. There are a bunch of to make it faster than using
   * HashTable: a) It doesn't have a conflict chain; b) There is no key
-  * comparision; c) The number of cycles for checking cell empty is halved; d)
+  * comparison; c) The number of cycles for checking cell empty is halved; d)
   * Memory layout is tighter, especially the Clearable variants.
   *
   * NOTE: For Set variants this should always be better. For Map variants
@@ -67,7 +67,7 @@ public:
     /// produce it, saves the result in the cache and returns it.
     /// Only one of several concurrent threads calling getOrSet() will call load_func(),
     /// others will wait for that call to complete and will use its result (this helps prevent cache stampede).
-    /// Exceptions occuring in load_func will be propagated to the caller. Another thread from the
+    /// Exceptions occurring in load_func will be propagated to the caller. Another thread from the
     /// set of concurrent threads will then try to call its load_func etc.
     ///
     /// Returns std::pair of the cached value and a bool indicating whether the value was produced during this call.
@@ -79,7 +79,7 @@ void MemoryTracker::alloc(Int64 size)

     /// Cap the limit to the total_memory_tracker, since it may include some drift.
     ///
-    /// And since total_memory_tracker is reseted to the process resident
+    /// And since total_memory_tracker is reset to the process resident
     /// memory peridically (in AsynchronousMetrics::update()), any limit can be
     /// capped to it, to avoid possible drift.
     if (unlikely(current_hard_limit && will_be > current_hard_limit))
@@ -77,16 +77,16 @@ private:

     static std::vector<String> release(DistanceIndexQueue & queue, const std::vector<String> & prompting_strings)
     {
-        std::vector<String> ans;
-        ans.reserve(queue.size());
+        std::vector<String> answer;
+        answer.reserve(queue.size());
         while (!queue.empty())
         {
             auto top = queue.top();
             queue.pop();
-            ans.push_back(prompting_strings[top.second]);
+            answer.push_back(prompting_strings[top.second]);
         }
-        std::reverse(ans.begin(), ans.end());
-        return ans;
+        std::reverse(answer.begin(), answer.end());
+        return answer;
     }
 };

@@ -24,7 +24,7 @@ class SimpleObjectPool
 {
 protected:

-    /// Hold all avaiable objects in stack.
+    /// Hold all available objects in stack.
     std::mutex mutex;
     std::stack<std::unique_ptr<T>> stack;

@@ -44,7 +44,7 @@ namespace ErrorCodes
   * To be more precise - for use in ColumnVector.
   * It differs from std::vector in that it does not initialize the elements.
   *
-  * Made noncopyable so that there are no accidential copies. You can copy the data using `assign` method.
+  * Made noncopyable so that there are no accidental copies. You can copy the data using `assign` method.
   *
   * Only part of the std::vector interface is supported.
   *
@@ -25,7 +25,7 @@ namespace DB
   * 2. collect thread's current stack trace
   * 3. write collected stack trace to trace_pipe for TraceCollector
   *
-  * Desctructor tries to unset timer and restore previous signal handler.
+  * Destructor tries to unset timer and restore previous signal handler.
   * Note that signal handler implementation is defined by template parameter. See QueryProfilerReal and QueryProfilerCpu.
   */
 template <typename ProfilerImpl>
@@ -238,7 +238,7 @@ void RWLockImpl::unlock(GroupsContainer::iterator group_it, const String & query
 {
     std::lock_guard state_lock(internal_state_mtx);

-    /// All of theses are Undefined behavior and nothing we can do!
+    /// All of these are Undefined behavior and nothing we can do!
     if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end())
         return;
     if (rdlock_owner != readers_queue.end() && group_it != rdlock_owner)
@@ -22,7 +22,7 @@ namespace Util

     /// It looks like the singleton is the best option here, as
     /// two users of that object (OwnSplitChannel & Interpreters/executeQuery)
-    /// can't own/share that Masker properly without syncronization & locks,
+    /// can't own/share that Masker properly without synchronization & locks,
     /// and we can't afford setting global locks for each logged line.

     /// I've considered singleton alternatives, but it's unclear who should own the object,
@@ -67,7 +67,7 @@ std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext
                 error << "Invalid address alignment.";
                 break;
             case BUS_ADRERR:
-                error << "Non-existant physical address.";
+                error << "Non-existent physical address.";
                 break;
             case BUS_OBJERR:
                 error << "Object specific hardware error.";
@@ -33,9 +33,9 @@ But because ClickHouse is linked with most of the symbols exported (-rdynamic fl
 3. DWARF debug info. It contains the most detailed information about symbols and everything else.
  It allows to get source file names and line numbers from addresses. Only available if you use -g option for compiler.
  It is also used by default for ClickHouse builds, but because of its weight (about two gigabytes)
- it is splitted to separate binary and provided in clickhouse-common-static-dbg package.
+ it is split to separate binary and provided in clickhouse-common-static-dbg package.
  This separate binary is placed in /usr/lib/debug/usr/bin/clickhouse and is loaded automatically by tools like gdb, addr2line.
-When you build ClickHouse by yourself, debug info is not splitted and present in a single huge binary.
+When you build ClickHouse by yourself, debug info is not split and present in a single huge binary.

 What ClickHouse is using to provide good stack traces?

@@ -315,7 +315,7 @@ void collectSymbolsFromELF(dl_phdr_info * info,
     if (ec)
         return;

-    /// Debug info and symbol table sections may be splitted to separate binary.
+    /// Debug info and symbol table sections may be split to separate binary.
     std::filesystem::path debug_info_path = std::filesystem::path("/usr/lib/debug") / canonical_path.relative_path();

     object_name = std::filesystem::exists(debug_info_path) ? debug_info_path : canonical_path;
@@ -18,7 +18,7 @@ public:

     void getStat(::taskstats & out_stats, pid_t tid) const;

-    /// Whether the current process has permissions (sudo or cap_net_admin capabilties) to get taskstats info
+    /// Whether the current process has permissions (sudo or cap_net_admin capabilities) to get taskstats info
     static bool checkPermissions();

 #if defined(OS_LINUX)
@@ -344,7 +344,7 @@ public:

         auto callback = [this](const VolnitskyTraits::Ngram ngram, const int offset) { return this->putNGramBase(ngram, offset); };
         /// ssize_t is used here because unsigned can't be used with condition like `i >= 0`, unsigned always >= 0
-        /// And also adding from the end guarantees that we will find first occurence because we will lookup bigger offsets first.
+        /// And also adding from the end guarantees that we will find first occurrence because we will lookup bigger offsets first.
         for (auto i = static_cast<ssize_t>(needle_size - sizeof(VolnitskyTraits::Ngram)); i >= 0; --i)
             VolnitskyTraits::putNGram<CaseSensitive, ASCII>(this->needle + i, i + 1, this->needle, callback);
     }
@@ -534,11 +534,11 @@ public:
     {
         const size_t fallback_size = fallback_needles.size();

-        size_t ans = std::numeric_limits<size_t>::max();
+        size_t answer = std::numeric_limits<size_t>::max();

         for (size_t i = 0; i < fallback_size; ++i)
             if (fallback_searchers[fallback_needles[i]].search(haystack, haystack_end) != haystack_end)
-                ans = std::min(ans, fallback_needles[i]);
+                answer = std::min(answer, fallback_needles[i]);

         /// check if we have one non empty volnitsky searcher
         if (step != std::numeric_limits<size_t>::max())
@@ -554,17 +554,17 @@ public:
                     const auto res = pos - (hash[cell_num].off - 1);
                     const size_t ind = hash[cell_num].id;
                     if (res + needles[ind].size <= haystack_end && fallback_searchers[ind].compare(haystack, haystack_end, res))
-                        ans = std::min(ans, ind);
+                        answer = std::min(answer, ind);
                 }
             }
         }
     }

     /*
-      * if nothing was found, ans + 1 will be equal to zero and we can
+      * if nothing was found, answer + 1 will be equal to zero and we can
       * assign it into the result because we need to return the position starting with one
       */
-    return ans + 1;
+    return answer + 1;
 }

 template <typename CountCharsCallback>
@@ -572,11 +572,11 @@ public:
     {
         const size_t fallback_size = fallback_needles.size();

-        UInt64 ans = std::numeric_limits<UInt64>::max();
+        UInt64 answer = std::numeric_limits<UInt64>::max();

         for (size_t i = 0; i < fallback_size; ++i)
             if (auto pos = fallback_searchers[fallback_needles[i]].search(haystack, haystack_end); pos != haystack_end)
-                ans = std::min<UInt64>(ans, pos - haystack);
+                answer = std::min<UInt64>(answer, pos - haystack);

         /// check if we have one non empty volnitsky searcher
         if (step != std::numeric_limits<size_t>::max())
@@ -592,25 +592,25 @@ public:
                     const auto res = pos - (hash[cell_num].off - 1);
                     const size_t ind = hash[cell_num].id;
                     if (res + needles[ind].size <= haystack_end && fallback_searchers[ind].compare(haystack, haystack_end, res))
-                        ans = std::min<UInt64>(ans, res - haystack);
+                        answer = std::min<UInt64>(answer, res - haystack);
                 }
             }
         }
-        if (ans == std::numeric_limits<UInt64>::max())
+        if (answer == std::numeric_limits<UInt64>::max())
             return 0;
-        return count_chars(haystack, haystack + ans);
+        return count_chars(haystack, haystack + answer);
     }

     template <typename CountCharsCallback, typename AnsType>
-    inline void searchOneAll(const UInt8 * haystack, const UInt8 * haystack_end, AnsType * ans, const CountCharsCallback & count_chars) const
+    inline void searchOneAll(const UInt8 * haystack, const UInt8 * haystack_end, AnsType * answer, const CountCharsCallback & count_chars) const
     {
         const size_t fallback_size = fallback_needles.size();
         for (size_t i = 0; i < fallback_size; ++i)
         {
             const UInt8 * ptr = fallback_searchers[fallback_needles[i]].search(haystack, haystack_end);
             if (ptr != haystack_end)
-                ans[fallback_needles[i]] = count_chars(haystack, ptr);
+                answer[fallback_needles[i]] = count_chars(haystack, ptr);
         }

         /// check if we have one non empty volnitsky searcher
@@ -626,8 +626,10 @@ public:
                 {
                     const auto * res = pos - (hash[cell_num].off - 1);
                     const size_t ind = hash[cell_num].id;
-                    if (ans[ind] == 0 && res + needles[ind].size <= haystack_end && fallback_searchers[ind].compare(haystack, haystack_end, res))
-                        ans[ind] = count_chars(haystack, res);
+                    if (answer[ind] == 0
+                        && res + needles[ind].size <= haystack_end
+                        && fallback_searchers[ind].compare(haystack, haystack_end, res))
+                        answer[ind] = count_chars(haystack, res);
                 }
             }
         }
@@ -189,7 +189,7 @@ public:

     using WaitCondition = std::function<bool()>;
     /// Wait for the node to disappear or return immediately if it doesn't exist.
-    /// If condition is speficied, it is used to return early (when condition returns false)
+    /// If condition is specified, it is used to return early (when condition returns false)
     /// The function returns true if waited and false if waiting was interrupted by condition.
     bool waitForDisappear(const std::string & path, const WaitCondition & condition = {});

@@ -110,7 +110,7 @@ int32_t err \x00\x00\x00\x00
 Client sends requests. For example, create persistent node '/hello' with value 'world'.

 int32_t request_length \x00\x00\x00\x3a
-int32_t xid            \x5a\xad\x72\x3f   Arbitary number. Used for identification of requests/responses.
+int32_t xid            \x5a\xad\x72\x3f   Arbitrary number. Used for identification of requests/responses.
                                           libzookeeper uses unix timestamp for first xid and then autoincrement to that value.
 int32_t op_num         \x00\x00\x00\x01   ZOO_CREATE_OP 1
 int32_t path_length    \x00\x00\x00\x06
@@ -32,7 +32,7 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte
  * In case of failure returns false and doesn't modify buffer pointed by `dst`.
  *
  * @param src - input string, expected to be non-null and null-terminated right after the IPv4 string value.
- * @param dst - where to put output bytes, expected to be non-null and atleast IPV4_BINARY_LENGTH-long.
+ * @param dst - where to put output bytes, expected to be non-null and at IPV4_BINARY_LENGTH-long.
  * @return false if parsing failed, true otherwise.
  */
 inline bool parseIPv4(const char * src, unsigned char * dst)
@@ -68,7 +68,7 @@ inline bool parseIPv4(const char * src, unsigned char * dst)
  * IPV6_BINARY_LENGTH bytes of buffer pointed by `dst`.
  *
  * @param src - input string, expected to be non-null and null-terminated right after the IPv6 string value.
- * @param dst - where to put output bytes, expected to be non-null and atleast IPV6_BINARY_LENGTH-long.
+ * @param dst - where to put output bytes, expected to be non-null and at IPV6_BINARY_LENGTH-long.
  * @return false if parsing failed, true otherwise.
  */
 inline bool parseIPv6(const char * src, unsigned char * dst)
@@ -14,7 +14,7 @@
 #endif


-/// On overlow, the function returns unspecified value.
+/// On overflow, the function returns unspecified value.
 inline NO_SANITIZE_UNDEFINED uint64_t intExp2(int x)
 {
     return 1ULL << x;
@@ -9,7 +9,7 @@

 namespace DB
 {
-/* Transforms string from grep-wildcard-syntax ("{N..M}", "{a,b,c}" as in remote table function and "*", "?") to perl-regexp for using re2 library fo matching
+/* Transforrms string from grep-wildcard-syntax ("{N..M}", "{a,b,c}" as in remote table function and "*", "?") to perl-regexp forr using re2 library for matching
  * with such steps:
  * 1) search intervals like {0..9} and enums like {abc,xyz,qwe} in {}, replace them by regexp with pipe (expr1|expr2|expr3),
  * 2) search and replace "*" and "?".
@@ -10,7 +10,7 @@
 #include <Common/ThreadFuzzer.h>


-/** Prooves that ThreadFuzzer helps to find concurrency bugs.
+/** Proves that ThreadFuzzer helps to find concurrency bugs.
   *
   * for i in {1..10}; do ./chaos_sanitizer 1000000; done
   * for i in {1..10}; do THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 THREAD_FUZZER_SLEEP_PROBABILITY=0.1 THREAD_FUZZER_SLEEP_TIME_US=100000 ./chaos_sanitizer 1000000; done
@@ -44,7 +44,7 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c

     std::stringstream message;

-    /// TODO mess up of endianess in error message.
+    /// TODO mess up of endianness in error message.
     message << "Checksum doesn't match: corrupted data."
         " Reference: " + getHexUIntLowercase(expected_checksum.first) + getHexUIntLowercase(expected_checksum.second)
         + ". Actual: " + getHexUIntLowercase(calculated_checksum.first) + getHexUIntLowercase(calculated_checksum.second)
@@ -291,7 +291,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
             if (sign)
             {
                 /// It's well defined for unsigned data types.
-                /// In constrast, it's undefined to do negation of the most negative signed number due to overflow.
+                /// In contrast, it's undefined to do negation of the most negative signed number due to overflow.
                 double_delta = -double_delta;
             }
         }
@@ -1274,7 +1274,7 @@ INSTANTIATE_TEST_SUITE_P(Gorilla,
     )
 );

-// These 'tests' try to measure performance of encoding and decoding and hence only make sence to be run locally,
+// These 'tests' try to measure performance of encoding and decoding and hence only make sense to be run locally,
 // also they require pretty big data to run against and generating this data slows down startup of unit test process.
 // So un-comment only at your discretion.

@@ -45,7 +45,7 @@ struct BlockInfo
     void read(ReadBuffer & in);
 };

-/// Block extention to support delayed defaults. AddingDefaultsBlockInputStream uses it to replace missing values with column defaults.
+/// Block extension to support delayed defaults. AddingDefaultsBlockInputStream uses it to replace missing values with column defaults.
 class BlockMissingValues
 {
 public:
@@ -31,7 +31,7 @@ template <> inline Int64 scaleMultiplier<Int64>(UInt32 scale) { return common::e
 template <> inline Int128 scaleMultiplier<Int128>(UInt32 scale) { return common::exp10_i128(scale); }

 /** Components of DecimalX value:
-  * whole - represents whole part of decimal, can be negatve or positive.
+  * whole - represents whole part of decimal, can be negative or positive.
   * fractional - for fractional part of decimal, always positive.
   */
 template <typename T>
@@ -64,7 +64,7 @@
 #define DBMS_MIN_REVISION_WITH_LOW_CARDINALITY_TYPE 54405
 #define DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO 54420

-/// Mininum revision supporting SettingsBinaryFormat::STRINGS.
+/// Minimum revision supporting SettingsBinaryFormat::STRINGS.
 #define DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS 54429

 /// Version of ClickHouse TCP protocol. Set to git tag with latest protocol change.
@@ -154,7 +154,7 @@ class IColumn;
     \
     M(UInt64, max_concurrent_queries_for_user, 0, "The maximum number of concurrent requests per user.", 0) \
     \
-    M(Bool, insert_deduplicate, true, "For INSERT queries in the replicated table, specifies that deduplication of insertings blocks should be preformed", 0) \
+    M(Bool, insert_deduplicate, true, "For INSERT queries in the replicated table, specifies that deduplication of insertings blocks should be performed", 0) \
     \
     M(UInt64, insert_quorum, 0, "For INSERT queries in the replicated table, wait writing for the specified number of replicas and linearize the addition of the data. 0 - disabled.", 0) \
     M(Milliseconds, insert_quorum_timeout, 600000, "", 0) \
@@ -45,7 +45,7 @@ static void checkCalculated(const ColumnWithTypeAndName & col_read,
         throw Exception("Unexpected defaults count", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

     if (!col_read.type->equals(*col_defaults.type))
-        throw Exception("Mismach column types while adding defaults", ErrorCodes::TYPE_MISMATCH);
+        throw Exception("Mismatch column types while adding defaults", ErrorCodes::TYPE_MISMATCH);
 }

 static void mixNumberColumns(
@@ -151,7 +151,7 @@ Block AddingDefaultsBlockInputStream::readImpl()
     if (block_missing_values.empty())
         return res;

-    /// res block alredy has all columns values, with default value for type
+    /// res block already has all columns values, with default value for type
     /// (not value specified in table). We identify which columns we need to
     /// recalculate with help of block_missing_values.
     Block evaluate_block{res};
@@ -83,7 +83,7 @@ public:
     {
         IBlockInputStream::cancel(kill);

-        /// Wait for some backgroud calculations to be sure,
+        /// Wait for some background calculations to be sure,
         /// that after end of stream nothing is being executing.
         if (started)
             pool.wait();
@@ -249,7 +249,7 @@ void MergingSortedBlockInputStream::merge(MutableColumns & merged_columns, TSort
             return;
         }

-        /// We have read all data. Ask childs to cancel providing more data.
+        /// We have read all data. Ask children to cancel providing more data.
         cancel(false);
         finished = true;
     }
@@ -21,7 +21,7 @@ class MergingSortedBlockInputStream : public IBlockInputStream
 {
 public:
     /** limit - if isn't 0, then we can produce only first limit rows in sorted order.
-      * out_row_sources - if isn't nullptr, then at the end of execution it should contain part numbers of each readed row (and needed flag)
+      * out_row_sources - if isn't nullptr, then at the end of execution it should contain part numbers of each read row (and needed flag)
       * quiet - don't log profiling info
       */
     MergingSortedBlockInputStream(
@@ -327,7 +327,7 @@ private:
       * - the queue (just processed source will be processed the next time later than the rest)
       * - stack (just processed source will be processed as soon as possible).
       *
-      * The stack is better than the queue when you need to do work on reading one source more consequentially,
+      * The stack is better than the queue when you need to do work on reading one source more consequently,
       * and theoretically, this allows you to achieve more consequent/consistent reads from the disk.
       *
       * But when using the stack, there is a problem with distributed query processing:
@@ -50,7 +50,7 @@ void ParallelParsingBlockInputStream::cancel(bool kill)
     /*
      * The format parsers themselves are not being cancelled here, so we'll
      * have to wait until they process the current block. Given that the
-     * chunk size is on the order of megabytes, this should't be too long.
+     * chunk size is on the order of megabytes, this shouldn't be too long.
      * We can't call IInputFormat->cancel here, because the parser object is
      * local to the parser thread, and we don't want to introduce any
      * synchronization between parser threads and the other threads to get
@@ -177,7 +177,7 @@ void ParallelParsingBlockInputStream::parserThreadFunction(ThreadGroupStatusPtr
         unit.block_ext.block_missing_values.clear();

         // We don't know how many blocks will be. So we have to read them all
-        // until an empty block occured.
+        // until an empty block occurred.
         Block block;
         while (!finished && (block = parser->read()) != Block())
         {
@@ -35,7 +35,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
         addTableLock(
             storage->lockForShare(context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout));

-    /// If the "root" table deduplactes blocks, there are no need to make deduplication for children
+    /// If the "root" table deduplicates blocks, there are no need to make deduplication for children
     /// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks
     bool disable_deduplication_for_children = false;
     if (!context.getSettingsRef().deduplicate_blocks_in_dependent_materialized_views)
@@ -21,7 +21,7 @@ namespace DB
   * SimpleAggregateFunction(anyLast, LowCardinality(Nullable(String)))
   * SimpleAggregateFunction(anyLast, IPv4)
   *
-  * Technically, a standard IDataType is instanciated and customized with IDataTypeCustomName and DataTypeCustomDesc.
+  * Technically, a standard IDataType is instantiated and customized with IDataTypeCustomName and DataTypeCustomDesc.
   */

 class DataTypeCustomSimpleAggregateFunction : public IDataTypeCustomName
@@ -57,7 +57,7 @@ public:
   *  * DateTime64 value and scale factor.
   *
   * Suitable Transfotm-types are commonly used in Date/DateTime manipulation functions,
-  * and should implement static (or const) fucntion with following signatures:
+  * and should implement static (or const) function with following signatures:
   *     R execute(UInt32 whole_value, ... , const TimeZoneImpl &)
   *  OR
   *     R execute(DateTime64 value, Int64 scale_factor, ... , const TimeZoneImpl &)
@@ -307,7 +307,7 @@ public:
     virtual DataTypePtr promoteNumericType() const;

     /** Directly insert default value into a column. Default implementation use method IColumn::insertDefault.
-      * This should be overriden if data type default value differs from column default value (example: Enum data types).
+      * This should be overridden if data type default value differs from column default value (example: Enum data types).
       */
     virtual void insertDefaultInto(IColumn & column) const;

@@ -84,8 +84,8 @@ std::string createCommaSeparatedStringFrom(const Names & names)

 std::string extractTableName(const std::string & nested_name)
 {
-    auto splitted = splitName(nested_name);
-    return splitted.first;
+    auto split = splitName(nested_name);
+    return split.first;
 }


@@ -150,10 +150,10 @@ NamesAndTypesList collect(const NamesAndTypesList & names_and_types)
         bool collected = false;
         if (const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(name_type.type.get()))
         {
-            auto splitted = splitName(name_type.name);
-            if (!splitted.second.empty())
+            auto split = splitName(name_type.name);
+            if (!split.second.empty())
             {
-                nested[splitted.first].emplace_back(splitted.second, type_arr->getNestedType());
+                nested[split.first].emplace_back(split.second, type_arr->getNestedType());
                 collected = true;
             }
         }
@@ -184,12 +184,12 @@ void validateArraySizes(const Block & block)
             if (!typeid_cast<const ColumnArray *>(elem.column.get()))
                 throw Exception("Column with Array type is not represented by ColumnArray column: " + elem.column->dumpStructure(), ErrorCodes::ILLEGAL_COLUMN);

-            auto splitted = splitName(elem.name);
+            auto split = splitName(elem.name);

             /// Is it really a column of Nested data structure.
-            if (!splitted.second.empty())
+            if (!split.second.empty())
             {
-                auto [it, inserted] = nested.emplace(splitted.first, i);
+                auto [it, inserted] = nested.emplace(split.first, i);

                 /// It's not the first column of Nested data structure.
                 if (!inserted)
@@ -200,7 +200,7 @@ void validateArraySizes(const Block & block)
                     if (!first_array_column.hasEqualOffsets(another_array_column))
                         throw Exception("Elements '" + block.getByPosition(it->second).name
                             + "' and '" + elem.name
-                            + "' of Nested data structure '" + splitted.first
+                            + "' of Nested data structure '" + split.first
                             + "' (Array columns) have different array sizes.", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
                 }
             }
@@ -299,7 +299,7 @@ DataTypePtr getLeastSupertype(const DataTypes & types)
         }

         if (num_supported != type_ids.size())
-            throw Exception(getExceptionMessagePrefix(types) + " because some of them have no lossless convertion to Decimal",
+            throw Exception(getExceptionMessagePrefix(types) + " because some of them have no lossless conversion to Decimal",
                 ErrorCodes::NO_COMMON_TYPE);

         UInt32 max_scale = 0;
@@ -33,8 +33,8 @@ struct Int64Hasher
 /*
     Class for storing cache index.
     It consists of two arrays.
-    The first one is splitted into buckets (each stores 8 elements (cells)) determined by hash of the element key.
-    The second one is splitted into 4bit numbers, which are positions in bucket for next element write (So cache uses FIFO eviction algorithm inside each bucket).
+    The first one is split into buckets (each stores 8 elements (cells)) determined by hash of the element key.
+    The second one is split into 4bit numbers, which are positions in bucket for next element write (So cache uses FIFO eviction algorithm inside each bucket).
 */
 template <typename K, typename V, typename Hasher, typename Deleter = EmptyDeleter>
 class BucketCacheIndex
@@ -333,7 +333,7 @@ private:

     /// This lock is used for the inner cache state update function lock it for
     /// write, when it need to update cache state all other functions just
-    /// readers. Suprisingly this lock is also used for last_exception pointer.
+    /// readers. Surprisingly this lock is also used for last_exception pointer.
     mutable std::shared_mutex rw_lock;

     /// Actual size will be increased to match power of 2
@@ -342,7 +342,7 @@ private:
     /// all bits to 1 mask (size - 1) (0b1000 - 1 = 0b111)
     const size_t size_overlap_mask;

-    /// Max tries to find cell, overlaped with mask: if size = 16 and start_cell=10: will try cells: 10,11,12,13,14,15,0,1,2,3
+    /// Max tries to find cell, overlapped with mask: if size = 16 and start_cell=10: will try cells: 10,11,12,13,14,15,0,1,2,3
     static constexpr size_t max_collision_length = 10;

     const size_t zero_cell_idx{getCellIdx(0)};
@@ -377,7 +377,7 @@ private:
      * they would be passed as a return value of get(), but for Unknown Reasons the dictionaries use a baroque
      * interface where get() accepts two callback, one that it calls for found values, and one for not found.
      *
-     * Now we make it even uglier by doing this from multiple threads. The missing values are retreived from the
+     * Now we make it even uglier by doing this from multiple threads. The missing values are retrieved from the
      * dictionary in a background thread, and this thread calls the provided callback. So if you provide the callbacks,
      * you MUST wait until the background update finishes, or god knows what happens. Unfortunately, we have no
      * way to check that you did this right, so good luck.
@@ -401,7 +401,7 @@ private:
         std::vector<Key> requested_ids;

         /// It might seem that it is a leak of performance.
-        /// But aquiring a mutex without contention is rather cheap.
+        /// But acquiring a mutex without contention is rather cheap.
         std::mutex callback_mutex;
         bool can_use_callback{true};

@@ -679,7 +679,7 @@ private:
     /// all bits to 1 mask (size - 1) (0b1000 - 1 = 0b111)
     const size_t size_overlap_mask;

-    /// Max tries to find cell, overlaped with mask: if size = 16 and start_cell=10: will try cells: 10,11,12,13,14,15,0,1,2,3
+    /// Max tries to find cell, overlapped with mask: if size = 16 and start_cell=10: will try cells: 10,11,12,13,14,15,0,1,2,3
     static constexpr size_t max_collision_length = 10;

     const UInt64 zero_cell_idx{getCellIdx(StringRef{})};
@@ -161,7 +161,7 @@ void SlabsPolygonIndex::indexBuild(const std::vector<Polygon> & polygons)
         if (l == n || sorted_x[l] != all_edges[i].l.x() || sorted_x[r] != all_edges[i].r.x())
         {
             throw Exception(ErrorCodes::LOGICAL_ERROR,
-                "Error occured while building polygon index. Edge {} is [{}, {}] but found [{}, {}]. l = {}, r = {}",
+                "Error occurred while building polygon index. Edge {} is [{}, {}] but found [{}, {}]. l = {}, r = {}",
                 i, all_edges[i].l.x(), all_edges[i].r.x(), sorted_x[l], sorted_x[r], l, r);
         }

@@ -83,7 +83,7 @@ private:

     Poco::Logger * log;

-    /** Sorted distinct coordinates of all vertexes */
+    /** Sorted distinct coordinates of all vertices */
     std::vector<Coord> sorted_x;
     std::vector<Edge> all_edges;

@@ -349,7 +349,7 @@ struct DecimalBinaryOperation
     }

 private:
-    /// there's implicit type convertion here
+    /// there's implicit type conversion here
     static NativeResultType apply(NativeResultType a, NativeResultType b)
     {
         if constexpr (can_overflow && _check_overflow)
@@ -51,7 +51,7 @@ public:
                 "Function " + getName()
                     + " supports 1 or 2 or 3 arguments. The 1st argument "
                       "must be of type Date or DateTime. The 2nd argument (optional) must be "
-                      "a constant UInt8 with week mode. The 3nd argument (optional) must be "
+                      "a constant UInt8 with week mode. The 3rd argument (optional) must be "
                       "a constant string with timezone name",
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
         }
@@ -67,7 +67,7 @@ public:
                 "Function " + getName()
                     + " supports 1 or 2 or 3 arguments. The 1st argument "
                       "must be of type Date or DateTime. The 2nd argument (optional) must be "
-                      "a constant UInt8 with week mode. The 3nd argument (optional) must be "
+                      "a constant UInt8 with week mode. The 3rd argument (optional) must be "
                       "a constant string with timezone name",
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
         if (!isString(arguments[2].type))
@@ -75,7 +75,7 @@ public:
                 "Function " + getName()
                     + " supports 1 or 2 or 3 arguments. The 1st argument "
                       "must be of type Date or DateTime. The 2nd argument (optional) must be "
-                      "a constant UInt8 with week mode. The 3nd argument (optional) must be "
+                      "a constant UInt8 with week mode. The 3rd argument (optional) must be "
                       "a constant string with timezone name",
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
         if (isDate(arguments[0].type) && std::is_same_v<ToDataType, DataTypeDate>)
@@ -426,7 +426,7 @@ public:
     template <typename FieldType>
     using TransformExecuteReturnType = decltype(std::declval<Transform>().execute(FieldType(), 0, std::declval<DateLUTImpl>()));

-    // Deduces RETURN DataType from INTPUT DataType, based on return type of Transform{}.execute(INPUT_TYPE, UInt64, DateLUTImpl).
+    // Deduces RETURN DataType from INPUT DataType, based on return type of Transform{}.execute(INPUT_TYPE, UInt64, DateLUTImpl).
     // e.g. for Transform-type that has execute()-overload with 'UInt16' input and 'UInt32' return,
     // argument type is expected to be 'Date', and result type is deduced to be 'DateTime'.
     template <typename FromDataType>
@@ -33,10 +33,10 @@ namespace ErrorCodes
  * Convert bitmap to integer array:
  *  bitmapToArray: bitmap -> integer[]
  *
- * Retrun the smallest value in the set:
+ * Return the smallest value in the set:
  *  bitmapMin: bitmap -> integer
  *
- * Retrun the greatest value in the set:
+ * Return the greatest value in the set:
  *  bitmapMax: bitmap -> integer
  *
  * Return subset in specified range (not include the range_end):
@@ -60,7 +60,7 @@ namespace ErrorCodes
  * Two bitmap andnot calculation:
  *  bitmapAndnot: bitmap,bitmap -> bitmap
  *
- * Retrun bitmap cardinality:
+ * Return bitmap cardinality:
  *  bitmapCardinality: bitmap -> integer
  *
  * Two bitmap and calculation, return cardinality:
@@ -378,7 +378,7 @@ struct ConvertImpl<FromDataType, std::enable_if_t<!std::is_same_v<FromDataType,
         else if constexpr (std::is_same_v<FromDataType, DataTypeDateTime64>)
             data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + vec_from.getScale() + 1));
         else
-            data_to.resize(size * 3);   /// Arbitary
+            data_to.resize(size * 3);   /// Arbitrary

         offsets_to.resize(size);

@@ -579,7 +579,7 @@ struct ConvertThroughParsing
         if constexpr (std::is_same_v<ToDataType, DataTypeDateTime> || to_datetime64)
         {
             const auto result_type = removeNullable(block.getByPosition(result).type);
-            // Time zone is already figured out during result type resultion, no need to do it here.
+            // Time zone is already figured out during result type resolution, no need to do it here.
             if (const auto dt_col = checkAndGetDataType<ToDataType>(result_type.get()))
                 local_time_zone = &dt_col->getTimeZone();
             else
@@ -967,7 +967,7 @@ public:
             else if constexpr (std::is_same_v<Name, NameToDecimal128>)
                 return createDecimal<DataTypeDecimal>(38, scale);

-            throw Exception("Someting wrong with toDecimalNN()", ErrorCodes::LOGICAL_ERROR);
+            throw Exception("Something wrong with toDecimalNN()", ErrorCodes::LOGICAL_ERROR);
         }
         else
         {
@@ -1211,7 +1211,7 @@ public:
                 res = createDecimal<DataTypeDecimal>(38, scale);

             if (!res)
-                throw Exception("Someting wrong with toDecimalNNOrZero() or toDecimalNNOrNull()", ErrorCodes::LOGICAL_ERROR);
+                throw Exception("Something wrong with toDecimalNNOrZero() or toDecimalNNOrNull()", ErrorCodes::LOGICAL_ERROR);
         }
         else
             res = std::make_shared<ToDataType>();
@@ -1380,7 +1380,7 @@ struct ToNumberMonotonicity
             return {};
         }

-        /// Size of type is shrinked.
+        /// Size of type is shrunk.
         if (size_of_from > size_of_to)
         {
             /// Function cannot be monotonic on unbounded ranges.
@@ -484,7 +484,7 @@ struct ImplXxHash32
     static auto apply(const char * s, const size_t len) { return XXH32(s, len, 0); }
     /**
       *  With current implementation with more than 1 arguments it will give the results
-      *  non-reproducable from outside of CH.
+      *  non-reproducible from outside of CH.
       *
       *  Proper way of combining several input is to use streaming mode of hash function
       *  https://github.com/Cyan4973/xxHash/issues/114#issuecomment-334908566
@@ -507,7 +507,7 @@ struct ImplXxHash64

     /*
        With current implementation with more than 1 arguments it will give the results
-       non-reproducable from outside of CH. (see comment on ImplXxHash32).
+       non-reproducible from outside of CH. (see comment on ImplXxHash32).
      */
     static auto combineHashes(UInt64 h1, UInt64 h2) { return CityHash_v1_0_2::Hash128to64(uint128_t(h1, h2)); }

@@ -232,7 +232,7 @@ public:

     /* Register new implementation for function.
      *
-     * Arch - required instruction set for running the implementation. It's guarantied that no method would
+     * Arch - required instruction set for running the implementation. It's guaranteed that no method would
      * be called (even the constructor and static methods) if the processor doesn't support this instruction set.
      *
      * FunctionImpl - implementation, should be inherited from template argument FunctionInterface.
@@ -179,9 +179,9 @@ private:
     {
         inner,                               /// The cell is completely inside polygon.
         outer,                               /// The cell is completely outside of polygon.
-        singleLine,                          /// The cell is splitted to inner/outer part by a single line.
-        pairOfLinesSingleConvexPolygon,      /// The cell is splitted to inner/outer part by a polyline of two sections and inner part is convex.
-        pairOfLinesSingleNonConvexPolygons,  /// The cell is splitted to inner/outer part by a polyline of two sections and inner part is non convex.
+        singleLine,                          /// The cell is split to inner/outer part by a single line.
+        pairOfLinesSingleConvexPolygon,      /// The cell is split to inner/outer part by a polyline of two sections and inner part is convex.
+        pairOfLinesSingleNonConvexPolygons,  /// The cell is split to inner/outer part by a polyline of two sections and inner part is non convex.
         pairOfLinesDifferentPolygons,        /// The cell is spliited by two lines to three different parts.
         complexPolygon                       /// Generic case.
     };
@@ -10,12 +10,12 @@
  * checking platform in runtime (see isArchSupported() below).
  *
  * If compiler is not gcc/clang or target isn't x86_64 or ENABLE_MULTITARGET_CODE
- * was set to OFF in cmake, all code inside these macroses will be removed and
+ * was set to OFF in cmake, all code inside these macros will be removed and
  * USE_MUTLITARGE_CODE will be set to 0. Use #if USE_MUTLITARGE_CODE whenever you
  * use anything from this namespaces.
  *
 * For similarities there is a macros DECLARE_DEFAULT_CODE, which wraps code
- * into the namespace TargetSpecific::Default but dosn't specify any additional
+ * into the namespace TargetSpecific::Default but doesn't specify any additional
 * copile options. Functions and classes inside this macros are available regardless
 * of USE_MUTLITARGE_CODE.
 *
@@ -247,14 +247,14 @@ public:
         if (const ColumnConst * col_const_arr = checkAndGetColumnConst<ColumnArray>(block.getByPosition(arguments[2]).column.get()))
         {
             if (!col_const_arr)
-                throw Exception("Thrid argument for function " + getName() + " must be Array of constant strings", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+                throw Exception("Third argument for function " + getName() + " must be Array of constant strings", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

             Array src_arr = col_const_arr->getValue<Array>();

             for (size_t i = 0; i < src_arr.size(); ++i)
             {
                 if (src_arr[i].getType() != Field::Types::String)
-                    throw Exception("Thrid argument for function " + getName() + " must be Array of constant strings", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+                    throw Exception("Third argument for function " + getName() + " must be Array of constant strings", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
                 variant_names.push_back(src_arr[i].get<const String &>());
             }
         }
@@ -276,7 +276,7 @@ public:
 }

 if (variant_names.size() != xs.size() || xs.size() != ys.size())
-    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Sizes of arguments doen't match: variant_names: {}, xs: {}, ys: {}", variant_names.size(), xs.size(), ys.size());
+    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Sizes of arguments doesn't match: variant_names: {}, xs: {}, ys: {}", variant_names.size(), xs.size(), ys.size());

 if (variant_names.size() < 2)
     throw Exception(ErrorCodes::BAD_ARGUMENTS, "Sizes of arguments must be larger than 1. variant_names: {}, xs: {}, ys: {}", variant_names.size(), xs.size(), ys.size());

@@ -88,7 +88,7 @@ ArraysDepths getArraysDepths(const ColumnsWithTypeAndName & arguments)

 if (clear_depth > max_array_depth)
     throw Exception("Incorrect arguments for function arrayEnumerateUniqRanked or arrayEnumerateDenseRanked: clear_depth ("
-        + std::to_string(clear_depth) + ") cant be larger than max_array_depth (" + std::to_string(max_array_depth) + ").",
+        + std::to_string(clear_depth) + ") can't be larger than max_array_depth (" + std::to_string(max_array_depth) + ").",
         ErrorCodes::BAD_ARGUMENTS);

 return {clear_depth, depths, max_array_depth};

@@ -17,7 +17,7 @@
  * This is very unusual function made as a special order for Yandex.Metrica.
  *
  * arrayEnumerateUniqRanked(['hello', 'world', 'hello']) = [1, 1, 2]
- * - it returns similar structured array containing number of occurence of the corresponding value.
+ * - it returns similar structured array containing number of occurrence of the corresponding value.
  *
  * arrayEnumerateUniqRanked([['hello', 'world'], ['hello'], ['hello']], 1) = [1, 1, 2]
  * - look at the depth 1 by default. Elements are ['hello', 'world'], ['hello'], ['hello'].

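For reference, the behaviour this comment describes can be checked straight from SQL. A minimal illustration built only from the two examples the comment itself gives, so the expected results are the documented ones:

    SELECT arrayEnumerateUniqRanked(['hello', 'world', 'hello']);
    -- [1, 1, 2]: the second 'hello' is the second occurrence of that value.

    SELECT arrayEnumerateUniqRanked([['hello', 'world'], ['hello'], ['hello']], 1);
    -- [1, 1, 2]: at depth 1 the elements compared are the inner arrays themselves.
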
@@ -31,7 +31,7 @@ enum class ExtractAllGroupsResultKind

 /** Match all groups of given input string with given re, return array of arrays of matches.
  *
- * Depending on `Impl::Kind`, result is either grouped by grop id (Horizontal) or in order of appearance (Vertical):
+ * Depending on `Impl::Kind`, result is either grouped by group id (Horizontal) or in order of appearance (Vertical):
  *
  * SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)')
  * =>

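The comment's example output is cut off by the diff; for orientation, both variants on that input, with expected shapes as the documented Horizontal/Vertical semantics describe them (exact rendering may vary by version):

    SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)');
    -- [['abc','111'],['def','222'],['ghi','333']] : one array per match, in order of appearance

    SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)');
    -- [['abc','def','ghi'],['111','222','333']] : one array per capture group
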
@@ -61,7 +61,7 @@ template <> struct ActionValueTypeMap<DataTypeDateTime64> { using ActionValueTyp
  * It is implemented in two steps.
  * At first step, it creates a pattern of zeros, literal characters, whitespaces, etc.
  * and quickly fills resulting character array (string column) with this pattern.
- * At second step, it walks across the resulting character array and modifies/replaces specific charaters,
+ * At second step, it walks across the resulting character array and modifies/replaces specific characters,
  * by calling some functions by pointers and shifting cursor by specified amount.
  *
  * Advantages:

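The two-step scheme described here backs formatDateTime. A short usage sketch, with placeholders per the function's documentation:

    SELECT formatDateTime(toDateTime('2020-01-02 03:04:05'), '%Y-%m-%d %H:%M:%S');
    -- '2020-01-02 03:04:05': the constant parts of the pattern are laid out once,
    -- then only the %-placeholders are overwritten for each row.
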
@@ -225,7 +225,7 @@ struct FormatImpl
     index_positions = big_index_positions_buffer.get();
 }

-/// Vector of substrings of pattern that will be copied to the ans, not string view because of escaping and iterators invalidation.
+/// Vector of substrings of pattern that will be copied to the answer, not string view because of escaping and iterators invalidation.
 /// These are exactly what is between {} tokens, for `Hello {} world {}` we will have [`Hello `, ` world `, ``].
 std::vector<String> substrings;

@@ -236,7 +236,7 @@ struct FormatImpl
 for (String & str : substrings)
 {
     /// To use memcpySmallAllowReadWriteOverflow15 for substrings we should allocate a bit more to each string.
-    /// That was chosen due to perfomance issues.
+    /// That was chosen due to performance issues.
     if (!str.empty())
         str.reserve(str.size() + right_padding);
     final_size += str.size();

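FormatImpl backs the SQL format function, and the `Hello {} world {}` pattern from the comment in the previous hunk makes a convenient check (argument values are illustrative):

    SELECT format('Hello {} world {}', 'brave', 'again');
    -- 'Hello brave world again': the constant substrings [`Hello `, ` world `, ``]
    -- are copied over and the arguments are spliced between them.
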
@@ -148,7 +148,7 @@ public:

 if (!res_offsets.empty() && res_offsets.back() != res_strings.size())
 {
-    throw Exception("Arrary column size mismatch (internal logical error)" +
+    throw Exception("Array column size mismatch (internal logical error)" +
         std::to_string(res_offsets.back()) + " != " + std::to_string(res_strings.size()),
         ErrorCodes::LOGICAL_ERROR);
 }

@@ -230,7 +230,7 @@ SOFTWARE.
 range = _mm_or_si128(range, _mm_alignr_epi8(tmp1, tmp2, 13));

 /*
- * Now we have below range indices caluclated
+ * Now we have below range indices calculated
  * Correct cases:
  * - 8 for C0~FF
  * - 3 for 1st byte after F0~FF

@@ -97,7 +97,7 @@ public:
     bool compress_ = false, /// If true - set Content-Encoding header and compress the result.
     CompressionMethod compression_method_ = CompressionMethod::None);

-/// Writes progess in repeating HTTP headers.
+/// Writes progress in repeating HTTP headers.
 void onProgress(const Progress & progress);

 /// Send at least HTTP headers if no data has been sent yet.

@@ -24,7 +24,7 @@ static void parse_trash_string_as_uint_must_fail(const std::string & str)
         return;
     }

-    std::cerr << "Parsing must fail, but finished sucessfully x=" << x;
+    std::cerr << "Parsing must fail, but finished successfully x=" << x;
     exit(-1);
 }

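The same strict-versus-permissive contract this test asserts is visible at the SQL level; an illustration assuming the standard conversion family (the test file itself doesn't name these functions):

    SELECT toUInt32OrZero('trash');  -- 0: the permissive variant falls back to a default
    SELECT toUInt32('trash');        -- throws: strict parsing of trash input must fail,
                                     -- which is exactly what this test checks
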
@@ -18,7 +18,7 @@ public:
 ASTPtr default_value;
 /// Attribute expression
 ASTPtr expression;
-/// Is atribute mirrored to the parent identifier
+/// Is attribute mirrored to the parent identifier
 bool hierarchical;
 /// Flag that shows whether the id->attribute image is injective
 bool injective;

@@ -1097,7 +1097,7 @@ const char * ParserAlias::restricted_keywords[] =
 "ASOF",
 "SEMI",
 "ANTI",
-"ONLY", /// YQL synonim for ANTI. Note: YQL is the name of one of Yandex proprietary languages, completely unrelated to ClickHouse.
+"ONLY", /// YQL synonym for ANTI. Note: YQL is the name of one of Yandex proprietary languages, completely unrelated to ClickHouse.
 "ON",
 "USING",
 "PREWHERE",

@@ -350,7 +350,7 @@ protected:
 };

 /** Parser for function with arguments like KEY VALUE (space separated)
- * no commas alowed, just space-separated pairs.
+ * no commas allowed, just space-separated pairs.
  */
 class ParserFunctionWithKeyValueArguments : public IParserBase
 {

@@ -354,7 +354,7 @@ protected:

 /// Parses complete dictionary create query. Uses ParserDictionary and
 /// ParserDictionaryAttributeDeclaration. Produces ASTCreateQuery.
-/// CREATE DICTIONAY [IF NOT EXISTS] [db.]name (attrs) PRIMARY KEY key SOURCE(s(params)) LAYOUT(l(params)) LIFETIME([min v1 max] v2) [RANGE(min v1 max v2)]
+/// CREATE DICTIONARY [IF NOT EXISTS] [db.]name (attrs) PRIMARY KEY key SOURCE(s(params)) LAYOUT(l(params)) LIFETIME([min v1 max] v2) [RANGE(min v1 max v2)]
 class ParserCreateDictionaryQuery : public IParserBase
 {
 protected:

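A complete statement matching this grammar, which also exercises the space-separated KEY VALUE pairs handled by ParserFunctionWithKeyValueArguments in the previous hunk; database, table, and attribute names here are illustrative:

    CREATE DICTIONARY IF NOT EXISTS db.user_names
    (
        id UInt64,
        name String DEFAULT 'unknown' INJECTIVE
    )
    PRIMARY KEY id
    SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' DB 'db' TABLE 'users'))
    LAYOUT(FLAT())
    LIFETIME(MIN 0 MAX 300)
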
@@ -6,7 +6,7 @@
 namespace DB
 {

-/** Has arbitary non zero number of inputs and one output.
+/** Has arbitrary non zero number of inputs and one output.
  * All of them have the same structure.
  *
  * Pulls all data from first input, then all data from second input, etc...

@@ -90,7 +90,7 @@ public:

 /// Ports which have changed their state since last processor->prepare() call.
 /// They changed when neighbour processors interact with connected ports.
-/// Tey will be used as arguments for next processor->prepare() (and will be cleaned after that).
+/// They will be used as arguments for next processor->prepare() (and will be cleaned after that).
 IProcessor::PortNumbers updated_input_ports;
 IProcessor::PortNumbers updated_output_ports;

@@ -9,7 +9,7 @@ namespace ErrorCodes

 /// Simple struct which stores threads with numbers [0 .. num_threads - 1].
 /// Allows to push and pop specified thread, or pop any thread if has.
-/// Oll operations (except init) are O(1). No memory allocations after init happen.
+/// All operations (except init) are O(1). No memory allocations after init happen.
 struct ThreadsQueue
 {
     void init(size_t num_threads)

@@ -34,7 +34,7 @@ protected:
 bool finished = false;
 bool finalized = false;

-/// Flush data on each consumed chunk. This is intented for interactive applications to output data as soon as it's ready.
+/// Flush data on each consumed chunk. This is intended for interactive applications to output data as soon as it's ready.
 bool auto_flush = false;

 RowsBeforeLimitCounterPtr rows_before_limit_counter;

@@ -50,7 +50,7 @@ namespace DB
 {arrow::Type::STRING, "String"},
 {arrow::Type::BINARY, "String"},

-// TODO: add other types that are convertable to internal ones:
+// TODO: add other types that are convertible to internal ones:
 // 0. ENUM?
 // 1. UUID -> String
 // 2. JSON -> String

@@ -647,7 +647,7 @@ private:
 Poco::JSON::Parser parser;
 auto json_body = parser.parse(*response_body).extract<Poco::JSON::Object::Ptr>();
 auto schema = json_body->getValue<std::string>("schema");
-LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Succesfully fetched schema id = {}\n{}", id, schema);
+LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema);
 return avro::compileJsonSchemaFromString(schema);
 }
 catch (const Exception &)

@@ -42,10 +42,10 @@ JSONEachRowRowInputFormat::JSONEachRowRowInputFormat(
 name_map[column_name] = i; /// NOTE You could place names more cache-locally.
 if (format_settings_.import_nested_json)
 {
-    const auto splitted = Nested::splitName(column_name);
-    if (!splitted.second.empty())
+    const auto split = Nested::splitName(column_name);
+    if (!split.second.empty())
     {
-        const StringRef table_name(column_name.data(), splitted.first.size());
+        const StringRef table_name(column_name.data(), split.first.size());
         name_map[table_name] = NESTED_FIELD;
     }
 }

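The branch above is driven by the input_format_import_nested_json setting; a sketch of the behaviour it enables (table and column names are illustrative, and the exact mapping may vary by version):

    CREATE TABLE t (n Nested(s String, i UInt32)) ENGINE = Memory;
    SET input_format_import_nested_json = 1;
    INSERT INTO t FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}}
    -- The column names n.s and n.i share the prefix n, so the key "n" is
    -- registered as a NESTED_FIELD and its sub-object is spread over them.
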
@@ -70,11 +70,11 @@ using Processors = std::vector<ProcessorPtr>;
  * Limiting transformation. Pulls data from input and passes to output.
  * When there was enough data, says that it doesn't need data on its input and that data on its output port is finished.
  *
- * Resize. Has arbitary number of inputs and arbitary number of outputs.
- * Pulls data from whatever ready input and pushes it to randomly choosed free output.
+ * Resize. Has arbitrary number of inputs and arbitrary number of outputs.
+ * Pulls data from whatever ready input and pushes it to randomly chosen free output.
  * Examples:
- * Union - merge data from number of inputs to one output in arbitary order.
- * Split - read data from one input and pass it to arbitary output.
+ * Union - merge data from number of inputs to one output in arbitrary order.
+ * Split - read data from one input and pass it to arbitrary output.
  *
  * Concat. Has many inputs and only one output. Pulls all data from first input until it is exhausted,
  * then all data from second input, etc. and pushes all data to output.

@@ -104,7 +104,7 @@ using Processors = std::vector<ProcessorPtr>;
  * TODO Processor with all its parameters should represent "pure" function on streams of data from its input ports.
  * It's in question, what kind of "pure" function do we mean.
  * For example, data streams are considered equal up to order unless ordering properties are stated explicitly.
- * Another example: we should support the notion of "arbitary N-th of M substream" of full stream of data.
+ * Another example: we should support the notion of "arbitrary N-th of M substream" of full stream of data.
  */

 class IProcessor

@@ -230,7 +230,7 @@ static SummingSortedAlgorithm::ColumnsDefinition defineColumns(
     continue;
 }

-/// Are they inside the primary key or partiton key?
+/// Are they inside the primary key or partition key?
 if (isInPrimaryKey(description, column.name, i) || isInPartitionKey(column.name, partition_key_columns))
 {
     def.column_numbers_not_to_aggregate.push_back(i);

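This is the SummingMergeTree merge logic: columns that belong to the primary key or the partition key are excluded from summation. Its visible effect, in a small sketch (table name illustrative):

    CREATE TABLE sums (k UInt32, v UInt64) ENGINE = SummingMergeTree ORDER BY k;
    INSERT INTO sums VALUES (1, 10), (1, 20);
    SELECT * FROM sums FINAL;
    -- k = 1, v = 30: 'k' is in the primary key, so it is kept as-is,
    -- while 'v' is aggregated across the merged rows.
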
@@ -117,7 +117,7 @@ protected:
 while (!value.compare_exchange_weak(expected, desired))
     desired = getPtr((getUInt(expected) & FLAGS_MASK & (~mask)) | flags | getUInt(data));

-/// It's not very safe. In case of exception after exchange and before assigment we will get leak.
+/// It's not very safe. In case of exception after exchange and before assignment we will get leak.
 /// Don't know how to make it better.
 data = getPtr(getUInt(expected) & PTR_MASK);

@@ -101,7 +101,7 @@ static OutputPort * uniteExtremes(const std::vector<OutputPort *> & ports, const
 static OutputPort * uniteTotals(const std::vector<OutputPort *> & ports, const Block & header,
     QueryPipeline::ProcessorsContainer & processors)
 {
-    /// Calculate totals fro several streams.
+    /// Calculate totals from several streams.
     /// Take totals from first sources which has any, skip others.

     /// ->> Concat -> Limit

@@ -7,10 +7,10 @@
 namespace DB
 {

-/** Has arbitary non zero number of inputs and arbitary non zero number of outputs.
+/** Has arbitrary non zero number of inputs and arbitrary non zero number of outputs.
  * All of them have the same structure.
  *
- * Pulls data from arbitary input (whenever it is ready) and pushes it to arbitary output (whenever is is not full).
+ * Pulls data from arbitrary input (whenever it is ready) and pushes it to arbitrary output (whenever is is not full).
  * Doesn't do any heavy calculations.
  * Doesn't preserve an order of data.
  *

@@ -45,7 +45,7 @@ private:
 /// How to construct result block. Position in source block, where to get each column.
 ColumnNumbers conversion;
 /// Do not check that constants are same. Use value from result_header.
-/// This is needed in case run functions which are constatn in query scope,
+/// This is needed in case run functions which are constant in query scope,
 /// but may return different result being executed remotely, like `now64()` or `randConstant()`.
 /// In this case we replace constants from remote source to constatns from initiator.
 bool ignore_constant_values;

@@ -7,7 +7,7 @@ namespace DB
 {

 /** Implements modifier WITH FILL of ORDER BY clause.
- * It fills gaps in data stream by rows with missing values in columns with set WITH FILL and deafult values in other columns.
+ * It fills gaps in data stream by rows with missing values in columns with set WITH FILL and default values in other columns.
  * Optionally FROM, TO and STEP values can be specified.
  */
 class FillingTransform : public ISimpleTransform

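A usage sketch of the modifier this transform implements; the filled rows follow the documented WITH FILL semantics (TO bound exclusive):

    SELECT toUInt32(number * 2) AS n, 'original' AS src
    FROM numbers(3)
    ORDER BY n WITH FILL FROM 0 TO 5 STEP 1;
    -- Returns n = 0,1,2,3,4: the rows for n = 1 and n = 3 are generated by the
    -- transform, and their src column holds the default value (an empty string),
    -- exactly as the comment above describes.
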