Merge branch 'master' of github.com:yandex/ClickHouse

This commit is contained in:
BayoNet 2018-12-27 14:50:55 +03:00
commit 5a25d65a7c
156 changed files with 636 additions and 459 deletions

.gitmodules vendored
View File

@ -36,7 +36,7 @@
url = https://github.com/ClickHouse-Extras/llvm
[submodule "contrib/mariadb-connector-c"]
path = contrib/mariadb-connector-c
url = https://github.com/MariaDB/mariadb-connector-c.git
url = https://github.com/ClickHouse-Extras/mariadb-connector-c.git
[submodule "contrib/jemalloc"]
path = contrib/jemalloc
url = https://github.com/jemalloc/jemalloc.git

@ -1 +1 @@
Subproject commit a0fd36cc5a5313414a5a2ebe9322577a29b4782a
Subproject commit d85d0e98999cd9e28ceb66645999b4a9ce85370e

View File

@ -85,7 +85,7 @@ public:
const ColumnArray & first_array_column = static_cast<const ColumnArray &>(*columns[0]);
const IColumn::Offsets & offsets = first_array_column.getOffsets();
size_t begin = row_num == 0 ? 0 : offsets[row_num - 1];
size_t begin = offsets[row_num - 1];
size_t end = offsets[row_num];
/// Sanity check. NOTE We can implement specialization for the case with a single argument, if the check hurts performance.
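Throughout this commit the branch `row_num == 0 ? 0 : offsets[row_num - 1]` is removed because PODArray now left-pads every offsets array with a zero-initialized element, so reading the -1th offset is valid and yields 0 (see the PODArray.h changes further down). A minimal standalone sketch of the trick, with a plain array standing in for PaddedPODArray:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

int main()
{
    // One zero-initialized slot *before* element 0 plays the role of
    // PODArray's pad_left: offsets[-1] is then a valid read returning 0.
    uint64_t storage[] = {0 /* the "-1th" element */, 3, 5, 9};
    const uint64_t * offsets = storage + 1;

    for (size_t row = 0; row < 3; ++row)
    {
        // Branch-free: no `row == 0 ? 0 : offsets[row - 1]` needed.
        size_t begin = offsets[static_cast<ptrdiff_t>(row) - 1];
        size_t end = offsets[row];
        std::cout << "row " << row << ": [" << begin << ", " << end << ")\n";
    }
}
```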

View File

@ -146,7 +146,7 @@ public:
const ColumnArray & first_array_column = static_cast<const ColumnArray &>(*columns[0]);
const IColumn::Offsets & offsets = first_array_column.getOffsets();
size_t begin = row_num == 0 ? 0 : offsets[row_num - 1];
size_t begin = offsets[row_num - 1];
size_t end = offsets[row_num];
/// Sanity check. NOTE We can implement specialization for the case with a single argument, if the check hurts performance.

View File

@ -119,7 +119,7 @@ public:
ColumnArray & arr_to = static_cast<ColumnArray &>(to);
ColumnArray::Offsets & offsets_to = arr_to.getOffsets();
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + size);
offsets_to.push_back(offsets_to.back() + size);
typename ColumnVector<T>::Container & data_to = static_cast<ColumnVector<T> &>(arr_to.getData()).getData();
data_to.insert(this->data(place).value.begin(), this->data(place).value.end());
@ -370,7 +370,7 @@ public:
auto & column_array = static_cast<ColumnArray &>(to);
auto & offsets = column_array.getOffsets();
offsets.push_back((offsets.size() == 0 ? 0 : offsets.back()) + data(place).elems);
offsets.push_back(offsets.back() + data(place).elems);
auto & column_data = column_array.getData();

View File

@ -83,7 +83,7 @@ public:
const typename State::Set & set = this->data(place).value;
size_t size = set.size();
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + size);
offsets_to.push_back(offsets_to.back() + size);
typename ColumnVector<T>::Container & data_to = static_cast<ColumnVector<T> &>(arr_to.getData()).getData();
size_t old_size = data_to.size();
@ -207,7 +207,7 @@ public:
IColumn & data_to = arr_to.getData();
auto & set = this->data(place).value;
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + set.size());
offsets_to.push_back(offsets_to.back() + set.size());
for (auto & elem : set)
{

View File

@ -138,7 +138,7 @@ public:
ColumnArray::Offsets & offsets_to = arr_to.getOffsets();
size_t size = levels.size();
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + size);
offsets_to.push_back(offsets_to.back() + size);
if (!size)
return;

View File

@ -83,7 +83,7 @@ public:
const ColumnArray & array_column = static_cast<const ColumnArray &>(*columns[0]);
const IColumn::Offsets & offsets = array_column.getOffsets();
const auto & keys_vec = static_cast<const ColVecType &>(array_column.getData());
const size_t keys_vec_offset = row_num == 0 ? 0 : offsets[row_num - 1];
const size_t keys_vec_offset = offsets[row_num - 1];
const size_t keys_vec_size = (offsets[row_num] - keys_vec_offset);
// Columns 1..n contain arrays of numeric values to sum
@ -93,7 +93,7 @@ public:
Field value;
const ColumnArray & array_column = static_cast<const ColumnArray &>(*columns[col + 1]);
const IColumn::Offsets & offsets = array_column.getOffsets();
const size_t values_vec_offset = row_num == 0 ? 0 : offsets[row_num - 1];
const size_t values_vec_offset = offsets[row_num - 1];
const size_t values_vec_size = (offsets[row_num] - values_vec_offset);
// Expect key and value arrays to be of the same length

View File

@ -93,7 +93,7 @@ public:
auto result_vec = set.topK(threshold);
size_t size = result_vec.size();
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + size);
offsets_to.push_back(offsets_to.back() + size);
typename ColumnVector<T>::Container & data_to = static_cast<ColumnVector<T> &>(arr_to.getData()).getData();
size_t old_size = data_to.size();
@ -212,7 +212,7 @@ public:
IColumn & data_to = arr_to.getData();
auto result_vec = this->data(place).value.topK(threshold);
offsets_to.push_back((offsets_to.size() == 0 ? 0 : offsets_to.back()) + result_vec.size());
offsets_to.push_back(offsets_to.back() + result_vec.size());
for (auto & elem : result_vec)
{

View File

@ -166,7 +166,7 @@ void ColumnArray::insertData(const char * pos, size_t length)
if (pos != end)
throw Exception("Incorrect length argument for method ColumnArray::insertData", ErrorCodes::BAD_ARGUMENTS);
getOffsets().push_back((getOffsets().size() == 0 ? 0 : getOffsets().back()) + elems);
getOffsets().push_back(getOffsets().back() + elems);
}
@ -194,7 +194,7 @@ const char * ColumnArray::deserializeAndInsertFromArena(const char * pos)
for (size_t i = 0; i < array_size; ++i)
pos = getData().deserializeAndInsertFromArena(pos);
getOffsets().push_back((getOffsets().size() == 0 ? 0 : getOffsets().back()) + array_size);
getOffsets().push_back(getOffsets().back() + array_size);
return pos;
}
@ -216,7 +216,7 @@ void ColumnArray::insert(const Field & x)
size_t size = array.size();
for (size_t i = 0; i < size; ++i)
getData().insert(array[i]);
getOffsets().push_back((getOffsets().size() == 0 ? 0 : getOffsets().back()) + size);
getOffsets().push_back(getOffsets().back() + size);
}
@ -227,13 +227,13 @@ void ColumnArray::insertFrom(const IColumn & src_, size_t n)
size_t offset = src.offsetAt(n);
getData().insertRangeFrom(src.getData(), offset, size);
getOffsets().push_back((getOffsets().size() == 0 ? 0 : getOffsets().back()) + size);
getOffsets().push_back(getOffsets().back() + size);
}
void ColumnArray::insertDefault()
{
getOffsets().push_back(getOffsets().size() == 0 ? 0 : getOffsets().back());
getOffsets().push_back(getOffsets().back());
}

View File

@ -124,8 +124,8 @@ private:
ColumnPtr data;
ColumnPtr offsets;
size_t ALWAYS_INLINE offsetAt(size_t i) const { return i == 0 ? 0 : getOffsets()[i - 1]; }
size_t ALWAYS_INLINE sizeAt(size_t i) const { return i == 0 ? getOffsets()[0] : (getOffsets()[i] - getOffsets()[i - 1]); }
size_t ALWAYS_INLINE offsetAt(size_t i) const { return getOffsets()[i - 1]; }
size_t ALWAYS_INLINE sizeAt(size_t i) const { return getOffsets()[i] - getOffsets()[i - 1]; }
/// Multiply values if the nested column is ColumnVector<T>.

View File

@ -148,7 +148,7 @@ ColumnPtr ColumnString::permute(const Permutation & perm, size_t limit) const
for (size_t i = 0; i < limit; ++i)
{
size_t j = perm[i];
size_t string_offset = j == 0 ? 0 : offsets[j - 1];
size_t string_offset = offsets[j - 1];
size_t string_size = offsets[j] - string_offset;
memcpySmallAllowReadWriteOverflow15(&res_chars[current_new_offset], &chars[string_offset], string_size);
@ -219,7 +219,7 @@ ColumnPtr ColumnString::indexImpl(const PaddedPODArray<Type> & indexes, size_t l
for (size_t i = 0; i < limit; ++i)
{
size_t j = indexes[i];
size_t string_offset = j == 0 ? 0 : offsets[j - 1];
size_t string_offset = offsets[j - 1];
size_t string_size = offsets[j] - string_offset;
memcpySmallAllowReadWriteOverflow15(&res_chars[current_new_offset], &chars[string_offset], string_size);

View File

@ -31,10 +31,10 @@ private:
/// For convenience, every string ends with a terminating zero byte. Note that strings can contain zero bytes in the middle.
Chars chars;
size_t ALWAYS_INLINE offsetAt(size_t i) const { return i == 0 ? 0 : offsets[i - 1]; }
size_t ALWAYS_INLINE offsetAt(size_t i) const { return offsets[i - 1]; }
/// Size of i-th element, including terminating zero.
size_t ALWAYS_INLINE sizeAt(size_t i) const { return i == 0 ? offsets[0] : (offsets[i] - offsets[i - 1]); }
size_t ALWAYS_INLINE sizeAt(size_t i) const { return offsets[i] - offsets[i - 1]; }
template <bool positive>
struct less;
@ -203,7 +203,7 @@ public:
void insertDefault() override
{
chars.push_back(0);
offsets.push_back(offsets.size() == 0 ? 1 : (offsets.back() + 1));
offsets.push_back(offsets.back() + 1);
}
int compareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override

View File

@ -107,7 +107,7 @@ public:
if (s != offsets.size())
throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
return cloneDummy(s == 0 ? 0 : offsets.back());
return cloneDummy(offsets.back());
}
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override

View File

@ -0,0 +1,8 @@
#include <Common/PODArray.h>
namespace DB
{
/// Used for left padding of PODArray when empty
const char EmptyPODArray[EmptyPODArraySize]{};
}
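This new translation unit supplies the static zeroed block that empty padded arrays point into. Because an empty array's begin/end pointers aim just past this block rather than at nullptr, even `offsets.back()` on an empty column reads a zero instead of touching invalid memory, which is why the hunks above can drop the `offsets_to.size() == 0` checks. A simplified sketch of the mechanism (names and types reduced from the header below):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-ins for EmptyPODArray/EmptyPODArraySize, typed as uint64_t here
// to sidestep alignment concerns that the real code handles via rounding.
static constexpr size_t kEmptySlots = 128;
static const uint64_t EmptyZeros[kEmptySlots]{};

struct TinyOffsets
{
    // An empty array points just past the static zeros (the "null" sentinel).
    const uint64_t * begin_ = EmptyZeros + kEmptySlots;
    const uint64_t * end_ = EmptyZeros + kEmptySlots;

    uint64_t back() const { return end_[-1]; } // reads 0 while empty
};

int main()
{
    TinyOffsets offsets;
    assert(offsets.back() == 0); // no `empty() ? 0 : back()` branch required
}
```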

View File

@ -20,6 +20,11 @@
namespace DB
{
inline constexpr size_t integerRoundUp(size_t value, size_t dividend)
{
return ((value + dividend - 1) / dividend) * dividend;
}
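integerRoundUp() rounds its first argument up to the nearest multiple of the second; it is now used for both pads (for instance, the 15-byte pad_right request becomes 16 bytes for 8-byte element types). A few compile-time spot checks of the formula:

```cpp
#include <cstddef>

// Same formula as above, verified at compile time.
constexpr size_t integerRoundUp(size_t value, size_t dividend)
{
    return ((value + dividend - 1) / dividend) * dividend;
}

static_assert(integerRoundUp(15, 8) == 16, "15-byte pad with 8-byte elements -> 16");
static_assert(integerRoundUp(16, 16) == 16, "exact multiples are unchanged");
static_assert(integerRoundUp(1, 4) == 4, "1 rounds up to one whole element");

int main() {}
```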
/** A dynamic array for POD types.
* Designed for a small number of large arrays (rather than a lot of small ones).
* To be more precise - for use in ColumnVector.
@ -37,6 +42,10 @@ namespace DB
* The template parameter `pad_right` - always allocate at the end of the array as many unused bytes.
* Can be used to make optimistic reading, writing, copying with unaligned SIMD instructions.
*
* The template parameter `pad_left` - always allocate memory before the 0th element of the array (rounded up to a whole number of elements)
* and zero-initialize the -1th element. It allows using the -1th element, which will always hold the value 0.
* This gives performance benefits when converting an array of offsets to an array of sizes.
*
* Some methods using allocator have TAllocatorParams variadic arguments.
* These arguments will be passed to corresponding methods of TAllocator.
* Example: pointer to Arena, that is used for allocations.
@ -49,16 +58,25 @@ namespace DB
* TODO Pass alignment to Allocator.
* TODO Allow greater alignment than alignof(T). Example: array of char aligned to page size.
*/
template <typename T, size_t INITIAL_SIZE = 4096, typename TAllocator = Allocator<false>, size_t pad_right_ = 0>
static constexpr size_t EmptyPODArraySize = 1024;
extern const char EmptyPODArray[EmptyPODArraySize];
template <typename T, size_t INITIAL_SIZE = 4096, typename TAllocator = Allocator<false>, size_t pad_right_ = 0, size_t pad_left_ = 0>
class PODArray : private boost::noncopyable, private TAllocator /// empty base optimization
{
protected:
/// Round padding up to a whole number of elements to simplify arithmetic.
static constexpr size_t pad_right = (pad_right_ + sizeof(T) - 1) / sizeof(T) * sizeof(T);
static constexpr size_t pad_right = integerRoundUp(pad_right_, sizeof(T));
/// pad_left is also rounded up to 16 bytes to maintain alignment of allocated memory.
static constexpr size_t pad_left = integerRoundUp(integerRoundUp(pad_left_, sizeof(T)), 16);
/// Empty array will point to this static memory as padding.
static constexpr char * null = pad_left ? const_cast<char *>(EmptyPODArray) + EmptyPODArraySize : nullptr;
char * c_start = nullptr;
char * c_end = nullptr;
char * c_end_of_storage = nullptr; /// Does not include pad_right.
static_assert(pad_left <= EmptyPODArraySize && "Left Padding exceeds EmptyPODArraySize. Is the element size too large?");
char * c_start = null; /// Does not include pad_left.
char * c_end = null;
char * c_end_of_storage = null; /// Does not include pad_right.
T * t_start() { return reinterpret_cast<T *>(c_start); }
T * t_end() { return reinterpret_cast<T *>(c_end); }
@ -72,7 +90,7 @@ protected:
static size_t byte_size(size_t num_elements) { return num_elements * sizeof(T); }
/// Minimum amount of memory to allocate for num_elements, including padding.
static size_t minimum_memory_for_elements(size_t num_elements) { return byte_size(num_elements) + pad_right; }
static size_t minimum_memory_for_elements(size_t num_elements) { return byte_size(num_elements) + pad_right + pad_left; }
void alloc_for_num_elements(size_t num_elements)
{
@ -82,22 +100,24 @@ protected:
template <typename ... TAllocatorParams>
void alloc(size_t bytes, TAllocatorParams &&... allocator_params)
{
c_start = c_end = reinterpret_cast<char *>(TAllocator::alloc(bytes, std::forward<TAllocatorParams>(allocator_params)...));
c_end_of_storage = c_start + bytes - pad_right;
c_start = c_end = reinterpret_cast<char *>(TAllocator::alloc(bytes, std::forward<TAllocatorParams>(allocator_params)...)) + pad_left;
c_end_of_storage = c_start + bytes - pad_right - pad_left;
if (pad_left)
t_start()[-1] = {};
}
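The raw allocation is now laid out as [pad_left][elements...][pad_right]: c_start is advanced past the left pad, and the -1th element is zeroed on every allocation. A commented sketch of the pointer arithmetic, assuming plain malloc/free in place of the real TAllocator:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Layout of the raw block:  [pad_left][elements ...][pad_right]
//                           ^raw      ^c_start
struct PaddedBuffer
{
    static constexpr size_t pad_left = 16;
    static constexpr size_t pad_right = 16;

    char * c_start = nullptr;

    void alloc(size_t bytes) // `bytes` includes both pads, as in the real code
    {
        char * raw = static_cast<char *>(std::malloc(bytes));
        c_start = raw + pad_left;          // usable data begins after the left pad
        std::memset(raw, 0, pad_left);     // zero-initialize the -1th element
        // element capacity = bytes - pad_left - pad_right
    }

    void dealloc()
    {
        if (c_start)
            std::free(c_start - pad_left); // rewind to the raw pointer before freeing
    }
};

int main()
{
    PaddedBuffer buf;
    buf.alloc(64);
    // An offsets-style read of the "-1th" 8-byte element: guaranteed zero.
    uint64_t minus_one = reinterpret_cast<uint64_t *>(buf.c_start)[-1];
    buf.dealloc();
    return minus_one == 0 ? 0 : 1;
}
```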
void dealloc()
{
if (c_start == nullptr)
if (c_start == null)
return;
TAllocator::free(c_start, allocated_bytes());
TAllocator::free(c_start - pad_left, allocated_bytes());
}
template <typename ... TAllocatorParams>
void realloc(size_t bytes, TAllocatorParams &&... allocator_params)
{
if (c_start == nullptr)
if (c_start == null)
{
alloc(bytes, std::forward<TAllocatorParams>(allocator_params)...);
return;
@ -105,15 +125,18 @@ protected:
ptrdiff_t end_diff = c_end - c_start;
c_start = reinterpret_cast<char *>(TAllocator::realloc(c_start, allocated_bytes(), bytes, std::forward<TAllocatorParams>(allocator_params)...));
c_start = reinterpret_cast<char *>(
TAllocator::realloc(c_start - pad_left, allocated_bytes(), bytes, std::forward<TAllocatorParams>(allocator_params)...))
+ pad_left;
c_end = c_start + end_diff;
c_end_of_storage = c_start + bytes - pad_right;
c_end_of_storage = c_start + bytes - pad_right - pad_left;
if (pad_left)
t_start()[-1] = {};
}
bool isInitialized() const
{
return (c_start != nullptr) && (c_end != nullptr) && (c_end_of_storage != nullptr);
return (c_start != null) && (c_end != null) && (c_end_of_storage != null);
}
bool isAllocatedFromStack() const
@ -139,7 +162,7 @@ protected:
public:
using value_type = T;
size_t allocated_bytes() const { return c_end_of_storage - c_start + pad_right; }
size_t allocated_bytes() const { return c_end_of_storage - c_start + pad_right + pad_left; }
/// You can not just use `typedef`, because there is ambiguity for the constructors and `assign` functions.
struct iterator : public boost::iterator_adaptor<iterator, T*>
@ -378,9 +401,9 @@ public:
memcpy(dest.c_start, src.c_start, byte_size(src.size()));
dest.c_end = dest.c_start + (src.c_end - src.c_start);
src.c_start = nullptr;
src.c_end = nullptr;
src.c_end_of_storage = nullptr;
src.c_start = null;
src.c_end = null;
src.c_end_of_storage = null;
}
else
{
@ -504,15 +527,9 @@ void swap(PODArray<T, INITIAL_SIZE, TAllocator, pad_right_> & lhs, PODArray<T, I
/** For columns. Padding is enough to read and write xmm-register at the address of the last element. */
template <typename T, size_t INITIAL_SIZE = 4096, typename TAllocator = Allocator<false>>
using PaddedPODArray = PODArray<T, INITIAL_SIZE, TAllocator, 15>;
inline constexpr size_t integerRound(size_t value, size_t dividend)
{
return ((value + dividend - 1) / dividend) * dividend;
}
using PaddedPODArray = PODArray<T, INITIAL_SIZE, TAllocator, 15, 16>;
template <typename T, size_t stack_size_in_bytes>
using PODArrayWithStackMemory = PODArray<T, 0, AllocatorWithStackMemory<Allocator<false>, integerRound(stack_size_in_bytes, sizeof(T))>>;
using PODArrayWithStackMemory = PODArray<T, 0, AllocatorWithStackMemory<Allocator<false>, integerRoundUp(stack_size_in_bytes, sizeof(T))>>;
}
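The performance benefit named in the class comment (converting an array of offsets into an array of sizes) comes from exactly this padding: with a guaranteed zero at index -1, the loop has no branch and can auto-vectorize. A sketch with plain arrays standing in for PaddedPODArray:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Branch-free offsets -> sizes, relying on a zero-initialized "-1th" slot.
static void offsetsToSizes(const uint64_t * offsets, uint64_t * sizes, size_t n)
{
    for (size_t i = 0; i < n; ++i)
        sizes[i] = offsets[i] - offsets[static_cast<ptrdiff_t>(i) - 1]; // no i == 0 branch
}

int main()
{
    uint64_t storage[] = {0 /* left pad */, 3, 5, 9, 14};
    uint64_t sizes[4];
    offsetsToSizes(storage + 1, sizes, 4);
    for (uint64_t s : sizes)
        std::cout << s << ' '; // prints: 3 2 4 5
    std::cout << '\n';
}
```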

View File

@ -20,23 +20,21 @@ ZooKeeperNodeCache::ZNode ZooKeeperNodeCache::get(const std::string & path, Even
ZooKeeperNodeCache::ZNode ZooKeeperNodeCache::get(const std::string & path, Coordination::WatchCallback caller_watch_callback)
{
zkutil::ZooKeeperPtr zookeeper;
std::unordered_set<std::string> invalidated_paths;
{
std::lock_guard<std::mutex> lock(context->mutex);
if (!context->zookeeper)
if (context->all_paths_invalidated)
{
/// Possibly, there was a previous session and it has expired. Clear the cache.
path_to_cached_znode.clear();
context->zookeeper = get_zookeeper();
context->all_paths_invalidated = false;
}
zookeeper = context->zookeeper;
invalidated_paths.swap(context->invalidated_paths);
}
zkutil::ZooKeeperPtr zookeeper = get_zookeeper();
if (!zookeeper)
throw DB::Exception("Could not get znode: `" + path + "'. ZooKeeper not configured.", DB::ErrorCodes::NO_ZOOKEEPER);
@ -65,8 +63,8 @@ ZooKeeperNodeCache::ZNode ZooKeeperNodeCache::get(const std::string & path, Coor
changed = owned_context->invalidated_paths.emplace(response.path).second;
else if (response.state == Coordination::EXPIRED_SESSION)
{
owned_context->zookeeper = nullptr;
owned_context->invalidated_paths.clear();
owned_context->all_paths_invalidated = true;
changed = true;
}
}
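The cache no longer owns a ZooKeeper session: it asks get_zookeeper() for a fresh one on every call, and the watch callback only records which paths went stale, raising all_paths_invalidated on session expiry. A hedged sketch of the resulting contract (types simplified; not the real zkutil API):

```cpp
#include <mutex>
#include <string>
#include <unordered_map>
#include <unordered_set>

// Simplified sketch of the invalidation contract; znode fetching is elided.
struct Context
{
    std::mutex mutex;
    std::unordered_set<std::string> invalidated_paths;
    bool all_paths_invalidated = false; // set by the watch on EXPIRED_SESSION
};

struct NodeCacheSketch
{
    Context context;
    std::unordered_map<std::string, std::string> path_to_cached_znode;

    void pruneStaleEntries()
    {
        std::unordered_set<std::string> invalidated;
        {
            std::lock_guard<std::mutex> lock(context.mutex);
            if (context.all_paths_invalidated)
            {
                // The previous session expired: every cached znode is stale.
                path_to_cached_znode.clear();
                context.all_paths_invalidated = false;
            }
            invalidated.swap(context.invalidated_paths);
        }
        for (const auto & path : invalidated)
            path_to_cached_znode.erase(path);
    }
};

int main()
{
    NodeCacheSketch cache;
    cache.pruneStaleEntries(); // called at the top of get(), as in the hunk above
}
```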

View File

@ -53,8 +53,8 @@ private:
struct Context
{
std::mutex mutex;
zkutil::ZooKeeperPtr zookeeper;
std::unordered_set<std::string> invalidated_paths;
bool all_paths_invalidated = false;
};
std::shared_ptr<Context> context;

View File

@ -59,7 +59,7 @@ void DataTypeArray::serializeBinary(const IColumn & column, size_t row_num, Writ
const ColumnArray & column_array = static_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = row_num == 0 ? 0 : offsets[row_num - 1];
size_t offset = offsets[row_num - 1];
size_t next_offset = offsets[row_num];
size_t size = next_offset - offset;
@ -113,7 +113,7 @@ namespace
? offset + limit
: size;
ColumnArray::Offset prev_offset = offset == 0 ? 0 : offset_values[offset - 1];
ColumnArray::Offset prev_offset = offset_values[offset - 1];
for (size_t i = offset; i < end; ++i)
{
ColumnArray::Offset current_offset = offset_values[i];
@ -280,7 +280,7 @@ static void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffe
const ColumnArray & column_array = static_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = row_num == 0 ? 0 : offsets[row_num - 1];
size_t offset = offsets[row_num - 1];
size_t next_offset = offsets[row_num];
const IColumn & nested_column = column_array.getData();
@ -369,7 +369,7 @@ void DataTypeArray::serializeTextJSON(const IColumn & column, size_t row_num, Wr
const ColumnArray & column_array = static_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = row_num == 0 ? 0 : offsets[row_num - 1];
size_t offset = offsets[row_num - 1];
size_t next_offset = offsets[row_num];
const IColumn & nested_column = column_array.getData();
@ -396,7 +396,7 @@ void DataTypeArray::serializeTextXML(const IColumn & column, size_t row_num, Wri
const ColumnArray & column_array = static_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = row_num == 0 ? 0 : offsets[row_num - 1];
size_t offset = offsets[row_num - 1];
size_t next_offset = offsets[row_num];
const IColumn & nested_column = column_array.getData();

View File

@ -1142,32 +1142,24 @@ struct ToIntMonotonicity
if (!type.isValueRepresentedByNumber())
return {};
size_t size_of_type = type.getSizeOfValueInMemory();
/// If type is expanding
if (sizeof(T) > size_of_type)
{
/// If convert signed -> signed or unsigned -> signed, then function is monotonic.
if (std::is_signed_v<T> || type.isValueRepresentedByUnsignedInteger())
return {true, true, true};
/// If arguments from the same half, then function is monotonic.
if ((left.get<Int64>() >= 0) == (right.get<Int64>() >= 0))
return {true, true, true};
}
/// If type is same, too. (Enum has separate case, because it is different data type)
/// If the type is the same, the conversion is always monotonic.
/// (Enum is a separate case, because it is a different data type.)
if (checkAndGetDataType<DataTypeNumber<T>>(&type) ||
checkAndGetDataType<DataTypeEnum<T>>(&type))
return { true, true, true };
/// In other cases, if range is unbounded, we don't know, whether function is monotonic or not.
/// Float cases.
/// When converting to Float, the conversion is always monotonic.
if (std::is_floating_point_v<T>)
return {true, true, true};
/// If converting from Float, for monotonicity, arguments must fit in range of result type.
if (WhichDataType(type).isFloat())
{
if (left.isNull() || right.isNull())
return {};
/// If converting from float, for monotonicity, arguments must fit in the range of the result type.
if (WhichDataType(type).isFloat())
{
Float64 left_float = left.get<Float64>();
Float64 right_float = right.get<Float64>();
@ -1178,18 +1170,79 @@ struct ToIntMonotonicity
return {};
}
/// If signedness of type is changing, or converting from Date, DateTime, then arguments must be from same half,
/// and after conversion, resulting values must be from same half.
/// Just in case, it is required in rest of cases too.
if ((left.get<Int64>() >= 0) != (right.get<Int64>() >= 0)
|| (T(left.get<Int64>()) >= 0) != (T(right.get<Int64>()) >= 0))
return {};
/// Integer cases.
/// If type is shrinked, then for monotonicity, all bits other than that fits, must be same.
if (divideByRangeOfType(left.get<UInt64>()) != divideByRangeOfType(right.get<UInt64>()))
return {};
const bool from_is_unsigned = type.isValueRepresentedByUnsignedInteger();
const bool to_is_unsigned = std::is_unsigned_v<T>;
const size_t size_of_from = type.getSizeOfValueInMemory();
const size_t size_of_to = sizeof(T);
const bool left_in_first_half = left.isNull()
? from_is_unsigned
: (left.get<Int64>() >= 0);
const bool right_in_first_half = right.isNull()
? !from_is_unsigned
: (right.get<Int64>() >= 0);
/// Size of type is the same.
if (size_of_from == size_of_to)
{
if (from_is_unsigned == to_is_unsigned)
return {true, true, true};
if (left_in_first_half == right_in_first_half)
return {true};
return {};
}
/// Size of type is expanded.
if (size_of_from < size_of_to)
{
if (from_is_unsigned == to_is_unsigned)
return {true, true, true};
if (!to_is_unsigned)
return {true, true, true};
/// signed -> unsigned. If the arguments are from the same half, the function is monotonic.
if (left_in_first_half == right_in_first_half)
return {true};
return {};
}
/// Size of type is shrunk.
if (size_of_from > size_of_to)
{
/// Function cannot be monotonic on unbounded ranges.
if (left.isNull() || right.isNull())
return {};
if (from_is_unsigned == to_is_unsigned)
{
/// All bits that do not fit into the result type must be the same.
if (divideByRangeOfType(left.get<UInt64>()) == divideByRangeOfType(right.get<UInt64>()))
return {true};
return {};
}
else
{
/// When signedness is changed, it's also required for arguments to be from the same half.
/// And they must be in the same half after converting to the result type.
if (left_in_first_half == right_in_first_half
&& (T(left.get<Int64>()) >= 0) == (T(right.get<Int64>()) >= 0)
&& divideByRangeOfType(left.get<UInt64>()) == divideByRangeOfType(right.get<UInt64>()))
return {true};
return {};
}
}
__builtin_unreachable();
}
};
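The new same-size branch with a signedness change is what relaxes several index-analysis cases from unconditionally monotonic to conditionally monotonic; it is also why the functional test below now expects 2 marks instead of 1 for UInt32 -> Int32 and Date -> Int16. A standalone illustration of where the ordering breaks (on the usual two's-complement platforms):

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    // UInt32 -> Int32 stops being order-preserving once the range crosses 2^31.
    uint32_t a = 2147483647u; // 2^31 - 1
    uint32_t b = 2147483648u; // 2^31
    std::cout << static_cast<int32_t>(a) << '\n'; //  2147483647
    std::cout << static_cast<int32_t>(b) << '\n'; // -2147483648: smaller, though b > a
    // Hence the conversion is monotonic only when both range bounds lie in the
    // same half, which is exactly the left_in_first_half == right_in_first_half check.
}
```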

View File

@ -139,7 +139,7 @@ struct IntegerRoundingComputation
static ALWAYS_INLINE void compute(const T * __restrict in, size_t scale, T * __restrict out)
{
if (scale > size_t(std::numeric_limits<T>::max()))
if (sizeof(T) <= sizeof(scale) && scale > size_t(std::numeric_limits<T>::max()))
*out = 0;
else
*out = compute(*in, scale);
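The added sizeof guard matters when T is wider than size_t: any scale then fits in T, and casting std::numeric_limits<T>::max() to size_t would truncate it into a meaningless bound that could wrongly zero the result. A sketch of the guarded computation (rounding-mode dispatch elided):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>

// If the scale cannot be represented in T at all, the rounded value is 0;
// the sizeof test skips the range check whenever T is wider than size_t.
template <typename T>
T roundDownToScale(T in, size_t scale)
{
    if (sizeof(T) <= sizeof(scale) && scale > size_t(std::numeric_limits<T>::max()))
        return 0; // scale exceeds the entire range of T
    return static_cast<T>(in / static_cast<T>(scale) * static_cast<T>(scale));
}

int main()
{
    std::cout << int(roundDownToScale<int8_t>(100, 1000)) << '\n'; // 0: 1000 > INT8_MAX
    std::cout << roundDownToScale<int64_t>(12345, 1000) << '\n';   // 12000
}
```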

View File

@ -225,7 +225,7 @@ bool FunctionArrayReverse::executeString(const IColumn & src_data, const ColumnA
{
size_t j_reversed = array_size - j - 1;
auto src_pos = src_array_prev_offset + j_reversed == 0 ? 0 : src_string_offsets[src_array_prev_offset + j_reversed - 1];
auto src_pos = src_string_offsets[src_array_prev_offset + j_reversed - 1];
size_t string_size = src_string_offsets[src_array_prev_offset + j_reversed] - src_pos;
memcpySmallAllowReadWriteOverflow15(&res_chars[res_string_prev_offset], &src_data[src_pos], string_size);

View File

@ -271,7 +271,7 @@ struct AggregationMethodString
Arena & /*pool*/) const
{
return StringRef(
&(*chars)[i == 0 ? 0 : (*offsets)[i - 1]],
&(*chars)[(*offsets)[i - 1]],
(i == 0 ? (*offsets)[i] : ((*offsets)[i] - (*offsets)[i - 1])) - 1);
}
};

View File

@ -61,7 +61,7 @@ DNSCacheUpdater::DNSCacheUpdater(Context & context_)
task_handle = pool.addTask([this] () { return run(); });
}
bool DNSCacheUpdater::run()
BackgroundProcessingPoolTaskResult DNSCacheUpdater::run()
{
/// TODO: Ensure that we get the global counter (not thread-local)
auto num_current_network_exceptions = ProfileEvents::global_counters[ProfileEvents::NetworkErrors].load(std::memory_order_relaxed);
@ -79,20 +79,20 @@ bool DNSCacheUpdater::run()
last_num_network_erros = num_current_network_exceptions;
last_update_time = time(nullptr);
return true;
return BackgroundProcessingPoolTaskResult::SUCCESS;
}
catch (...)
{
/// Do not increment ProfileEvents::NetworkErrors twice
if (isNetworkError())
return false;
return BackgroundProcessingPoolTaskResult::ERROR;
throw;
}
}
/// According to BackgroundProcessingPool logic, if a task has done work, it can be executed again immediately.
return false;
return BackgroundProcessingPoolTaskResult::NOTHING_TO_DO;
}
DNSCacheUpdater::~DNSCacheUpdater()

View File

@ -11,6 +11,7 @@ namespace DB
class Context;
class BackgroundProcessingPool;
class BackgroundProcessingPoolTaskInfo;
enum class BackgroundProcessingPoolTaskResult;
/// Add a task to BackgroundProcessingPool that watch for ProfileEvents::NetworkErrors and updates DNS cache if it has increased
@ -25,7 +26,7 @@ public:
static bool incrementNetworkErrorEventsIfNeeded();
private:
bool run();
BackgroundProcessingPoolTaskResult run();
Context & context;
BackgroundProcessingPool & pool;

View File

@ -337,56 +337,6 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node, const Block &
}
}
bool ExpressionAnalyzer::isThereArrayJoin(const ASTPtr & ast)
{
if (typeid_cast<ASTIdentifier *>(ast.get()))
{
return false;
}
else if (ASTFunction * node = typeid_cast<ASTFunction *>(ast.get()))
{
if (node->name == "arrayJoin")
{
return true;
}
if (functionIsInOrGlobalInOperator(node->name))
{
return isThereArrayJoin(node->arguments->children.at(0));
}
if (node->name == "indexHint")
{
return false;
}
if (AggregateFunctionFactory::instance().isAggregateFunctionName(node->name))
{
return false;
}
for (auto & child : node->arguments->children)
{
if (isThereArrayJoin(child))
{
return true;
}
}
return false;
}
else if (typeid_cast<ASTLiteral *>(ast.get()))
{
return false;
}
else
{
for (auto & child : ast->children)
{
if (isThereArrayJoin(child))
{
return true;
}
}
return false;
}
}
void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_subqueries, ExpressionActionsPtr & actions, bool only_consts)
{
@ -1124,9 +1074,4 @@ void ExpressionAnalyzer::collectUsedColumns()
}
Names ExpressionAnalyzer::getRequiredSourceColumns() const
{
return source_columns.getNames();
}
}

View File

@ -150,7 +150,7 @@ public:
/** Get a set of columns that are enough to read from the table to evaluate the expression.
* Columns added from another table by JOIN are not counted.
*/
Names getRequiredSourceColumns() const;
Names getRequiredSourceColumns() const { return source_columns.getNames(); }
/** These methods allow you to build a chain of transformations over a block, that receives values in the desired sections of the query.
*
@ -244,8 +244,6 @@ private:
void addJoinAction(ExpressionActionsPtr & actions, bool only_types) const;
bool isThereArrayJoin(const ASTPtr & ast);
/// If ast is ASTSelectQuery with JOIN, add actions for JOIN key columns.
void getActionsFromJoinKeys(const ASTTableJoin & table_join, bool no_subqueries, ExpressionActionsPtr & actions);

View File

@ -75,7 +75,7 @@ struct JoinKeyGetterString
const Sizes &) const
{
return StringRef(
&(*chars)[i == 0 ? 0 : (*offsets)[i - 1]],
&(*chars)[(*offsets)[i - 1]],
(i == 0 ? (*offsets)[i] : ((*offsets)[i] - (*offsets)[i - 1])) - 1);
}

View File

@ -86,7 +86,7 @@ struct SetMethodString
const Sizes &) const
{
return StringRef(
&(*chars)[i == 0 ? 0 : (*offsets)[i - 1]],
&(*chars)[(*offsets)[i - 1]],
(i == 0 ? (*offsets)[i] : ((*offsets)[i] - (*offsets)[i - 1])) - 1);
}
};

View File

@ -25,6 +25,7 @@ namespace DB
static constexpr double thread_sleep_seconds = 10;
static constexpr double thread_sleep_seconds_random_part = 1.0;
static constexpr double thread_sleep_seconds_if_nothing_to_do = 0.1;
/// For exponential backoff.
static constexpr double task_sleep_seconds_when_no_work_min = 10;
@ -146,7 +147,7 @@ void BackgroundProcessingPool::threadFunction()
while (!shutdown)
{
bool done_work = false;
TaskResult task_result = TaskResult::ERROR;
TaskHandle task;
try
@ -198,7 +199,7 @@ void BackgroundProcessingPool::threadFunction()
{
CurrentMetrics::Increment metric_increment{CurrentMetrics::BackgroundPoolTask};
done_work = task->function();
task_result = task->function();
}
}
catch (...)
@ -216,7 +217,7 @@ void BackgroundProcessingPool::threadFunction()
if (task->removed)
continue;
if (done_work)
if (task_result == TaskResult::SUCCESS)
task->count_no_work_done = 0;
else
++task->count_no_work_done;
@ -225,11 +226,13 @@ void BackgroundProcessingPool::threadFunction()
/// If not, add delay before next run.
Poco::Timestamp next_time_to_execute; /// current time
if (!done_work)
if (task_result == TaskResult::ERROR)
next_time_to_execute += 1000000 * (std::min(
task_sleep_seconds_when_no_work_max,
task_sleep_seconds_when_no_work_min * std::pow(task_sleep_seconds_when_no_work_multiplier, task->count_no_work_done))
+ std::uniform_real_distribution<double>(0, task_sleep_seconds_when_no_work_random_part)(rng));
else if (task_result == TaskResult::NOTHING_TO_DO)
next_time_to_execute += 1000000 * thread_sleep_seconds_if_nothing_to_do;
tasks.erase(task->iterator);
task->iterator = tasks.emplace(next_time_to_execute, task);
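With the tri-state result, only ERROR feeds the exponential backoff; NOTHING_TO_DO earns the short fixed sleep, and SUCCESS reschedules immediately. A sketch of the delay computation follows; note that only the `_min` constant (10) and the nothing-to-do sleep (0.1) appear in this hunk, so the other constants here are illustrative assumptions:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <random>

static constexpr double task_sleep_seconds_when_no_work_min = 10;
static constexpr double task_sleep_seconds_when_no_work_max = 600;        // assumed
static constexpr double task_sleep_seconds_when_no_work_multiplier = 1.1; // assumed
static constexpr double task_sleep_seconds_when_no_work_random_part = 10; // assumed
static constexpr double thread_sleep_seconds_if_nothing_to_do = 0.1;

enum class TaskResult { SUCCESS, ERROR, NOTHING_TO_DO };

// Seconds to wait before the task's next run, mirroring the logic above.
double delaySeconds(TaskResult result, size_t count_no_work_done, std::mt19937 & rng)
{
    if (result == TaskResult::ERROR)
        return std::min(task_sleep_seconds_when_no_work_max,
                   task_sleep_seconds_when_no_work_min
                       * std::pow(task_sleep_seconds_when_no_work_multiplier, count_no_work_done))
            + std::uniform_real_distribution<double>(0, task_sleep_seconds_when_no_work_random_part)(rng);
    if (result == TaskResult::NOTHING_TO_DO)
        return thread_sleep_seconds_if_nothing_to_do;
    return 0; // SUCCESS: eligible to run again immediately
}

int main()
{
    std::mt19937 rng(42);
    for (size_t failures : {0u, 5u, 20u})
        std::cout << "after " << failures << " consecutive failures: "
                  << delaySeconds(TaskResult::ERROR, failures, rng) << " s\n";
}
```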

View File

@ -21,6 +21,12 @@ namespace DB
class BackgroundProcessingPool;
class BackgroundProcessingPoolTaskInfo;
enum class BackgroundProcessingPoolTaskResult
{
SUCCESS,
ERROR,
NOTHING_TO_DO,
};
/** Using a fixed number of threads, perform an arbitrary number of tasks in an infinite loop.
* In this case, one task can run simultaneously from different threads.
* Designed for tasks that perform continuous background work (for example, merge).
@ -31,11 +37,13 @@ class BackgroundProcessingPool
{
public:
/// Returns true, if some useful work was done. In that case, thread will not sleep before next run of this task.
using Task = std::function<bool()>;
using TaskResult = BackgroundProcessingPoolTaskResult;
using Task = std::function<TaskResult()>;
using TaskInfo = BackgroundProcessingPoolTaskInfo;
using TaskHandle = std::shared_ptr<TaskInfo>;
BackgroundProcessingPool(int size_);
size_t getNumberOfThreads() const

View File

@ -581,13 +581,13 @@ bool StorageMergeTree::tryMutatePart()
}
bool StorageMergeTree::backgroundTask()
BackgroundProcessingPoolTaskResult StorageMergeTree::backgroundTask()
{
if (shutdown_called)
return false;
return BackgroundProcessingPoolTaskResult::ERROR;
if (merger_mutator.actions_blocker.isCancelled())
return false;
return BackgroundProcessingPoolTaskResult::ERROR;
try
{
@ -601,16 +601,19 @@ bool StorageMergeTree::backgroundTask()
///TODO: read deduplicate option from table config
if (merge(false /*aggressive*/, {} /*partition_id*/, false /*final*/, false /*deduplicate*/))
return true;
return BackgroundProcessingPoolTaskResult::SUCCESS;
return tryMutatePart();
if (tryMutatePart())
return BackgroundProcessingPoolTaskResult::SUCCESS;
else
return BackgroundProcessingPoolTaskResult::ERROR;
}
catch (Exception & e)
{
if (e.code() == ErrorCodes::ABORTED)
{
LOG_INFO(log, e.message());
return false;
return BackgroundProcessingPoolTaskResult::ERROR;
}
throw;

View File

@ -137,7 +137,7 @@ private:
/// Try to find a single part to mutate and mutate it. If some part was successfully mutated, return true.
bool tryMutatePart();
bool backgroundTask();
BackgroundProcessingPoolTaskResult backgroundTask();
Int64 getCurrentMutationVersion(
const MergeTreeData::DataPartPtr & part,

View File

@ -2053,13 +2053,13 @@ void StorageReplicatedMergeTree::mutationsUpdatingTask()
}
bool StorageReplicatedMergeTree::queueTask()
BackgroundProcessingPoolTaskResult StorageReplicatedMergeTree::queueTask()
{
/// If the replication queue is stopped, exit immediately as if we had successfully executed the task
if (queue.actions_blocker.isCancelled())
{
std::this_thread::sleep_for(std::chrono::milliseconds(5));
return true;
return BackgroundProcessingPoolTaskResult::SUCCESS;
}
/// This object will mark the element of the queue as running.
@ -2077,7 +2077,7 @@ bool StorageReplicatedMergeTree::queueTask()
LogEntryPtr & entry = selected.first;
if (!entry)
return false;
return BackgroundProcessingPoolTaskResult::NOTHING_TO_DO;
time_t prev_attempt_time = entry->last_attempt_time;
@ -2125,7 +2125,7 @@ bool StorageReplicatedMergeTree::queueTask()
bool need_sleep = !res && (entry->last_attempt_time - prev_attempt_time < 10);
/// If there was no exception, you do not need to sleep.
return !need_sleep;
return need_sleep ? BackgroundProcessingPoolTaskResult::ERROR : BackgroundProcessingPoolTaskResult::SUCCESS;
}

View File

@ -427,7 +427,7 @@ private:
/** Performs actions from the queue.
*/
bool queueTask();
BackgroundProcessingPoolTaskResult queueTask();
/// Postcondition:
/// either leader_election is fully initialized (node in ZK is created and the watching thread is launched)

View File

@ -0,0 +1,50 @@
<test>
<name>cryptographic_hashes</name>
<type>once</type>
<stop_conditions>
<all_of>
<total_time_ms>10000</total_time_ms>
</all_of>
<any_of>
<average_speed_not_changing_for_ms>5000</average_speed_not_changing_for_ms>
<total_time_ms>20000</total_time_ms>
</any_of>
</stop_conditions>
<main_metric>
<max_bytes_per_second/>
</main_metric>
<substitutions>
<substitution>
<name>crypto_hash_func</name>
<values>
<value>MD5</value>
<value>SHA1</value>
<value>SHA224</value>
<value>SHA256</value>
<value>halfMD5</value>
<value>sipHash64</value>
<value>sipHash128</value>
</values>
</substitution>
<substitution>
<name>string</name>
<values>
<value>materialize('')</value>
<value>toString(1000000000+number)</value>
<value>materialize('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris sollicitudin nisi ac erat mollis dapibus. Maecenas leo purus, bibendum eu erat eget, iaculis molestie tortor. Phasellus maximus odio nec mauris ultrices dictum. Morbi efficitur nisl eget congue mollis. Vestibulum pharetra diam vitae urna interdum, eget ultricies justo sollicitudin. Nunc sit amet purus id leo tempus dignissim. Donec ac lacus ut orci tempus scelerisque quis ultricies nibh. Nullam lobortis, erat ac ullamcorper interdum, odio nisl elementum quam, ut malesuada massa nunc eget quam. Nam suscipit neque quis sapien ultricies imperdiet. Maecenas augue libero, finibus tristique sagittis et, semper nec arcu. Morbi non tortor ultrices, sollicitudin justo sed, accumsan ligula. Nullam at ipsum in nibh auctor ullamcorper. Nullam laoreet neque id lorem condimentum tincidunt. Nullam vel orci nibh. Ut sit amet sem faucibus, fringilla orci at, lacinia enim. Mauris imperdiet ex id scelerisque eleifend. Ut tincidunt massa nibh, viverra pharetra metus')</value>
</values>
</substitution>
<substitution>
<name>table</name>
<values>
<value>numbers</value>
<value>numbers_mt</value>
</values>
</substitution>
</substitutions>
<query>SELECT ignore({crypto_hash_func}({string})) FROM system.{table} LIMIT 10000000</query>
</test>

View File

@ -0,0 +1,55 @@
<test>
<name>general_purpose_hashes</name>
<type>once</type>
<stop_conditions>
<all_of>
<total_time_ms>10000</total_time_ms>
</all_of>
<any_of>
<average_speed_not_changing_for_ms>3000</average_speed_not_changing_for_ms>
<total_time_ms>20000</total_time_ms>
</any_of>
</stop_conditions>
<main_metric>
<max_bytes_per_second/>
</main_metric>
<substitutions>
<substitution>
<name>gp_hash_func</name>
<values>
<value>cityHash64</value>
<value>farmHash64</value>
<value>metroHash64</value>
<value>murmurHash2_32</value>
<value>murmurHash2_64</value>
<value>murmurHash3_32</value>
<value>murmurHash3_64</value>
<value>murmurHash3_128</value>
<value>javaHash</value>
<value>hiveHash</value>
<value>xxHash32</value>
<value>xxHash64</value>
</values>
</substitution>
<substitution>
<name>string</name>
<values>
<value>materialize('')</value>
<value>toString(1000000000+number)</value>
<value>materialize('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris sollicitudin nisi ac erat mollis dapibus. Maecenas leo purus, bibendum eu erat eget, iaculis molestie tortor. Phasellus maximus odio nec mauris ultrices dictum. Morbi efficitur nisl eget congue mollis. Vestibulum pharetra diam vitae urna interdum, eget ultricies justo sollicitudin. Nunc sit amet purus id leo tempus dignissim. Donec ac lacus ut orci tempus scelerisque quis ultricies nibh. Nullam lobortis, erat ac ullamcorper interdum, odio nisl elementum quam, ut malesuada massa nunc eget quam. Nam suscipit neque quis sapien ultricies imperdiet. Maecenas augue libero, finibus tristique sagittis et, semper nec arcu. Morbi non tortor ultrices, sollicitudin justo sed, accumsan ligula. Nullam at ipsum in nibh auctor ullamcorper. Nullam laoreet neque id lorem condimentum tincidunt. Nullam vel orci nibh. Ut sit amet sem faucibus, fringilla orci at, lacinia enim. Mauris imperdiet ex id scelerisque eleifend. Ut tincidunt massa nibh, viverra pharetra metus')</value>
</values>
</substitution>
<substitution>
<name>table</name>
<values>
<value>numbers</value>
<value>numbers_mt</value>
</values>
</substitution>
</substitutions>
<query>SELECT ignore({gp_hash_func}({string})) FROM system.{table} LIMIT 10000000</query>
</test>

View File

@ -0,0 +1,37 @@
<test>
<name>left pad test</name>
<tags>
<tag>string</tag>
</tags>
<preconditions>
<table_exists>hashfile</table_exists>
</preconditions>
<type>loop</type>
<stop_conditions>
<all_of>
<iterations>5</iterations>
<min_time_not_changing_for_ms>10000</min_time_not_changing_for_ms>
</all_of>
<any_of>
<iterations>50</iterations>
<total_time_ms>60000</total_time_ms>
</any_of>
</stop_conditions>
<main_metric>
<min_time/>
</main_metric>
<query><![CDATA[SELECT max(length(MobilePhoneModel)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(Params)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(Title)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(URLDomain)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(PageCharset)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(Referer)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(URL)) FROM hashfile]]></query>
<query><![CDATA[SELECT max(length(UTMSource)) FROM hashfile]]></query>
</test>

View File

@ -38,7 +38,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO test.unsigned_integer_test_table VALUE
${CLICKHOUSE_CLIENT} --query="INSERT INTO test.enum_test_table VALUES ('hello'), ('world'), ('world'), ('yandex'), ('clickhouse'), ('clickhouse');"
${CLICKHOUSE_CLIENT} --query="INSERT INTO test.date_test_table VALUES (1), (2), (2), (256), (257), (257);"
export CLICKHOUSE_CLIENT=`echo ${CLICKHOUSE_CLIENT} |sed 's/'"${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/debug/g'`
export CLICKHOUSE_CLIENT=`echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g'`
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: String -> UInt64"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.fixed_string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: FixedString -> UInt64"
@ -52,7 +52,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.signed_integer_test_table
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toInt64(val) == 0;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: UInt32 -> Int64"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: UInt32 -> UInt64"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toInt32(val) == 0;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: UInt32 -> Int32"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toInt32(val) == 0;" 2>&1 |grep -q "2 marks to read from" && echo "monotonic int case: UInt32 -> Int32"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toUInt32(val) == 0;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: UInt32 -> UInt32"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toInt16(val) == 0;" 2>&1 |grep -q "4 marks to read from" && echo "monotonic int case: UInt32 -> Int16"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.unsigned_integer_test_table WHERE toUInt16(val) == 0;" 2>&1 |grep -q "4 marks to read from" && echo "monotonic int case: UInt32 -> UInt16"
@ -68,7 +68,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.enum_test_table WHERE toU
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toInt32(val) == 1;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: Date -> Int32"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toUInt32(val) == 1;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: Date -> UInt32"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toInt16(val) == 1;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: Date -> Int16"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toInt16(val) == 1;" 2>&1 |grep -q "2 marks to read from" && echo "monotonic int case: Date -> Int16"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toUInt16(val) == 1;" 2>&1 |grep -q "1 marks to read from" && echo "monotonic int case: Date -> UInt16"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toInt8(val) == 1;" 2>&1 |grep -q "5 marks to read from" && echo "monotonic int case: Date -> Int8"
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test.date_test_table WHERE toUInt8(val) == 1;" 2>&1 |grep -q "5 marks to read from" && echo "monotonic int case: Date -> UInt8"

View File

@ -1 +0,0 @@
SELECT count() FROM test.mouse_clicks

View File

@ -91,7 +91,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--vertical, -E` If specified, use the Vertical format by default to output the result. This is the same as '--format=Vertical'. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
- `--time, -t` If specified, print the query execution time to 'stderr' in non-interactive mode.
- `--stacktrace` If specified, also print the stack trace if an exception occurs.
- `-config-file` The name of the configuration file.
- `--config-file` The name of the configuration file.
### Configuration Files

View File

@ -24,7 +24,7 @@ Kafka SETTINGS
kafka_topic_list = 'topic1,topic2',
kafka_group_name = 'group1',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\n'
kafka_row_delimiter = '\n',
kafka_schema = '',
kafka_num_consumers = 2
```

View File

@ -207,7 +207,7 @@ SELECT nullIf(1, 2)
## assumeNotNull
Results in a value of type [Nullable](../../data_types/nullable.md)i for a non- `Nullable`, if the value is not `NULL`.
Results in a value of type [Nullable](../../data_types/nullable.md) for a non- `Nullable`, if the value is not `NULL`.
```
assumeNotNull(x)

View File

@ -23,7 +23,7 @@ Kafka SETTINGS
kafka_topic_list = 'topic1,topic2',
kafka_group_name = 'group1',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\n'
kafka_row_delimiter = '\n',
kafka_schema = '',
kafka_num_consumers = 2
```

View File

@ -47,6 +47,9 @@ class ClickHousePreprocessor(markdown.util.Processor):
for line in lines:
if '<!--hide-->' not in line:
yield line
else:
for line in lines:
yield line
class ClickHouseMarkdown(markdown.extensions.Extension):

View File

@ -23,7 +23,7 @@ Kafka SETTINGS
kafka_topic_list = 'topic1,topic2',
kafka_group_name = 'group1',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\n'
kafka_row_delimiter = '\n',
kafka_schema = '',
kafka_num_consumers = 2
```

View File

@ -5,7 +5,7 @@ else ()
option (USE_INTERNAL_MYSQL_LIBRARY "Set to FALSE to use system mysqlclient library instead of bundled" OFF)
endif ()
if (USE_INTERNAL_MYSQL_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/README")
if (USE_INTERNAL_MYSQL_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/README.md")
message (WARNING "submodule contrib/mariadb-connector-c is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_MYSQL_LIBRARY 0)
endif ()