Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit 3258e4a160 (Merge)
@@ -1,97 +0,0 @@ (deleted file; class AggregateFunctionAny)
#pragma once

#include <DB/IO/WriteHelpers.h>
#include <DB/IO/ReadHelpers.h>

#include <DB/AggregateFunctions/IUnaryAggregateFunction.h>


namespace DB
{


struct AggregateFunctionAnyData
{
	Field value;
};


/// Takes the first value encountered.
class AggregateFunctionAny final : public IUnaryAggregateFunction<AggregateFunctionAnyData, AggregateFunctionAny>
{
private:
	DataTypePtr type;

public:
	String getName() const { return "any"; }

	DataTypePtr getReturnType() const
	{
		return type;
	}

	void setArgument(const DataTypePtr & argument)
	{
		type = argument;
	}


	void addOne(AggregateDataPtr place, const IColumn & column, size_t row_num) const
	{
		Data & d = data(place);

		if (!d.value.isNull())
			return;
		column.get(row_num, d.value);
	}

	void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs) const
	{
		Data & d = data(place);

		if (d.value.isNull())
			d.value = data(rhs).value;
	}

	void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const
	{
		const Data & d = data(place);

		if (unlikely(d.value.isNull()))
		{
			writeBinary(false, buf);
		}
		else
		{
			writeBinary(true, buf);
			type->serializeBinary(data(place).value, buf);
		}
	}

	void deserializeMerge(AggregateDataPtr place, ReadBuffer & buf) const
	{
		Data & d = data(place);

		bool is_not_null = false;
		readBinary(is_not_null, buf);

		if (is_not_null)
		{
			Field tmp;
			type->deserializeBinary(tmp, buf);

			if (d.value.isNull())
				d.value = tmp;
		}
	}

	void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const
	{
		if (unlikely(data(place).value.isNull()))
			to.insertDefault();
		else
			to.insert(data(place).value);
	}
};

}
@@ -1,83 +0,0 @@ (deleted file; class AggregateFunctionAnyLast)
#pragma once

#include <DB/IO/WriteHelpers.h>
#include <DB/IO/ReadHelpers.h>

#include <DB/AggregateFunctions/IUnaryAggregateFunction.h>


namespace DB
{


struct AggregateFunctionAnyLastData
{
	Field value;
};


/// Takes the last value encountered.
class AggregateFunctionAnyLast final : public IUnaryAggregateFunction<AggregateFunctionAnyLastData, AggregateFunctionAnyLast>
{
private:
	DataTypePtr type;

public:
	String getName() const { return "anyLast"; }

	DataTypePtr getReturnType() const
	{
		return type;
	}

	void setArgument(const DataTypePtr & argument)
	{
		type = argument;
	}


	void addOne(AggregateDataPtr place, const IColumn & column, size_t row_num) const
	{
		column.get(row_num, data(place).value);
	}

	void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs) const
	{
		if (!data(rhs).value.isNull())
			data(place).value = data(rhs).value;
	}

	void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const
	{
		const Data & d = data(place);

		if (unlikely(d.value.isNull()))
		{
			writeBinary(false, buf);
		}
		else
		{
			writeBinary(true, buf);
			type->serializeBinary(data(place).value, buf);
		}
	}

	void deserializeMerge(AggregateDataPtr place, ReadBuffer & buf) const
	{
		bool is_not_null = false;
		readBinary(is_not_null, buf);

		if (is_not_null)
			type->deserializeBinary(data(place).value, buf);
	}

	void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const
	{
		if (unlikely(data(place).value.isNull()))
			to.insertDefault();
		else
			to.insert(data(place).value);
	}
};

}
@@ -1,132 +0,0 @@ (deleted file; class AggregateFunctionsMinMax)
#pragma once

#include <DB/IO/WriteHelpers.h>
#include <DB/IO/ReadHelpers.h>

#include <DB/AggregateFunctions/IUnaryAggregateFunction.h>


namespace DB
{


struct AggregateFunctionMinTraits
{
	static bool better(const Field & lhs, const Field & rhs) { return lhs < rhs; }
	static String name() { return "min"; }
};

struct AggregateFunctionMaxTraits
{
	static bool better(const Field & lhs, const Field & rhs) { return lhs > rhs; }
	static String name() { return "max"; }
};


struct AggregateFunctionsMinMaxData
{
	Field value;
};


/// Takes the minimum (or maximum) value. If there are several such values, the first one encountered.
template <typename Traits>
class AggregateFunctionsMinMax final : public IUnaryAggregateFunction<AggregateFunctionsMinMaxData, AggregateFunctionsMinMax<Traits> >
{
private:
	typedef typename IAggregateFunctionHelper<AggregateFunctionsMinMaxData>::Data Data;
	DataTypePtr type;

public:
	String getName() const { return Traits::name(); }

	DataTypePtr getReturnType() const
	{
		return type;
	}

	void setArgument(const DataTypePtr & argument)
	{
		type = argument;
	}


	void addOne(AggregateDataPtr place, const IColumn & column, size_t row_num) const
	{
		Field value;
		column.get(row_num, value);
		Data & d = this->data(place);

		if (!d.value.isNull())
		{
			if (Traits::better(value, d.value))
				d.value = value;
		}
		else
			d.value = value;
	}

	void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs) const
	{
		Data & d = this->data(place);
		const Data & d_rhs = this->data(rhs);

		if (!d.value.isNull())
		{
			if (Traits::better(d_rhs.value, d.value))
				d.value = d_rhs.value;
		}
		else
			d.value = d_rhs.value;
	}

	void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const
	{
		const Data & d = this->data(place);

		if (unlikely(d.value.isNull()))
		{
			writeBinary(false, buf);
		}
		else
		{
			writeBinary(true, buf);
			type->serializeBinary(this->data(place).value, buf);
		}
	}

	void deserializeMerge(AggregateDataPtr place, ReadBuffer & buf) const
	{
		Data & d = this->data(place);

		bool is_not_null = false;
		readBinary(is_not_null, buf);

		if (is_not_null)
		{
			if (!d.value.isNull())
			{
				Field value_;
				type->deserializeBinary(value_, buf);
				if (Traits::better(value_, d.value))
					d.value = value_;
			}
			else
				type->deserializeBinary(d.value, buf);
		}
	}

	void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const
	{
		if (unlikely(this->data(place).value.isNull()))
			to.insertDefault();
		else
			to.insert(this->data(place).value);
	}
};


typedef AggregateFunctionsMinMax<AggregateFunctionMinTraits> AggregateFunctionMin;
typedef AggregateFunctionsMinMax<AggregateFunctionMaxTraits> AggregateFunctionMax;

}
New file (480 lines): dbms/include/DB/AggregateFunctions/AggregateFunctionsMinMaxAny.h
@@ -0,0 +1,480 @@
#pragma once

#include <DB/IO/WriteHelpers.h>
#include <DB/IO/ReadHelpers.h>

#include <DB/Columns/ColumnVector.h>
#include <DB/Columns/ColumnString.h>

#include <DB/AggregateFunctions/IUnaryAggregateFunction.h>


namespace DB
{

/** Aggregate functions that remember one of the values passed to them.
  * For example: min, max, any, anyLast.
  */


/// For numeric values.
template <typename T>
struct SingleValueDataFixed
{
	typedef SingleValueDataFixed<T> Self;

	bool has_value = false;	/// We must remember whether at least one value was passed. This is needed for AggregateFunctionIf.
	T value;


	bool has() const
	{
		return has_value;
	}

	void insertResultInto(IColumn & to) const
	{
		if (has())
			static_cast<ColumnVector<T> &>(to).getData().push_back(value);
		else
			static_cast<ColumnVector<T> &>(to).insertDefault();
	}

	void write(WriteBuffer & buf, const IDataType & data_type) const
	{
		writeBinary(has(), buf);
		if (has())
			writeBinary(value, buf);
	}

	void read(ReadBuffer & buf, const IDataType & data_type)
	{
		readBinary(has_value, buf);
		if (has())
			readBinary(value, buf);
	}


	void change(const IColumn & column, size_t row_num)
	{
		has_value = true;
		value = static_cast<const ColumnVector<T> &>(column).getData()[row_num];
	}

	void change(const Self & to)
	{
		has_value = true;
		value = to.value;
	}

	void changeFirstTime(const IColumn & column, size_t row_num)
	{
		if (!has())
			change(column, row_num);
	}

	void changeFirstTime(const Self & to)
	{
		if (!has())
			change(to);
	}

	void changeIfLess(const IColumn & column, size_t row_num)
	{
		if (!has() || static_cast<const ColumnVector<T> &>(column).getData()[row_num] < value)
			change(column, row_num);
	}

	void changeIfLess(const Self & to)
	{
		if (!has() || to.value < value)
			change(to);
	}

	void changeIfGreater(const IColumn & column, size_t row_num)
	{
		if (!has() || static_cast<const ColumnVector<T> &>(column).getData()[row_num] > value)
			change(column, row_num);
	}

	void changeIfGreater(const Self & to)
	{
		if (!has() || to.value > value)
			change(to);
	}
};


/** For strings. Short strings are stored in the structure itself; long ones are allocated separately.
  * NOTE It could also be suitable for arrays of numbers.
  */
struct __attribute__((__packed__)) SingleValueDataString
{
	typedef SingleValueDataString Self;

	Int32 size = -1;	/// -1 means there is no value.

	static constexpr Int32 AUTOMATIC_STORAGE_SIZE = 64;
	static constexpr Int32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size);

	union
	{
		char small_data[MAX_SMALL_STRING_SIZE];	/// Including the terminating zero.
		char * large_data;
	};

	~SingleValueDataString()
	{
		if (size > MAX_SMALL_STRING_SIZE)
			free(large_data);
	}

	bool has() const
	{
		return size >= 0;
	}

	const char * getData() const
	{
		return size <= MAX_SMALL_STRING_SIZE ? small_data : large_data;
	}

	StringRef getStringRef() const
	{
		return StringRef(getData(), size);
	}

	void insertResultInto(IColumn & to) const
	{
		if (has())
			static_cast<ColumnString &>(to).insertDataWithTerminatingZero(getData(), size);
		else
			static_cast<ColumnString &>(to).insertDefault();
	}

	void write(WriteBuffer & buf, const IDataType & data_type) const
	{
		writeBinary(size, buf);
		if (has())
			buf.write(getData(), size);
	}

	void read(ReadBuffer & buf, const IDataType & data_type)
	{
		Int32 rhs_size;
		readBinary(rhs_size, buf);

		if (rhs_size >= 0)
		{
			if (rhs_size <= MAX_SMALL_STRING_SIZE)
			{
				if (size > MAX_SMALL_STRING_SIZE)
					free(large_data);

				size = rhs_size;

				if (size > 0)
					buf.read(small_data, size);
			}
			else
			{
				if (size < rhs_size)
				{
					if (size > MAX_SMALL_STRING_SIZE)
						free(large_data);

					large_data = reinterpret_cast<char *>(malloc(rhs_size));
				}

				size = rhs_size;
				buf.read(large_data, size);
			}
		}
		else
		{
			if (size > MAX_SMALL_STRING_SIZE)
				free(large_data);
			size = rhs_size;
		}
	}


	void changeImpl(StringRef value)
	{
		Int32 value_size = value.size;

		if (value_size <= MAX_SMALL_STRING_SIZE)
		{
			if (size > MAX_SMALL_STRING_SIZE)
				free(large_data);

			size = value_size;

			if (size > 0)
				memcpy(small_data, value.data, size);
		}
		else
		{
			if (size < value_size)
			{
				if (size > MAX_SMALL_STRING_SIZE)
					free(large_data);

				large_data = reinterpret_cast<char *>(malloc(value.size));
			}

			size = value_size;
			memcpy(large_data, value.data, size);
		}
	}

	void change(const IColumn & column, size_t row_num)
	{
		changeImpl(static_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num));
	}

	void change(const Self & to)
	{
		changeImpl(to.getStringRef());
	}

	void changeFirstTime(const IColumn & column, size_t row_num)
	{
		if (!has())
			change(column, row_num);
	}

	void changeFirstTime(const Self & to)
	{
		if (!has())
			change(to);
	}

	void changeIfLess(const IColumn & column, size_t row_num)
	{
		if (!has() || static_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num) < getStringRef())
			change(column, row_num);
	}

	void changeIfLess(const Self & to)
	{
		if (!has() || to.getStringRef() < getStringRef())
			change(to);
	}

	void changeIfGreater(const IColumn & column, size_t row_num)
	{
		if (!has() || static_cast<const ColumnString &>(column).getDataAtWithTerminatingZero(row_num) > getStringRef())
			change(column, row_num);
	}

	void changeIfGreater(const Self & to)
	{
		if (!has() || to.getStringRef() > getStringRef())
			change(to);
	}
};


/// For any other value types.
struct SingleValueDataGeneric
{
	typedef SingleValueDataGeneric Self;

	Field value;

	bool has() const
	{
		return !value.isNull();
	}

	void insertResultInto(IColumn & to) const
	{
		if (has())
			to.insert(value);
		else
			to.insertDefault();
	}

	void write(WriteBuffer & buf, const IDataType & data_type) const
	{
		if (!value.isNull())
		{
			writeBinary(true, buf);
			data_type.serializeBinary(value, buf);
		}
		else
			writeBinary(false, buf);
	}

	void read(ReadBuffer & buf, const IDataType & data_type)
	{
		bool is_not_null;
		readBinary(is_not_null, buf);

		if (is_not_null)
			data_type.deserializeBinary(value, buf);
	}

	void change(const IColumn & column, size_t row_num)
	{
		column.get(row_num, value);
	}

	void change(const Self & to)
	{
		value = to.value;
	}

	void changeFirstTime(const IColumn & column, size_t row_num)
	{
		if (!has())
			change(column, row_num);
	}

	void changeFirstTime(const Self & to)
	{
		if (!has())
			change(to);
	}

	void changeIfLess(const IColumn & column, size_t row_num)
	{
		if (!has())
			change(column, row_num);
		else
		{
			Field new_value;
			column.get(row_num, new_value);
			if (new_value < value)
				value = new_value;
		}
	}

	void changeIfLess(const Self & to)
	{
		if (!has() || to.value < value)
			change(to);
	}

	void changeIfGreater(const IColumn & column, size_t row_num)
	{
		if (!has())
			change(column, row_num);
		else
		{
			Field new_value;
			column.get(row_num, new_value);
			if (new_value > value)
				value = new_value;
		}
	}

	void changeIfGreater(const Self & to)
	{
		if (!has() || to.value > value)
			change(to);
	}
};


/** What distinguishes the aggregate functions min, max, any, anyLast from one another:
  * the condition under which the stored value is replaced by a new one
  * (and, of course, the name).
  */

template <typename Data>
struct AggregateFunctionMinData : Data
{
	typedef AggregateFunctionMinData<Data> Self;

	void changeIfBetter(const IColumn & column, size_t row_num) { this->changeIfLess(column, row_num); }
	void changeIfBetter(const Self & to) { this->changeIfLess(to); }

	static const char * name() { return "min"; }
};

template <typename Data>
struct AggregateFunctionMaxData : Data
{
	typedef AggregateFunctionMaxData<Data> Self;

	void changeIfBetter(const IColumn & column, size_t row_num) { this->changeIfGreater(column, row_num); }
	void changeIfBetter(const Self & to) { this->changeIfGreater(to); }

	static const char * name() { return "max"; }
};

template <typename Data>
struct AggregateFunctionAnyData : Data
{
	typedef AggregateFunctionAnyData<Data> Self;

	void changeIfBetter(const IColumn & column, size_t row_num) { this->changeFirstTime(column, row_num); }
	void changeIfBetter(const Self & to) { this->changeFirstTime(to); }

	static const char * name() { return "any"; }
};

template <typename Data>
struct AggregateFunctionAnyLastData : Data
{
	typedef AggregateFunctionAnyLastData<Data> Self;

	void changeIfBetter(const IColumn & column, size_t row_num) { this->change(column, row_num); }
	void changeIfBetter(const Self & to) { this->change(to); }

	static const char * name() { return "anyLast"; }
};


template <typename Data>
class AggregateFunctionsSingleValue final : public IUnaryAggregateFunction<Data, AggregateFunctionsSingleValue<Data> >
{
private:
	DataTypePtr type;

public:
	String getName() const { return Data::name(); }

	DataTypePtr getReturnType() const
	{
		return type;
	}

	void setArgument(const DataTypePtr & argument)
	{
		type = argument;
	}


	void addOne(AggregateDataPtr place, const IColumn & column, size_t row_num) const
	{
		this->data(place).changeIfBetter(column, row_num);
	}

	void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs) const
	{
		this->data(place).changeIfBetter(this->data(rhs));
	}

	void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const
	{
		this->data(place).write(buf, *type.get());
	}

	void deserializeMerge(AggregateDataPtr place, ReadBuffer & buf) const
	{
		Data rhs;	/// For strings this is not quite optimal, since one extra allocation may occur.
		rhs.read(buf, *type.get());

		this->data(place).changeIfBetter(rhs);
	}

	void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const
	{
		this->data(place).insertResultInto(to);
	}
};

}
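The interesting part of the new file is SingleValueDataString: values of up to 60 bytes (including the terminating zero) live inline in the aggregate state, and only longer strings go through malloc. Below is a minimal standalone sketch of that layout — names mirror the header, but this is illustrative code, not ClickHouse code — showing that the packed size/union pair really occupies exactly AUTOMATIC_STORAGE_SIZE bytes:

#include <cstdint>
#include <cstring>
#include <cstdio>

struct __attribute__((__packed__)) SmallString
{
	int32_t size = -1;	// -1 means "no value" (the real struct frees large_data in its destructor)
	static constexpr int32_t STORAGE = 64;
	static constexpr int32_t MAX_SMALL = STORAGE - sizeof(size);
	union
	{
		char small_data[MAX_SMALL];	// including the terminating zero
		char * large_data;		// used when size > MAX_SMALL
	};
	const char * data() const { return size <= MAX_SMALL ? small_data : large_data; }
};

static_assert(sizeof(SmallString) == SmallString::STORAGE, "inline storage must stay 64 bytes");

int main()
{
	SmallString s;
	const char value[] = "short";	// 6 bytes with the terminating zero: stored inline
	s.size = sizeof(value);
	std::memcpy(s.small_data, value, s.size);
	std::printf("%s (%d bytes, inline)\n", s.data(), (int) s.size);
}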
@@ -97,7 +97,8 @@ public:
	void sendExternalTablesData(ExternalTablesData & data);

	/// Send a block of data that has already been serialized (and, if needed, compressed) in advance, to be read from the input.
	void sendPreparedData(ReadBuffer & input, const String & name = "");
	/// The size of the serialized/compressed block may be passed in.
	void sendPreparedData(ReadBuffer & input, size_t size, const String & name = "");

	/// Check whether there is data available to read.
	bool poll(size_t timeout_microseconds = 0);
@@ -161,8 +161,7 @@ public:

 	void insertDefault()
 	{
-		data->insertDefault();
-		getOffsets().push_back(getOffsets().size() == 0 ? 1 : (getOffsets().back() + 1));
+		getOffsets().push_back(getOffsets().size() == 0 ? 0 : getOffsets().back());
 	}

 	ColumnPtr filter(const Filter & filt) const
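The fix above changes what insertDefault() means for an array column: the default value is now an empty array, so nothing is appended to the nested column and the last offset is repeated instead of advanced. A small model of the offsets invariant (plain C++, not the real ColumnArray — array i spans [offsets[i-1], offsets[i]) in the nested data):

#include <vector>
#include <cstdio>

int main()
{
	std::vector<size_t> offsets;		// end position of each array in the nested data
	std::vector<int> nested = {1, 2, 3};	// flattened elements

	offsets.push_back(3);			// row 0: [1, 2, 3]
	// insertDefault(): an empty array — no nested elements are added
	offsets.push_back(offsets.empty() ? 0 : offsets.back());

	size_t row1_size = offsets[1] - offsets[0];
	std::printf("row 1 has %zu elements\n", row1_size);	// prints 0
}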
@@ -387,6 +386,10 @@ private:
 			throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

 		ColumnPtr res = cloneEmpty();
+
+		if (0 == col_size)
+			return res;
+
 		ColumnArray & res_ = typeid_cast<ColumnArray &>(*res);

 		const typename ColumnVector<T>::Container_t & cur_data = typeid_cast<const ColumnVector<T> &>(*data).getData();
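This hunk, and several later ones in the commit, add the same guard: when the column is empty, offsets.back() must not be evaluated, because calling back() on an empty container is undefined behavior. A minimal sketch of the pattern on a simplified replicate():

#include <vector>
#include <cstddef>

std::vector<int> replicate(const std::vector<int> & data, const std::vector<size_t> & offsets)
{
	std::vector<int> res;
	if (offsets.empty())		// the added guard: offsets.back() below would be UB
		return res;

	res.reserve(offsets.back());
	size_t prev = 0;
	for (size_t i = 0; i < data.size(); ++i)
	{
		for (size_t j = prev; j < offsets[i]; ++j)
			res.push_back(data[i]);	// repeat element i (offsets[i] - prev) times
		prev = offsets[i];
	}
	return res;
}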
@@ -431,6 +434,10 @@ private:
 			throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

 		ColumnPtr res = cloneEmpty();
+
+		if (0 == col_size)
+			return res;
+
 		ColumnArray & res_ = typeid_cast<ColumnArray &>(*res);

 		const ColumnString & cur_string = typeid_cast<const ColumnString &>(*data);
@@ -32,11 +32,11 @@ class ColumnConst final : public IColumnConst
public:
	typedef T Type;
	typedef typename NearestFieldType<T>::Type FieldType;

	/// For ColumnConst<Array>, data_type_ must be non-null.
	/// For ColumnConst<String>, data_type_ must be non-null if the data type is FixedString.
	ColumnConst(size_t s_, const T & data_, DataTypePtr data_type_ = DataTypePtr()) : s(s_), data(data_), data_type(data_type_) {}

	std::string getName() const { return "ColumnConst<" + TypeName<T>::get() + ">"; }
	bool isNumeric() const { return IsNumber<T>::value; }
	bool isFixed() const { return IsNumber<T>::value; }
@@ -50,7 +50,7 @@ public:
	{
		return new ColumnConst<T>(length, data, data_type);
	}

	void insert(const Field & x)
	{
		if (x.get<FieldType>() != FieldType(data))
@@ -71,20 +71,15 @@ public:
 			ErrorCodes::CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN);
 		++s;
 	}

 	void insertDefault() { ++s; }

 	ColumnPtr filter(const Filter & filt) const
 	{
 		if (s != filt.size())
 			throw Exception("Size of filter doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

-		size_t new_size = 0;
-		for (Filter::const_iterator it = filt.begin(); it != filt.end(); ++it)
-			if (*it)
-				++new_size;
-
-		return new ColumnConst<T>(new_size, data, data_type);
+		return new ColumnConst<T>(countBytesInFilter(filt), data, data_type);
 	}

 	ColumnPtr replicate(const Offsets_t & offsets) const
@@ -92,7 +87,8 @@ public:
 		if (s != offsets.size())
 			throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

-		return new ColumnConst<T>(offsets.back(), data, data_type);
+		size_t replicated_size = 0 == s ? 0 : offsets.back();
+		return new ColumnConst<T>(replicated_size, data, data_type);
 	}

 	size_t byteSize() const { return sizeof(data) + sizeof(s); }
@@ -48,12 +48,12 @@ public:
	{
		return true;
	}

	size_t byteSize() const
	{
		return chars.size() + sizeof(n);
	}

	Field operator[](size_t index) const
	{
		return String(reinterpret_cast<const char *>(&chars[n * index]), n);
@@ -75,7 +75,7 @@ public:

		if (s.size() > n)
			throw Exception("Too large string '" + s + "' for FixedString column", ErrorCodes::TOO_LARGE_STRING_SIZE);

		size_t old_size = chars.size();
		chars.resize_fill(old_size + n);
		memcpy(&chars[old_size], s.data(), s.size());
@@ -222,7 +222,10 @@ public:

 		ColumnFixedString * res_ = new ColumnFixedString(n);
 		ColumnPtr res = res_;
+
+		if (0 == col_size)
+			return res;

 		Chars_t & res_chars = res_->chars;
 		res_chars.reserve(n * offsets.back());
@@ -31,8 +31,8 @@ private:

	/// The size, including the terminating zero byte.
	size_t __attribute__((__always_inline__)) sizeAt(size_t i) const { return i == 0 ? offsets[0] : (offsets[i] - offsets[i - 1]); }

public:
	/** Create an empty column of strings. */
	ColumnString() {}
@@ -78,7 +78,7 @@ public:
		const String & s = DB::get<const String &>(x);
		size_t old_size = chars.size();
		size_t size_to_append = s.size() + 1;

		chars.resize(old_size + size_to_append);
		memcpy(&chars[old_size], s.c_str(), size_to_append);
		offsets.push_back((offsets.size() == 0 ? 0 : offsets.back()) + size_to_append);
@@ -90,7 +90,7 @@ public:
		size_t old_size = chars.size();
		size_t size_to_append = src.sizeAt(n);
		size_t offset = src.offsetAt(n);

		chars.resize(old_size + size_to_append);
		memcpy(&chars[old_size], &src.chars[offset], size_to_append);
		offsets.push_back((offsets.size() == 0 ? 0 : offsets.back()) + size_to_append);
@@ -132,7 +132,7 @@ public:
		res_->chars.resize(nested_length);
		memcpy(&res_->chars[0], &chars[nested_offset], nested_length);

		Offsets_t & res_offsets = res_->offsets;

		if (start == 0)
@@ -173,7 +173,7 @@ public:
		{
			if (!filt[i])
				continue;

			size_t string_offset = i == 0 ? 0 : offsets[i - 1];
			size_t string_size = offsets[i] - string_offset;
@@ -248,12 +248,12 @@ public:
			reinterpret_cast<const char *>(&chars[offsetAt(n)]),
			reinterpret_cast<const char *>(&rhs.chars[rhs.offsetAt(m)]));
	}

	/// A version of compareAt for locale-sensitive string comparison.
	int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, const Collator & collator) const
	{
		const ColumnString & rhs = static_cast<const ColumnString &>(rhs_);

		return collator.compare(
			reinterpret_cast<const char *>(&chars[offsetAt(n)]), sizeAt(n),
			reinterpret_cast<const char *>(&rhs.chars[rhs.offsetAt(m)]), rhs.sizeAt(m));
@@ -305,9 +305,9 @@ public:
	{
		const ColumnString & parent;
		const Collator & collator;

		lessWithCollation(const ColumnString & parent_, const Collator & collator_) : parent(parent_), collator(collator_) {}

		bool operator()(size_t lhs, size_t rhs) const
		{
			int res = collator.compare(
@@ -354,6 +354,9 @@ public:
 		ColumnString * res_ = new ColumnString;
 		ColumnPtr res = res_;

+		if (0 == col_size)
+			return res;
+
 		Chars_t & res_chars = res_->chars;
 		Offsets_t & res_offsets = res_->offsets;
 		res_chars.reserve(chars.size() / col_size * replicate_offsets.back());
@@ -222,11 +222,51 @@ public:
 		typename Self::Container_t & res_data = res_->getData();
 		res_data.reserve(size);

-		for (size_t i = 0; i < size; ++i)
-			if (filt[i])
-				res_data.push_back(data[i]);
+		/** A slightly more optimized version.
+		  * It relies on the assumption that runs of consecutive values
+		  * often either pass the filter entirely or fail it entirely.
+		  * So we optimistically check chunks of 16 values at a time.
+		  */
+		const UInt8 * filt_pos = &filt[0];
+		const UInt8 * filt_end = filt_pos + size;
+		const UInt8 * filt_end_sse = filt_pos + size / 16 * 16;
+		const T * data_pos = &data[0];

-		return res;
+		const __m128i zero16 = _mm_set1_epi8(0);
+
+		while (filt_pos < filt_end_sse)
+		{
+			int mask = _mm_movemask_epi8(_mm_cmpgt_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(filt_pos)), zero16));
+
+			if (0 == mask)
+			{
+				/// Insert nothing.
+			}
+			else if (0xFFFF == mask)
+			{
+				res_data.insert_assume_reserved(data_pos, data_pos + 16);
+			}
+			else
+			{
+				for (size_t i = 0; i < 16; ++i)
+					if (filt_pos[i])
+						res_data.push_back(data_pos[i]);
+			}
+
+			filt_pos += 16;
+			data_pos += 16;
+		}
+
+		while (filt_pos < filt_end)
+		{
+			if (*filt_pos)
+				res_data.push_back(*data_pos);
+
+			++filt_pos;
+			++data_pos;
+		}
+
+		return res;
 	}

 	ColumnPtr permute(const IColumn::Permutation & perm, size_t limit) const
@@ -256,6 +296,9 @@ public:
 		if (size != offsets.size())
 			throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

+		if (0 == size)
+			return new Self;
+
 		Self * res_ = new Self;
 		ColumnPtr res = res_;
 		typename Self::Container_t & res_data = res_->getData();
@@ -198,4 +198,8 @@ public:
 };

+
+/// Counts how many bytes in filt are greater than zero.
+size_t countBytesInFilter(const IColumn::Filter & filt);
+
 }
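Only the declaration of countBytesInFilter lands in this header; ColumnConst::filter above now calls it. A plausible definition — a sketch under the assumption that Filter is an array of UInt8 flags, modeled here with std::vector — just counts the non-zero bytes:

#include <vector>
#include <cstddef>

size_t countBytesInFilter(const std::vector<unsigned char> & filt)
{
	size_t count = 0;
	for (size_t i = 0; i < filt.size(); ++i)
		if (filt[i])
			++count;	// any non-zero byte means "row passes"
	return count;
}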
@@ -6,7 +6,7 @@

namespace DB
{

/** A base class for constant columns containing a value that does not fit into a Field.
  * It is not a full-fledged column and is used in a special way.
  */
@@ -14,16 +14,16 @@ class IColumnDummy : public IColumn
{
public:
	IColumnDummy(size_t s_) : s(s_) {}

	virtual ColumnPtr cloneDummy(size_t s_) const = 0;

	ColumnPtr cloneResized(size_t s_) const { return cloneDummy(s_); }
	bool isConst() const { return true; }
	size_t size() const { return s; }
	void insertDefault() { ++s; }
	size_t byteSize() const { return 0; }
	int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const { return 0; }

	Field operator[](size_t n) const { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
	void get(size_t n, Field & res) const { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); };
	void insert(const Field & x) { throw Exception("Cannot insert element into " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
@@ -39,42 +39,42 @@ public:
 	{
 		return cloneDummy(length);
 	}

 	ColumnPtr filter(const Filter & filt) const
 	{
 		size_t new_size = 0;
 		for (Filter::const_iterator it = filt.begin(); it != filt.end(); ++it)
 			if (*it)
 				++new_size;

 		return cloneDummy(new_size);
 	}

 	ColumnPtr permute(const Permutation & perm, size_t limit) const
 	{
 		if (s != perm.size())
 			throw Exception("Size of permutation doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

 		return cloneDummy(limit ? std::min(s, limit) : s);
 	}

 	void getPermutation(bool reverse, size_t limit, Permutation & res) const
 	{
 		res.resize(s);
 		for (size_t i = 0; i < s; ++i)
 			res[i] = i;
 	}

 	ColumnPtr replicate(const Offsets_t & offsets) const
 	{
 		if (s != offsets.size())
 			throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

-		return cloneDummy(offsets.back());
+		return cloneDummy(s == 0 ? 0 : offsets.back());
 	}

 private:
 	size_t s;
 };

 }
New file (27 lines): dbms/include/DB/Common/Macros.h
@@ -0,0 +1,27 @@
#pragma once

#include <DB/Core/Types.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <map>

namespace DB
{

/** Expands macros from the config in a string.
  */
class Macros
{
public:
	Macros();
	Macros(const Poco::Util::AbstractConfiguration & config, const String & key);

	/// Replace substrings of the form {macro_name} in the string with the value for macro_name obtained from the config.
	String expand(const String & s) const;

private:
	typedef std::map<String, String> MacroMap;

	MacroMap macros;
};

}
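Only the declaration of expand() is in this header; the definition lives in a .cpp that is not part of this diff. A hedged sketch of what it has to do, per the comment above (free-function form; 'macros' stands for the map the constructor loads from the config, and error handling is simplified to std::runtime_error):

#include <map>
#include <stdexcept>
#include <string>

std::string expand(const std::string & s, const std::map<std::string, std::string> & macros)
{
	std::string res;
	size_t pos = 0;
	while (pos < s.size())
	{
		size_t begin = s.find('{', pos);
		if (begin == std::string::npos)
		{
			res.append(s, pos, std::string::npos);	// no more macros: copy the tail
			break;
		}
		size_t end = s.find('}', begin);
		if (end == std::string::npos)
			throw std::runtime_error("Unbalanced { in string with macros: " + s);

		res.append(s, pos, begin - pos);		// copy the literal part
		std::string name = s.substr(begin + 1, end - begin - 1);
		std::map<std::string, std::string>::const_iterator it = macros.find(name);
		if (it == macros.end())
			throw std::runtime_error("No macro " + name + " in config");
		res += it->second;				// substitute the value
		pos = end + 1;
	}
	return res;
}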
@@ -248,6 +248,12 @@ public:
 		if (required_capacity > capacity())
 			reserve(round_up_to_power_of_two(required_capacity));

+		insert_assume_reserved(from_begin, from_end);
+	}
+
+	template <typename It1, typename It2>
+	void insert_assume_reserved(It1 from_begin, It2 from_end)
+	{
 		size_t bytes_to_copy = byte_size(from_end - from_begin);
 		memcpy(c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
 		c_end += bytes_to_copy;
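This hunk splits insertion into a checked and an unchecked path: insert() pays for the capacity check and power-of-two growth once, and insert_assume_reserved() is the raw memcpy fast path — the SSE filter loop above calls it directly after a single up-front reserve(). A simplified byte-oriented sketch of that contract (not the real PODArray; allocation failure handling and freeing omitted for brevity):

#include <cstring>
#include <cstdlib>
#include <cstddef>

struct BytesArray
{
	char * begin = nullptr, * end = nullptr, * cap = nullptr;

	void reserve(size_t n)
	{
		size_t size = end - begin;
		char * mem = static_cast<char *>(std::realloc(begin, n));
		begin = mem; end = mem + size; cap = mem + n;
	}

	static size_t round_up_to_power_of_two(size_t n)
	{
		size_t r = 1;
		while (r < n) r <<= 1;
		return r;
	}

	void insert(const char * from, const char * to)
	{
		size_t required = (end - begin) + (to - from);
		if (required > size_t(cap - begin))
			reserve(round_up_to_power_of_two(required));	// amortized growth
		insert_assume_reserved(from, to);
	}

	void insert_assume_reserved(const char * from, const char * to)
	{
		std::memcpy(end, from, to - from);	// no capacity check: the caller reserved
		end += to - from;
	}
};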
@@ -64,3 +64,5 @@
 #define DBMS_MIN_REVISION_WITH_TOTALS_EXTREMES 35265
 #define DBMS_MIN_REVISION_WITH_STRING_QUERY_ID 39002
 #define DBMS_MIN_REVISION_WITH_TEMPORARY_TABLES 50264
+
+#define DBMS_DISTRIBUTED_DIRECTORY_MONITOR_SLEEP_TIME_MS 100
@@ -255,6 +255,10 @@ namespace ErrorCodes
 	INVALID_NESTED_NAME,
 	CORRUPTED_DATA,
 	INCORRECT_MARK,
+	INVALID_PARTITION_NAME,
+	NOT_LEADER,
+	NOT_ENOUGH_BLOCK_NUMBERS,
+	NO_SUCH_REPLICA,

 	POCO_EXCEPTION = 1000,
 	STD_EXCEPTION,
@@ -582,11 +582,39 @@ private:
 		writeQuoted(x, wb);
 		return res;
 	}

+	/** Unlike writeFloatText (and writeQuoted), if the number looks like an integer after formatting, a decimal point is still appended,
+	  * so that the number can be parsed back as Float64 by the query parser (otherwise it would be parsed as an integer).
+	  *
+	  * At the same time, it does not leave trailing zeros on the right.
+	  *
+	  * NOTE: With such a round trip, precision may be lost.
+	  */
+	static inline String formatFloat(Float64 x)
+	{
+		char tmp[24];
+		int res = std::snprintf(tmp, 23, "%.*g", WRITE_HELPERS_DEFAULT_FLOAT_PRECISION, x);
+
+		if (res >= 23 || res <= 0)
+			throw Exception("Cannot print float or double number", ErrorCodes::CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER);
+
+		size_t string_size = res;
+
+		tmp[23] = '\0';
+		if (string_size == strspn(tmp, "-0123456789"))
+		{
+			tmp[string_size] = '.';
+			++string_size;
+		}
+
+		return {tmp, string_size};
+	}
+
 public:
 	String operator() (const Null & x) const { return "NULL"; }
 	String operator() (const UInt64 & x) const { return formatQuoted(x); }
 	String operator() (const Int64 & x) const { return formatQuoted(x); }
-	String operator() (const Float64 & x) const { return formatQuoted(x); }
+	String operator() (const Float64 & x) const { return formatFloat(x); }
 	String operator() (const String & x) const { return formatQuoted(x); }

 	String operator() (const Array & x) const
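The "looks like an integer" test above hinges on strspn: the formatted string consists only of '-' and digits exactly when strspn covers its whole length, and only then is a trailing '.' appended. A standalone demo of just that test (the inputs stand in for snprintf("%g") outputs):

#include <cstdio>
#include <cstring>

int main()
{
	const char * inputs[] = {"1", "2.5", "-3", "1e+20"};
	for (const char * s : inputs)
	{
		size_t len = std::strlen(s);
		bool looks_integral = (len == std::strspn(s, "-0123456789"));
		std::printf("%-6s -> %s%s\n", s, s, looks_integral ? "." : "");
		// prints: 1 -> 1.   2.5 -> 2.5   -3 -> -3.   1e+20 -> 1e+20
	}
}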
@@ -689,7 +717,7 @@ namespace DB
{
	class ReadBuffer;
	class WriteBuffer;

	/// It is assumed that all elements of the array have the same type.
	inline void readBinary(Array & x, ReadBuffer & buf)
	{
@@ -745,7 +773,7 @@ namespace DB
			};
		}
	}

	inline void readText(Array & x, ReadBuffer & buf) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
	inline void readQuoted(Array & x, ReadBuffer & buf) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
@@ -758,7 +786,7 @@ namespace DB
		type = x.front().getType();
		DB::writeBinary(type, buf);
		DB::writeBinary(size, buf);

		for (Array::const_iterator it = x.begin(); it != x.end(); ++it)
		{
			switch (type)
@@ -792,13 +820,13 @@ namespace DB
			};
		}
	}

	inline void writeText(const Array & x, WriteBuffer & buf)
	{
		DB::String res = apply_visitor(DB::FieldVisitorToString(), DB::Field(x));
		buf.write(res.data(), res.size());
	}

	inline void writeQuoted(const Array & x, WriteBuffer & buf) { throw Exception("Cannot write Array quoted.", ErrorCodes::NOT_IMPLEMENTED); }
}
@@ -26,7 +26,7 @@ struct StringRef

 typedef std::vector<StringRef> StringRefs;

-inline bool operator==(StringRef lhs, StringRef rhs)
+inline bool operator== (StringRef lhs, StringRef rhs)
 {
 	/// For some reason this is faster than: return lhs.size == rhs.size && 0 == memcmp(lhs.data, rhs.data, lhs.size);

@@ -40,18 +40,21 @@ inline bool operator==(StringRef lhs, StringRef rhs)
 	return true;
 }

-inline bool operator!=(StringRef lhs, StringRef rhs)
+inline bool operator!= (StringRef lhs, StringRef rhs)
 {
 	return !(lhs == rhs);
 }

-inline bool operator<(StringRef lhs, StringRef rhs)
+inline bool operator< (StringRef lhs, StringRef rhs)
 {
 	int cmp = memcmp(lhs.data, rhs.data, std::min(lhs.size, rhs.size));
-	if (cmp == 0)
-		return lhs.size < rhs.size;
-	else
-		return cmp < 0;
+	return cmp < 0 || (cmp == 0 && lhs.size < rhs.size);
 }

+inline bool operator> (StringRef lhs, StringRef rhs)
+{
+	int cmp = memcmp(lhs.data, rhs.data, std::min(lhs.size, rhs.size));
+	return cmp > 0 || (cmp == 0 && lhs.size > rhs.size);
+}
+
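The rewritten operator< (and the new operator>) fold the two branches into one expression: memcmp over the common prefix decides, and only a tie falls back to comparing lengths, which is ordinary lexicographic order. A quick standalone sanity check of that logic (simplified struct, same expression):

#include <cassert>
#include <cstring>
#include <cstddef>
#include <algorithm>

struct Ref { const char * data; size_t size; };

bool less(Ref lhs, Ref rhs)
{
	int cmp = std::memcmp(lhs.data, rhs.data, std::min(lhs.size, rhs.size));
	return cmp < 0 || (cmp == 0 && lhs.size < rhs.size);
}

int main()
{
	assert(less({"abc", 3}, {"abd", 3}));	// differs within the common prefix
	assert(less({"ab", 2}, {"abc", 3}));	// prefix ties: the shorter one is less
	assert(!less({"abc", 3}, {"abc", 3}));	// equal strings are not less
}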
@@ -33,6 +33,8 @@ public:
 		output->write(res);
 	}

+	void flush() { output->flush(); }
+
 private:
 	BlockOutputStreamPtr output;
 	NamesAndTypesListPtr required_columns;
@@ -20,6 +20,8 @@ public:
 	void writeField(const Field & field);
 	void writeRowEndDelimiter();

+	void flush() { ostr.next(); }
+
 protected:
 	WriteBuffer & ostr;
 	const Block sample;
@@ -17,6 +17,8 @@ public:
 	void write(const Block & block);
 	void writePrefix() { row_output->writePrefix(); }
 	void writeSuffix() { row_output->writeSuffix(); }

+	void flush() { row_output->flush(); }
+
 	void setRowsBeforeLimit(size_t rows_before_limit);
 	void setTotals(const Block & totals);
@@ -31,6 +31,10 @@ public:
 	  */
 	virtual void writePrefix() {}
 	virtual void writeSuffix() {}

+	/** Flush any buffered data.
+	  */
+	virtual void flush() {}
+
 	/** Methods for setting additional information for output, in formats that support it.
 	  */
@@ -32,6 +32,9 @@ public:
 	virtual void writePrefix() {};	/// delimiter before the start of the result
 	virtual void writeSuffix() {};	/// delimiter after the end of the result

+	/** Flush any buffered data. */
+	virtual void flush() {}
+
 	/** Methods for setting additional information for output, in formats that support it.
 	  */
 	virtual void setRowsBeforeLimit(size_t rows_before_limit) {}
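All of the flush() hunks in this commit follow one pattern, sketched below with simplified stand-ins (not the real classes): the stream interfaces gain a virtual no-op flush(), and each concrete stream overrides it by pushing its WriteBuffer's pending data downstream via next().

class WriteBuffer
{
public:
	void next() { /* push buffered bytes downstream */ }
};

class IBlockOutputStream
{
public:
	virtual void flush() {}		// default: nothing is buffered
	virtual ~IBlockOutputStream() {}
};

class BufferedBlockOutputStream : public IBlockOutputStream
{
public:
	BufferedBlockOutputStream(WriteBuffer & ostr_) : ostr(ostr_) {}
	void flush() { ostr.next(); }	// same shape as the hunks above
private:
	WriteBuffer & ostr;
};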
@@ -25,6 +25,8 @@ public:
 	void writeRowEndDelimiter();
 	void writePrefix();
 	void writeSuffix();

+	void flush() { ostr.next(); dst_ostr.next(); }
+
 	void setRowsBeforeLimit(size_t rows_before_limit_)
 	{
@@ -41,7 +43,8 @@ protected:
 	virtual void writeTotals();
 	virtual void writeExtremes();

-	WriteBufferValidUTF8 ostr;
+	WriteBuffer & dst_ostr;
+	WriteBufferValidUTF8 ostr;	/// Validates and writes into dst_ostr.
 	size_t field_number;
 	size_t row_count;
 	bool applied_limit;
@@ -15,6 +15,8 @@ public:
 	NativeBlockOutputStream(WriteBuffer & ostr_) : ostr(ostr_) {}
 	void write(const Block & block);

+	void flush() { ostr.next(); }
+
 private:
 	WriteBuffer & ostr;
 };
@@ -18,6 +18,8 @@ public:
 	void write(const Block & block);
 	void writeSuffix();

+	void flush() { ostr.next(); }
+
 	void setTotals(const Block & totals_) { totals = totals_; }
 	void setExtremes(const Block & extremes_) { extremes = extremes_; }
@@ -18,8 +18,8 @@ namespace DB
 class PushingToViewsBlockOutputStream : public IBlockOutputStream
 {
 public:
-	PushingToViewsBlockOutputStream(String database_, String table_, const Context &context_, ASTPtr query_ptr_)
-		:database(database_), table(table_), context(context_), query_ptr(query_ptr_)
+	PushingToViewsBlockOutputStream(String database_, String table_, const Context & context_, ASTPtr query_ptr_)
+		: database(database_), table(table_), context(context_), query_ptr(query_ptr_)
 	{
 		if (database.empty())
 			database = context.getCurrentDatabase();
@@ -14,8 +14,8 @@ namespace DB
 class RemoteBlockOutputStream : public IBlockOutputStream
 {
 public:
-	RemoteBlockOutputStream(Connection & connection_, const String & query_)
-		: connection(connection_), query(query_)
+	RemoteBlockOutputStream(Connection & connection_, const String & query_, Settings * settings_ = nullptr)
+		: connection(connection_), query(query_), settings(settings_)
 	{
 	}
@@ -26,7 +26,7 @@ public:
 	  */
 	Block sendQueryAndGetSampleBlock()
 	{
-		connection.sendQuery(query);
+		connection.sendQuery(query, "", QueryProcessingStage::Complete, settings);
 		sent_query = true;

 		Connection::Packet packet = connection.receivePacket();
@@ -64,12 +64,12 @@ public:

 	/// Send a block of data that has already been serialized (and, if needed, compressed) in advance, to be read from the input.
-	void writePrepared(ReadBuffer & input)
+	void writePrepared(ReadBuffer & input, size_t size = 0)
 	{
 		if (!sent_query)
 			sendQueryAndGetSampleBlock();	/// There is no way to use sample_block here.

-		connection.sendPreparedData(input);
+		connection.sendPreparedData(input, size);
 	}
@@ -95,6 +95,7 @@ public:
 private:
 	Connection & connection;
 	String query;
+	Settings * settings;
 	Block sample_block;

 	bool sent_query = false;
@@ -16,6 +16,8 @@ public:
 	TabSeparatedBlockOutputStream(WriteBuffer & ostr_) : ostr(ostr_) {}
 	void write(const Block & block);

+	void flush() { ostr.next(); }
+
 private:
 	WriteBuffer & ostr;
 };
@@ -26,6 +26,8 @@ public:
 	void writePrefix();
 	void writeSuffix();

+	void flush() { ostr.next(); }
+
 	void setTotals(const Block & totals_) { totals = totals_; }
 	void setExtremes(const Block & extremes_) { extremes = extremes_; }
@@ -26,6 +26,8 @@ public:
 	void writeRowEndDelimiter();
 	void writeRowBetweenDelimiter();

+	void flush() { ostr.next(); }
+
 private:
 	WriteBuffer & ostr;
 	const Block sample;
@@ -25,6 +25,8 @@ public:
 	void writeRowStartDelimiter();
 	void writeRowBetweenDelimiter();

+	void flush() { ostr.next(); }
+
 private:
 	WriteBuffer & ostr;
 	const Block sample;
@@ -14,7 +14,7 @@ namespace DB
   */

 template<typename A, typename B, typename Op>
-struct BinaryOperationImpl
+struct BinaryOperationImplBase
 {
 	typedef typename Op::ResultType ResultType;

@@ -45,6 +45,11 @@ struct BinaryOperationImpl
 	}
 };

+template<typename A, typename B, typename Op>
+struct BinaryOperationImpl : BinaryOperationImplBase<A, B, Op>
+{
+};
+
 template<typename A, typename Op>
 struct UnaryOperationImpl
 {
@@ -221,7 +226,7 @@ struct NegateImpl

 	static inline ResultType apply(A a)
 	{
-		return -a;
+		return -static_cast<ResultType>(a);
 	}
 };

@@ -570,4 +575,140 @@ typedef FunctionBinaryArithmetic<BitShiftRightImpl, NameBitShiftRight> Functi


/// Optimizations for integer division by a constant.

#define LIBDIVIDE_USE_SSE2 1
#include <libdivide.h>


template <typename A, typename B>
struct DivideIntegralByConstantImpl
	: BinaryOperationImplBase<A, B, DivideIntegralImpl<A, B>>
{
	typedef typename DivideIntegralImpl<A, B>::ResultType ResultType;

	static void vector_constant(const PODArray<A> & a, B b, PODArray<ResultType> & c)
	{
		if (unlikely(b == 0))
			throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"

		if (unlikely(std::is_signed<B>::value && b == -1))
		{
			size_t size = a.size();
			for (size_t i = 0; i < size; ++i)
				c[i] = -c[i];
			return;
		}

#pragma GCC diagnostic pop

		libdivide::divider<A> divider(b);

		size_t size = a.size();
		const A * a_pos = &a[0];
		const A * a_end = a_pos + size;
		ResultType * c_pos = &c[0];
		static constexpr size_t values_per_sse_register = 16 / sizeof(A);
		const A * a_end_sse = a_pos + size / values_per_sse_register * values_per_sse_register;

		while (a_pos < a_end_sse)
		{
			_mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos),
				_mm_loadu_si128(reinterpret_cast<const __m128i *>(a_pos)) / divider);

			a_pos += values_per_sse_register;
			c_pos += values_per_sse_register;
		}

		while (a_pos < a_end)
		{
			*c_pos = *a_pos / divider;
			++a_pos;
			++c_pos;
		}
	}
};

template <typename A, typename B>
struct ModuloByConstantImpl
	: BinaryOperationImplBase<A, B, ModuloImpl<A, B>>
{
	typedef typename ModuloImpl<A, B>::ResultType ResultType;

	static void vector_constant(const PODArray<A> & a, B b, PODArray<ResultType> & c)
	{
		if (unlikely(b == 0))
			throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"

		if (unlikely((std::is_signed<B>::value && b == -1) || b == 1))
		{
			size_t size = a.size();
			for (size_t i = 0; i < size; ++i)
				c[i] = 0;
			return;
		}

#pragma GCC diagnostic pop

		libdivide::divider<A> divider(b);

		/// Here the SSE variant from libdivide could not be made to give an advantage.
		size_t size = a.size();
		for (size_t i = 0; i < size; ++i)
			c[i] = a[i] - (a[i] / divider) * b;	/// NOTE: the semantics of division with remainder for negative numbers may not be preserved.
	}
};


/** Specializations are provided for dividing numbers of the types UInt64 and UInt32 by numbers of the same signedness.
  * This could be extended to all possible combinations, but it would require more code.
  */

template <> struct BinaryOperationImpl<UInt64, UInt8, DivideIntegralImpl<UInt64, UInt8>> : DivideIntegralByConstantImpl<UInt64, UInt8> {};
template <> struct BinaryOperationImpl<UInt64, UInt16, DivideIntegralImpl<UInt64, UInt16>> : DivideIntegralByConstantImpl<UInt64, UInt16> {};
template <> struct BinaryOperationImpl<UInt64, UInt32, DivideIntegralImpl<UInt64, UInt32>> : DivideIntegralByConstantImpl<UInt64, UInt32> {};
template <> struct BinaryOperationImpl<UInt64, UInt64, DivideIntegralImpl<UInt64, UInt64>> : DivideIntegralByConstantImpl<UInt64, UInt64> {};

template <> struct BinaryOperationImpl<UInt32, UInt8, DivideIntegralImpl<UInt32, UInt8>> : DivideIntegralByConstantImpl<UInt32, UInt8> {};
template <> struct BinaryOperationImpl<UInt32, UInt16, DivideIntegralImpl<UInt32, UInt16>> : DivideIntegralByConstantImpl<UInt32, UInt16> {};
template <> struct BinaryOperationImpl<UInt32, UInt32, DivideIntegralImpl<UInt32, UInt32>> : DivideIntegralByConstantImpl<UInt32, UInt32> {};
template <> struct BinaryOperationImpl<UInt32, UInt64, DivideIntegralImpl<UInt32, UInt64>> : DivideIntegralByConstantImpl<UInt32, UInt64> {};

template <> struct BinaryOperationImpl<Int64, Int8, DivideIntegralImpl<Int64, Int8>> : DivideIntegralByConstantImpl<Int64, Int8> {};
template <> struct BinaryOperationImpl<Int64, Int16, DivideIntegralImpl<Int64, Int16>> : DivideIntegralByConstantImpl<Int64, Int16> {};
template <> struct BinaryOperationImpl<Int64, Int32, DivideIntegralImpl<Int64, Int32>> : DivideIntegralByConstantImpl<Int64, Int32> {};
template <> struct BinaryOperationImpl<Int64, Int64, DivideIntegralImpl<Int64, Int64>> : DivideIntegralByConstantImpl<Int64, Int64> {};

template <> struct BinaryOperationImpl<Int32, Int8, DivideIntegralImpl<Int32, Int8>> : DivideIntegralByConstantImpl<Int32, Int8> {};
template <> struct BinaryOperationImpl<Int32, Int16, DivideIntegralImpl<Int32, Int16>> : DivideIntegralByConstantImpl<Int32, Int16> {};
template <> struct BinaryOperationImpl<Int32, Int32, DivideIntegralImpl<Int32, Int32>> : DivideIntegralByConstantImpl<Int32, Int32> {};
template <> struct BinaryOperationImpl<Int32, Int64, DivideIntegralImpl<Int32, Int64>> : DivideIntegralByConstantImpl<Int32, Int64> {};


template <> struct BinaryOperationImpl<UInt64, UInt8, ModuloImpl<UInt64, UInt8>> : ModuloByConstantImpl<UInt64, UInt8> {};
template <> struct BinaryOperationImpl<UInt64, UInt16, ModuloImpl<UInt64, UInt16>> : ModuloByConstantImpl<UInt64, UInt16> {};
template <> struct BinaryOperationImpl<UInt64, UInt32, ModuloImpl<UInt64, UInt32>> : ModuloByConstantImpl<UInt64, UInt32> {};
template <> struct BinaryOperationImpl<UInt64, UInt64, ModuloImpl<UInt64, UInt64>> : ModuloByConstantImpl<UInt64, UInt64> {};

template <> struct BinaryOperationImpl<UInt32, UInt8, ModuloImpl<UInt32, UInt8>> : ModuloByConstantImpl<UInt32, UInt8> {};
template <> struct BinaryOperationImpl<UInt32, UInt16, ModuloImpl<UInt32, UInt16>> : ModuloByConstantImpl<UInt32, UInt16> {};
template <> struct BinaryOperationImpl<UInt32, UInt32, ModuloImpl<UInt32, UInt32>> : ModuloByConstantImpl<UInt32, UInt32> {};
template <> struct BinaryOperationImpl<UInt32, UInt64, ModuloImpl<UInt32, UInt64>> : ModuloByConstantImpl<UInt32, UInt64> {};

template <> struct BinaryOperationImpl<Int64, Int8, ModuloImpl<Int64, Int8>> : ModuloByConstantImpl<Int64, Int8> {};
template <> struct BinaryOperationImpl<Int64, Int16, ModuloImpl<Int64, Int16>> : ModuloByConstantImpl<Int64, Int16> {};
template <> struct BinaryOperationImpl<Int64, Int32, ModuloImpl<Int64, Int32>> : ModuloByConstantImpl<Int64, Int32> {};
template <> struct BinaryOperationImpl<Int64, Int64, ModuloImpl<Int64, Int64>> : ModuloByConstantImpl<Int64, Int64> {};

template <> struct BinaryOperationImpl<Int32, Int8, ModuloImpl<Int32, Int8>> : ModuloByConstantImpl<Int32, Int8> {};
template <> struct BinaryOperationImpl<Int32, Int16, ModuloImpl<Int32, Int16>> : ModuloByConstantImpl<Int32, Int16> {};
template <> struct BinaryOperationImpl<Int32, Int32, ModuloImpl<Int32, Int32>> : ModuloByConstantImpl<Int32, Int32> {};
template <> struct BinaryOperationImpl<Int32, Int64, ModuloImpl<Int32, Int64>> : ModuloByConstantImpl<Int32, Int64> {};

}
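For reference, this is how libdivide is used on its own — a minimal sketch that assumes libdivide.h is on the include path (the LIBDIVIDE_USE_SSE2 define above only matters for the vectorized path, not for this scalar example). The divider precomputes multiply/shift constants once per constant divisor, so each division in the loop avoids the hardware DIV instruction; the overloaded operator/ does the rest.

#include <libdivide.h>
#include <cstdint>
#include <cstdio>

int main()
{
	uint32_t b = 7;
	libdivide::divider<uint32_t> divider(b);	// precompute once per constant divisor

	uint32_t data[] = {0, 6, 7, 8, 700};
	for (uint32_t x : data)
		std::printf("%u / %u = %u\n", x, b, x / divider);	// overloaded operator/
}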
@@ -375,7 +375,7 @@ public:
 			prev_offset = new_offset;
 		}

-		if (out_offsets.back() != out_vec.size())
+		if (!out_offsets.empty() && out_offsets.back() != out_vec.size())
 			throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR);

 		return true;
@@ -436,7 +436,7 @@ public:
 			prev_offset = new_offset;
 		}

-		if (out_offsets.back() != out_vec.size())
+		if (!out_offsets.empty() && out_offsets.back() != out_vec.size())
 			throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR);

 		return true;
@@ -742,7 +742,7 @@ public:
 		}
 		out_vec.resize(pos - begin);

-		if (out_offsets.back() != out_vec.size())
+		if (!out_offsets.empty() && out_offsets.back() != out_vec.size())
 			throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR);

 		return true;
@@ -797,7 +797,7 @@ public:
 		}
 		out_vec.resize(pos - begin);

-		if (out_offsets.back() != out_vec.size())
+		if (!out_offsets.empty() && out_offsets.back() != out_vec.size())
 			throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR);

 		return true;

(One file's diff is suppressed because it is too large.)
@@ -50,6 +50,8 @@ namespace DB
 * not intended for the user; used only as prerequisites for higher-order functions.
 *
 * sleep(n) - sleeps for n seconds on each block.
 *
 * bar(x, min, max, width) - draws a bar with a number of characters proportional to (x - min), equal to width when x == max.
 */

@@ -274,7 +276,7 @@ public:
    {
        const IColumn & argument = *block.getByPosition(arguments[0]).column;
        if (!argument.isConst())
-           throw Exception("Argument for function 'materialize' must be constant.", ErrorCodes::ILLEGAL_COLUMN);
+           throw Exception("Argument for function " + getName() + " must be constant.", ErrorCodes::ILLEGAL_COLUMN);

        block.getByPosition(result).column = dynamic_cast<const IColumnConst &>(argument).convertToFullColumn();
    }
@@ -354,7 +356,7 @@ public:
    DataTypePtr getReturnType(const DataTypes & arguments) const
    {
        if (arguments.size() < 2)
-           throw Exception("Function tuple requires at least two arguments.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+           throw Exception("Function " + getName() + " requires at least two arguments.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        return new DataTypeTuple(arguments);
    }
@@ -386,18 +388,18 @@ public:
        ExpressionActions::Actions & out_prerequisites)
    {
        if (arguments.size() != 2)
-           throw Exception("Function tupleElement requires exactly two arguments: tuple and element index.",
+           throw Exception("Function " + getName() + " requires exactly two arguments: tuple and element index.",
                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        const ColumnConstUInt8 * index_col = typeid_cast<const ColumnConstUInt8 *>(&*arguments[1].column);
        if (!index_col)
-           throw Exception("Second argument to tupleElement must be a constant UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+           throw Exception("Second argument to " + getName() + " must be a constant UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        size_t index = index_col->getData();

        const DataTypeTuple * tuple = typeid_cast<const DataTypeTuple *>(&*arguments[0].type);
        if (!tuple)
-           throw Exception("First argument for function tupleElement must be tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+           throw Exception("First argument for function " + getName() + " must be tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        if (index == 0)
            throw Exception("Indices in tuples are 1-based.", ErrorCodes::ILLEGAL_INDEX);
@@ -417,10 +419,10 @@ public:
        const ColumnConstUInt8 * index_col = typeid_cast<const ColumnConstUInt8 *>(&*block.getByPosition(arguments[1]).column);

        if (!tuple_col)
-           throw Exception("First argument for function tupleElement must be tuple.", ErrorCodes::ILLEGAL_COLUMN);
+           throw Exception("First argument for function " + getName() + " must be tuple.", ErrorCodes::ILLEGAL_COLUMN);

        if (!index_col)
-           throw Exception("Second argument for function tupleElement must be UInt8 constant literal.", ErrorCodes::ILLEGAL_COLUMN);
+           throw Exception("Second argument for function " + getName() + " must be UInt8 constant literal.", ErrorCodes::ILLEGAL_COLUMN);

        size_t index = index_col->getData();
        if (index == 0)
@@ -472,11 +474,11 @@ public:
    DataTypePtr getReturnType(const DataTypes & arguments) const
    {
        if (arguments.size() != 1)
-           throw Exception("Function arrayJoin requires exactly one argument.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+           throw Exception("Function " + getName() + " requires exactly one argument.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        const DataTypeArray * arr = typeid_cast<const DataTypeArray *>(&*arguments[0]);
        if (!arr)
-           throw Exception("Argument for function arrayJoin must be Array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+           throw Exception("Argument for function " + getName() + " must be Array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return arr->getNestedType()->clone();
    }
@@ -484,7 +486,7 @@ public:
    /// Execute the function on the block.
    void execute(Block & block, const ColumnNumbers & arguments, size_t result)
    {
-       throw Exception("Function arrayJoin must not be executed directly.", ErrorCodes::FUNCTION_IS_SPECIAL);
+       throw Exception("Function " + getName() + " must not be executed directly.", ErrorCodes::FUNCTION_IS_SPECIAL);
    }
};

@@ -539,4 +541,202 @@ class FunctionReplicate : public IFunction
    }
};


class FunctionBar : public IFunction
{
public:
    /// Get the function name.
    String getName() const
    {
        return "bar";
    }

    /// Get the result type from the argument types. If the function is not applicable to these arguments, throw an exception.
    DataTypePtr getReturnType(const DataTypes & arguments) const
    {
        if (arguments.size() != 3 && arguments.size() != 4)
            throw Exception("Function " + getName() + " requires 3 or 4 parameters: value, min_value, max_value, [max_width_of_bar = 80]. Passed "
                + toString(arguments.size()) + ".",
                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        if (!arguments[0]->isNumeric() || !arguments[1]->isNumeric() || !arguments[2]->isNumeric()
            || (arguments.size() == 4 && !arguments[3]->isNumeric()))
            throw Exception("All arguments for function " + getName() + " must be numeric.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return new DataTypeString;
    }

    /// Execute the function on the block.
    void execute(Block & block, const ColumnNumbers & arguments, size_t result)
    {
        Int64 min = extractConstant<Int64>(block, arguments, 1, "Second");  /// The value at which the bar has zero length.
        Int64 max = extractConstant<Int64>(block, arguments, 2, "Third");   /// The value at which the bar has maximum length.

        /// Maximum width of the bar in characters, by default.
        Float64 max_width = arguments.size() == 4
            ? extractConstant<Float64>(block, arguments, 3, "Fourth")
            : 80;

        if (max_width < 1)
            throw Exception("Max_width argument must be >= 1.", ErrorCodes::ARGUMENT_OUT_OF_BOUND);

        if (max_width > 1000)
            throw Exception("Too large max_width.", ErrorCodes::ARGUMENT_OUT_OF_BOUND);

        const auto & src = *block.getByPosition(arguments[0]).column;

        if (src.isConst())
        {
            auto res_column = new ColumnConstString(block.rowsInFirstColumn(), "");
            block.getByPosition(result).column = res_column;

            if (   executeConstNumber<UInt8>   (src, *res_column, min, max, max_width)
                || executeConstNumber<UInt16>  (src, *res_column, min, max, max_width)
                || executeConstNumber<UInt32>  (src, *res_column, min, max, max_width)
                || executeConstNumber<UInt64>  (src, *res_column, min, max, max_width)
                || executeConstNumber<Int8>    (src, *res_column, min, max, max_width)
                || executeConstNumber<Int16>   (src, *res_column, min, max, max_width)
                || executeConstNumber<Int32>   (src, *res_column, min, max, max_width)
                || executeConstNumber<Int64>   (src, *res_column, min, max, max_width)
                || executeConstNumber<Float32> (src, *res_column, min, max, max_width)
                || executeConstNumber<Float64> (src, *res_column, min, max, max_width))
            {
            }
            else
                throw Exception("Illegal column " + block.getByPosition(arguments[0]).column->getName()
                    + " of argument of function " + getName(),
                    ErrorCodes::ILLEGAL_COLUMN);
        }
        else
        {
            auto res_column = new ColumnString;
            block.getByPosition(result).column = res_column;

            if (   executeNumber<UInt8>   (src, *res_column, min, max, max_width)
                || executeNumber<UInt16>  (src, *res_column, min, max, max_width)
                || executeNumber<UInt32>  (src, *res_column, min, max, max_width)
                || executeNumber<UInt64>  (src, *res_column, min, max, max_width)
                || executeNumber<Int8>    (src, *res_column, min, max, max_width)
                || executeNumber<Int16>   (src, *res_column, min, max, max_width)
                || executeNumber<Int32>   (src, *res_column, min, max, max_width)
                || executeNumber<Int64>   (src, *res_column, min, max, max_width)
                || executeNumber<Float32> (src, *res_column, min, max, max_width)
                || executeNumber<Float64> (src, *res_column, min, max, max_width))
            {
            }
            else
                throw Exception("Illegal column " + block.getByPosition(arguments[0]).column->getName()
                    + " of argument of function " + getName(),
                    ErrorCodes::ILLEGAL_COLUMN);
        }
    }

private:
    template <typename T>
    T extractConstant(Block & block, const ColumnNumbers & arguments, size_t argument_pos, const char * which_argument) const
    {
        const auto & column = *block.getByPosition(arguments[argument_pos]).column;

        if (!column.isConst())
            throw Exception(which_argument + String(" argument for function ") + getName() + " must be constant.", ErrorCodes::ILLEGAL_COLUMN);

        return apply_visitor(FieldVisitorConvertToNumber<T>(), column[0]);
    }

    static constexpr size_t BAR_CHAR_SIZE = strlen("█");

    template <typename T>
    static Float64 barWidth(T x, Int64 min, Int64 max, Float64 max_width)
    {
        if (x <= min)
            return 0;

        if (x >= max)
            return max_width;

        return (x - min) * max_width / (max - min);
    }

    static size_t barWidthInBytes(Float64 width)
    {
        return ceil(width - 1.0 / 8) * BAR_CHAR_SIZE;
    }

    /// dst must have room for barWidthInBytes(width) characters plus a terminating zero.
    static void renderBar(Float64 width, char * dst)
    {
        size_t floor_width = floor(width);

        for (size_t i = 0; i < floor_width; ++i)
        {
            memcpy(dst, "█", BAR_CHAR_SIZE);
            dst += BAR_CHAR_SIZE;
        }

        size_t remainder = floor((width - floor_width) * 8);

        if (remainder)
        {
            memcpy(dst, &"▏▎▍▌▋▋▊▉"[(remainder - 1) * BAR_CHAR_SIZE], BAR_CHAR_SIZE);
            dst += BAR_CHAR_SIZE;
        }

        *dst = 0;
    }

    template <typename T>
    static void fill(const PODArray<T> & src, ColumnString::Chars_t & dst_chars, ColumnString::Offsets_t & dst_offsets,
        Int64 min, Int64 max, Float64 max_width)
    {
        size_t size = src.size();
        size_t current_offset = 0;

        dst_offsets.resize(size);
        dst_chars.reserve(size * (barWidthInBytes(max_width) + 1));  /// Strings are zero-terminated.

        for (size_t i = 0; i < size; ++i)
        {
            Float64 width = barWidth(src[i], min, max, max_width);
            size_t next_size = current_offset + barWidthInBytes(width) + 1;
            dst_chars.resize(next_size);
            renderBar(width, reinterpret_cast<char *>(&dst_chars[current_offset]));
            current_offset = next_size;
            dst_offsets[i] = current_offset;
        }
    }

    template <typename T>
    static void fill(T src, String & dst_chars,
        Int64 min, Int64 max, Float64 max_width)
    {
        Float64 width = barWidth(src, min, max, max_width);
        dst_chars.resize(barWidthInBytes(width));
        renderBar(width, &dst_chars[0]);
    }

    template <typename T>
    static bool executeNumber(const IColumn & src, ColumnString & dst, Int64 min, Int64 max, Float64 max_width)
    {
        if (const ColumnVector<T> * col = typeid_cast<const ColumnVector<T> *>(&src))
        {
            fill(col->getData(), dst.getChars(), dst.getOffsets(), min, max, max_width);
            return true;
        }
        else
            return false;
    }

    template <typename T>
    static bool executeConstNumber(const IColumn & src, ColumnConstString & dst, Int64 min, Int64 max, Float64 max_width)
    {
        if (const ColumnConst<T> * col = typeid_cast<const ColumnConst<T> *>(&src))
        {
            fill(col->getData(), dst.getData(), min, max, max_width);
            return true;
        }
        else
            return false;
    }
};

}
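bar() maps a value linearly from [min, max] onto [0, max_width] and renders the fractional remainder in eighths of a character cell. A standalone sketch of that arithmetic (assumed example values, not ClickHouse code):

#include <cstdio>

// Sketch of bar()'s width computation: clamp x into [min, max], scale linearly.
double barWidth(double x, long min, long max, double max_width)
{
    if (x <= min)
        return 0;
    if (x >= max)
        return max_width;
    return (x - min) * max_width / (max - min);
}

int main()
{
    // bar(77, 0, 100, 80): 61 full cells plus a 4/8 partial-block glyph.
    double w = barWidth(77, 0, 100, 80);
    std::printf("full cells: %d, eighths: %d\n", (int) w, (int) ((w - (int) w) * 8));
}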
@@ -4,6 +4,7 @@

#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/Functions/IFunction.h>
+#include <DB/Common/HashTable/Hash.h>
#include <stats/IntHash.h>


@@ -13,14 +14,14 @@ namespace DB
/** Functions for generating pseudo-random numbers.
 *  A function can be called without arguments or with one argument.
 *  The argument is ignored; it only serves to make several calls of the same function distinct, so they are not merged into one.
 *
 *  Example:
 *  SELECT rand(), rand() - outputs two identical columns.
 *  SELECT rand(1), rand(2) - outputs two different columns.
 *
 *  Non-cryptographic generators:
 *
-*  rand - linear congruential generator 0 .. 2^31 - 1.
+*  rand - linear congruential generator 0 .. 2^32 - 1.
 *  rand64 - combines several rand values to get values from the range 0 .. 2^64 - 1.
 *
 *  The current time is used as the seed.
@@ -30,31 +31,74 @@ namespace DB

namespace detail
{
-   void seed(drand48_data & rand_state, intptr_t additional_seed)
+   struct LinearCongruentialGenerator
+   {
+       /// Constants from man lrand48_r.
+       static constexpr UInt64 a = 0x5DEECE66D;
+       static constexpr UInt64 c = 0xB;
+
+       /// And this one is from `head -c8 /dev/urandom | xxd -p`.
+       UInt64 current = 0x09826f4a081cee35ULL;
+
+       LinearCongruentialGenerator() {}
+       LinearCongruentialGenerator(UInt64 value) : current(value) {}
+
+       void seed(UInt64 value)
+       {
+           current = value;
+       }
+
+       UInt32 next()
+       {
+           current = current * a + c;
+           return current >> 16;
+       }
+   };
+
+   void seed(LinearCongruentialGenerator & generator, intptr_t additional_seed)
    {
        struct timespec times;
        if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &times))
            throwFromErrno("Cannot clock_gettime.", ErrorCodes::CANNOT_CLOCK_GETTIME);

-       srand48_r(intHash32<0>(times.tv_nsec ^ intHash32<0>(additional_seed)), &rand_state);
+       generator.seed(intHash64(times.tv_nsec ^ intHash64(additional_seed)));
    }
}

struct RandImpl
{
    typedef UInt32 ReturnType;

    static void execute(PODArray<ReturnType> & res)
    {
-       drand48_data rand_state;
-       detail::seed(rand_state, reinterpret_cast<intptr_t>(&res[0]));
+       detail::LinearCongruentialGenerator generator0;
+       detail::LinearCongruentialGenerator generator1;
+       detail::LinearCongruentialGenerator generator2;
+       detail::LinearCongruentialGenerator generator3;
+
+       detail::seed(generator0, 0xfb4121280b2ab902ULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator1, 0x0121cf76df39c673ULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator2, 0x17ae86e3a19a602fULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator3, 0x8b6e16da7e06d622ULL + reinterpret_cast<intptr_t>(&res[0]));

        size_t size = res.size();
-       for (size_t i = 0; i < size; ++i)
-       {
-           long rand_res;
-           lrand48_r(&rand_state, &rand_res);
-           res[i] = rand_res;
-       }
+       ReturnType * pos = &res[0];
+       ReturnType * end = pos + size;
+       ReturnType * end4 = pos + size / 4 * 4;
+
+       while (pos < end4)
+       {
+           pos[0] = generator0.next();
+           pos[1] = generator1.next();
+           pos[2] = generator2.next();
+           pos[3] = generator3.next();
+           pos += 4;
+       }
+
+       while (pos < end)
+       {
+           pos[0] = generator0.next();
+           ++pos;
+       }
    }
};
@@ -65,21 +109,32 @@ struct Rand64Impl

    static void execute(PODArray<ReturnType> & res)
    {
-       drand48_data rand_state;
-       detail::seed(rand_state, reinterpret_cast<intptr_t>(&res[0]));
+       detail::LinearCongruentialGenerator generator0;
+       detail::LinearCongruentialGenerator generator1;
+       detail::LinearCongruentialGenerator generator2;
+       detail::LinearCongruentialGenerator generator3;
+
+       detail::seed(generator0, 0xfb4121280b2ab902ULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator1, 0x0121cf76df39c673ULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator2, 0x17ae86e3a19a602fULL + reinterpret_cast<intptr_t>(&res[0]));
+       detail::seed(generator3, 0x8b6e16da7e06d622ULL + reinterpret_cast<intptr_t>(&res[0]));

        size_t size = res.size();
-       for (size_t i = 0; i < size; ++i)
-       {
-           long rand_res1;
-           long rand_res2;
-           long rand_res3;
-
-           lrand48_r(&rand_state, &rand_res1);
-           lrand48_r(&rand_state, &rand_res2);
-           lrand48_r(&rand_state, &rand_res3);
-
-           res[i] = rand_res1 ^ (rand_res2 << 18) ^ (rand_res3 << 33);
-       }
+       ReturnType * pos = &res[0];
+       ReturnType * end = pos + size;
+       ReturnType * end2 = pos + size / 2 * 2;
+
+       while (pos < end2)
+       {
+           pos[0] = (static_cast<UInt64>(generator0.next()) << 32) | generator1.next();
+           pos[1] = (static_cast<UInt64>(generator2.next()) << 32) | generator3.next();
+           pos += 2;
+       }
+
+       while (pos < end)
+       {
+           pos[0] = (static_cast<UInt64>(generator0.next()) << 32) | generator1.next();
+           ++pos;
+       }
    }
};
@@ -90,7 +145,7 @@ class FunctionRandom : public IFunction
{
private:
    typedef typename Impl::ReturnType ToType;

public:
    /// Get the function name.
    String getName() const
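The replacement generator is a plain linear congruential recurrence with the lrand48 constants: state = state * 0x5DEECE66D + 0xB (wrapping modulo 2^64 here), with the output taken from the higher bits because the low bits of an LCG are weak. Four independent generators are interleaved so consecutive outputs have no sequential dependency and the loop can be pipelined. A standalone sketch of the recurrence:

#include <cstdint>
#include <cstdio>

// Sketch of the lrand48-style LCG used above.
struct Lcg
{
    static constexpr uint64_t a = 0x5DEECE66D;  // multiplier from man lrand48_r
    static constexpr uint64_t c = 0xB;          // increment from man lrand48_r
    uint64_t state;

    explicit Lcg(uint64_t seed) : state(seed) {}

    uint32_t next()
    {
        state = state * a + c;                      // wraps modulo 2^64
        return static_cast<uint32_t>(state >> 16);  // skip the weak low bits
    }
};

int main()
{
    Lcg g(0x09826f4a081cee35ULL);
    for (int i = 0; i < 4; ++i)
        std::printf("%u\n", g.next());
}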
@@ -149,7 +149,7 @@ inline void writeJSONString(const char * begin, const char * end, WriteBuffer &
            case '\t':
                writeChar('\\', buf);
                writeChar('t', buf);
                break;
            case '\\':
                writeChar('\\', buf);
                writeChar('\\', buf);
@@ -167,10 +167,10 @@ inline void writeJSONString(const char * begin, const char * end, WriteBuffer &
            {
                char higher_half = (*it) >> 4;
                char lower_half = (*it) & 0xF;

                writeCString("\\u00", buf);
                writeChar('0' + higher_half, buf);

                if (0 <= lower_half && lower_half <= 9)
                    writeChar('0' + lower_half, buf);
                else
@@ -282,7 +282,6 @@ inline void writeQuotedString(const String & s, WriteBuffer & buf)
    writeAnyQuotedString<'\''>(s, buf);
}

/// Compatible with JSON.
inline void writeDoubleQuotedString(const String & s, WriteBuffer & buf)
{
    writeAnyQuotedString<'"'>(s, buf);
@@ -335,7 +334,7 @@ inline void writeDateText(DayNum_t date, WriteBuffer & buf)
    s[6] += values.month % 10;
    s[8] += values.day_of_month / 10;
    s[9] += values.day_of_month % 10;

    buf.write(s, 10);
}
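For control characters without a shorthand escape, writeJSONString emits \u00XY by splitting the byte into two hex nibbles; the high nibble of a control character is at most 1, so '0' + higher_half is always a valid digit. A standalone sketch of the nibble split:

#include <cstdio>

// Sketch: emit a JSON \u00XY escape for a control byte.
void writeJsonControlEscape(unsigned char ch)
{
    char higher_half = ch >> 4;
    char lower_half = ch & 0xF;

    std::printf("\\u00");
    std::printf("%c", '0' + higher_half);  // 0 or 1 for control characters
    std::printf("%c", lower_half <= 9 ? '0' + lower_half : 'A' + (lower_half - 10));
}

int main()
{
    writeJsonControlEscape(0x1B);  // prints \u001B
}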
@@ -29,10 +29,19 @@ public:
    /// Connections to remote servers.
    ConnectionPools pools;

+   struct ShardInfo
+   {
+       /// Contains names of directories for asynchronous writes through StorageDistributed.
+       std::vector<std::string> dir_names;
+       int weight;
+       bool has_local_node;
+   };
+   std::vector<ShardInfo> shard_info_vec;
+   std::vector<size_t> slot_to_shard;
+
    /// Used to impose a limit on the timeout value.
    static Poco::Timespan saturate(const Poco::Timespan & v, const Poco::Timespan & limit);

private:
    struct Address
    {
        /** In the config, addresses are either in <node> elements:
@@ -59,6 +68,7 @@ private:
        Address(const String & host_port_, const String & user_, const String & password_);
    };

+private:
    static bool isLocal(const Address & address);

    /// Array of shards. Each shard is the addresses of a single server.
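slot_to_shard implements weighted sharding: each shard contributes as many slots as its weight, and a row is routed through slot_to_shard[key % total_slots]. A standalone sketch of building and using the mapping (example weights are assumed):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Shards with weights 2 and 1 expand into the slot table [0, 0, 1],
    // so shard 0 receives roughly two thirds of the rows.
    std::vector<int> weights = {2, 1};
    std::vector<size_t> slot_to_shard;
    for (size_t shard = 0; shard < weights.size(); ++shard)
        for (int i = 0; i < weights[shard]; ++i)
            slot_to_shard.push_back(shard);

    uint64_t key = 12345;  // sharding key evaluated per row, e.g. rand() or UserID
    std::printf("row -> shard %zu\n", slot_to_shard[key % slot_to_shard.size()]);
}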
@@ -9,6 +9,7 @@
#include <Yandex/logger_useful.h>

#include <DB/Core/NamesAndTypes.h>
+#include <DB/Common/Macros.h>
#include <DB/IO/UncompressedCache.h>
#include <DB/Storages/MarkCache.h>
#include <DB/DataStreams/FormatFactory.h>
@@ -95,8 +96,9 @@ struct ContextShared
    ViewDependencies view_dependencies;             /// Current dependencies
    ConfigurationPtr users_config;                  /// Config with the users, profiles and quotas sections.
    InterserverIOHandler interserver_io_handler;    /// Handler for inter-server data transfer.
-   String default_replica_name;                    /// Replica name from the config.
+   String default_replica_name;                    /// Replica name from the config. DEPRECATED
    BackgroundProcessingPoolPtr background_pool;    /// Thread pool for background work performed by tables.
+   Macros macros;                                  /// Substitutions from the config.

    /// Clusters for distributed tables
    /// Created when Distributed tables are created, since Settings must already be set by then
@@ -242,6 +244,9 @@ public:
    String getDefaultReplicaName() const;
    void setDefaultReplicaName(const String & name);

+   const Macros & getMacros() const;
+   void setMacros(Macros && macros);
+
    Settings getSettings() const;
    void setSettings(const Settings & settings_);
@@ -220,8 +220,8 @@ private:
    void normalizeTree();
    void normalizeTreeImpl(ASTPtr & ast, MapOfASTs & finished_asts, SetOfASTs & current_asts, std::string current_alias);

-   /// Eliminates injective function calls from group by statement
-   void eliminateInjectives();
+   /// Eliminates injective function calls and constant expressions from group by statement
+   void optimizeGroupBy();

    /// Turn an enumeration of values or a subquery into an ASTSet. node is an `in` or `notIn` function.
    void makeSet(ASTFunction * node, const Block & sample_block);
@@ -22,11 +22,41 @@ public:
    /** Changes the list of columns in the table metadata on disk. Must be called under the TableStructureLock of the corresponding table.
     */
    static void updateMetadata(const String & database, const String & table, const NamesAndTypesList & columns, Context & context);

-   static AlterCommands parseAlter(const ASTAlterQuery::ParameterContainer & params, const DataTypeFactory & data_type_factory);
private:
+   struct PartitionCommand
+   {
+       enum Type
+       {
+           DROP_PARTITION,
+           ATTACH_PARTITION,
+       };
+
+       Type type;
+
+       Field partition;
+       bool detach;    /// true for DETACH PARTITION.
+
+       bool unreplicated;
+       bool part;
+
+       static PartitionCommand dropPartition(const Field & partition, bool detach)
+       {
+           return {DROP_PARTITION, partition, detach};
+       }
+
+       static PartitionCommand attachPartition(const Field & partition, bool unreplicated, bool part)
+       {
+           return {ATTACH_PARTITION, partition, false, unreplicated, part};
+       }
+   };
+
+   typedef std::vector<PartitionCommand> PartitionCommands;
+
    ASTPtr query_ptr;

    Context context;

+   static void parseAlter(const ASTAlterQuery::ParameterContainer & params, const DataTypeFactory & data_type_factory,
+       AlterCommands & out_alter_commands, PartitionCommands & out_partition_commands);
};
}
@@ -70,6 +70,9 @@ struct Settings
     * TODO: Currently applied only at server startup. Could be made dynamically changeable. */ \
    M(SettingUInt64, background_pool_size, DBMS_DEFAULT_BACKGROUND_POOL_SIZE) \
    \
+   /** Sleep time for StorageDistributed DirectoryMonitors in case there is no work or exception has been thrown */ \
+   M(SettingMilliseconds, distributed_directory_monitor_sleep_time_ms, DBMS_DISTRIBUTED_DIRECTORY_MONITOR_SLEEP_TIME_MS) \
+   \
    M(SettingLoadBalancing, load_balancing, LoadBalancing::RANDOM) \
    \
    M(SettingTotalsMode, totals_mode, TotalsMode::BEFORE_HAVING) \
@@ -9,7 +9,8 @@ namespace DB
 * ALTER TABLE [db.]name_type
 *     ADD COLUMN col_name type [AFTER col_after],
 *     DROP COLUMN col_drop,
-*     MODIFY COLUMN col_name type
+*     MODIFY COLUMN col_name type,
+*     DROP PARTITION partition
 *     ...
 */

@@ -18,16 +19,18 @@ class ASTAlterQuery : public IAST
public:
    enum ParameterType
    {
-       ADD,
-       DROP,
-       MODIFY,
+       ADD_COLUMN,
+       DROP_COLUMN,
+       MODIFY_COLUMN,
+       DROP_PARTITION,
+       ATTACH_PARTITION,
        NO_TYPE
    };

    struct Parameters
    {
-       Parameters() : type(NO_TYPE) {}
-       int type;
+       int type = NO_TYPE;

        /** In an ADD COLUMN query, the name and type of the added column are stored here.
         *  In a DROP query this field is not used.
@@ -40,12 +43,21 @@ public:
         */
        ASTPtr column;

+       /** In a DROP PARTITION query, the partition name is stored here.
+        */
+       ASTPtr partition;
+       bool detach = false;        /// true for DETACH PARTITION.
+
+       bool part = false;          /// true for ATTACH [UNREPLICATED] PART
+       bool unreplicated = false;  /// true for ATTACH UNREPLICATED ...
+
        /// deep copy
        void clone(Parameters & p) const
        {
-           p.type = type;
-           p.column = column->clone();
+           p = *this;
+           p.name_type = name_type->clone();
+           p.column = column->clone();
+           p.partition = partition->clone();
        }
    };
    typedef std::vector<Parameters> ParameterContainer;
@@ -54,6 +66,18 @@ public:
    String table;

+   void addParameters(const Parameters & params)
+   {
+       parameters.push_back(params);
+       if (params.name_type)
+           children.push_back(params.name_type);
+       if (params.column)
+           children.push_back(params.column);
+       if (params.partition)
+           children.push_back(params.partition);
+   }
+
    ASTAlterQuery(StringRange range_ = StringRange()) : IAST(range_) {};

    /** Get the text that identifies this element. */
@@ -22,7 +22,7 @@ protected:
class ParserParenthesisExpression : public IParserBase
{
protected:
-   const char * getName() const { return "expression in parenthesis"; }
+   const char * getName() const { return "parenthesized expression"; }
    bool parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & expected);
};
@@ -10,6 +10,7 @@ namespace DB
 * [ADD COLUMN col_name type [AFTER col_after],]
 * [DROP COLUMN col_drop, ...]
 * [MODIFY COLUMN col_modify type, ...]
+* [DROP PARTITION partition, ...]
 */
class ParserAlterQuery : public IParserBase
{
@@ -6,7 +6,7 @@
namespace DB
{

-/// An operation from an ALTER query. Adding columns of Nested type is not expanded into adding separate columns.
+/// An operation from an ALTER query (other than DROP PARTITION). Adding columns of Nested type is not expanded into adding separate columns.
struct AlterCommand
{
    enum Type
dbms/include/DB/Storages/Distributed/DirectoryMonitor.h (new file, 207 lines)
@@ -0,0 +1,207 @@
#pragma once

#include <DB/DataStreams/RemoteBlockOutputStream.h>
#include <DB/Common/escapeForFileName.h>
#include <DB/Storages/StorageDistributed.h>
#include <boost/algorithm/string/find_iterator.hpp>
#include <boost/algorithm/string/finder.hpp>
#include <thread>
#include <mutex>

namespace DB
{

namespace
{
    template <typename PoolFactory>
    ConnectionPools createPoolsForAddresses(const std::string & name, PoolFactory && factory)
    {
        ConnectionPools pools;

        for (auto it = boost::make_split_iterator(name, boost::first_finder(",")); it != decltype(it){}; ++it)
        {
            const auto & address = boost::copy_range<std::string>(*it);

            const auto user_pw_end = strchr(address.data(), '@');
            const auto colon = strchr(address.data(), ':');
            if (!user_pw_end || !colon)
                throw Exception{
                    "Shard address '" + address + "' does not match to 'user[:password]@host:port' pattern",
                    ErrorCodes::INCORRECT_FILE_NAME
                };

            const auto has_pw = colon < user_pw_end;
            const auto host_end = has_pw ? strchr(user_pw_end + 1, ':') : colon;
            if (!host_end)
                throw Exception{
                    "Shard address '" + address + "' does not contain port",
                    ErrorCodes::INCORRECT_FILE_NAME
                };

            const auto user = unescapeForFileName({address.data(), has_pw ? colon : user_pw_end});
            const auto password = has_pw ? unescapeForFileName({colon + 1, user_pw_end}) : std::string{};
            const auto host = unescapeForFileName({user_pw_end + 1, host_end});
            const auto port = parse<UInt16>(host_end + 1);

            pools.emplace_back(factory(host, port, user, password));
        }

        return pools;
    }
}

/** Implementation for StorageDistributed::DirectoryMonitor nested class.
 *  This type is not designed for standalone use. */
class StorageDistributed::DirectoryMonitor
{
public:
    DirectoryMonitor(StorageDistributed & storage, const std::string & name)
        : storage(storage), pool{createPool(name)}, path{storage.path + name + '/'}
        , sleep_time{storage.context.getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()}
        , log{&Logger::get(getLoggerName())}
    {
    }

    ~DirectoryMonitor()
    {
        {
            std::lock_guard<std::mutex> lock{mutex};
            quit = true;
        }
        cond.notify_one();
        thread.join();
    }

private:
    void run()
    {
        std::unique_lock<std::mutex> lock{mutex};

        const auto quit_requested = [this] { return quit; };

        while (!quit_requested())
        {
            auto do_sleep = true;

            try
            {
                do_sleep = !findFiles();
            }
            catch (...)
            {
                do_sleep = true;
                tryLogCurrentException(getLoggerName().data());
            }

            if (do_sleep)
                cond.wait_for(lock, sleep_time, quit_requested);
        }
    }

    ConnectionPoolPtr createPool(const std::string & name)
    {
        const auto pool_factory = [this, &name] (const std::string & host, const UInt16 port,
            const std::string & user, const std::string & password) {
                return new ConnectionPool{
                    1, host, port, "",
                    user, password, storage.context.getDataTypeFactory(),
                    storage.getName() + '_' + name};
        };

        auto pools = createPoolsForAddresses(name, pool_factory);

        return pools.size() == 1 ? pools.front() : new ConnectionPoolWithFailover(pools, LoadBalancing::RANDOM);
    }

    bool findFiles()
    {
        std::map<UInt64, std::string> files;

        Poco::DirectoryIterator end;
        for (Poco::DirectoryIterator it{path}; it != end; ++it)
        {
            const auto & file_path_str = it->path();
            Poco::Path file_path{file_path_str};

            if (!it->isDirectory() && 0 == strncmp(file_path.getExtension().data(), "bin", strlen("bin")))
                files[parse<UInt64>(file_path.getBaseName())] = file_path_str;
        }

        if (files.empty())
            return false;

        for (const auto & file : files)
        {
            if (quit)
                return true;

            processFile(file.second);
        }

        return true;
    }

    void processFile(const std::string & file_path)
    {
        LOG_TRACE(log, "Started processing `" << file_path << '`');
        auto connection = pool->get();

        try
        {
            ReadBufferFromFile in{file_path};

            std::string insert_query;
            readStringBinary(insert_query, in);

            RemoteBlockOutputStream remote{*connection, insert_query};

            remote.writePrefix();
            remote.writePrepared(in);
            remote.writeSuffix();
        }
        catch (const Exception & e)
        {
            const auto code = e.code();

            /// mark file as broken if necessary
            if (code == ErrorCodes::CHECKSUM_DOESNT_MATCH ||
                code == ErrorCodes::TOO_LARGE_SIZE_COMPRESSED ||
                code == ErrorCodes::CANNOT_READ_ALL_DATA)
            {
                const auto last_path_separator_pos = file_path.rfind('/');
                const auto & path = file_path.substr(0, last_path_separator_pos + 1);
                const auto & file_name = file_path.substr(last_path_separator_pos + 1);
                const auto & broken_path = path + "broken/";
                const auto & broken_file_path = broken_path + file_name;

                Poco::File{broken_path}.createDirectory();
                Poco::File{file_path}.renameTo(broken_file_path);

                LOG_ERROR(log, "Renamed `" << file_path << "` to `" << broken_file_path << '`');
            }

            throw;
        }

        Poco::File{file_path}.remove();

        LOG_TRACE(log, "Finished processing `" << file_path << '`');
    }

    std::string getLoggerName() const
    {
        return storage.name + '.' + storage.getName() + ".DirectoryMonitor";
    }

    StorageDistributed & storage;
    ConnectionPoolPtr pool;
    std::string path;
    std::chrono::milliseconds sleep_time;
    bool quit{false};
    std::mutex mutex;
    std::condition_variable cond;
    Logger * log;
    std::thread thread{&DirectoryMonitor::run, this};
};

}
@@ -0,0 +1,155 @@
#pragma once

#include <DB/Storages/StorageDistributed.h>
#include <DB/Storages/Distributed/queryToString.h>

#include <DB/IO/WriteBufferFromFile.h>
#include <DB/IO/CompressedWriteBuffer.h>
#include <DB/DataStreams/NativeBlockOutputStream.h>
#include <DB/Interpreters/InterpreterInsertQuery.h>

#include <statdaemons/Increment.h>
#include <statdaemons/stdext.h>

#include <iostream>

namespace DB
{

/** Writes are asynchronous - the data is first written to the local file system and then sent to the remote servers.
 *  If the Distributed table uses more than one shard, then in order to support writes,
 *  an additional ENGINE parameter must be specified when creating the table - the sharding key.
 *  The sharding key is an arbitrary expression over the columns, e.g. rand() or UserID.
 *  On write, a data block is split by the remainder of dividing the sharding key by the total shard weight,
 *  and the resulting blocks are written in compressed Native format into separate directories for sending.
 *  For each destination address (each directory with data to send), StorageDistributed creates a separate thread
 *  that watches the directory and sends the data. */
class DistributedBlockOutputStream : public IBlockOutputStream
{
public:
    DistributedBlockOutputStream(StorageDistributed & storage, const ASTPtr & query_ast)
        : storage(storage), query_ast(query_ast)
    {
    }

    void write(const Block & block) override
    {
        if (storage.getShardingKeyExpr() && storage.cluster.shard_info_vec.size() > 1)
            return writeSplit(block);

        writeImpl(block);
    }

private:
    void writeSplit(const Block & block)
    {
        auto block_with_key = block;
        storage.getShardingKeyExpr()->execute(block_with_key);

        const auto & key_column = block_with_key.getByName(storage.getShardingKeyColumnName()).column;
        const auto total_weight = storage.cluster.slot_to_shard.size();

        /// shard => block mapping
        std::vector<std::unique_ptr<Block>> target_blocks(storage.cluster.shard_info_vec.size());

        const auto num_cols = block.columns();
        std::vector<const IColumn*> columns(num_cols);
        for (size_t i = 0; i < columns.size(); ++i)
            columns[i] = block.getByPosition(i).column;

        for (size_t num_rows = block.rowsInFirstColumn(), row = 0; row < num_rows; ++row)
        {
            const auto target_block_idx = storage.cluster.slot_to_shard[key_column->get64(row) % total_weight];
            auto & target_block = target_blocks[target_block_idx];
            if (!target_block)
                target_block = stdext::make_unique<Block>(block.cloneEmpty());

            for (size_t col = 0; col < num_cols; ++col)
                target_block->getByPosition(col).column->insertFrom(*columns[col], row);
        }

        for (size_t i = 0; i < target_blocks.size(); ++i)
            if (const auto & target_block = target_blocks[i])
                writeImpl(*target_block, i);
    }

    void writeImpl(const Block & block, const size_t shard_id = 0)
    {
        const auto & shard_info = storage.cluster.shard_info_vec[shard_id];
        if (shard_info.has_local_node)
            writeToLocal(block);

        /// dir_names is empty if shard has only local addresses
        if (!shard_info.dir_names.empty())
            writeToShard(block, shard_info.dir_names);
    }

    void writeToLocal(const Block & block)
    {
        InterpreterInsertQuery interp{query_ast, storage.context};

        auto block_io = interp.execute();
        block_io.out->writePrefix();
        block_io.out->write(block);
        block_io.out->writeSuffix();
    }

    void writeToShard(const Block & block, const std::vector<std::string> & dir_names)
    {
        /** tmp directory is used to ensure atomicity of transactions
          * and keep monitor thread out from reading incomplete data
          */
        std::string first_file_tmp_path{};

        auto first = true;
        const auto & query_string = queryToString<ASTInsertQuery>(query_ast);

        /// write first file, hardlink the others
        for (const auto & dir_name : dir_names)
        {
            const auto & path = storage.getPath() + dir_name + '/';

            /// ensure shard subdirectory creation and notify storage
            if (Poco::File(path).createDirectory())
                storage.requireDirectoryMonitor(dir_name);

            const auto & file_name = toString(Increment{path + "increment.txt"}.get(true)) + ".bin";
            const auto & block_file_path = path + file_name;

            /** on first iteration write block to a temporary directory for subsequent hardlinking to ensure
              * the inode is not freed until we're done */
            if (first)
            {
                first = false;

                const auto & tmp_path = path + "tmp/";
                Poco::File(tmp_path).createDirectory();
                const auto & block_file_tmp_path = tmp_path + file_name;

                first_file_tmp_path = block_file_tmp_path;

                WriteBufferFromFile out{block_file_tmp_path};
                CompressedWriteBuffer compress{out};
                NativeBlockOutputStream stream{compress};

                writeStringBinary(query_string, out);

                stream.writePrefix();
                stream.write(block);
                stream.writeSuffix();
            }

            if (link(first_file_tmp_path.data(), block_file_path.data()))
                throwFromErrno("Could not link " + block_file_path + " to " + first_file_tmp_path);
        }

        /** remove the temporary file, enabling the OS to reclaim inode after all threads
          * have removed their corresponding files */
        Poco::File(first_file_tmp_path).remove();
    }

    StorageDistributed & storage;
    ASTPtr query_ast;
};

}
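writeToShard relies on hardlink semantics: the block is written once into tmp/, link() publishes it atomically into every destination directory, and unlinking the tmp name afterwards lets the OS reclaim the inode only after the last directory monitor has removed its link. A standalone sketch of the pattern (paths are hypothetical; the directories are assumed to exist):

#include <cstdio>
#include <unistd.h>

int main()
{
    // Write the payload once under a temporary name.
    std::FILE * f = std::fopen("tmp/1.bin", "w");
    if (!f)
        return std::perror("fopen"), 1;
    std::fputs("block data", f);
    std::fclose(f);

    // Hardlink it into each destination directory; every link() is atomic
    // and all names share a single inode, so the data exists exactly once.
    const char * destinations[] = {"shard_a/1.bin", "shard_b/1.bin"};
    for (const char * dst : destinations)
        if (link("tmp/1.bin", dst))
            return std::perror("link"), 1;

    // Drop the temporary name; the data stays reachable through the links.
    unlink("tmp/1.bin");
}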
dbms/include/DB/Storages/Distributed/queryToString.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#pragma once

#include <DB/Parsers/formatAST.h>

namespace DB
{
    template <typename ASTType>
    inline std::string queryToString(const ASTPtr & query)
    {
        const auto & query_ast = typeid_cast<const ASTType &>(*query);

        std::ostringstream s;
        formatAST(query_ast, s, 0, false, true);

        return s.str();
    }
}
@@ -205,6 +205,20 @@ public:
        throw Exception("Method alter is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
    }

+   /** Execute a (DROP|DETACH) PARTITION query.
+    */
+   virtual void dropPartition(const Field & partition, bool detach)
+   {
+       throw Exception("Method dropPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
+   }
+
+   /** Execute an ATTACH [UNREPLICATED] (PART|PARTITION) query.
+    */
+   virtual void attachPartition(const Field & partition, bool unreplicated, bool part)
+   {
+       throw Exception("Method attachPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
+   }
+
    /** Perform some background work, e.g. merging parts in a MergeTree table.
     *  Returns whether any work was performed.
     */
@@ -13,7 +13,7 @@ namespace DB
 * On destructor call or when the ZooKeeper session ends, it transitions to the ABANDONED state.
 * (Including when the program crashes.)
 */
-class AbandonableLockInZooKeeper
+class AbandonableLockInZooKeeper : private boost::noncopyable
{
public:
    enum State
@@ -34,6 +34,14 @@ public:
        path = zookeeper.create(path_prefix, holder_path, zkutil::CreateMode::PersistentSequential);
    }

+   AbandonableLockInZooKeeper(AbandonableLockInZooKeeper && rhs)
+       : zookeeper(rhs.zookeeper)
+   {
+       std::swap(path_prefix, rhs.path_prefix);
+       std::swap(path, rhs.path);
+       std::swap(holder_path, rhs.holder_path);
+   }
+
    String getPath()
    {
        return path;
@@ -49,6 +57,7 @@ public:
    {
        zookeeper.remove(path);
        zookeeper.remove(holder_path);
+       holder_path = "";
    }

    /// Adds to the list actions equivalent to unlock().
@@ -60,6 +69,9 @@ public:

    ~AbandonableLockInZooKeeper()
    {
+       if (holder_path.empty())
+           return;
+
        try
        {
            zookeeper.tryRemove(holder_path);
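The move constructor transfers ownership by swapping the path strings: the moved-from lock is left with an empty holder_path, which unlock() and the destructor now treat as "nothing to release". A standalone sketch of that idiom (hypothetical names, no ZooKeeper):

#include <cstdio>
#include <string>
#include <utility>

// Sketch: ownership transfer by swap; an empty holder_path means
// "released or moved-from", so the destructor becomes a no-op.
struct ScopedNode
{
    std::string holder_path;

    explicit ScopedNode(std::string path) : holder_path(std::move(path)) {}

    ScopedNode(ScopedNode && rhs) { std::swap(holder_path, rhs.holder_path); }

    ~ScopedNode()
    {
        if (holder_path.empty())
            return;  // moved-from or already unlocked
        std::printf("removing %s\n", holder_path.c_str());
    }
};

int main()
{
    ScopedNode a{"/locks/abandonable_lock-0000000001"};
    ScopedNode b = std::move(a);  // only b removes the node at scope exit
}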
@@ -63,12 +63,14 @@ public:
    void add(const String & name);
    String getContainingPart(const String & name) const;

-   Strings getParts() const;
+   Strings getParts() const;   /// In ascending order of month and block number.

    size_t size() const;

    static String getPartName(DayNum_t left_date, DayNum_t right_date, UInt64 left_id, UInt64 right_id, UInt64 level);

    /// Returns true if the directory name matches the format of a part directory name.
-   static bool isPartDirectory(const String & dir_name, Poco::RegularExpression::MatchVec & matches);
+   static bool isPartDirectory(const String & dir_name, Poco::RegularExpression::MatchVec * out_matches = nullptr);

    /// Fills a DataPart with data from the part name.
    static void parsePartName(const String & file_name, Part & part, const Poco::RegularExpression::MatchVec * matches = nullptr);
@@ -16,15 +16,21 @@
namespace DB
{

-/** In several threads, in an infinite loop, executes the specified functions.
+/** Using a fixed number of threads, performs an arbitrary number of tasks in an infinite loop.
 *  Intended for tasks performing continuous background work (e.g. merges).
 *  A task is a function returning bool - whether it did any work.
 *  If it did, it should be run again; if not, wait a few seconds, or until the wake event, and run again.
 *
 *  Also, while running, a task may temporarily increase some counter related to all tasks
 *  - e.g. the number of merges in progress.
 */
class BackgroundProcessingPool
{
public:
    typedef std::map<String, int> Counters;

    /** Used from inside a task. Allows incrementing some counters.
     *  When the task finishes, all changes are rolled back.
     *  For example, to be able to find out the number of threads performing a large merge,
     *  the task performing the large merge can increment a counter. It does not need to be decremented back.
     */
@@ -57,10 +63,14 @@ public:
    /// Move the task to the front of the queue and wake up some thread.
    void wake()
    {
+       Poco::ScopedReadRWLock rlock(rwlock);
+       if (removed)
+           return;
+
        std::unique_lock<std::mutex> lock(pool.mutex);
        pool.tasks.splice(pool.tasks.begin(), pool.tasks, iterator);

        /// Not very reliable: if all threads are busy right now, this call will not wake anyone,
        /// and everyone will go to sleep at the end of the iteration.
        pool.wake_event.notify_one();
    }
@@ -70,50 +80,32 @@ public:

    BackgroundProcessingPool & pool;
    Task function;
-   Poco::RWLock lock;
-   volatile bool removed;
+
+   /// A read lock is held while the task is executing; the removed flag is changed under the write lock.
+   Poco::RWLock rwlock;
+   volatile bool removed = false;

    std::list<std::shared_ptr<TaskInfo>>::iterator iterator;

-   TaskInfo(BackgroundProcessingPool & pool_, const Task & function_) : pool(pool_), function(function_), removed(false) {}
+   TaskInfo(BackgroundProcessingPool & pool_, const Task & function_) : pool(pool_), function(function_) {}
};

typedef std::shared_ptr<TaskInfo> TaskHandle;


-BackgroundProcessingPool(int size_) : size(size_), sleep_seconds(10), shutdown(false) {}
-
-void setNumberOfThreads(int size_)
+BackgroundProcessingPool(int size_) : size(size_)
{
    if (size_ <= 0)
        throw Exception("Invalid number of threads: " + toString(size_), ErrorCodes::ARGUMENT_OUT_OF_BOUND);

-   std::unique_lock<std::mutex> tlock(threads_mutex);
-   std::unique_lock<std::mutex> lock(mutex);
-
-   if (size_ == size)
-       return;
-
-   if (threads.empty())
-   {
-       size = size_;
-       return;
-   }
-
-   throw Exception("setNumberOfThreads is not implemented for non-empty pool", ErrorCodes::NOT_IMPLEMENTED);
+   threads.resize(size);
+   for (auto & thread : threads)
+       thread = std::thread([this] { threadFunction(); });
}

-int getNumberOfThreads()
+int getNumberOfThreads() const
{
-   std::unique_lock<std::mutex> lock(mutex);
    return size;
}

-void setSleepTime(double seconds)
-{
-   std::unique_lock<std::mutex> lock(mutex);
-   sleep_seconds = seconds;
-}
-
int getCounter(const String & name)
{
    std::unique_lock<std::mutex> lock(mutex);
@@ -122,8 +114,6 @@ public:

    TaskHandle addTask(const Task & task)
    {
-       std::unique_lock<std::mutex> lock(threads_mutex);
-
        TaskHandle res(new TaskInfo(*this, task));

        {
@@ -132,44 +122,22 @@ public:
            res->iterator = --tasks.end();
        }

-       if (threads.empty())
-       {
-           shutdown = false;
-           counters.clear();
-           threads.resize(size);
-           for (std::thread & thread : threads)
-               thread = std::thread(std::bind(&BackgroundProcessingPool::threadFunction, this));
-       }
+       wake_event.notify_all();

        return res;
    }

    void removeTask(const TaskHandle & task)
    {
-       std::unique_lock<std::mutex> tlock(threads_mutex);
-
        /// Wait until all executions of this task finish.
        {
-           Poco::ScopedWriteRWLock wlock(task->lock);
+           Poco::ScopedWriteRWLock wlock(task->rwlock);
            task->removed = true;
        }

        {
            std::unique_lock<std::mutex> lock(mutex);
-           auto it = std::find(tasks.begin(), tasks.end(), task);
-           if (it == tasks.end())
-               throw Exception("Task not found", ErrorCodes::LOGICAL_ERROR);
-           tasks.erase(it);
-       }
-
-       if (tasks.empty())
-       {
-           shutdown = true;
-           wake_event.notify_all();
-           for (std::thread & thread : threads)
-               thread.join();
-           threads.clear();
-           counters.clear();
+           tasks.erase(task->iterator);
        }
    }

@@ -177,15 +145,10 @@ public:
    {
        try
        {
-           std::unique_lock<std::mutex> lock(threads_mutex);
-           if (!threads.empty())
-           {
-               LOG_ERROR(&Logger::get("~BackgroundProcessingPool"), "Destroying non-empty BackgroundProcessingPool");
-               shutdown = true;
-               wake_event.notify_all();
-               for (std::thread & thread : threads)
-                   thread.join();
-           }
+           shutdown = true;
+           wake_event.notify_all();
+           for (std::thread & thread : threads)
+               thread.join();
        }
        catch (...)
        {
@@ -197,24 +160,25 @@ private:
    typedef std::list<TaskHandle> Tasks;
    typedef std::vector<std::thread> Threads;

-   std::mutex threads_mutex;
-   std::mutex mutex;
-   int size;
-   Tasks tasks;        /// Tasks in the order in which we plan to execute them.
-   Threads threads;
-   Counters counters;
-   double sleep_seconds;
-
-   volatile bool shutdown;
+   const size_t size;
+   static constexpr double sleep_seconds = 10;
+
+   Tasks tasks;        /// Tasks in the order in which we plan to execute them.
+   Counters counters;
+   std::mutex mutex;   /// For working with the tasks list, and also with counters (when threads is not empty).
+
+   Threads threads;
+
+   volatile bool shutdown = false;
    std::condition_variable wake_event;


    void threadFunction()
    {
        while (!shutdown)
        {
            Counters counters_diff;
            bool need_sleep = false;
-           size_t tasks_count = 1;

            try
            {
@@ -236,11 +200,12 @@ private:

            if (!task)
            {
-               std::this_thread::sleep_for(std::chrono::duration<double>(sleep_seconds));
+               std::unique_lock<std::mutex> lock(mutex);
+               wake_event.wait_for(lock, std::chrono::duration<double>(sleep_seconds));
                continue;
            }

-           Poco::ScopedReadRWLock rlock(task->lock);
+           Poco::ScopedReadRWLock rlock(task->rwlock);
            if (task->removed)
                continue;

@@ -248,15 +213,11 @@ private:

            if (task->function(context))
            {
                /// If the task managed to do some work, run it again without a pause.
-               std::unique_lock<std::mutex> lock(mutex);
-
-               auto it = std::find(tasks.begin(), tasks.end(), task);
-               if (it != tasks.end())
-               {
-                   need_sleep = false;
-                   tasks.splice(tasks.begin(), tasks, it);
-               }
+               need_sleep = false;
+
+               std::unique_lock<std::mutex> lock(mutex);
+               tasks.splice(tasks.begin(), tasks, task->iterator);
            }
        }
        catch (...)
@@ -265,14 +226,12 @@ private:
            tryLogCurrentException(__PRETTY_FUNCTION__);
        }

        /// Subtract all the counters back.
        if (!counters_diff.empty())
        {
            std::unique_lock<std::mutex> lock(mutex);
            for (const auto & it : counters_diff)
-           {
                counters[it.first] -= it.second;
-           }
        }

        if (shutdown)
@@ -281,7 +240,7 @@ private:
        if (need_sleep)
        {
            std::unique_lock<std::mutex> lock(mutex);
-           wake_event.wait_for(lock, std::chrono::duration<double>(sleep_seconds / tasks_count));
+           wake_event.wait_for(lock, std::chrono::duration<double>(sleep_seconds));
        }
    }
}
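The pool's scheduling contract: a task returns whether it did any work; if so it is spliced to the front of the queue and re-run without a pause, otherwise the thread waits on wake_event for up to sleep_seconds. A heavily reduced standalone sketch of one scheduling iteration (the real pool adds counters, a per-task RW lock, and multiple threads):

#include <chrono>
#include <condition_variable>
#include <functional>
#include <list>
#include <mutex>

// Reduced sketch of BackgroundProcessingPool's scheduling rule.
struct MiniPool
{
    using Task = std::function<bool()>;  // returns true if it did any work

    std::list<Task> tasks;
    std::mutex mutex;
    std::condition_variable wake_event;
    static constexpr double sleep_seconds = 10;

    void runOneIteration()
    {
        Task task;
        {
            std::lock_guard<std::mutex> lock(mutex);
            if (!tasks.empty())
                task = tasks.front();
        }

        if (task && task())
            return;  // did work: run again immediately, no pause

        // No task or no work done: sleep until woken or until the timeout.
        std::unique_lock<std::mutex> lock(mutex);
        wake_event.wait_for(lock, std::chrono::duration<double>(sleep_seconds));
    }
};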
@@ -100,7 +100,7 @@ struct MergeTreeSettings
    size_t max_rows_to_use_cache = 1024 * 1024;

    /// After how many seconds to remove unneeded parts.
-   time_t old_parts_lifetime = 5 * 60;
+   time_t old_parts_lifetime = 8 * 60;

    /// If a table has at least this many active parts, artificially slow down inserts into the table.
    size_t parts_to_delay_insert = 150;
@@ -110,11 +110,17 @@ struct MergeTreeSettings
    double insert_delay_step = 1.1;

    /// For how many of the most recent blocks to keep hashes in ZooKeeper.
-   size_t replicated_deduplication_window = 1000;
+   size_t replicated_deduplication_window = 100;

+   /// Keep approximately this many of the latest records in the log in ZooKeeper, even if nobody needs them anymore.
+   /// Does not affect the operation of tables; only used to get a chance to eyeball the log in ZooKeeper before it is cleaned up.
+   size_t replicated_logs_to_keep = 100;
+
    /// The maximum number of errors while loading parts at which ReplicatedMergeTree still agrees to start.
    size_t replicated_max_unexpected_parts = 3;
+   size_t replicated_max_unexpectedly_merged_parts = 2;
+   size_t replicated_max_missing_obsolete_parts = 5;
+   size_t replicated_max_missing_active_parts = 20;
};

class MergeTreeData : public ITableDeclaration
@@ -307,17 +313,22 @@ public:
        Poco::File(to).remove(true);
    }

-   /// Renames the part by prepending a prefix to its name.
-   void renameAddPrefix(const String & prefix) const
+   void renameTo(const String & new_name) const
    {
        String from = storage.full_path + name + "/";
-       String to = storage.full_path + prefix + name + "/";
+       String to = storage.full_path + new_name + "/";

        Poco::File f(from);
        f.setLastModified(Poco::Timestamp::fromEpochTime(time(0)));
        f.renameTo(to);
    }

+   /// Renames the part by prepending a prefix to its name.
+   void renameAddPrefix(const String & prefix) const
+   {
+       renameTo(prefix + name);
+   }
+
    /// Load the index and compute the size. If size == 0, compute it as well.
    void loadIndex()
    {
@@ -344,12 +355,12 @@ public:
    }

    /// Read the checksums, if they exist.
-   void loadChecksums()
+   void loadChecksums(bool require)
    {
        String path = storage.full_path + name + "/checksums.txt";
        if (!Poco::File(path).exists())
        {
-           if (storage.require_part_metadata)
+           if (require)
                throw Exception("No checksums.txt in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART);

            return;
@@ -359,16 +370,21 @@ public:
        assertEOF(file);
    }

-   void loadColumns()
+   void loadColumns(bool require)
    {
        String path = storage.full_path + name + "/columns.txt";
        if (!Poco::File(path).exists())
        {
-           if (storage.require_part_metadata)
+           if (require)
                throw Exception("No columns.txt in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART);
-           columns = *storage.columns;

+           /// If there is no file with the list of columns, write one.
+           for (const NameAndTypePair & column : *storage.columns)
+           {
+               if (Poco::File(storage.full_path + name + "/" + escapeForFileName(column.name) + ".bin").exists())
+                   columns.push_back(column);
+           }
+
            {
                WriteBufferFromFile out(path + ".tmp", 4096);
                columns.writeText(out);
@@ -382,7 +398,7 @@ public:
        columns.readText(file, storage.context.getDataTypeFactory());
    }

-   void checkNotBroken()
+   void checkNotBroken(bool require_part_metadata)
    {
        String path = storage.full_path + name;

@@ -391,7 +407,7 @@ public:
        if (!checksums.files.count("primary.idx"))
            throw Exception("No checksum for primary.idx", ErrorCodes::NO_FILE_IN_DATA_PART);

-       if (storage.require_part_metadata)
+       if (require_part_metadata)
        {
            for (const NameAndTypePair & it : columns)
            {
@@ -560,6 +576,9 @@ public:
        bool require_part_metadata_,
        BrokenPartCallback broken_part_callback_ = &MergeTreeData::doNothing);

+   /// Load the set of data parts from disk. Called once, right after the object is created.
+   void loadDataParts(bool skip_sanity_checks);
+
    std::string getModePrefix() const;

    bool supportsSampling() const { return !!sampling_expression; }
@@ -625,15 +644,23 @@ public:
     */
    DataPartsVector renameTempPartAndReplace(MutableDataPartPtr part, Increment * increment = nullptr, Transaction * out_transaction = nullptr);

-   /** Removes the remove parts from the working set and adds the add parts.
+   /** Removes the remove parts from the working set and adds the add parts. add must already be in all_data_parts.
     *  If clear_without_timeout, the data will be deleted at the next clearOldParts, ignoring old_parts_lifetime.
     */
    void replaceParts(const DataPartsVector & remove, const DataPartsVector & add, bool clear_without_timeout);

-   /** Renames the part to prefix_part and removes it from the working set.
+   /** Adds a new part to the list of known parts and to the working set.
+    */
+   void attachPart(DataPartPtr part);
+
+   /** Renames the part to detached/prefix_part and forgets about it. The data will not be deleted in clearOldParts.
+    *  If restore_covered, adds to the working set the inactive parts whose merge produced the part being removed.
     */
-   void renameAndDetachPart(DataPartPtr part, const String & prefix, bool restore_covered = false);
+   void renameAndDetachPart(DataPartPtr part, const String & prefix = "", bool restore_covered = false, bool move_to_detached = true);
+
+   /** Removes the part from the list of parts (including all_data_parts), but does not move its directory.
+    */
+   void detachPartInPlace(DataPartPtr part);

    /** Returns old inactive parts that can be deleted. Simultaneously removes them from the list of parts, but not from disk.
     */
@@ -685,6 +712,9 @@ public:
    ExpressionActionsPtr getPrimaryExpression() const { return primary_expr; }
    SortDescription getSortDescription() const { return sort_descr; }

+   /// Check that the part is not broken, and compute its checksums if they are missing.
+   MutableDataPartPtr loadPartAndFixMetadata(const String & relative_path);
+
    const Context & context;
    const String date_column_name;
    const ASTPtr sampling_expression;
@@ -726,9 +756,6 @@ private:
    DataParts all_data_parts;
    Poco::FastMutex all_data_parts_mutex;

-   /// Load the set of data parts from disk. Called once - when the object is created.
-   void loadDataParts();
-
    /** An expression that converts column types.
     *  If there are no type conversions, out_expression=nullptr.
     *  out_rename_map maps column files at the expression's output to new table files.
@@ -9,16 +9,27 @@ namespace DB
class MergeTreePartChecker
{
public:
    struct Settings
    {
        bool verbose = false; /// Writes progress and errors to stderr, and does not stop at the first error.
        bool require_checksums = false; /// Requires that checksums.txt exists.
        bool require_column_files = false; /// Requires that files exist for all columns from columns.txt.
        size_t index_granularity = 8192;

        Settings & setVerbose(bool verbose_) { verbose = verbose_; return *this; }
        Settings & setRequireChecksums(bool require_checksums_) { require_checksums = require_checksums_; return *this; }
        Settings & setRequireColumnFiles(bool require_column_files_) { require_column_files = require_column_files_; return *this; }
        Settings & setIndexGranularity(size_t index_granularity_) { index_granularity = index_granularity_; return *this; }
    };

    /** Fully checks the data of a part:
      * - Computes the checksums and compares them with checksums.txt.
      * - For arrays and strings, checks that the sizes and amounts of data are consistent.
      * - Checks that the marks are correct.
      * Throws an exception if the part is corrupted or could not be checked (TODO: these two cases could be separated).
      * If strict, requires that files exist for all columns from columns.txt.
      * If verbose, writes progress and errors to stderr, and does not stop at the first error.
      */
    static void checkDataPart(String path, size_t index_granularity, bool strict, const DataTypeFactory & data_type_factory,
        bool verbose = false);
    static void checkDataPart(String path, const Settings & settings, const DataTypeFactory & data_type_factory,
        MergeTreeData::DataPart::Checksums * out_checksums = nullptr);
};

}
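
The fluent setters make call sites read declaratively. A minimal usage sketch (a hypothetical call site, using only names visible in this header):

    /// Check a part strictly, collecting the computed checksums.
    MergeTreeData::DataPart::Checksums checksums;
    MergeTreePartChecker::checkDataPart(
        path,
        MergeTreePartChecker::Settings()
            .setRequireChecksums(true)
            .setRequireColumnFiles(true)
            .setIndexGranularity(8192),
        data_type_factory,
        &checksums);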

@@ -28,30 +28,12 @@ public:
        time_t min_date_time = DateLUT::instance().fromDayNum(DayNum_t(current_block.min_date));
        String month_name = toString(Date2OrderedIdentifier(min_date_time) / 100);

        String month_path = storage.zookeeper_path + "/block_numbers/" + month_name;
        if (!storage.zookeeper->exists(month_path))
        {
            /// Create a node for the month in block_numbers and skip 200 increment values in it.
            /// This is needed so that, if necessary, data can later be inserted at the beginning.
            zkutil::Ops ops;
            auto acl = storage.zookeeper->getDefaultACL();
            ops.push_back(new zkutil::Op::Create(month_path, "", acl, zkutil::CreateMode::Persistent));
            for (size_t i = 0; i < 200; ++i)
            {
                ops.push_back(new zkutil::Op::Create(month_path + "/skip_increment", "", acl, zkutil::CreateMode::Persistent));
                ops.push_back(new zkutil::Op::Remove(month_path + "/skip_increment", -1));
            }
            /// Ignore errors: this could only fail if someone else executed these lines before us.
            storage.zookeeper->tryMulti(ops);
        }
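
Below, this inline bootstrap is replaced by a call to storage.allocateBlockNumber(month_name). A sketch of that helper, reconstructed from the removed lines shown here (its actual body is not part of this diff):

    /// Sketch, assuming allocateBlockNumber wraps the month-node creation above.
    AbandonableLockInZooKeeper StorageReplicatedMergeTree::allocateBlockNumber(const String & month_name)
    {
        /// ... ensure the month node exists, skipping the first 200 increments, as above ...
        return AbandonableLockInZooKeeper(
            zookeeper_path + "/block_numbers/" + month_name + "/block-",
            zookeeper_path + "/temp", *zookeeper);
    }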

        AbandonableLockInZooKeeper block_number_lock(
            storage.zookeeper_path + "/block_numbers/" + month_name + "/block-",
            storage.zookeeper_path + "/temp", *storage.zookeeper);
        AbandonableLockInZooKeeper block_number_lock = storage.allocateBlockNumber(month_name);

        UInt64 part_number = block_number_lock.getNumber();

        MergeTreeData::MutableDataPartPtr part = storage.writer.writeTempPart(current_block, part_number);
        String part_name = ActiveDataPartSet::getPartName(part->left_date, part->right_date, part->left, part->right, part->level);

        /// If no ID is specified in the query, use a hash of the data as the ID, so that identical data is never inserted twice.
        /// NOTE: If such deduplication is not needed, the block_id can be left empty instead.
@@ -61,13 +43,10 @@ public:

        LOG_DEBUG(log, "Wrote block " << part_number << " with ID " << block_id << ", " << current_block.block.rows() << " rows");

        MergeTreeData::Transaction transaction; /// If adding the part to ZK fails, remove it from the working set again.
        storage.data.renameTempPartAndAdd(part, nullptr, &transaction);

        StorageReplicatedMergeTree::LogEntry log_entry;
        log_entry.type = StorageReplicatedMergeTree::LogEntry::GET_PART;
        log_entry.source_replica = storage.replica_name;
        log_entry.new_part_name = part->name;
        log_entry.new_part_name = part_name;

        /// In one multi-op, add information about the part to all the required places in ZooKeeper and release block_number_lock.
        zkutil::Ops ops;
@@ -94,7 +73,7 @@ public:
                storage.zookeeper->getDefaultACL(),
                zkutil::CreateMode::Persistent));
        }
        storage.checkPartAndAddToZooKeeper(part, ops);
        storage.checkPartAndAddToZooKeeper(part, ops, part_name);
        ops.push_back(new zkutil::Op::Create(
            storage.zookeeper_path + "/log/log-",
            log_entry.toString(),
@@ -102,6 +81,9 @@ public:
            zkutil::CreateMode::PersistentSequential));
        block_number_lock.getUnlockOps(ops);

        MergeTreeData::Transaction transaction; /// If adding the part to ZK fails, remove it from the working set again.
        storage.data.renameTempPartAndAdd(part, nullptr, &transaction);

        try
        {
            auto code = storage.zookeeper->tryMulti(ops);
@@ -18,6 +18,9 @@ namespace DB
  */
class StorageDistributed : public IStorage
{
    friend class DistributedBlockOutputStream;
    friend class DirectoryMonitor;

public:
    static StoragePtr create(
        const std::string & name_,            /// Table name.
@@ -25,7 +28,9 @@ public:
        const String & remote_database_,      /// Database on the remote servers.
        const String & remote_table_,         /// Table name on the remote servers.
        const String & cluster_name,
        Context & context_);
        Context & context_,
        const ASTPtr & sharding_key_,
        const String & data_path_);

    static StoragePtr create(
        const std::string & name_,            /// Table name.
@@ -57,12 +62,21 @@ public:
        size_t max_block_size = DEFAULT_BLOCK_SIZE,
        unsigned threads = 1);

    BlockOutputStreamPtr write(ASTPtr query) override;

    void drop() override {}
    void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) { name = new_table_name; }
    /// Columns must be added to and removed from the subtables manually;
    /// the structure of the subtables is not checked.
    void alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context);

    void shutdown() override;

    const ExpressionActionsPtr & getShardingKeyExpr() const { return sharding_key_expr; }
    const String & getShardingKeyColumnName() const { return sharding_key_column_name; }
    const String & getPath() const { return path; }


private:
    StorageDistributed(
        const std::string & name_,
@@ -70,17 +84,24 @@ private:
        const String & remote_database_,
        const String & remote_table_,
        Cluster & cluster_,
        const Context & context_);
        Context & context_,
        const ASTPtr & sharding_key_ = nullptr,
        const String & data_path_ = String{});

    /// Creates a copy of the query and changes the database and table names.
    ASTPtr rewriteQuery(ASTPtr query);

    /// create directory monitor thread by subdirectory name
    void createDirectoryMonitor(const std::string & name);
    /// create directory monitors for each existing subdirectory
    void createDirectoryMonitors();
    /// ensure directory monitor creation
    void requireDirectoryMonitor(const std::string & name);

    String name;
    NamesAndTypesListPtr columns;
    String remote_database;
    String remote_table;

    const Context & context;
    Context & context;

    /// Temporary tables that need to be sent to the server. The variable is cleared after every call to the read method.
    /// Use the storeExternalTables method to prepare them for sending.
@@ -91,6 +112,14 @@ private:

    /// Connections to the remote servers.
    Cluster & cluster;

    ExpressionActionsPtr sharding_key_expr;
    String sharding_key_column_name;
    bool write_enabled;
    String path;

    class DirectoryMonitor;
    std::unordered_map<std::string, std::unique_ptr<DirectoryMonitor>> directory_monitors;
};

}
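
With a sharding key and a data path, StorageDistributed can now accept INSERTs: rows are routed to shards, and data for shards that cannot be reached immediately is spooled under `path` and later flushed by the DirectoryMonitor threads. A simplified, standalone sketch of the routing idea (the modulo distribution is an assumption for illustration only; the real code evaluates sharding_key_expr over a block and honours cluster weights):

    #include <cstdint>
    #include <cstddef>

    /// Simplified analogue: map a sharding-key value to a shard index.
    size_t selectShard(uint64_t sharding_key_value, size_t num_shards)
    {
        return sharding_key_value % num_shards;   /// hypothetical plain modulo distribution
    }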

@@ -6,6 +6,7 @@
#include <DB/Storages/MergeTree/MergeTreeDataWriter.h>
#include <DB/Storages/MergeTree/MergeTreeDataSelectExecutor.h>
#include <DB/Storages/MergeTree/ReplicatedMergeTreePartsExchange.h>
#include "MergeTree/AbandonableLockInZooKeeper.h"
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <zkutil/ZooKeeper.h>
#include <zkutil/LeaderElection.h>
@@ -77,6 +78,9 @@ public:

    void alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context) override;

    void dropPartition(const Field & partition, bool detach) override;
    void attachPartition(const Field & partition, bool unreplicated, bool part) override;

    /** Removes the replica from ZooKeeper. If there are no other replicas, removes the whole table from ZooKeeper.
      */
    void drop() override;
@@ -111,7 +115,7 @@ private:
        {
            try
            {
                Poco::ScopedLock<Poco::FastMutex> lock(storage.queue_mutex);
                std::unique_lock<std::mutex> lock(storage.queue_mutex);
                if (!storage.future_parts.erase(part))
                    throw Exception("Untagging already untagged future part " + part + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
            }
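
The switch from Poco::FastMutex to std::mutex is required rather than cosmetic: the LogEntry struct below gains a std::condition_variable, which can only wait on a std::unique_lock<std::mutex>. A minimal sketch of the wait pattern this enables:

    /// Sketch: block until a queue entry finishes executing (see currently_executing below).
    std::unique_lock<std::mutex> lock(storage.queue_mutex);
    entry.execution_complete.wait(lock, [&] { return !entry.currently_executing; });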
@@ -126,31 +130,48 @@ private:

    struct LogEntry
    {
        typedef Poco::SharedPtr<LogEntry> Ptr;

        enum Type
        {
            GET_PART,
            MERGE_PARTS,
            GET_PART,    /// Fetch a part from another replica.
            MERGE_PARTS, /// Merge parts.
            DROP_RANGE,  /// Delete the parts in the specified month within the specified range of numbers.
            ATTACH_PART, /// Move a part from the detached or unreplicated directory.
        };

        String znode_name;

        Type type;
        String source_replica; /// An empty string means this entry was added directly to the queue rather than copied from the log.

        /// The name of the resulting part.
        /// For DROP_RANGE, the name of a nonexistent part. All parts covered by it must be deleted.
        String new_part_name;

        Strings parts_to_merge;

        /// For DROP_RANGE, true means the parts should not be deleted but moved to the detached directory.
        bool detach = false;

        /// For ATTACH_PART, the name of the part in the detached or unreplicated directory.
        String source_part_name;
        /// Move from the unreplicated directory rather than from detached.
        bool attach_unreplicated;

        FuturePartTaggerPtr future_part_tagger;
        bool currently_executing = false;
        bool currently_executing = false; /// Accessed under queue_mutex.
        std::condition_variable execution_complete; /// Signalled when currently_executing becomes false.

        void addResultToVirtualParts(StorageReplicatedMergeTree & storage)
        {
            if (type == MERGE_PARTS || type == GET_PART)
            if (type == MERGE_PARTS || type == GET_PART || type == DROP_RANGE || type == ATTACH_PART)
                storage.virtual_parts.add(new_part_name);
        }

        void tagPartAsFuture(StorageReplicatedMergeTree & storage)
        {
            if (type == MERGE_PARTS || type == GET_PART)
            if (type == MERGE_PARTS || type == GET_PART || type == ATTACH_PART)
                future_part_tagger = new FuturePartTagger(new_part_name, storage);
        }

@@ -167,17 +188,19 @@ private:
            return s;
        }

        static LogEntry parse(const String & s)
        static Ptr parse(const String & s)
        {
            ReadBufferFromString in(s);
            LogEntry res;
            res.readText(in);
            Ptr res = new LogEntry;
            res->readText(in);
            assertEOF(in);
            return res;
        }
    };
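
parse now returns a heap-allocated Ptr instead of a value: std::condition_variable is neither copyable nor movable, so a LogEntry containing one can no longer be returned by value and must live at a stable address. Call sites change accordingly, for example:

    LogEntry::Ptr entry = LogEntry::parse(s);   /// was: LogEntry entry = LogEntry::parse(s);
    entry->znode_name = znode_name;             /// hypothetical follow-up; fields are now accessed through the pointer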

    typedef std::list<LogEntry> LogEntries;
    typedef LogEntry::Ptr LogEntryPtr;

    typedef std::list<LogEntryPtr> LogEntries;

    typedef std::set<String> StringSet;
    typedef std::list<String> StringList;
@@ -195,7 +218,7 @@ private:
      * In ZK the entries are in chronological order; here they are not necessarily so.
      */
    LogEntries queue;
    Poco::FastMutex queue_mutex;
    std::mutex queue_mutex;

    /** Parts that will appear as the result of actions currently being performed by background threads (these actions are not in the queue).
      * Use under a locked queue_mutex.
@@ -263,12 +286,14 @@ private:
    /// Thread that selects parts to merge.
    std::thread merge_selecting_thread;
    Poco::Event merge_selecting_event;
    std::mutex merge_selecting_mutex; /// Taken for each iteration of selecting parts to merge.

    /// Thread that deletes old parts, log entries, and blocks.
    std::thread cleanup_thread;

    /// Thread that handles reconnecting to ZooKeeper when the session expires (a very unlikely event).
    std::thread restarting_thread;
    Poco::Event restarting_event;

    /// Thread that watches for changes to the column list in ZooKeeper and updates the parts in accordance with those changes.
    std::thread alter_thread;
@@ -287,7 +312,6 @@ private:
    Poco::Event shutdown_event;
    /// Whether restarting_thread should terminate.
    volatile bool permanent_shutdown_called = false;
    Poco::Event permanent_shutdown_event;

    StorageReplicatedMergeTree(
        const String & zookeeper_path_,
@@ -308,7 +332,7 @@ private:

    /** Creates the minimal set of nodes in ZooKeeper.
      */
    void createTable();
    void createTableIfNotExists();

    /** Creates the replica in ZooKeeper and adds to the queue everything needed to catch up with the other replicas.
      */
@@ -321,7 +345,7 @@ private:
    /** Check that the column list and table settings match those specified in ZK (/metadata).
      * If not, throw an exception.
      */
    void checkTableStructure(bool skip_sanity_checks);
    void checkTableStructure(bool skip_sanity_checks, bool allow_alter);

    /** Check that the set of parts matches what is in ZK (/replicas/me/parts/).
      * If some parts described in ZK are missing locally, throw an exception.
@@ -334,11 +358,11 @@ private:
    void initVirtualParts();

    /// Start or stop the background threads. Used for partial reinitialization when the ZooKeeper session is recreated.
    void startup();
    bool tryStartup(); /// Returns false if ZooKeeper is unavailable.
    void partialShutdown();

    /// Forbid writes to the table and terminate all background threads.
    void goReadOnly();
    void goReadOnlyPermanently();


    /** Check that the part's checksum matches the checksum of the same part on some other replica.
@@ -347,7 +371,7 @@ private:
      * Puts into ops the actions that add the data about the part to ZooKeeper.
      * Call under TableStructureLock.
      */
    void checkPartAndAddToZooKeeper(MergeTreeData::DataPartPtr part, zkutil::Ops & ops);
    void checkPartAndAddToZooKeeper(MergeTreeData::DataPartPtr part, zkutil::Ops & ops, String name_override = "");

    /// Removes a part from ZooKeeper and enqueues a task to download it. Intended for broken parts.
    void removePartAndEnqueueFetch(const String & part_name);
@@ -381,6 +405,9 @@ private:
      */
    bool executeLogEntry(const LogEntry & entry, BackgroundProcessingPool::Context & pool_context);

    void executeDropRange(const LogEntry & entry);
    bool executeAttachPart(const LogEntry & entry); /// Returns false if the part is absent and needs to be fetched from another replica.

    /** Updates the queue.
      */
    void queueUpdatingThread();
@@ -425,6 +452,15 @@ private:
    /** Download the specified part from the specified replica.
      */
    void fetchPart(const String & part_name, const String & replica_name);

    AbandonableLockInZooKeeper allocateBlockNumber(const String & month_name);

    /** Wait until all replicas, including this one, execute the specified action from the log.
      * If replicas are being added at the same time, it may not wait for an added replica.
      */
    void waitForAllReplicasToProcessLogEntry(const LogEntry & entry);
};

}
@@ -1,9 +1,7 @@
#include <DB/AggregateFunctions/AggregateFunctionCount.h>
#include <DB/AggregateFunctions/AggregateFunctionSum.h>
#include <DB/AggregateFunctions/AggregateFunctionAvg.h>
#include <DB/AggregateFunctions/AggregateFunctionAny.h>
#include <DB/AggregateFunctions/AggregateFunctionAnyLast.h>
#include <DB/AggregateFunctions/AggregateFunctionsMinMax.h>
#include <DB/AggregateFunctions/AggregateFunctionsMinMaxAny.h>
#include <DB/AggregateFunctions/AggregateFunctionsArgMinMax.h>
#include <DB/AggregateFunctions/AggregateFunctionUniq.h>
#include <DB/AggregateFunctions/AggregateFunctionUniqUpTo.h>
@@ -69,6 +67,7 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_type)
    return nullptr;
}


template<template <typename, typename> class AggregateFunctionTemplate, template <typename> class Data>
static IAggregateFunction * createWithNumericType(const IDataType & argument_type)
{
@@ -87,18 +86,48 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_type)
}


/// min, max, any, anyLast
template<template <typename> class AggregateFunctionTemplate, template <typename> class Data>
static IAggregateFunction * createAggregateFunctionSingleValue(const String & name, const DataTypes & argument_types)
{
    if (argument_types.size() != 1)
        throw Exception("Incorrect number of arguments for aggregate function " + name, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

    const IDataType & argument_type = *argument_types[0];

    if (typeid_cast<const DataTypeUInt8 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<UInt8>>>;
    else if (typeid_cast<const DataTypeUInt16 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<UInt16>>>;
    else if (typeid_cast<const DataTypeUInt32 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<UInt32>>>;
    else if (typeid_cast<const DataTypeUInt64 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<UInt64>>>;
    else if (typeid_cast<const DataTypeInt8 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Int8>>>;
    else if (typeid_cast<const DataTypeInt16 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Int16>>>;
    else if (typeid_cast<const DataTypeInt32 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Int32>>>;
    else if (typeid_cast<const DataTypeInt64 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Int64>>>;
    else if (typeid_cast<const DataTypeFloat32 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Float32>>>;
    else if (typeid_cast<const DataTypeFloat64 *>(&argument_type)) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Float64>>>;
    else if (typeid_cast<const DataTypeDate *>(&argument_type))
        return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DataTypeDate::FieldType>>>;
    else if (typeid_cast<const DataTypeDateTime*>(&argument_type))
        return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DataTypeDateTime::FieldType>>>;
    else if (typeid_cast<const DataTypeString*>(&argument_type))
        return new AggregateFunctionTemplate<Data<SingleValueDataString>>;
    else
        return new AggregateFunctionTemplate<Data<SingleValueDataGeneric>>;
}
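
A call such as min(x) over a UInt64 column thus resolves, via one typeid_cast per supported type, to a fully concrete instantiation; anything that is not a known fixed-width type or String falls back to the generic Field-based representation. Spelled out by hand for illustration:

    /// What the dispatch above produces for min over UInt64:
    IAggregateFunction * f =
        new AggregateFunctionsSingleValue<AggregateFunctionMinData<SingleValueDataFixed<UInt64>>>;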


AggregateFunctionPtr AggregateFunctionFactory::get(const String & name, const DataTypes & argument_types, int recursion_level) const
{
    if (name == "count")
        return new AggregateFunctionCount;
    else if (name == "any")
        return new AggregateFunctionAny;
        return createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionAnyData>(name, argument_types);
    else if (name == "anyLast")
        return new AggregateFunctionAnyLast;
        return createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionAnyLastData>(name, argument_types);
    else if (name == "min")
        return new AggregateFunctionMin;
        return createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionMinData>(name, argument_types);
    else if (name == "max")
        return new AggregateFunctionMax;
        return createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionMaxData>(name, argument_types);
    else if (name == "argMin")
        return new AggregateFunctionArgMin;
    else if (name == "argMax")
@@ -49,6 +49,16 @@

#include <DB/Common/ExternalTable.h>


/// http://en.wikipedia.org/wiki/ANSI_escape_code
#define SAVE_CURSOR_POSITION "\033[s"
#define RESTORE_CURSOR_POSITION "\033[u"
#define CLEAR_TO_END_OF_LINE "\033[K"
/// These codes may not be supported everywhere.
#define DISABLE_LINE_WRAPPING "\033[?7l"
#define ENABLE_LINE_WRAPPING "\033[?7h"


/** Command-line client for the ClickHouse DBMS.
  */

@@ -61,11 +71,7 @@ using Poco::SharedPtr;
class Client : public Poco::Util::Application
{
public:
    Client() : is_interactive(true), stdin_is_not_tty(false),
        format_max_block_size(0), std_in(STDIN_FILENO), std_out(STDOUT_FILENO), processed_rows(0),
        rows_read_on_server(0), bytes_read_on_server(0), written_progress_chars(0), written_first_block(false)
    {
    }
    Client() {}

private:
    typedef std::unordered_set<String> StringSet;
@@ -77,24 +83,24 @@ private:
        "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй"
    };

    bool is_interactive; /// Use the readline interface or batch mode.
    bool stdin_is_not_tty; /// stdin is not a terminal.
    bool is_interactive = true; /// Use the readline interface or batch mode.
    bool stdin_is_not_tty = false; /// stdin is not a terminal.

    SharedPtr<Connection> connection; /// Connection to the database.
    String query; /// The current query.

    String format; /// The format for outputting results to the console.
    size_t format_max_block_size; /// Maximum block size for output to the console.
    size_t format_max_block_size = 0; /// Maximum block size for output to the console.
    String insert_format; /// The data format for INSERTs when reading them from stdin in batch mode.
    size_t insert_format_max_block_size; /// Maximum block size when reading INSERT data.
    size_t insert_format_max_block_size = 0; /// Maximum block size when reading INSERT data.

    Context context;

    /// Reading from stdin for batch mode
    ReadBufferFromFileDescriptor std_in;
    ReadBufferFromFileDescriptor std_in {STDIN_FILENO};

    /// Output to the console
    WriteBufferFromFileDescriptor std_out;
    WriteBufferFromFileDescriptor std_out {STDOUT_FILENO};
    BlockOutputStreamPtr block_std_out;

    String home_path;
@@ -105,7 +111,7 @@ private:
    String history_file;

    /// Rows read or written.
    size_t processed_rows;
    size_t processed_rows = 0;

    /// The parsed query. Some settings (the format) are taken from it.
    ASTPtr parsed_query;
@@ -115,10 +121,10 @@ private:

    Stopwatch watch;

    size_t rows_read_on_server;
    size_t bytes_read_on_server;
    size_t written_progress_chars;
    bool written_first_block;
    size_t rows_read_on_server = 0;
    size_t bytes_read_on_server = 0;
    size_t written_progress_chars = 0;
    bool written_first_block = false;

    /// Information about external tables
    std::list<ExternalTable> external_tables;
@@ -441,7 +447,7 @@ private:
        if (exit_strings.end() != exit_strings.find(line))
            return false;

        block_std_out = nullptr;
        resetOutput();

        watch.restart();

@@ -642,6 +648,14 @@ private:
    }


    /** Flush all data still remaining in the buffers. */
    void resetOutput()
    {
        block_std_out = nullptr;
        std_out.next();
    }


    /** Receives and processes packets from the server.
      * Also watches whether query execution needs to be cancelled.
      */
@@ -747,12 +761,7 @@ private:
    void onData(Block & block)
    {
        if (written_progress_chars)
        {
            for (size_t i = 0; i < written_progress_chars; ++i)
                std::cerr << "\b \b";

            written_progress_chars = 0;
        }
        clearProgress();

        if (!block)
            return;
@@ -780,7 +789,8 @@ private:
            written_first_block = true;
        }

        std_out.next();
        /// The received block of data is output to the client immediately.
        block_std_out->flush();
    }


@@ -804,8 +814,18 @@ private:
    }


    void clearProgress()
    {
        std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
        written_progress_chars = 0;
    }


    void writeProgress()
    {
        if (!is_interactive)
            return;

        static size_t increment = 0;
        static const char * indicators[8] =
        {
@@ -816,30 +836,30 @@ private:
            "\033[1;34m←\033[0m",
            "\033[1;35m↖\033[0m",
            "\033[1;36m↑\033[0m",
            "\033[1;37m↗\033[0m",
            "\033[1m↗\033[0m",
        };

        if (is_interactive)
        {
            std::cerr << std::string(written_progress_chars, '\b');
        if (written_progress_chars)
            clearProgress();
        else
            std::cerr << SAVE_CURSOR_POSITION;

            std::stringstream message;
            message << indicators[increment % 8]
                << std::fixed << std::setprecision(3)
                << " Progress: " << rows_read_on_server << " rows, " << bytes_read_on_server / 1000000.0 << " MB";
        std::stringstream message;
        message << indicators[increment % 8]
            << std::fixed << std::setprecision(3)
            << " Progress: " << rows_read_on_server << " rows, " << bytes_read_on_server / 1000000.0 << " MB";

            size_t elapsed_ns = watch.elapsed();
            if (elapsed_ns)
                message << " ("
                    << rows_read_on_server * 1000000000.0 / elapsed_ns << " rows/s., "
                    << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) ";
            else
                message << ". ";
        size_t elapsed_ns = watch.elapsed();
        if (elapsed_ns)
            message << " ("
                << rows_read_on_server * 1000000000.0 / elapsed_ns << " rows/s., "
                << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) ";
        else
            message << ". ";

            written_progress_chars = message.str().size() - 13;
            std::cerr << message.rdbuf();
            ++increment;
        }
        written_progress_chars = message.str().size() - 13;
        std::cerr << DISABLE_LINE_WRAPPING << message.rdbuf() << ENABLE_LINE_WRAPPING;
        ++increment;
    }
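
The redraw now costs two short escape sequences per update instead of one backspace per printed character: the cursor position is saved once, and every later update restores it and clears to the end of the line. A tiny standalone illustration of the same sequences (assumes a terminal that supports them, per the comment near the #defines):

    #include <iostream>
    #include <unistd.h>

    int main()
    {
        std::cerr << "\033[s";              /// SAVE_CURSOR_POSITION, once
        for (int percent = 0; percent <= 100; percent += 20)
        {
            /// RESTORE_CURSOR_POSITION, CLEAR_TO_END_OF_LINE, then redraw in place.
            std::cerr << "\033[u" << "\033[K" << "Progress: " << percent << "%";
            sleep(1);
        }
        std::cerr << std::endl;
    }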


@@ -859,6 +879,8 @@ private:

    void onException(const Exception & e)
    {
        resetOutput();

        std::cerr << "Received exception from server:" << std::endl
            << "Code: " << e.code() << ". " << e.displayText();
    }
@@ -876,7 +898,7 @@ private:
        if (block_std_out)
            block_std_out->writeSuffix();

        std_out.next();
        resetOutput();

        if (is_interactive && !written_first_block)
            std::cout << "Ok." << std::endl;

@@ -284,14 +284,17 @@ void Connection::sendData(const Block & block, const String & name)
}


void Connection::sendPreparedData(ReadBuffer & input, const String & name)
void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String & name)
{
    writeVarUInt(Protocol::Client::Data, *out);

    if (server_revision >= DBMS_MIN_REVISION_WITH_TEMPORARY_TABLES)
        writeStringBinary(name, *out);

    copyData(input, *out);
    if (0 == size)
        copyData(input, *out);
    else
        copyData(input, *out, size);
    out->next();
}
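
The new size parameter lets a caller forward a known number of bytes without draining the buffer to EOF; passing 0 keeps the old read-to-end behaviour. A hedged call-site sketch (the source buffer here is illustrative):

    ReadBufferFromFile in("data.bin");        /// hypothetical pre-serialized data
    connection.sendPreparedData(in, bytes, ""); /// send exactly `bytes` from `in`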


dbms/src/Columns/IColumn.cpp (new file, 45 lines)
@@ -0,0 +1,45 @@
#include <emmintrin.h>

#include <DB/Columns/IColumn.h>


namespace DB
{

size_t countBytesInFilter(const IColumn::Filter & filt)
{
    size_t count = 0;

    /** NOTE: In theory, filt should contain only zeros and ones.
      * But, just in case, the condition > 0 (on signed bytes) is used here.
      * It would be better to use != 0, but SSE2 does not allow that.
      */

    const __m128i zero16 = _mm_set1_epi8(0);

    const Int8 * pos = reinterpret_cast<const Int8 *>(&filt[0]);
    const Int8 * end = pos + filt.size();
    const Int8 * end64 = pos + filt.size() / 64 * 64;

    for (; pos < end64; pos += 64)
        count += __builtin_popcountll(
            static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos)),
                zero16)))
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 16)),
                zero16))) << 16)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 32)),
                zero16))) << 32)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpgt_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 48)),
                zero16))) << 48));

    for (; pos < end; ++pos)
        count += *pos > 0;

    return count;
}

}
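
The scalar tail loop at the end is the reference behaviour; the SSE2 body merely processes 64 filter bytes per iteration by packing four 16-byte comparison masks into a single popcount. A quick equivalence check (a sketch, not part of the file):

    #include <cassert>

    void testCountBytesInFilter()
    {
        DB::IColumn::Filter filt(1000);
        size_t expected = 0;
        for (size_t i = 0; i < filt.size(); ++i)
        {
            filt[i] = (i % 3 == 0);   /// some pattern of zeros and ones
            expected += filt[i];
        }
        assert(DB::countBytesInFilter(filt) == expected);
    }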


dbms/src/Common/Macros.cpp (new file, 60 lines)
@@ -0,0 +1,60 @@
#include <DB/Common/Macros.h>
#include <DB/Core/Exception.h>
#include <DB/Core/ErrorCodes.h>

namespace DB
{

Macros::Macros() {}

Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String & root_key)
{
    Poco::Util::AbstractConfiguration::Keys keys;
    config.keys(root_key, keys);
    for (const String & key : keys)
    {
        macros[key] = config.getString(root_key + "." + key);
    }
}

String Macros::expand(const String & s) const
{
    if (s.find('{') == String::npos)
        return s;

    String res;
    size_t pos = 0;
    while (true)
    {
        size_t begin = s.find('{', pos);

        if (begin == String::npos)
        {
            res.append(s, pos, String::npos);
            break;
        }
        else
        {
            res.append(s, pos, begin - pos);
        }

        ++begin;
        size_t end = s.find('}', begin);
        if (end == String::npos)
            throw Exception("Unbalanced { and } in string with macros: \"" + s + "\"", ErrorCodes::SYNTAX_ERROR);

        String macro_name = s.substr(begin, end - begin);

        auto it = macros.find(macro_name);
        if (it == macros.end())
            throw Exception("No macro " + macro_name + " in config", ErrorCodes::SYNTAX_ERROR);

        res += it->second;

        pos = end + 1;
    }

    return res;
}

}
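
expand substitutes every {key} occurrence with the value configured under root_key, throwing on unbalanced braces or unknown keys. A usage sketch (the macro names are hypothetical examples, not taken from this diff):

    /// Given a config section <macros><shard>01</shard><replica>r1</replica></macros>:
    Macros macros(config, "macros");
    String path = macros.expand("/clickhouse/tables/{shard}/hits/{replica}");
    /// path == "/clickhouse/tables/01/hits/r1"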

@@ -108,18 +108,15 @@ static bool isValidFunction(ASTPtr expression, const NameSet & columns)
/// Extract all subfunctions of the top-level conjunction that depend only on the specified columns.
static void extractFunctions(ASTPtr expression, const NameSet & columns, std::vector<ASTPtr> & result)
{
    if (const ASTFunction * function = typeid_cast<const ASTFunction *>(&* expression))
    const ASTFunction * function = typeid_cast<const ASTFunction *>(&* expression);
    if (function && function->name == "and")
    {
        if (function->name == "and")
        {
            for (size_t i = 0; i < function->arguments->children.size(); ++i)
                extractFunctions(function->arguments->children[i], columns, result);
        }
        else
        {
            if (isValidFunction(expression, columns))
                result.push_back(expression->clone());
        }
        for (size_t i = 0; i < function->arguments->children.size(); ++i)
            extractFunctions(function->arguments->children[i], columns, result);
    }
    else if (isValidFunction(expression, columns))
    {
        result.push_back(expression->clone());
    }
}
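
Besides being flatter, the rewrite closes a gap: previously an expression that was not an ASTFunction never reached isValidFunction at all, because the whole body sat inside the typeid_cast branch. A hand-worked example of the intended behaviour:

    /// For the predicate  x = 1 AND y = 2 AND z = 3,  with available columns {x, z}:
    ///  - the top-level "and" is recursed into,
    ///  - x = 1 and z = 3 pass isValidFunction and are cloned into result,
    ///  - y = 2 is dropped,
    /// yielding result = { x = 1, z = 3 }.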

@@ -41,7 +41,7 @@ Block FilterBlockInputStream::readImpl()
        /** If the filter is a constant (for example, WHERE 1 was written),
          * then either return an empty block, or return the block unchanged.
          */
        ColumnConstUInt8 * column_const = typeid_cast<ColumnConstUInt8 *>(&*column);
        const ColumnConstUInt8 * column_const = typeid_cast<const ColumnConstUInt8 *>(&*column);
        if (column_const)
        {
            if (!column_const->getData())
@@ -50,52 +50,73 @@ Block FilterBlockInputStream::readImpl()
            return res;
        }

        ColumnUInt8 * column_vec = typeid_cast<ColumnUInt8 *>(&*column);
        const ColumnUInt8 * column_vec = typeid_cast<const ColumnUInt8 *>(&*column);
        if (!column_vec)
            throw Exception("Illegal type " + column->getName() + " of column for filter. Must be ColumnUInt8 or ColumnConstUInt8.", ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);

        IColumn::Filter & filter = column_vec->getData();
        const IColumn::Filter & filter = column_vec->getData();

        /// If there is nothing besides the filter column.
        if (columns == 1)
        {
            /// Then count the number of ones in it.
            size_t filtered_rows = 0;
            for (size_t i = 0, size = filter.size(); i < size; ++i)
                if (filter[i])
                    ++filtered_rows;

            /// If the current block was filtered out completely, move on to the next one.
            if (filtered_rows == 0)
                continue;

            /// Replace this column with a constant-1 column of the required size.
            res.getByPosition(filter_column).column = new ColumnConstUInt8(filtered_rows, 1);

            return res;
        }

        /// General case: filter the remaining columns.
        /** Find out how many rows the result will have.
          * To do that, filter the first non-constant column we come across,
          * or else count the number of set bytes in the filter.
          */
        size_t first_non_constant_column = 0;
        for (size_t i = 0; i < columns; ++i)
        {
            if (i != static_cast<size_t>(filter_column))
            if (!res.getByPosition(i).column->isConst())
            {
                ColumnWithNameAndType & current_column = res.getByPosition(i);
                current_column.column = current_column.column->filter(filter);
                if (current_column.column->empty())
                first_non_constant_column = i;

                if (first_non_constant_column != static_cast<size_t>(filter_column))
                    break;
            }
        }

        /// Any column that is not the filter.
        IColumn & any_not_filter_column = *res.getByPosition(filter_column == 0 ? 1 : 0).column;
        size_t filtered_rows = 0;
        if (first_non_constant_column != static_cast<size_t>(filter_column))
        {
            ColumnWithNameAndType & current_column = res.getByPosition(first_non_constant_column);
            current_column.column = current_column.column->filter(filter);
            filtered_rows = current_column.column->size();
        }
        else
        {
            filtered_rows = countBytesInFilter(filter);
        }

        /// If the current block was filtered out completely, move on to the next one.
        if (any_not_filter_column.empty())
        if (filtered_rows == 0)
            continue;

        /// The filter column itself is replaced with a constant-1 column, since after filtering nothing else would remain in it.
        res.getByPosition(filter_column).column = new ColumnConstUInt8(any_not_filter_column.size(), 1);
        /// If all rows pass through the filter.
        if (filtered_rows == filter.size())
        {
            /// Replace the filter column with a constant.
            res.getByPosition(filter_column).column = new ColumnConstUInt8(filtered_rows, 1);
            /// The remaining columns don't need to be touched.
            return res;
        }

        /// Filter the remaining columns.
        for (size_t i = 0; i < columns; ++i)
        {
            ColumnWithNameAndType & current_column = res.getByPosition(i);

            if (i == static_cast<size_t>(filter_column))
            {
                /// The filter column itself is replaced with a constant-1 column, since after filtering nothing else would remain in it.
                current_column.column = new ColumnConstUInt8(filtered_rows, 1);
                continue;
            }

            if (i == first_non_constant_column)
                continue;

            if (current_column.column->isConst())
                current_column.column = current_column.column->cut(0, filtered_rows);
            else
                current_column.column = current_column.column->filter(filter);
        }

        return res;
    }
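
To summarize the rewritten general case: filtered_rows is computed once (by filtering the first non-constant column, or by countBytesInFilter when every other column is constant), after which each column is handled by exactly one of three rules, restated here:

    if (i == static_cast<size_t>(filter_column))
        current_column.column = new ColumnConstUInt8(filtered_rows, 1);        /// all surviving rows are 1
    else if (i == first_non_constant_column)
        ;                                                                      /// already filtered while sizing
    else if (current_column.column->isConst())
        current_column.column = current_column.column->cut(0, filtered_rows);  /// constants are just resized
    else
        current_column.column = current_column.column->filter(filter);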


@@ -10,7 +10,7 @@ using Poco::SharedPtr;


JSONRowOutputStream::JSONRowOutputStream(WriteBuffer & ostr_, const Block & sample_)
    : ostr(ostr_), field_number(0), row_count(0), applied_limit(false), rows_before_limit(0)
    : dst_ostr(ostr_), ostr(dst_ostr), field_number(0), row_count(0), applied_limit(false), rows_before_limit(0)
{
    NamesAndTypesList columns(sample_.getColumnsList());
    fields.assign(columns.begin(), columns.end());

@@ -140,7 +140,7 @@ void PrettyBlockOutputStream::write(const Block & block_)
        const ColumnWithNameAndType & col = block.getByPosition(i);

        if (!no_escapes)
            writeCString("\033[1;37m", ostr);
            writeCString("\033[1m", ostr);

        if (col.type->isNumeric())
        {

@@ -29,7 +29,7 @@ void PrettyCompactBlockOutputStream::writeHeader(
            writeCString("─", ostr);

            if (!no_escapes)
                writeCString("\033[1;37m", ostr);
                writeCString("\033[1m", ostr);
            writeEscapedString(col.name, ostr);
            if (!no_escapes)
                writeCString("\033[0m", ostr);
@@ -37,7 +37,7 @@ void PrettyCompactBlockOutputStream::writeHeader(
        else
        {
            if (!no_escapes)
                writeCString("\033[1;37m", ostr);
                writeCString("\033[1m", ostr);
            writeEscapedString(col.name, ostr);
            if (!no_escapes)
                writeCString("\033[0m", ostr);
@@ -75,7 +75,7 @@ void PrettyCompactBlockOutputStream::writeRow(
    const Widths_t & name_widths)
{
    size_t columns = max_widths.size();


    writeCString("│ ", ostr);

    for (size_t j = 0; j < columns; ++j)
@@ -90,7 +90,7 @@ void PrettyCompactBlockOutputStream::writeRow(
            size_t width = get<UInt64>((*block.getByPosition(columns + j).column)[row_id]);
            for (size_t k = 0; k < max_widths[j] - width; ++k)
                writeChar(' ', ostr);


            col.type->serializeTextEscaped((*col.column)[row_id], ostr);
        }
        else
@@ -113,16 +113,16 @@ void PrettyCompactBlockOutputStream::write(const Block & block_)
        total_rows += block_.rows();
        return;
    }


    /// We will insert here columns with the computed visible-width values.
    Block block = block_;


    size_t rows = block.rows();

    Widths_t max_widths;
    Widths_t name_widths;
    calculateWidths(block, max_widths, name_widths);


    writeHeader(block, max_widths, name_widths);

    for (size_t i = 0; i < rows && total_rows + i < max_rows; ++i)

@@ -17,10 +17,10 @@ void PrettySpaceBlockOutputStream::write(const Block & block_)
        total_rows += block_.rows();
        return;
    }


    /// We will insert here columns with the computed visible-width values.
    Block block = block_;


    size_t rows = block.rows();
    size_t columns = block.columns();

@@ -48,7 +48,7 @@ void PrettySpaceBlockOutputStream::write(const Block & block_)
            writeChar(' ', ostr);

            if (!no_escapes)
                writeCString("\033[1;37m", ostr);
                writeCString("\033[1m", ostr);
            writeEscapedString(col.name, ostr);
            if (!no_escapes)
                writeCString("\033[0m", ostr);
@@ -56,7 +56,7 @@ void PrettySpaceBlockOutputStream::write(const Block & block_)
        else
        {
            if (!no_escapes)
                writeCString("\033[1;37m", ostr);
                writeCString("\033[1m", ostr);
            writeEscapedString(col.name, ostr);
            if (!no_escapes)
                writeCString("\033[0m", ostr);
@@ -81,7 +81,7 @@ void PrettySpaceBlockOutputStream::write(const Block & block_)
            size_t width = get<UInt64>((*block.getByPosition(columns + j).column)[i]);
            for (ssize_t k = 0; k < std::max(0L, static_cast<ssize_t>(max_widths[j] - width)); ++k)
                writeChar(' ', ostr);


            col.type->serializeTextEscaped((*col.column)[i], ostr);
        }
        else
@@ -31,217 +31,225 @@ FunctionPtr FunctionFactory::get(
    const String & name,
    const Context & context) const
{
    /// Somewhat suboptimal.
    static const std::unordered_map<
        std::string,
        std::function<IFunction* (const Context & context)>> functions =
    {
    #define F [](const Context & context)
        {"plus", F { return new FunctionPlus; } },
        {"minus", F { return new FunctionMinus; } },
        {"multiply", F { return new FunctionMultiply; } },
        {"divide", F { return new FunctionDivideFloating; } },
        {"intDiv", F { return new FunctionDivideIntegral; } },
        {"modulo", F { return new FunctionModulo; } },
        {"negate", F { return new FunctionNegate; } },
        {"bitAnd", F { return new FunctionBitAnd; } },
        {"bitOr", F { return new FunctionBitOr; } },
        {"bitXor", F { return new FunctionBitXor; } },
        {"bitNot", F { return new FunctionBitNot; } },
        {"bitShiftLeft", F { return new FunctionBitShiftLeft; } },
        {"bitShiftRight", F { return new FunctionBitShiftRight; } },

    if (name == "plus") return new FunctionPlus;
    else if (name == "minus") return new FunctionMinus;
    else if (name == "multiply") return new FunctionMultiply;
    else if (name == "divide") return new FunctionDivideFloating;
    else if (name == "intDiv") return new FunctionDivideIntegral;
    else if (name == "modulo") return new FunctionModulo;
    else if (name == "negate") return new FunctionNegate;
    else if (name == "bitAnd") return new FunctionBitAnd;
    else if (name == "bitOr") return new FunctionBitOr;
    else if (name == "bitXor") return new FunctionBitXor;
    else if (name == "bitNot") return new FunctionBitNot;
    else if (name == "bitShiftLeft") return new FunctionBitShiftLeft;
    else if (name == "bitShiftRight") return new FunctionBitShiftRight;
        {"equals", F { return new FunctionEquals; } },
        {"notEquals", F { return new FunctionNotEquals; } },
        {"less", F { return new FunctionLess; } },
        {"greater", F { return new FunctionGreater; } },
        {"lessOrEquals", F { return new FunctionLessOrEquals; } },
        {"greaterOrEquals", F { return new FunctionGreaterOrEquals; } },

    else if (name == "equals") return new FunctionEquals;
    else if (name == "notEquals") return new FunctionNotEquals;
    else if (name == "less") return new FunctionLess;
    else if (name == "greater") return new FunctionGreater;
    else if (name == "lessOrEquals") return new FunctionLessOrEquals;
    else if (name == "greaterOrEquals") return new FunctionGreaterOrEquals;
        {"and", F { return new FunctionAnd; } },
        {"or", F { return new FunctionOr; } },
        {"xor", F { return new FunctionXor; } },
        {"not", F { return new FunctionNot; } },

    else if (name == "and") return new FunctionAnd;
    else if (name == "or") return new FunctionOr;
    else if (name == "xor") return new FunctionXor;
    else if (name == "not") return new FunctionNot;
        {"roundToExp2", F { return new FunctionRoundToExp2; } },
        {"roundDuration", F { return new FunctionRoundDuration; } },
        {"roundAge", F { return new FunctionRoundAge; } },

    else if (name == "roundToExp2") return new FunctionRoundToExp2;
    else if (name == "roundDuration") return new FunctionRoundDuration;
    else if (name == "roundAge") return new FunctionRoundAge;
        {"empty", F { return new FunctionEmpty; } },
        {"notEmpty", F { return new FunctionNotEmpty; } },
        {"length", F { return new FunctionLength; } },
        {"lengthUTF8", F { return new FunctionLengthUTF8; } },
        {"lower", F { return new FunctionLower; } },
        {"upper", F { return new FunctionUpper; } },
        {"lowerUTF8", F { return new FunctionLowerUTF8; } },
        {"upperUTF8", F { return new FunctionUpperUTF8; } },
        {"reverse", F { return new FunctionReverse; } },
        {"reverseUTF8", F { return new FunctionReverseUTF8; } },
        {"concat", F { return new FunctionConcat; } },
        {"substring", F { return new FunctionSubstring; } },
        {"replaceOne", F { return new FunctionReplaceOne; } },
        {"replaceAll", F { return new FunctionReplaceAll; } },
        {"replaceRegexpOne", F { return new FunctionReplaceRegexpOne; } },
        {"replaceRegexpAll", F { return new FunctionReplaceRegexpAll; } },
        {"substringUTF8", F { return new FunctionSubstringUTF8; } },

    else if (name == "empty") return new FunctionEmpty;
    else if (name == "notEmpty") return new FunctionNotEmpty;
    else if (name == "length") return new FunctionLength;
    else if (name == "lengthUTF8") return new FunctionLengthUTF8;
    else if (name == "lower") return new FunctionLower;
    else if (name == "upper") return new FunctionUpper;
    else if (name == "lowerUTF8") return new FunctionLowerUTF8;
    else if (name == "upperUTF8") return new FunctionUpperUTF8;
    else if (name == "reverse") return new FunctionReverse;
    else if (name == "reverseUTF8") return new FunctionReverseUTF8;
    else if (name == "concat") return new FunctionConcat;
    else if (name == "substring") return new FunctionSubstring;
    else if (name == "replaceOne") return new FunctionReplaceOne;
    else if (name == "replaceAll") return new FunctionReplaceAll;
    else if (name == "replaceRegexpOne") return new FunctionReplaceRegexpOne;
    else if (name == "replaceRegexpAll") return new FunctionReplaceRegexpAll;
    else if (name == "substringUTF8") return new FunctionSubstringUTF8;
        {"toUInt8", F { return new FunctionToUInt8; } },
        {"toUInt16", F { return new FunctionToUInt16; } },
        {"toUInt32", F { return new FunctionToUInt32; } },
        {"toUInt64", F { return new FunctionToUInt64; } },
        {"toInt8", F { return new FunctionToInt8; } },
        {"toInt16", F { return new FunctionToInt16; } },
        {"toInt32", F { return new FunctionToInt32; } },
        {"toInt64", F { return new FunctionToInt64; } },
        {"toFloat32", F { return new FunctionToFloat32; } },
        {"toFloat64", F { return new FunctionToFloat64; } },
        {"toDate", F { return new FunctionToDate; } },
        {"toDateTime", F { return new FunctionToDateTime; } },
        {"toString", F { return new FunctionToString; } },
        {"toFixedString", F { return new FunctionToFixedString; } },
        {"toStringCutToZero", F { return new FunctionToStringCutToZero; } },

    else if (name == "toUInt8") return new FunctionToUInt8;
    else if (name == "toUInt16") return new FunctionToUInt16;
    else if (name == "toUInt32") return new FunctionToUInt32;
    else if (name == "toUInt64") return new FunctionToUInt64;
    else if (name == "toInt8") return new FunctionToInt8;
    else if (name == "toInt16") return new FunctionToInt16;
    else if (name == "toInt32") return new FunctionToInt32;
    else if (name == "toInt64") return new FunctionToInt64;
    else if (name == "toFloat32") return new FunctionToFloat32;
    else if (name == "toFloat64") return new FunctionToFloat64;
    else if (name == "toDate") return new FunctionToDate;
    else if (name == "toDateTime") return new FunctionToDateTime;
    else if (name == "toString") return new FunctionToString;
    else if (name == "toFixedString") return new FunctionToFixedString;
    else if (name == "toStringCutToZero") return new FunctionToStringCutToZero;
        {"reinterpretAsUInt8", F { return new FunctionReinterpretAsUInt8; } },
        {"reinterpretAsUInt16", F { return new FunctionReinterpretAsUInt16; } },
        {"reinterpretAsUInt32", F { return new FunctionReinterpretAsUInt32; } },
        {"reinterpretAsUInt64", F { return new FunctionReinterpretAsUInt64; } },
        {"reinterpretAsInt8", F { return new FunctionReinterpretAsInt8; } },
        {"reinterpretAsInt16", F { return new FunctionReinterpretAsInt16; } },
        {"reinterpretAsInt32", F { return new FunctionReinterpretAsInt32; } },
        {"reinterpretAsInt64", F { return new FunctionReinterpretAsInt64; } },
        {"reinterpretAsFloat32", F { return new FunctionReinterpretAsFloat32; } },
        {"reinterpretAsFloat64", F { return new FunctionReinterpretAsFloat64; } },
        {"reinterpretAsDate", F { return new FunctionReinterpretAsDate; } },
        {"reinterpretAsDateTime", F { return new FunctionReinterpretAsDateTime; } },
        {"reinterpretAsString", F { return new FunctionReinterpretAsString; } },

    else if (name == "reinterpretAsUInt8") return new FunctionReinterpretAsUInt8;
    else if (name == "reinterpretAsUInt16") return new FunctionReinterpretAsUInt16;
    else if (name == "reinterpretAsUInt32") return new FunctionReinterpretAsUInt32;
    else if (name == "reinterpretAsUInt64") return new FunctionReinterpretAsUInt64;
    else if (name == "reinterpretAsInt8") return new FunctionReinterpretAsInt8;
    else if (name == "reinterpretAsInt16") return new FunctionReinterpretAsInt16;
    else if (name == "reinterpretAsInt32") return new FunctionReinterpretAsInt32;
    else if (name == "reinterpretAsInt64") return new FunctionReinterpretAsInt64;
    else if (name == "reinterpretAsFloat32") return new FunctionReinterpretAsFloat32;
    else if (name == "reinterpretAsFloat64") return new FunctionReinterpretAsFloat64;
    else if (name == "reinterpretAsDate") return new FunctionReinterpretAsDate;
    else if (name == "reinterpretAsDateTime") return new FunctionReinterpretAsDateTime;
    else if (name == "reinterpretAsString") return new FunctionReinterpretAsString;
        {"toYear", F { return new FunctionToYear; } },
        {"toMonth", F { return new FunctionToMonth; } },
        {"toDayOfMonth", F { return new FunctionToDayOfMonth; } },
        {"toDayOfWeek", F { return new FunctionToDayOfWeek; } },
        {"toHour", F { return new FunctionToHour; } },
        {"toMinute", F { return new FunctionToMinute; } },
        {"toSecond", F { return new FunctionToSecond; } },
        {"toMonday", F { return new FunctionToMonday; } },
        {"toStartOfMonth", F { return new FunctionToStartOfMonth; } },
        {"toStartOfQuarter", F { return new FunctionToStartOfQuarter; } },
        {"toStartOfYear", F { return new FunctionToStartOfYear; } },
        {"toStartOfMinute", F { return new FunctionToStartOfMinute; } },
        {"toStartOfHour", F { return new FunctionToStartOfHour; } },
        {"toRelativeYearNum", F { return new FunctionToRelativeYearNum; } },
        {"toRelativeMonthNum", F { return new FunctionToRelativeMonthNum; } },
        {"toRelativeWeekNum", F { return new FunctionToRelativeWeekNum; } },
        {"toRelativeDayNum", F { return new FunctionToRelativeDayNum; } },
        {"toRelativeHourNum", F { return new FunctionToRelativeHourNum; } },
        {"toRelativeMinuteNum", F { return new FunctionToRelativeMinuteNum; } },
        {"toRelativeSecondNum", F { return new FunctionToRelativeSecondNum; } },
        {"toTime", F { return new FunctionToTime; } },
        {"now", F { return new FunctionNow; } },
        {"timeSlot", F { return new FunctionTimeSlot; } },
        {"timeSlots", F { return new FunctionTimeSlots; } },

    else if (name == "toYear") return new FunctionToYear;
    else if (name == "toMonth") return new FunctionToMonth;
    else if (name == "toDayOfMonth") return new FunctionToDayOfMonth;
    else if (name == "toDayOfWeek") return new FunctionToDayOfWeek;
    else if (name == "toHour") return new FunctionToHour;
    else if (name == "toMinute") return new FunctionToMinute;
    else if (name == "toSecond") return new FunctionToSecond;
    else if (name == "toMonday") return new FunctionToMonday;
    else if (name == "toStartOfMonth") return new FunctionToStartOfMonth;
    else if (name == "toStartOfQuarter") return new FunctionToStartOfQuarter;
    else if (name == "toStartOfYear") return new FunctionToStartOfYear;
    else if (name == "toStartOfMinute") return new FunctionToStartOfMinute;
    else if (name == "toStartOfHour") return new FunctionToStartOfHour;
    else if (name == "toRelativeYearNum") return new FunctionToRelativeYearNum;
    else if (name == "toRelativeMonthNum") return new FunctionToRelativeMonthNum;
    else if (name == "toRelativeWeekNum") return new FunctionToRelativeWeekNum;
    else if (name == "toRelativeDayNum") return new FunctionToRelativeDayNum;
    else if (name == "toRelativeHourNum") return new FunctionToRelativeHourNum;
    else if (name == "toRelativeMinuteNum") return new FunctionToRelativeMinuteNum;
    else if (name == "toRelativeSecondNum") return new FunctionToRelativeSecondNum;
    else if (name == "toTime") return new FunctionToTime;
    else if (name == "now") return new FunctionNow;
    else if (name == "timeSlot") return new FunctionTimeSlot;
    else if (name == "timeSlots") return new FunctionTimeSlots;
        {"position", F { return new FunctionPosition; } },
        {"positionUTF8", F { return new FunctionPositionUTF8; } },
        {"match", F { return new FunctionMatch; } },
        {"like", F { return new FunctionLike; } },
        {"notLike", F { return new FunctionNotLike; } },
        {"extract", F { return new FunctionExtract; } },
        {"extractAll", F { return new FunctionExtractAll; } },

    else if (name == "position") return new FunctionPosition;
    else if (name == "positionUTF8") return new FunctionPositionUTF8;
    else if (name == "match") return new FunctionMatch;
    else if (name == "like") return new FunctionLike;
    else if (name == "notLike") return new FunctionNotLike;
    else if (name == "extract") return new FunctionExtract;
    else if (name == "extractAll") return new FunctionExtractAll;
        {"halfMD5", F { return new FunctionHalfMD5; } },
        {"sipHash64", F { return new FunctionSipHash64; } },
        {"cityHash64", F { return new FunctionCityHash64; } },
        {"intHash32", F { return new FunctionIntHash32; } },
        {"intHash64", F { return new FunctionIntHash64; } },

    else if (name == "halfMD5") return new FunctionHalfMD5;
    else if (name == "sipHash64") return new FunctionSipHash64;
    else if (name == "cityHash64") return new FunctionCityHash64;
    else if (name == "intHash32") return new FunctionIntHash32;
    else if (name == "intHash64") return new FunctionIntHash64;
        {"IPv4NumToString", F { return new FunctionIPv4NumToString; } },
        {"IPv4StringToNum", F { return new FunctionIPv4StringToNum; } },
        {"hex", F { return new FunctionHex; } },
        {"unhex", F { return new FunctionUnhex; } },
        {"bitmaskToList", F { return new FunctionBitmaskToList; } },
        {"bitmaskToArray", F { return new FunctionBitmaskToArray; } },

    else if (name == "IPv4NumToString") return new FunctionIPv4NumToString;
    else if (name == "IPv4StringToNum") return new FunctionIPv4StringToNum;
    else if (name == "hex") return new FunctionHex;
    else if (name == "unhex") return new FunctionUnhex;
    else if (name == "bitmaskToList") return new FunctionBitmaskToList;
    else if (name == "bitmaskToArray") return new FunctionBitmaskToArray;
        {"rand", F { return new FunctionRand; } },
        {"rand64", F { return new FunctionRand64; } },

    else if (name == "rand") return new FunctionRand;
    else if (name == "rand64") return new FunctionRand64;
        {"protocol", F { return new FunctionProtocol; } },
        {"domain", F { return new FunctionDomain; } },
        {"domainWithoutWWW", F { return new FunctionDomainWithoutWWW; } },
        {"topLevelDomain", F { return new FunctionTopLevelDomain; } },
        {"path", F { return new FunctionPath; } },
        {"queryString", F { return new FunctionQueryString; } },
        {"fragment", F { return new FunctionFragment; } },
        {"queryStringAndFragment", F { return new FunctionQueryStringAndFragment; } },
        {"extractURLParameter", F { return new FunctionExtractURLParameter; } },
        {"extractURLParameters", F { return new FunctionExtractURLParameters; } },
        {"extractURLParameterNames", F { return new FunctionExtractURLParameterNames; } },
        {"URLHierarchy", F { return new FunctionURLHierarchy; } },
        {"URLPathHierarchy", F { return new FunctionURLPathHierarchy; } },
        {"cutWWW", F { return new FunctionCutWWW; } },
|
||||
{"cutQueryString", F { return new FunctionCutQueryString; } },
|
||||
{"cutFragment", F { return new FunctionCutFragment; } },
|
||||
{"cutQueryStringAndFragment", F { return new FunctionCutQueryStringAndFragment; } },
|
||||
{"cutURLParameter", F { return new FunctionCutURLParameter; } },
|
||||
|
||||
else if (name == "protocol") return new FunctionProtocol;
|
||||
else if (name == "domain") return new FunctionDomain;
|
||||
else if (name == "domainWithoutWWW") return new FunctionDomainWithoutWWW;
|
||||
else if (name == "topLevelDomain") return new FunctionTopLevelDomain;
|
||||
else if (name == "path") return new FunctionPath;
|
||||
else if (name == "queryString") return new FunctionQueryString;
|
||||
else if (name == "fragment") return new FunctionFragment;
|
||||
else if (name == "queryStringAndFragment") return new FunctionQueryStringAndFragment;
|
||||
else if (name == "extractURLParameter") return new FunctionExtractURLParameter;
|
||||
else if (name == "extractURLParameters") return new FunctionExtractURLParameters;
|
||||
else if (name == "extractURLParameterNames") return new FunctionExtractURLParameterNames;
|
||||
else if (name == "URLHierarchy") return new FunctionURLHierarchy;
|
||||
else if (name == "URLPathHierarchy") return new FunctionURLPathHierarchy;
|
||||
else if (name == "cutWWW") return new FunctionCutWWW;
|
||||
else if (name == "cutQueryString") return new FunctionCutQueryString;
|
||||
else if (name == "cutFragment") return new FunctionCutFragment;
|
||||
else if (name == "cutQueryStringAndFragment") return new FunctionCutQueryStringAndFragment;
|
||||
else if (name == "cutURLParameter") return new FunctionCutURLParameter;
|
||||
{"hostName", F { return new FunctionHostName; } },
|
||||
{"visibleWidth", F { return new FunctionVisibleWidth; } },
|
||||
{"bar", F { return new FunctionBar; } },
|
||||
{"toTypeName", F { return new FunctionToTypeName; } },
|
||||
{"blockSize", F { return new FunctionBlockSize; } },
|
||||
{"sleep", F { return new FunctionSleep; } },
|
||||
{"materialize", F { return new FunctionMaterialize; } },
|
||||
{"ignore", F { return new FunctionIgnore; } },
|
||||
{"arrayJoin", F { return new FunctionArrayJoin; } },
|
||||
|
||||
else if (name == "hostName") return new FunctionHostName;
|
||||
else if (name == "visibleWidth") return new FunctionVisibleWidth;
|
||||
else if (name == "toTypeName") return new FunctionToTypeName;
|
||||
else if (name == "blockSize") return new FunctionBlockSize;
|
||||
else if (name == "sleep") return new FunctionSleep;
|
||||
else if (name == "materialize") return new FunctionMaterialize;
|
||||
else if (name == "ignore") return new FunctionIgnore;
|
||||
else if (name == "arrayJoin") return new FunctionArrayJoin;
|
||||
{"tuple", F { return new FunctionTuple; } },
|
||||
{"tupleElement", F { return new FunctionTupleElement; } },
|
||||
{"in", F { return new FunctionIn(false, false); } },
|
||||
{"notIn", F { return new FunctionIn(true, false); } },
|
||||
{"globalIn", F { return new FunctionIn(false, true); } },
|
||||
{"globalNotIn", F { return new FunctionIn(true, true); } },
|
||||
|
||||
else if (name == "tuple") return new FunctionTuple;
|
||||
else if (name == "tupleElement") return new FunctionTupleElement;
|
||||
else if (name == "in") return new FunctionIn(false, false);
|
||||
else if (name == "notIn") return new FunctionIn(true, false);
|
||||
else if (name == "globalIn") return new FunctionIn(false, true);
|
||||
else if (name == "globalNotIn") return new FunctionIn(true, true);
|
||||
{"array", F { return new FunctionArray; } },
|
||||
{"arrayElement", F { return new FunctionArrayElement; } },
|
||||
{"has", F { return new FunctionHas; } },
|
||||
{"indexOf", F { return new FunctionIndexOf; } },
|
||||
{"countEqual", F { return new FunctionCountEqual; } },
|
||||
{"arrayEnumerate", F { return new FunctionArrayEnumerate; } },
|
||||
{"arrayEnumerateUniq", F { return new FunctionArrayEnumerateUniq; } },
|
||||
|
||||
else if (name == "array") return new FunctionArray;
|
||||
else if (name == "arrayElement") return new FunctionArrayElement;
|
||||
else if (name == "has") return new FunctionHas;
|
||||
else if (name == "indexOf") return new FunctionIndexOf;
|
||||
else if (name == "countEqual") return new FunctionCountEqual;
|
||||
else if (name == "arrayEnumerate") return new FunctionArrayEnumerate;
|
||||
else if (name == "arrayEnumerateUniq") return new FunctionArrayEnumerateUniq;
|
||||
{"arrayMap", F { return new FunctionArrayMap; } },
|
||||
{"arrayFilter", F { return new FunctionArrayFilter; } },
|
||||
{"arrayCount", F { return new FunctionArrayCount; } },
|
||||
{"arrayExists", F { return new FunctionArrayExists; } },
|
||||
{"arrayAll", F { return new FunctionArrayAll; } },
|
||||
{"arraySum", F { return new FunctionArraySum; } },
|
||||
|
||||
else if (name == "arrayMap") return new FunctionArrayMap;
|
||||
else if (name == "arrayFilter") return new FunctionArrayFilter;
|
||||
else if (name == "arrayCount") return new FunctionArrayCount;
|
||||
else if (name == "arrayExists") return new FunctionArrayExists;
|
||||
else if (name == "arrayAll") return new FunctionArrayAll;
|
||||
else if (name == "arraySum") return new FunctionArraySum;
|
||||
{"alphaTokens", F { return new FunctionAlphaTokens; } },
|
||||
{"splitByChar", F { return new FunctionSplitByChar; } },
|
||||
{"splitByString", F { return new FunctionSplitByString; } },
|
||||
|
||||
else if (name == "alphaTokens") return new FunctionAlphaTokens;
|
||||
else if (name == "splitByChar") return new FunctionSplitByChar;
|
||||
else if (name == "splitByString") return new FunctionSplitByString;
|
||||
{"if", F { return new FunctionIf; } },
|
||||
|
||||
else if (name == "if") return new FunctionIf;
|
||||
{"regionToCity", F { return new FunctionRegionToCity(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"regionToArea", F { return new FunctionRegionToArea(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"regionToCountry", F { return new FunctionRegionToCountry(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"regionToContinent", F { return new FunctionRegionToContinent(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"OSToRoot", F { return new FunctionOSToRoot(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"SEToRoot", F { return new FunctionSEToRoot(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"categoryToRoot", F { return new FunctionCategoryToRoot(context.getDictionaries().getCategoriesHierarchy()); } },
|
||||
{"categoryToSecondLevel", F { return new FunctionCategoryToSecondLevel(context.getDictionaries().getCategoriesHierarchy()); } },
|
||||
{"regionIn", F { return new FunctionRegionIn(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"OSIn", F { return new FunctionOSIn(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"SEIn", F { return new FunctionSEIn(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"categoryIn", F { return new FunctionCategoryIn(context.getDictionaries().getCategoriesHierarchy()); } },
|
||||
{"regionHierarchy", F { return new FunctionRegionHierarchy(context.getDictionaries().getRegionsHierarchies()); } },
|
||||
{"OSHierarchy", F { return new FunctionOSHierarchy(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"SEHierarchy", F { return new FunctionSEHierarchy(context.getDictionaries().getTechDataHierarchy()); } },
|
||||
{"categoryHierarchy", F { return new FunctionCategoryHierarchy(context.getDictionaries().getCategoriesHierarchy()); } },
|
||||
{"regionToName", F { return new FunctionRegionToName(context.getDictionaries().getRegionsNames()); } },
|
||||
|
||||
else if (name == "regionToCity") return new FunctionRegionToCity(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "regionToArea") return new FunctionRegionToArea(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "regionToCountry") return new FunctionRegionToCountry(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "regionToContinent") return new FunctionRegionToContinent(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "OSToRoot") return new FunctionOSToRoot(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "SEToRoot") return new FunctionSEToRoot(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "categoryToRoot") return new FunctionCategoryToRoot(context.getDictionaries().getCategoriesHierarchy());
|
||||
else if (name == "categoryToSecondLevel") return new FunctionCategoryToSecondLevel(context.getDictionaries().getCategoriesHierarchy());
|
||||
else if (name == "regionIn") return new FunctionRegionIn(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "OSIn") return new FunctionOSIn(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "SEIn") return new FunctionSEIn(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "categoryIn") return new FunctionCategoryIn(context.getDictionaries().getCategoriesHierarchy());
|
||||
else if (name == "regionHierarchy") return new FunctionRegionHierarchy(context.getDictionaries().getRegionsHierarchies());
|
||||
else if (name == "OSHierarchy") return new FunctionOSHierarchy(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "SEHierarchy") return new FunctionSEHierarchy(context.getDictionaries().getTechDataHierarchy());
|
||||
else if (name == "categoryHierarchy") return new FunctionCategoryHierarchy(context.getDictionaries().getCategoriesHierarchy());
|
||||
else if (name == "regionToName") return new FunctionRegionToName(context.getDictionaries().getRegionsNames());
|
||||
|
||||
else if (name == "visitParamHas") return new FunctionVisitParamHas;
|
||||
else if (name == "visitParamExtractUInt") return new FunctionVisitParamExtractUInt;
|
||||
else if (name == "visitParamExtractInt") return new FunctionVisitParamExtractInt;
|
||||
else if (name == "visitParamExtractFloat") return new FunctionVisitParamExtractFloat;
|
||||
else if (name == "visitParamExtractBool") return new FunctionVisitParamExtractBool;
|
||||
else if (name == "visitParamExtractRaw") return new FunctionVisitParamExtractRaw;
|
||||
else if (name == "visitParamExtractString") return new FunctionVisitParamExtractString;
|
||||
{"visitParamHas", F { return new FunctionVisitParamHas; } },
|
||||
{"visitParamExtractUInt", F { return new FunctionVisitParamExtractUInt; } },
|
||||
{"visitParamExtractInt", F { return new FunctionVisitParamExtractInt; } },
|
||||
{"visitParamExtractFloat", F { return new FunctionVisitParamExtractFloat; } },
|
||||
{"visitParamExtractBool", F { return new FunctionVisitParamExtractBool; } },
|
||||
{"visitParamExtractRaw", F { return new FunctionVisitParamExtractRaw; } },
|
||||
{"visitParamExtractString", F { return new FunctionVisitParamExtractString; } },
|
||||
};
|
||||
|
||||
auto it = functions.find(name);
|
||||
if (functions.end() != it)
|
||||
return it->second(context);
|
||||
else
|
||||
throw Exception("Unknown function " + name, ErrorCodes::UNKNOWN_FUNCTION);
|
||||
}
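A minimal standalone sketch of the lookup pattern this factory moves to, assuming `F` is a macro expanding to a lambda that takes the context (the `Context`/`IFunction` stand-ins and the macro shape here are illustrative assumptions, not the project's exact definitions):

```cpp
#include <functional>
#include <stdexcept>
#include <string>
#include <unordered_map>

struct Context {};          /// stand-in for DB::Context
struct IFunction { virtual ~IFunction() = default; };
struct FunctionToYear : IFunction {};

/// Presumed shape of the F macro: a lambda taking the context.
#define F [](const Context & context) -> IFunction *

IFunction * getFunction(const std::string & name, const Context & context)
{
    /// Built once; each entry is a small factory closure.
    static const std::unordered_map<std::string, std::function<IFunction * (const Context &)>> functions
    {
        {"toYear", F { return new FunctionToYear; }},
    };

    auto it = functions.find(name);
    if (functions.end() != it)
        return it->second(context);
    else
        throw std::runtime_error("Unknown function " + name);
}
```

Compared with the long `else if` chain, the map gives O(1) average lookup and keeps each registration on a single line.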

@ -226,7 +226,7 @@ void FunctionVisibleWidth::execute(Block & block, const ColumnNumbers & argument
}
else if (const ColumnTuple * col = typeid_cast<const ColumnTuple *>(&*column))
{
/// Compute the visible width of each nested column separately, and sum the results.
Block nested_block = col->getData();
size_t columns = nested_block.columns();

@ -206,7 +206,7 @@ void Aggregator::executeImpl(
if (overflow && !overflow_row)
continue;

/// If a new key was inserted, initialize the aggregate function states, and possibly something related to the key.
if (inserted)
{
method.onNewKey(it, keys_size, i, keys, *aggregates_pool);

@ -1,4 +1,5 @@
#include <DB/Interpreters/Cluster.h>
#include <DB/Common/escapeForFileName.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Poco/Util/Application.h>
#include <Poco/Net/NetworkInterface.h>

@ -7,17 +8,20 @@
namespace DB
{


Cluster::Address::Address(const String & config_prefix)
{
Poco::Util::AbstractConfiguration & config = Poco::Util::Application::instance().config();
host_port = Poco::Net::SocketAddress(config.getString(config_prefix + ".host"),
config.getInt(config_prefix + ".port"));
auto & config = Poco::Util::Application::instance().config();

host_port = Poco::Net::SocketAddress(
config.getString(config_prefix + ".host"),
config.getInt(config_prefix + ".port")
);

user = config.getString(config_prefix + ".user", "default");
password = config.getString(config_prefix + ".password", "");
}


Cluster::Address::Address(const String & host_port_, const String & user_, const String & password_)
: user(user_), password(password_)
{
@ -30,6 +34,18 @@ Cluster::Address::Address(const String & host_port_, const String & user_, const
host_port = Poco::Net::SocketAddress(host_port_, default_port);
}

namespace
{
inline std::string addressToDirName(const Cluster::Address & address)
{
return
escapeForFileName(address.user) +
(address.password.empty() ? "" : (':' + escapeForFileName(address.password))) + '@' +
escapeForFileName(address.host_port.host().toString()) + ':' +
std::to_string(address.host_port.port());
}
}
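For illustration, a self-contained sketch of the `user[:password]@host:port` directory-name scheme built above; `escape` is a hypothetical stand-in for `escapeForFileName` (assumed here to percent-encode characters unsafe in file names):

```cpp
#include <iostream>
#include <string>

/// Hypothetical stand-in for escapeForFileName: escape ':' , '/' and '%' as %XX.
static std::string escape(const std::string & s)
{
    static const char hex[] = "0123456789ABCDEF";
    std::string res;
    for (char c : s)
    {
        if (c == ':' || c == '/' || c == '%')
        {
            res += '%';
            res += hex[(static_cast<unsigned char>(c) >> 4) & 0xF];
            res += hex[static_cast<unsigned char>(c) & 0xF];
        }
        else
            res += c;
    }
    return res;
}

int main()
{
    std::string user = "default", password = "", host = "example-host";
    int port = 9000;

    /// Mirrors addressToDirName above.
    std::string dir = escape(user)
        + (password.empty() ? "" : (':' + escape(password))) + '@'
        + escape(host) + ':' + std::to_string(port);

    std::cout << dir << '\n';   /// prints: default@example-host:9000
}
```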

Clusters::Clusters(const Settings & settings, const DataTypeFactory & data_type_factory, const String & config_name)
{
@ -51,29 +67,83 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa
Poco::Util::AbstractConfiguration::Keys config_keys;
config.keys(cluster_name, config_keys);

String config_prefix = cluster_name + ".";
const auto & config_prefix = cluster_name + ".";

for (Poco::Util::AbstractConfiguration::Keys::const_iterator it = config_keys.begin(); it != config_keys.end(); ++it)
for (auto it = config_keys.begin(); it != config_keys.end(); ++it)
{
if (0 == strncmp(it->c_str(), "node", strlen("node")))
{
addresses.push_back(Address(config_prefix + *it));
const auto & prefix = config_prefix + *it;
const auto weight = config.getInt(prefix + ".weight", 1);
if (weight == 0)
continue;

addresses.emplace_back(prefix);

slot_to_shard.insert(std::end(slot_to_shard), weight, shard_info_vec.size());
if (const auto is_local = isLocal(addresses.back()))
shard_info_vec.push_back({{}, weight, is_local });
else
shard_info_vec.push_back({{addressToDirName(addresses.back())}, weight, is_local});
}
else if (0 == strncmp(it->c_str(), "shard", strlen("shard")))
{
Poco::Util::AbstractConfiguration::Keys replica_keys;
config.keys(config_prefix + *it, replica_keys);

addresses_with_failover.push_back(Addresses());
addresses_with_failover.emplace_back();
Addresses & replica_addresses = addresses_with_failover.back();

for (Poco::Util::AbstractConfiguration::Keys::const_iterator jt = replica_keys.begin(); jt != replica_keys.end(); ++jt)
const auto & partial_prefix = config_prefix + *it + ".";
const auto weight = config.getInt(partial_prefix + ".weight", 1);
if (weight == 0)
continue;

const auto internal_replication = config.getBool(partial_prefix + ".internal_replication", false);

/** in case of internal_replication we will be appending names to
* the first element of the vector; otherwise we will just .emplace_back
*/
std::vector<std::string> dir_names{};
auto has_local_node = false;

auto first = true;
for (auto jt = replica_keys.begin(); jt != replica_keys.end(); ++jt)
{
if (0 == strncmp(jt->data(), "weight", strlen("weight")) ||
0 == strncmp(jt->data(), "internal_replication", strlen("internal_replication")))
continue;

if (0 == strncmp(jt->c_str(), "replica", strlen("replica")))
replica_addresses.push_back(Address(config_prefix + *it + "." + *jt));
{
replica_addresses.emplace_back(partial_prefix + *jt);

if (isLocal(replica_addresses.back()))
{
has_local_node = true;
}
else
{
if (internal_replication)
{
auto dir_name = addressToDirName(replica_addresses.back());
if (first)
dir_names.emplace_back(std::move(dir_name));
else
dir_names.front() += "," + dir_name;
}
else
dir_names.emplace_back(addressToDirName(replica_addresses.back()));

if (first) first = false;
}
}
else
throw Exception("Unknown element in config: " + *jt, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
}

slot_to_shard.insert(std::end(slot_to_shard), weight, shard_info_vec.size());
shard_info_vec.push_back({std::move(dir_names), weight, has_local_node});
}
else
throw Exception("Unknown element in config: " + *it, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);

@ -99,7 +169,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa
}
else
{
replicas.push_back(new ConnectionPool(
replicas.emplace_back(new ConnectionPool(
settings.distributed_connections_pool_size,
jt->host_port.host().toString(), jt->host_port.port(), "", jt->user, jt->password, data_type_factory, "server", Protocol::Compression::Enable,
saturate(settings.connect_timeout_with_failover_ms, settings.limits.max_execution_time),

@ -111,7 +181,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa
if (has_local_replics)
++local_nodes_num;
else
pools.push_back(new ConnectionPoolWithFailover(replicas, settings.load_balancing, settings.connections_with_failover_max_tries));
pools.emplace_back(new ConnectionPoolWithFailover(replicas, settings.load_balancing, settings.connections_with_failover_max_tries));
}
}
else if (addresses.size())

@ -124,7 +194,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa
}
else
{
pools.push_back(new ConnectionPool(
pools.emplace_back(new ConnectionPool(
settings.distributed_connections_pool_size,
it->host_port.host().toString(), it->host_port.port(), "", it->user, it->password, data_type_factory, "server", Protocol::Compression::Enable,
saturate(settings.connect_timeout, settings.limits.max_execution_time),

@ -145,8 +215,8 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa
{
Addresses current;
for (size_t j = 0; j < names[i].size(); ++j)
current.push_back(Address(names[i][j], username, password));
addresses_with_failover.push_back(current);
current.emplace_back(names[i][j], username, password);
addresses_with_failover.emplace_back(current);
}

for (AddressesWithFailover::const_iterator it = addresses_with_failover.begin(); it != addresses_with_failover.end(); ++it)

@ -156,14 +226,14 @@ Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_fa

for (Addresses::const_iterator jt = it->begin(); jt != it->end(); ++jt)
{
replicas.push_back(new ConnectionPool(
replicas.emplace_back(new ConnectionPool(
settings.distributed_connections_pool_size,
jt->host_port.host().toString(), jt->host_port.port(), "", jt->user, jt->password, data_type_factory, "server", Protocol::Compression::Enable,
saturate(settings.connect_timeout_with_failover_ms, settings.limits.max_execution_time),
saturate(settings.receive_timeout, settings.limits.max_execution_time),
saturate(settings.send_timeout, settings.limits.max_execution_time)));
}
pools.push_back(new ConnectionPoolWithFailover(replicas, settings.load_balancing, settings.connections_with_failover_max_tries));
pools.emplace_back(new ConnectionPoolWithFailover(replicas, settings.load_balancing, settings.connections_with_failover_max_tries));
}
}

@ -183,8 +253,8 @@ bool Cluster::isLocal(const Address & address)
/// - its port matches the port the server is listening on;
/// - its host resolves to a set of addresses, one of which matches an address of one of the server's network interfaces,
/// then this shard should always be accessed locally, without inter-process communication
UInt16 clickhouse_port = Poco::Util::Application::instance().config().getInt("tcp_port", 0);
static Poco::Net::NetworkInterface::NetworkInterfaceList interfaces = Poco::Net::NetworkInterface::list();
const UInt16 clickhouse_port = Poco::Util::Application::instance().config().getInt("tcp_port", 0);
static auto interfaces = Poco::Net::NetworkInterface::list();

if (clickhouse_port == address.host_port.port() &&
interfaces.end() != std::find_if(interfaces.begin(), interfaces.end(),

@ -459,6 +459,17 @@ void Context::setDefaultReplicaName(const String & name)
shared->default_replica_name = name;
}

const Macros& Context::getMacros() const
{
return shared->macros;
}

void Context::setMacros(Macros && macros)
{
/// We rely on this assignment happening once, at server startup. If that is ever not the case, a mutex should be used.
shared->macros = macros;
}


Context & Context::getSessionContext()
{

@ -40,7 +40,8 @@ namespace DB
/** Calls to these functions in the GROUP BY statement would be
* replaced by their immediate argument.
*/
const std::unordered_set<String> injectiveFunctionNames{
const std::unordered_set<String> injective_function_names
{
"negate",
"bitNot",
"reverse",

@ -68,8 +69,8 @@ void ExpressionAnalyzer::init()
/// Common subexpression elimination. Rewrite rules.
normalizeTree();

/// GROUP BY injective function elimination
eliminateInjectives();
/// GROUP BY injective function elimination.
optimizeGroupBy();

/// array_join_alias_to_name, array_join_result_to_source.
getArrayJoinedColumns();

@ -122,7 +123,7 @@ void ExpressionAnalyzer::analyzeAggregation()
if (select_query->group_expression_list)
{
NameSet unique_keys;
const ASTs & group_asts = select_query->group_expression_list->children;
auto & group_asts = select_query->group_expression_list->children;
for (size_t i = 0; i < group_asts.size(); ++i)
{
getRootActions(group_asts[i], true, false, temp_actions);

@ -135,6 +136,17 @@ void ExpressionAnalyzer::analyzeAggregation()

const auto & col = block.getByName(column_name);

/// constant expressions have a non-null column pointer at this stage
if (const auto is_constexpr = col.column)
{
if (i < group_asts.size() - 1)
group_asts[i] = std::move(group_asts.back());

group_asts.pop_back();
i -= 1;
continue;
}

NameAndTypePair key{column_name, col.type};
aggregation_keys.push_back(key);

@ -145,6 +157,12 @@ void ExpressionAnalyzer::analyzeAggregation()
aggregated_columns.push_back(std::move(key));
}
}

if (group_asts.empty())
{
select_query->group_expression_list = nullptr;
has_aggregation = select_query->having_expression || aggregate_descriptions.size();
}
}

for (size_t i = 0; i < aggregate_descriptions.size(); ++i)

@ -426,7 +444,7 @@ void ExpressionAnalyzer::normalizeTreeImpl(ASTPtr & ast, MapOfASTs & finished_as
}


void ExpressionAnalyzer::eliminateInjectives()
void ExpressionAnalyzer::optimizeGroupBy()
{
if (!(select_query && select_query->group_expression_list))
return;

@ -438,7 +456,8 @@ void ExpressionAnalyzer::eliminateInjectives()
auto & group_exprs = select_query->group_expression_list->children;

/// removes the expression at index idx by making it the last one and calling .pop_back()
const auto remove_expr_at_index = [&group_exprs] (const size_t idx) {
const auto remove_expr_at_index = [&group_exprs] (const size_t idx)
{
if (idx < group_exprs.size() - 1)
group_exprs[idx] = std::move(group_exprs.back());

@ -446,13 +465,16 @@ void ExpressionAnalyzer::eliminateInjectives()
};

/// iterate over each GROUP BY expression, eliminate injective function calls and literals
for (size_t i = 0; i < group_exprs.size(); ++i)
for (size_t i = 0; i < group_exprs.size();)
{
if (const auto function = typeid_cast<ASTFunction*>(group_exprs[i].get()))
{
/// assert the function is injective
if (!injectiveFunctionNames.count(function->name))
if (!injective_function_names.count(function->name))
{
++i;
continue;
}

/// copy the shared pointer to the args in order to ensure their lifetime
auto args_ast = function->arguments;

@ -461,7 +483,6 @@ void ExpressionAnalyzer::eliminateInjectives()
* next iteration does not skip not yet processed data
*/
remove_expr_at_index(i);
i -= 1;

/// copy non-literal arguments
std::remove_copy_if(

@ -469,7 +490,19 @@ void ExpressionAnalyzer::eliminateInjectives()
std::back_inserter(group_exprs), is_literal
);
}
else if (is_literal(group_exprs[i]))
{
remove_expr_at_index(i);
}
else
{
/// if neither a function nor a literal - advance to the next expression
++i;
}
}

if (group_exprs.empty())
select_query->group_expression_list = nullptr;
}
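Both the constant-key removal in analyzeAggregation and `remove_expr_at_index` above use the same swap-with-last idiom; a minimal standalone sketch, with strings standing in for AST nodes:

```cpp
#include <string>
#include <utility>
#include <vector>

/// Remove the element at idx in O(1) without preserving order:
/// overwrite it with the last element, then pop_back().
static void remove_at_index(std::vector<std::string> & exprs, const size_t idx)
{
    if (idx < exprs.size() - 1)
        exprs[idx] = std::move(exprs.back());
    exprs.pop_back();
}

int main()
{
    /// Stand-in for a GROUP BY list: negate(x) is injective, 'c' is a literal.
    std::vector<std::string> group_exprs{"negate(x)", "'c'", "y"};

    remove_at_index(group_exprs, 1);   /// drop the literal; order is not preserved

    /// group_exprs is now {"negate(x)", "y"}; the injective call would further
    /// be replaced by its non-literal arguments, yielding {"x", "y"}.
}
```

Because removal may move an unprocessed element into slot `i`, the loop above only increments `i` when nothing was removed.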

@ -997,7 +1030,6 @@ void ExpressionAnalyzer::getActionsImpl(ASTPtr ast, bool no_subqueries, bool onl
ColumnWithNameAndType fake_column;
fake_column.name = node->getColumnName();
fake_column.type = new DataTypeUInt8;
fake_column.column = new ColumnConstUInt8(1, 0);
actions_stack.addAction(ExpressionAction::addColumn(fake_column));
getActionsImpl(node->arguments->children.at(0), no_subqueries, only_consts, actions_stack);
}

@ -33,24 +33,35 @@ void InterpreterAlterQuery::execute()
ASTAlterQuery & alter = typeid_cast<ASTAlterQuery &>(*query_ptr);
String & table_name = alter.table;
String database_name = alter.database.empty() ? context.getCurrentDatabase() : alter.database;
AlterCommands commands = parseAlter(alter.parameters, context.getDataTypeFactory());
AlterCommands alter_commands;
PartitionCommands partition_commands;
parseAlter(alter.parameters, context.getDataTypeFactory(), alter_commands, partition_commands);

StoragePtr table = context.getTable(database_name, table_name);
table->alter(commands, database_name, table_name, context);

for (const PartitionCommand & command : partition_commands)
{
if (command.type == PartitionCommand::DROP_PARTITION)
table->dropPartition(command.partition, command.detach);
else if (command.type == PartitionCommand::ATTACH_PARTITION)
table->attachPartition(command.partition, command.unreplicated, command.part);
else
throw Exception("Bad PartitionCommand::Type: " + toString(command.type), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
}

if (!alter_commands.empty())
table->alter(alter_commands, database_name, table_name, context);
}

AlterCommands InterpreterAlterQuery::parseAlter(
const ASTAlterQuery::ParameterContainer & params_container, const DataTypeFactory & data_type_factory)
void InterpreterAlterQuery::parseAlter(
const ASTAlterQuery::ParameterContainer & params_container, const DataTypeFactory & data_type_factory,
AlterCommands & out_alter_commands, PartitionCommands & out_partition_commands)
{
AlterCommands res;

for (const auto & params : params_container)
{
res.push_back(AlterCommand());
AlterCommand & command = res.back();

if (params.type == ASTAlterQuery::ADD)
if (params.type == ASTAlterQuery::ADD_COLUMN)
{
AlterCommand command;
command.type = AlterCommand::ADD;

const ASTNameTypePair & ast_name_type = typeid_cast<const ASTNameTypePair &>(*params.name_type);

@ -62,14 +73,20 @@ AlterCommands InterpreterAlterQuery::parseAlter(

if (params.column)
command.after_column = typeid_cast<const ASTIdentifier &>(*params.column).name;

out_alter_commands.push_back(command);
}
else if (params.type == ASTAlterQuery::DROP)
else if (params.type == ASTAlterQuery::DROP_COLUMN)
{
AlterCommand command;
command.type = AlterCommand::DROP;
command.column_name = typeid_cast<const ASTIdentifier &>(*(params.column)).name;

out_alter_commands.push_back(command);
}
else if (params.type == ASTAlterQuery::MODIFY)
else if (params.type == ASTAlterQuery::MODIFY_COLUMN)
{
AlterCommand command;
command.type = AlterCommand::MODIFY;

const ASTNameTypePair & ast_name_type = typeid_cast<const ASTNameTypePair &>(*params.name_type);

@ -78,12 +95,22 @@ AlterCommands InterpreterAlterQuery::parseAlter(

command.column_name = ast_name_type.name;
command.data_type = data_type_factory.get(type_string);

out_alter_commands.push_back(command);
}
else if (params.type == ASTAlterQuery::DROP_PARTITION)
{
const Field & partition = dynamic_cast<const ASTLiteral &>(*params.partition).value;
out_partition_commands.push_back(PartitionCommand::dropPartition(partition, params.detach));
}
else if (params.type == ASTAlterQuery::ATTACH_PARTITION)
{
const Field & partition = dynamic_cast<const ASTLiteral &>(*params.partition).value;
out_partition_commands.push_back(PartitionCommand::attachPartition(partition, params.unreplicated, params.part));
}
else
throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
}

return res;
}
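The diff does not show PartitionCommand itself; a plausible minimal shape consistent with the calls above (`dropPartition`, `attachPartition`, and the fields `type`, `partition`, `detach`, `unreplicated`, `part`) might look like this sketch — the field order, types, and `Field` stand-in are assumptions:

```cpp
#include <string>

using Field = std::string;   /// stand-in for DB::Field

struct PartitionCommand
{
    enum Type { DROP_PARTITION, ATTACH_PARTITION };

    Type type;
    Field partition;
    bool detach;        /// for DROP: detach the partition instead of removing it
    bool unreplicated;  /// for ATTACH: from the unreplicated part of the table
    bool part;          /// for ATTACH: a single part rather than a whole partition

    static PartitionCommand dropPartition(const Field & partition, bool detach)
    {
        return {DROP_PARTITION, partition, detach, false, false};
    }

    static PartitionCommand attachPartition(const Field & partition, bool unreplicated, bool part)
    {
        return {ATTACH_PARTITION, partition, false, unreplicated, part};
    }
};
```

Splitting parseAlter into two output lists lets execute() run the partition operations first and call `table->alter` only when column-level commands remain.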

void InterpreterAlterQuery::updateMetadata(

@ -346,6 +346,10 @@ BlockInputStreamPtr InterpreterSelectQuery::execute()

need_second_distinct_pass = streams.size() > 1;
}
else if (query.group_by_with_totals && !aggregate_final)
{
executeTotalsAndHaving(streams, false, nullptr, aggregate_overflow_row);
}

if (has_order_by)
executeOrder(streams);

@ -77,7 +77,7 @@ bool ParserParenthesisExpression::parseImpl(Pos & pos, Pos end, ASTPtr & node, E
/// an empty expression in parentheses is not allowed
if (expr_list.children.empty())
{
expected = "not empty list of expressions in parenthesis";
expected = "non-empty parenthesized list of expressions";
return false;
}

@ -23,11 +23,17 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
ParserString s_modify("MODIFY", true, true);

ParserString s_drop("DROP", true, true);
ParserString s_detach("DETACH", true, true);
ParserString s_attach("ATTACH", true, true);
ParserString s_unreplicated("UNREPLICATED", true, true);
ParserString s_part("PART", true, true);
ParserString s_partition("PARTITION", true, true);
ParserString s_comma(",");

ParserIdentifier table_parser;
ParserCompoundIdentifier parser_name;
ParserCompoundNameTypePair parser_name_type;
ParserLiteral parser_literal;

ASTPtr table;
ASTPtr database;

@ -75,7 +81,8 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
if (s_add.ignore(pos, end, expected))
{
ws.ignore(pos, end);
s_column.ignore(pos, end, expected);
if (!s_column.ignore(pos, end, expected))
return false;
ws.ignore(pos, end);

parser_name_type.parse(pos, end, params.name_type, expected);

@ -89,29 +96,84 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
return false;
}

params.type = ASTAlterQuery::ADD;
params.type = ASTAlterQuery::ADD_COLUMN;
}
else if (s_drop.ignore(pos, end, expected))
{
ws.ignore(pos, end);
s_column.ignore(pos, end, expected);

if (s_partition.ignore(pos, end, expected))
{
ws.ignore(pos, end);

if (!parser_literal.parse(pos, end, params.partition, expected))
return false;

params.type = ASTAlterQuery::DROP_PARTITION;
}
else if (s_column.ignore(pos, end, expected))
{
ws.ignore(pos, end);

if (!parser_name.parse(pos, end, params.column, expected))
return false;

params.type = ASTAlterQuery::DROP_COLUMN;
params.detach = false;
}
else
return false;
}
else if (s_detach.ignore(pos, end, expected))
{
ws.ignore(pos, end);

parser_name.parse(pos, end, params.column, expected);
if (!s_partition.ignore(pos, end, expected))
return false;

params.type = ASTAlterQuery::DROP;
ws.ignore(pos, end);

if (!parser_literal.parse(pos, end, params.partition, expected))
return false;

params.type = ASTAlterQuery::DROP_PARTITION;
params.detach = true;
}
else if (s_attach.ignore(pos, end, expected))
{
ws.ignore(pos, end);

if (s_unreplicated.ignore(pos, end, expected))
{
params.unreplicated = true;
ws.ignore(pos, end);
}

if (s_part.ignore(pos, end, expected))
params.part = true;
else if (!s_partition.ignore(pos, end, expected))
return false;

ws.ignore(pos, end);

if (!parser_literal.parse(pos, end, params.partition, expected))
return false;

params.type = ASTAlterQuery::ATTACH_PARTITION;
}
else if (s_modify.ignore(pos, end, expected))
{
ws.ignore(pos, end);
s_column.ignore(pos, end, expected);
if (!s_column.ignore(pos, end, expected))
return false;
ws.ignore(pos, end);

parser_name_type.parse(pos, end, params.name_type, expected);
if (!parser_name_type.parse(pos, end, params.name_type, expected))
return false;

ws.ignore(pos, end);

params.type = ASTAlterQuery::MODIFY;
params.type = ASTAlterQuery::MODIFY_COLUMN;
}
else
return false;

@ -124,7 +186,7 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
parsing_finished = true;
}

query->parameters.push_back(params);
query->addParameters(params);
}
while (!parsing_finished);
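A toy sketch of the keyword-dispatch idea the parser above follows; `consume` is a made-up helper (the real code uses ParserString::ignore together with whitespace handling), shown only to illustrate how `DETACH PARTITION <literal>` maps to DROP_PARTITION with `detach = true`:

```cpp
#include <cstring>
#include <string>

/// Made-up helper: if the input starts with the keyword, advance past it and any spaces.
static bool consume(const char *& pos, const char * keyword)
{
    const size_t len = std::strlen(keyword);
    if (0 == std::strncmp(pos, keyword, len))
    {
        pos += len;
        while (*pos == ' ')
            ++pos;
        return true;
    }
    return false;
}

/// DETACH PARTITION <literal>  ->  DROP_PARTITION with detach = true,
/// mirroring the branch structure of ParserAlterQuery::parseImpl above.
static bool parseDetach(const char *& pos, std::string & partition, bool & detach)
{
    if (!consume(pos, "DETACH") || !consume(pos, "PARTITION"))
        return false;
    partition = pos;    /// the rest of the input stands in for the parsed literal
    detach = true;
    return true;
}

int main()
{
    const char * query = "DETACH PARTITION 201403";
    std::string partition;
    bool detach = false;
    return parseDetach(query, partition, detach) ? 0 : 1;
}
```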

@ -198,18 +198,18 @@ bool ParserSelectQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected &
return false;

ws.ignore(pos, end);
}

/// WITH TOTALS
if (s_with.ignore(pos, end, expected))
{
ws.ignore(pos, end);
if (!s_totals.ignore(pos, end, expected))
return false;
/// WITH TOTALS
if (s_with.ignore(pos, end, expected))
{
ws.ignore(pos, end);
if (!s_totals.ignore(pos, end, expected))
return false;

select_query->group_by_with_totals = true;
select_query->group_by_with_totals = true;

ws.ignore(pos, end);
}
ws.ignore(pos, end);
}

/// HAVING expr

@ -19,7 +19,7 @@ namespace DB
{


static const char * hilite_keyword = "\033[1;37m";
static const char * hilite_keyword = "\033[1m";
static const char * hilite_identifier = "\033[0;36m";
static const char * hilite_function = "\033[0;33m";
static const char * hilite_operator = "\033[1;33m";

@ -195,11 +195,11 @@ void formatAST(const ASTSelectQuery & ast, std::ostream & s, size_t indent, bo
one_line
? formatAST(*ast.group_expression_list, s, indent, hilite, one_line)
: formatExpressionListMultiline(typeid_cast<const ASTExpressionList &>(*ast.group_expression_list), s, indent, hilite);

if (ast.group_by_with_totals)
s << (hilite ? hilite_keyword : "") << nl_or_ws << indent_str << (one_line ? "" : " ") << "WITH TOTALS" << (hilite ? hilite_none : "");
}

if (ast.group_by_with_totals)
s << (hilite ? hilite_keyword : "") << nl_or_ws << indent_str << (one_line ? "" : " ") << "WITH TOTALS" << (hilite ? hilite_none : "");

if (ast.having_expression)
{
s << (hilite ? hilite_keyword : "") << nl_or_ws << indent_str << "HAVING " << (hilite ? hilite_none : "");

@ -721,7 +721,7 @@ void formatAST(const ASTAlterQuery & ast, std::ostream & s, size_t indent, bo
{
const ASTAlterQuery::Parameters &p = ast.parameters[i];

if (p.type == ASTAlterQuery::ADD)
if (p.type == ASTAlterQuery::ADD_COLUMN)
{
s << (hilite ? hilite_keyword : "") << indent_str << "ADD COLUMN " << (hilite ? hilite_none : "");
formatAST(*p.name_type, s, indent, hilite, true);

@ -733,16 +733,28 @@ void formatAST(const ASTAlterQuery & ast, std::ostream & s, size_t indent, bo
formatAST(*p.column, s, indent, hilite, one_line);
}
}
else if (p.type == ASTAlterQuery::DROP)
else if (p.type == ASTAlterQuery::DROP_COLUMN)
{
s << (hilite ? hilite_keyword : "") << indent_str << "DROP COLUMN " << (hilite ? hilite_none : "");
formatAST(*p.column, s, indent, hilite, true);
}
else if (p.type == ASTAlterQuery::MODIFY)
else if (p.type == ASTAlterQuery::MODIFY_COLUMN)
{
s << (hilite ? hilite_keyword : "") << indent_str << "MODIFY COLUMN " << (hilite ? hilite_none : "");
formatAST(*p.name_type, s, indent, hilite, true);
}
else if (p.type == ASTAlterQuery::DROP_PARTITION)
{
s << (hilite ? hilite_keyword : "") << indent_str << (p.detach ? "DETACH" : "DROP") << " PARTITION "
<< (hilite ? hilite_none : "");
formatAST(*p.partition, s, indent, hilite, true);
}
else if (p.type == ASTAlterQuery::ATTACH_PARTITION)
{
s << (hilite ? hilite_keyword : "") << indent_str << "ATTACH " << (p.unreplicated ? "UNREPLICATED" : "")
<< (p.part ? " PART " : " PARTITION ") << (hilite ? hilite_none : "");
formatAST(*p.partition, s, indent, hilite, true);
}
else
throw Exception("Unexpected type of ALTER", ErrorCodes::UNEXPECTED_AST_STRUCTURE);

@ -166,7 +166,7 @@ QueryParseResult QueryParser::parse(std::istream & s)
else if (settings_child_nodes->item(i)->nodeName() == "max_threads_per_counter")
{
/** Set a local limit on the maximum number of queries being processed.
* It may be higher than the default limit.
*/
result.max_threads_per_counter = DB::parse<unsigned>(settings_child_nodes->item(i)->innerText());
}

@ -373,6 +373,9 @@ int Server::main(const std::vector<std::string> & args)
if (config().has("replica_name"))
global_context->setDefaultReplicaName(config().getString("replica_name"));

if (config().has("macros"))
global_context->setMacros(Macros(config(), "macros"));

std::string users_config_path = config().getString("users_config", config().getString("config-file", "config.xml"));
auto users_config_reloader = stdext::make_unique<UsersConfigReloader>(users_config_path, global_context.get());

@ -68,7 +68,7 @@ void TCPHandler::runImpl()
throw;
}

/// A default database may be specified at connection time.
if (!default_database.empty())
{
if (!connection_context.isDatabaseExist(default_database))

@ -85,7 +85,9 @@ void TCPHandler::runImpl()

sendHello();

connection_context.setProgressCallback(boost::bind(&TCPHandler::updateProgress, this, _1, _2));
connection_context.setProgressCallback([this] (const size_t rows, const size_t bytes) {
return this->updateProgress(rows, bytes);
});

while (1)
{
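The callback change above swaps `boost::bind` for a capturing lambda; a self-contained sketch of the two equivalent forms, using `std::function` and `std::bind` in place of the Boost versions:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>

struct Handler
{
    void updateProgress(size_t rows, size_t bytes)
    {
        std::cout << rows << " rows, " << bytes << " bytes\n";
    }

    void install(std::function<void(size_t, size_t)> & cb)
    {
        /// Old style: bind the member function to this, with two placeholders.
        cb = std::bind(&Handler::updateProgress, this,
                       std::placeholders::_1, std::placeholders::_2);

        /// New style: a lambda capturing this, as in the TCPHandler change above.
        cb = [this](const size_t rows, const size_t bytes) { return this->updateProgress(rows, bytes); };
    }
};

int main()
{
    Handler handler;
    std::function<void(size_t, size_t)> progress_callback;
    handler.install(progress_callback);
    progress_callback(100, 4096);
}
```

The lambda names its parameters and needs no placeholder machinery, which is why it reads better at the call site.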

@ -82,6 +82,12 @@ Strings ActiveDataPartSet::getParts() const
return res;
}

size_t ActiveDataPartSet::size() const
{
Poco::ScopedLock<Poco::Mutex> lock(mutex);
return parts.size();
}


String ActiveDataPartSet::getPartName(DayNum_t left_date, DayNum_t right_date, UInt64 left_id, UInt64 right_id, UInt64 level)

@ -110,10 +116,14 @@ String ActiveDataPartSet::getPartName(DayNum_t left_date, DayNum_t right_date, U
return res;
}

bool ActiveDataPartSet::isPartDirectory(const String & dir_name, Poco::RegularExpression::MatchVec & matches)
bool ActiveDataPartSet::isPartDirectory(const String & dir_name, Poco::RegularExpression::MatchVec * out_matches)
{
Poco::RegularExpression::MatchVec matches;
static Poco::RegularExpression file_name_regexp("^(\\d{8})_(\\d{8})_(\\d+)_(\\d+)_(\\d+)");
return (file_name_regexp.match(dir_name, 0, matches) && 6 == matches.size());
bool res = (file_name_regexp.match(dir_name, 0, matches) && 6 == matches.size());
if (out_matches)
*out_matches = matches;
return res;
}
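A standalone check of the part-directory pattern above, using std::regex in place of Poco::RegularExpression; the sample name follows the MergeTree scheme min-date_max-date_min-block_max-block_level:

```cpp
#include <iostream>
#include <regex>
#include <string>

int main()
{
    /// Same pattern as file_name_regexp above; 5 capture groups + the whole match = 6.
    const std::regex part_regexp("^(\\d{8})_(\\d{8})_(\\d+)_(\\d+)_(\\d+)");

    std::smatch matches;
    const std::string dir_name = "20140101_20140131_1_10_2";

    if (std::regex_search(dir_name, matches, part_regexp) && matches.size() == 6)
        std::cout << "min date " << matches[1] << ", max date " << matches[2]
                  << ", blocks " << matches[3] << ".." << matches[4]
                  << ", level " << matches[5] << '\n';
}
```

The `6 == matches.size()` check in isPartDirectory is the same invariant: the whole match plus the five groups the pattern defines.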

void ActiveDataPartSet::parsePartName(const String & file_name, Part & part, const Poco::RegularExpression::MatchVec * matches_p)

@ -121,7 +131,7 @@ void ActiveDataPartSet::parsePartName(const String & file_name, Part & part, con
Poco::RegularExpression::MatchVec match_vec;
if (!matches_p)
{
if (!isPartDirectory(file_name, match_vec))
if (!isPartDirectory(file_name, &match_vec))
throw Exception("Unexpected part name: " + file_name, ErrorCodes::BAD_DATA_PART_NAME);
matches_p = &match_vec;
}

@ -5,6 +5,7 @@
#include <DB/Storages/MergeTree/MergeTreeReader.h>
#include <DB/Storages/MergeTree/MergeTreeBlockInputStream.h>
#include <DB/Storages/MergeTree/MergedBlockOutputStream.h>
#include <DB/Storages/MergeTree/MergeTreePartChecker.h>
#include <DB/Parsers/ASTIdentifier.h>
#include <DB/Parsers/ASTNameTypePair.h>
#include <DB/DataStreams/ExpressionBlockInputStream.h>

@ -41,6 +42,7 @@ MergeTreeData::MergeTreeData(
{
/// create the directory if it does not exist
Poco::File(full_path).createDirectories();
Poco::File(full_path + "detached").createDirectory();

/// initialize the sort description
sort_descr.reserve(primary_expr_ast->children.size());

@ -54,8 +56,6 @@ MergeTreeData::MergeTreeData(

ExpressionActionsPtr projected_expr = ExpressionAnalyzer(primary_expr_ast, context, *columns).getActions(true);
primary_key_sample = projected_expr->getSampleBlock();

loadDataParts();
}

UInt64 MergeTreeData::getMaxDataPartIndex()

@ -83,7 +83,7 @@ std::string MergeTreeData::getModePrefix() const
}


void MergeTreeData::loadDataParts()
void MergeTreeData::loadDataParts(bool skip_sanity_checks)
{
LOG_DEBUG(log, "Loading data parts");

@ -104,6 +104,7 @@ void MergeTreeData::loadDataParts()
if (0 == file_name.compare(0, strlen("tmp_"), "tmp_"))
continue;

/// TODO: This can be removed once no old_* directories remain anywhere (nothing has written them for a long time).
if (0 == file_name.compare(0, strlen("old_"), "old_"))
{
String new_file_name = file_name.substr(strlen("old_"));

@ -118,11 +119,13 @@ void MergeTreeData::loadDataParts()
}

DataPartsVector broken_parts_to_remove;
DataPartsVector broken_parts_to_detach;
size_t suspicious_broken_parts = 0;

Poco::RegularExpression::MatchVec matches;
for (const String & file_name : part_file_names)
{
if (!ActiveDataPartSet::isPartDirectory(file_name, matches))
if (!ActiveDataPartSet::isPartDirectory(file_name, &matches))
continue;

MutableDataPartPtr part = std::make_shared<DataPart>(*this);

@ -133,10 +136,10 @@ void MergeTreeData::loadDataParts()

try
{
part->loadColumns();
part->loadChecksums();
part->loadColumns(require_part_metadata);
part->loadChecksums(require_part_metadata);
part->loadIndex();
part->checkNotBroken();
part->checkNotBroken(require_part_metadata);
}
catch (...)
{

@ -150,7 +153,7 @@ void MergeTreeData::loadDataParts()
if (part->level == 0)
{
/// Level-zero parts cannot be restored.
LOG_ERROR(log, "Removing broken part " << full_path + file_name << " because is't impossible to repair.");
LOG_ERROR(log, "Removing broken part " << full_path + file_name << " because it's impossible to repair.");
broken_parts_to_remove.push_back(part);
}
else

@ -160,12 +163,13 @@ void MergeTreeData::loadDataParts()
int contained_parts = 0;

LOG_ERROR(log, "Part " << full_path + file_name << " is broken. Looking for parts to replace it.");
++suspicious_broken_parts;

for (const String & contained_name : part_file_names)
{
if (contained_name == file_name)
continue;
if (!ActiveDataPartSet::isPartDirectory(contained_name, matches))
if (!ActiveDataPartSet::isPartDirectory(contained_name, &matches))
continue;
DataPart contained_part(*this);
ActiveDataPartSet::parsePartName(contained_name, contained_part, &matches);

@ -183,8 +187,9 @@ void MergeTreeData::loadDataParts()
}
else
{
LOG_ERROR(log, "Not removing broken part " << full_path + file_name
LOG_ERROR(log, "Detaching broken part " << full_path + file_name
<< " because it covers less than 2 parts. You need to resolve this manually");
broken_parts_to_detach.push_back(part);
}
}

@ -196,12 +201,14 @@ void MergeTreeData::loadDataParts()
data_parts.insert(part);
}

if (broken_parts_to_remove.size() > 2)
throw Exception("Suspiciously many (" + toString(broken_parts_to_remove.size()) + ") broken parts to remove.",
if (suspicious_broken_parts > 5 && !skip_sanity_checks)
throw Exception("Suspiciously many (" + toString(suspicious_broken_parts) + ") broken parts to remove.",
ErrorCodes::TOO_MANY_UNEXPECTED_DATA_PARTS);

for (const auto & part : broken_parts_to_remove)
part->remove();
for (const auto & part : broken_parts_to_detach)
part->renameAddPrefix("detached/");

all_data_parts = data_parts;

@ -705,8 +712,6 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(

void MergeTreeData::replaceParts(const DataPartsVector & remove, const DataPartsVector & add, bool clear_without_timeout)
{
LOG_TRACE(log, "Removing " << remove.size() << " parts and adding " << add.size() << " parts.");

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);

for (const DataPartPtr & part : remove)

@ -720,7 +725,17 @@ void MergeTreeData::replaceParts(const DataPartsVector & remove, const DataParts
}
}

void MergeTreeData::renameAndDetachPart(DataPartPtr part, const String & prefix, bool restore_covered)
void MergeTreeData::attachPart(DataPartPtr part)
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);

if (!all_data_parts.insert(part).second)
throw Exception("Part " + part->name + " is already attached", ErrorCodes::DUPLICATE_DATA_PART);
data_parts.insert(part);
}

void MergeTreeData::renameAndDetachPart(DataPartPtr part, const String & prefix, bool restore_covered, bool move_to_detached)
{
LOG_INFO(log, "Renaming " << part->name << " to " << prefix << part->name << " and detaching it.");

@ -731,7 +746,8 @@ void MergeTreeData::renameAndDetachPart(DataPartPtr part, const String & prefix,
throw Exception("No such data part", ErrorCodes::NO_SUCH_DATA_PART);

data_parts.erase(part);
part->renameAddPrefix(prefix);
if (move_to_detached || !prefix.empty())
part->renameAddPrefix((move_to_detached ? "detached/" : "") + prefix);

if (restore_covered)
{

@ -783,6 +799,11 @@ void MergeTreeData::renameAndDetachPart(DataPartPtr part, const String & prefix,
}
}

void MergeTreeData::detachPartInPlace(DataPartPtr part)
{
renameAndDetachPart(part, "", false, false);
}

MergeTreeData::DataParts MergeTreeData::getDataParts()
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);

@ -879,6 +900,41 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na
return nullptr;
}

MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartAndFixMetadata(const String & relative_path)
{
MutableDataPartPtr part = std::make_shared<DataPart>(*this);
part->name = relative_path;

/// The column list used to be written incorrectly. Remove it and create it anew.
if (Poco::File(full_path + relative_path + "/columns.txt").exists())
Poco::File(full_path + relative_path + "/columns.txt").remove();

part->loadColumns(false);
part->loadChecksums(false);
part->loadIndex();
part->checkNotBroken(false);

part->modification_time = Poco::File(full_path + relative_path).getLastModified().epochTime();

/// If there is no checksums file, compute the checksums and write them. Verify the data along the way.
if (part->checksums.empty())
{
MergeTreePartChecker::Settings settings;
settings.setIndexGranularity(index_granularity);
settings.setRequireColumnFiles(true);
MergeTreePartChecker::checkDataPart(full_path + relative_path, settings, context.getDataTypeFactory(), &part->checksums);

{
WriteBufferFromFile out(full_path + relative_path + "/checksums.txt.tmp", 4096);
part->checksums.writeText(out);
}

Poco::File(full_path + relative_path + "/checksums.txt.tmp").renameTo(full_path + relative_path + "/checksums.txt");
}

return part;
}
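The checksums write above uses the usual write-to-temporary-then-rename trick so readers never observe a partially written file; a minimal sketch of the same pattern with the standard library:

```cpp
#include <cstdio>
#include <fstream>
#include <string>

/// Write content to path atomically: write a .tmp sibling, flush, then rename.
/// On POSIX, std::rename over an existing file replaces it atomically.
static bool writeFileAtomically(const std::string & path, const std::string & content)
{
    const std::string tmp_path = path + ".tmp";
    {
        std::ofstream out(tmp_path, std::ios::trunc);
        if (!out)
            return false;
        out << content;
        out.flush();
        if (!out)
            return false;
    }
    return 0 == std::rename(tmp_path.c_str(), path.c_str());
}

int main()
{
    return writeFileAtomically("checksums.txt", "example checksum data\n") ? 0 : 1;
}
```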
|
||||
|
||||
|
||||
void MergeTreeData::DataPart::Checksums::Checksum::checkEqual(const Checksum & rhs, bool have_uncompressed, const String & name) const
|
||||
{
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <DB/DataTypes/DataTypeDateTime.h>
|
||||
#include <DB/DataTypes/DataTypesNumberFixed.h>
|
||||
#include <DB/DataTypes/DataTypeFixedString.h>
|
||||
#include <DB/DataTypes/DataTypeAggregateFunction.h>
|
||||
#include <DB/IO/CompressedReadBuffer.h>
|
||||
#include <DB/IO/HashingReadBuffer.h>
|
||||
#include <DB/Columns/ColumnsNumber.h>
|
||||
@@ -14,6 +15,8 @@ namespace DB

struct Stream
{
+	static const size_t UNKNOWN = std::numeric_limits<size_t>::max();
+
	DataTypePtr type;
	String path;
	String name;

@@ -35,6 +38,12 @@ struct Stream
		return mrk_hashing_buf.eof();
	}

+	void ignore()
+	{
+		uncompressed_hashing_buf.ignore(std::numeric_limits<size_t>::max());
+		mrk_hashing_buf.ignore(std::numeric_limits<size_t>::max());
+	}
+
	size_t read(size_t rows)
	{
		if (dynamic_cast<const DataTypeString *>(&*type))

@@ -97,7 +106,7 @@ struct Stream
		return size / sizeof(UInt64);
	}

-	void assertMark(bool strict)
+	void assertMark()
	{
		MarkInCompressedFile mrk_mark;
		readIntBinary(mrk_mark.offset_in_compressed_file, mrk_hashing_buf);

@@ -152,7 +161,7 @@ struct Stream
};
/// Returns the number of rows. Adds the checksums of all of the column's files to `checksums`.
-static size_t checkColumn(const String & path, const String & name, DataTypePtr type, size_t index_granularity, bool strict,
+static size_t checkColumn(const String & path, const String & name, DataTypePtr type, const MergeTreePartChecker::Settings & settings,
	MergeTreeData::DataPart::Checksums & checksums)
{
	size_t rows = 0;

@@ -171,10 +180,10 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
		if (sizes_stream.marksEOF())
			break;

-		sizes_stream.assertMark(strict);
-		data_stream.assertMark(strict);
+		sizes_stream.assertMark();
+		data_stream.assertMark();

-		size_t cur_rows = sizes_stream.readUInt64(index_granularity, sizes);
+		size_t cur_rows = sizes_stream.readUInt64(settings.index_granularity, sizes);

		size_t sum = 0;
		for (size_t i = 0; i < cur_rows; ++i)

@@ -188,7 +197,7 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
		data_stream.read(sum);

		rows += cur_rows;
-		if (cur_rows < index_granularity)
+		if (cur_rows < settings.index_granularity)
			break;
	}

@@ -197,6 +206,12 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
		return rows;
	}
+	else if (dynamic_cast<const DataTypeAggregateFunction *>(&*type))
+	{
+		Stream data_stream(path, escapeForFileName(name), type);
+		data_stream.ignore();
+		return Stream::UNKNOWN;
+	}
	else
	{
		Stream data_stream(path, escapeForFileName(name), type);

@@ -207,12 +222,15 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
		if (data_stream.marksEOF())
			break;

-		data_stream.assertMark(strict);
+		data_stream.assertMark();

-		size_t cur_rows = data_stream.read(index_granularity);
+		size_t cur_rows = data_stream.read(settings.index_granularity);

-		rows += cur_rows;
-		if (cur_rows < index_granularity)
+		if (cur_rows == Stream::UNKNOWN)
+			rows = Stream::UNKNOWN;
+		else
+			rows += cur_rows;
+		if (cur_rows < settings.index_granularity)
			break;
	}

@@ -228,8 +246,8 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
	}
}
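/// Why the aggregate-function branch above returns Stream::UNKNOWN: such columns
/// use a variable-length serialization, so their row count cannot be derived
/// from file sizes. The stream is still read to the end via ignore() so its
/// hashing buffers see all the data, but no row count is reported.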
-void MergeTreePartChecker::checkDataPart(String path, size_t index_granularity, bool strict, const DataTypeFactory & data_type_factory,
-	bool verbose)
+void MergeTreePartChecker::checkDataPart(String path, const Settings & settings, const DataTypeFactory & data_type_factory,
+	MergeTreeData::DataPart::Checksums * out_checksums)
{
	if (!path.empty() && *path.rbegin() != '/')
		path += "/";

@@ -243,7 +261,7 @@ void MergeTreePartChecker::checkDataPart(String path, size_t index_granularity,
		assertEOF(buf);
	}

-	if (strict || Poco::File(path + "checksums.txt").exists())
+	if (settings.require_checksums || Poco::File(path + "checksums.txt").exists())
	{
		ReadBufferFromFile buf(path + "checksums.txt");
		checksums_txt.readText(buf);

@@ -260,13 +278,13 @@ void MergeTreePartChecker::checkDataPart(String path, size_t index_granularity,
		checksums_data.files["primary.idx"] = MergeTreeData::DataPart::Checksums::Checksum(primary_idx_size, hashing_buf.getHash());
	}

-	bool first = true;
-	size_t rows = 0;
+	String any_column_name;
+	size_t rows = Stream::UNKNOWN;
	ExceptionPtr first_exception;

	for (const NameAndTypePair & column : columns)
	{
-		if (verbose)
+		if (settings.verbose)
		{
			std::cerr << column.name << ":";
			std::cerr.flush();

@@ -275,30 +293,33 @@ void MergeTreePartChecker::checkDataPart(String path, size_t index_granularity,
		bool ok = false;
		try
		{
-			if (!strict && !Poco::File(path + escapeForFileName(column.name) + ".bin").exists())
+			if (!settings.require_column_files && !Poco::File(path + escapeForFileName(column.name) + ".bin").exists())
			{
-				if (verbose)
+				if (settings.verbose)
					std::cerr << " no files" << std::endl;
				continue;
			}

-			size_t cur_rows = checkColumn(path, column.name, column.type, index_granularity, strict, checksums_data);
-			if (first)
-			{
-				rows = cur_rows;
-				first = false;
-			}
-			else if (rows != cur_rows)
-			{
-				throw Exception("Different number of rows in columns " + columns.begin()->name + " and " + column.name,
-					ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
-			}
+			size_t cur_rows = checkColumn(path, column.name, column.type, settings, checksums_data);
+			if (cur_rows != Stream::UNKNOWN)
+			{
+				if (rows == Stream::UNKNOWN)
+				{
+					rows = cur_rows;
+					any_column_name = column.name;
+				}
+				else if (rows != cur_rows)
+				{
+					throw Exception("Different number of rows in columns " + any_column_name + " and " + column.name,
+						ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
+				}
+			}
			ok = true;
		}
		catch (...)
		{
-			if (!verbose)
+			if (!settings.verbose)
				throw;
			ExceptionPtr e = cloneCurrentException();
			if (!first_exception)

@@ -311,18 +332,18 @@ void MergeTreePartChecker::checkDataPart(String path, size_t index_granularity,
			std::cerr << std::endl;
		}

-		if (verbose && ok)
+		if (settings.verbose && ok)
			std::cerr << " ok" << std::endl;
	}

-	if (first)
+	if (rows == Stream::UNKNOWN)
		throw Exception("No columns", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

-	if (primary_idx_size % ((rows - 1) / index_granularity + 1))
+	if (primary_idx_size % ((rows - 1) / settings.index_granularity + 1))
		throw Exception("primary.idx size (" + toString(primary_idx_size) + ") not divisible by number of marks ("
-			+ toString(rows) + "/" + toString(index_granularity) + " rounded up)", ErrorCodes::CORRUPTED_DATA);
+			+ toString(rows) + "/" + toString(settings.index_granularity) + " rounded up)", ErrorCodes::CORRUPTED_DATA);

-	if (strict || !checksums_txt.files.empty())
+	if (settings.require_checksums || !checksums_txt.files.empty())
		checksums_txt.checkEqual(checksums_data, true);

	if (first_exception)
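// A minimal, self-contained sketch (not part of this commit) of the row-count
// reconciliation used by checkDataPart above: columns whose size cannot be
// derived from their files report the sentinel UNKNOWN, the first column with
// a known size fixes the expected row count, and any later mismatch is an
// error. Names here are illustrative stand-ins for the real code.
#include <cstddef>
#include <limits>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

static const size_t UNKNOWN = std::numeric_limits<size_t>::max();

size_t reconcileRows(const std::vector<std::pair<std::string, size_t>> & column_rows)
{
	size_t rows = UNKNOWN;
	std::string any_column_name;

	for (const auto & column : column_rows)
	{
		if (column.second == UNKNOWN)
			continue;	/// No size information - nothing to check against.

		if (rows == UNKNOWN)
		{
			rows = column.second;	/// First column with a known size sets the expectation.
			any_column_name = column.first;
		}
		else if (rows != column.second)
			throw std::runtime_error("Different number of rows in columns "
				+ any_column_name + " and " + column.first);
	}

	if (rows == UNKNOWN)
		throw std::runtime_error("No columns");

	return rows;
}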
@@ -73,8 +73,15 @@ MergeTreeData::MutableDataPartPtr ReplicatedMergeTreePartsFetcher::fetchPart(
	ReadBufferFromHTTP in(host, port, params);

	String part_path = data.getFullPath() + "tmp_" + part_name + "/";
-	if (!Poco::File(part_path).createDirectory())
-		throw Exception("Directory " + part_path + " already exists");
+	Poco::File part_file(part_path);
+
+	if (part_file.exists())
+	{
+		LOG_ERROR(log, "Directory " + part_path + " already exists. Removing.");
+		part_file.remove(true);
+	}
+
+	part_file.createDirectory();

	MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared<MergeTreeData::DataPart>(data);
	new_data_part->name = "tmp_" + part_name;

@@ -110,8 +117,8 @@ MergeTreeData::MutableDataPartPtr ReplicatedMergeTreePartsFetcher::fetchPart(
	ActiveDataPartSet::parsePartName(part_name, *new_data_part);
	new_data_part->modification_time = time(0);
-	new_data_part->loadColumns();
-	new_data_part->loadChecksums();
+	new_data_part->loadColumns(true);
+	new_data_part->loadChecksums(true);
	new_data_part->loadIndex();

	new_data_part->checksums.checkEqual(checksums, false);
@@ -1,34 +1,78 @@
#include <DB/Parsers/formatAST.h>

#include <DB/DataStreams/RemoteBlockInputStream.h>
#include <DB/DataStreams/RemoveColumnsBlockInputStream.h>

#include <DB/Storages/StorageDistributed.h>
#include <DB/Storages/VirtualColumnFactory.h>

#include <Poco/Net/NetworkInterface.h>
#include <DB/Client/ConnectionPool.h>
+#include <DB/Storages/Distributed/DistributedBlockOutputStream.h>
+#include <DB/Storages/Distributed/DirectoryMonitor.h>
+#include <DB/Storages/Distributed/queryToString.h>
+#include <DB/Common/escapeForFileName.h>

#include <DB/Interpreters/InterpreterSelectQuery.h>
#include <DB/Interpreters/InterpreterAlterQuery.h>
#include <boost/bind.hpp>

#include <DB/Core/Field.h>

+#include <statdaemons/stdext.h>

namespace DB
{
namespace
{
	template <typename ASTType> void rewriteImpl(ASTType &, const std::string &, const std::string &) = delete;

	/// select query has database and table names as AST pointers
	template <> inline void rewriteImpl<ASTSelectQuery>(ASTSelectQuery & query,
		const std::string & database, const std::string & table)
	{
		query.database = new ASTIdentifier{{}, database, ASTIdentifier::Database};
		query.table = new ASTIdentifier{{}, table, ASTIdentifier::Table};
	}

	/// insert query has database and table names as bare strings
	template <> inline void rewriteImpl<ASTInsertQuery>(ASTInsertQuery & query,
		const std::string & database, const std::string & table)
	{
		query.database = database;
		query.table = table;
		/// make sure query is not INSERT SELECT
		query.select = nullptr;
	}

	/// Creates a copy of the query and changes the database and table names.
	template <typename ASTType>
	inline ASTPtr rewriteQuery(const ASTPtr & query, const std::string & database, const std::string & table)
	{
		/// Create a copy of the query.
		auto modified_query_ast = query->clone();

		/// Change the table and database names.
		rewriteImpl(typeid_cast<ASTType &>(*modified_query_ast), database, table);

		return modified_query_ast;
	}
}
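// A standalone sketch (not from this commit) of the dispatch pattern above:
// the primary rewriteImpl template is declared deleted, so rewriteQuery<T>
// compiles only for types that have an explicit specialization. FakeSelect and
// FakeInsert are illustrative stand-ins for ASTSelectQuery/ASTInsertQuery.
#include <string>

struct FakeSelect { std::string database, table; };
struct FakeInsert { std::string database, table; };

template <typename T> void rewriteImpl(T &, const std::string &, const std::string &) = delete;

template <> inline void rewriteImpl<FakeSelect>(FakeSelect & query,
	const std::string & database, const std::string & table)
{
	query.database = database;
	query.table = table;
}

/// Instantiating rewriteQuery<FakeInsert> would fail to compile, because no
/// rewriteImpl<FakeInsert> specialization exists - exactly the safety the
/// deleted primary template buys.
template <typename T>
T rewriteQuery(T query, const std::string & database, const std::string & table)
{
	rewriteImpl(query, database, table);
	return query;
}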
StorageDistributed::StorageDistributed(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const String & remote_database_,
	const String & remote_table_,
	Cluster & cluster_,
-	const Context & context_)
+	Context & context_,
+	const ASTPtr & sharding_key_,
+	const String & data_path_)
	: name(name_), columns(columns_),
	remote_database(remote_database_), remote_table(remote_table_),
-	context(context_),
-	cluster(cluster_)
+	context(context_), cluster(cluster_),
+	sharding_key_expr(sharding_key_ ? ExpressionAnalyzer(sharding_key_, context, *columns).getActions(false) : nullptr),
+	sharding_key_column_name(sharding_key_ ? sharding_key_->getColumnName() : String{}),
+	write_enabled(cluster.getLocalNodesNum() + cluster.pools.size() < 2 || sharding_key_),
+	path(data_path_ + escapeForFileName(name) + '/')
{
+	createDirectoryMonitors();
}
StoragePtr StorageDistributed::create(

@@ -37,10 +81,17 @@ StoragePtr StorageDistributed::create(
	const String & remote_database_,
	const String & remote_table_,
	const String & cluster_name,
-	Context & context_)
+	Context & context_,
+	const ASTPtr & sharding_key_,
+	const String & data_path_)
{
	context_.initClusters();
-	return (new StorageDistributed(name_, columns_, remote_database_, remote_table_, context_.getCluster(cluster_name), context_))->thisPtr();
+
+	return (new StorageDistributed{
+		name_, columns_, remote_database_, remote_table_,
+		context_.getCluster(cluster_name), context_,
+		sharding_key_, data_path_
+	})->thisPtr();
}
@@ -52,7 +103,9 @@ StoragePtr StorageDistributed::create(
	SharedPtr<Cluster> & owned_cluster_,
	Context & context_)
{
-	auto res = new StorageDistributed(name_, columns_, remote_database_, remote_table_, *owned_cluster_, context_);
+	auto res = new StorageDistributed{
+		name_, columns_, remote_database_,
+		remote_table_, *owned_cluster_, context_};

	/// Take ownership of the cluster object.
	res->owned_cluster = owned_cluster_;

@@ -60,27 +113,6 @@ StoragePtr StorageDistributed::create(
	return res->thisPtr();
}
-ASTPtr StorageDistributed::rewriteQuery(ASTPtr query)
-{
-	/// Create a copy of the query.
-	ASTPtr modified_query_ast = query->clone();
-
-	/// Change the table and database names.
-	ASTSelectQuery & select = typeid_cast<ASTSelectQuery &>(*modified_query_ast);
-	select.database = new ASTIdentifier(StringRange(), remote_database, ASTIdentifier::Database);
-	select.table = new ASTIdentifier(StringRange(), remote_table, ASTIdentifier::Table);
-
-	return modified_query_ast;
-}
-
-static String selectToString(ASTPtr query)
-{
-	ASTSelectQuery & select = typeid_cast<ASTSelectQuery &>(*query);
-	std::stringstream s;
-	formatAST(select, s, 0, false, true);
-	return s.str();
-}
BlockInputStreams StorageDistributed::read(
	const Names & column_names,
	ASTPtr query,

@@ -99,20 +131,15 @@ BlockInputStreams StorageDistributed::read(
		: QueryProcessingStage::WithMergeableState;

	BlockInputStreams res;
-	ASTPtr modified_query_ast = rewriteQuery(query);
+	const auto & modified_query_ast = rewriteQuery<ASTSelectQuery>(
+		query, remote_database, remote_table);
+	const auto & modified_query = queryToString<ASTSelectQuery>(modified_query_ast);

	/// Loop over the shards.
	for (auto & conn_pool : cluster.pools)
-	{
-		String modified_query = selectToString(modified_query_ast);
-
-		res.push_back(new RemoteBlockInputStream(
-			conn_pool,
-			modified_query,
-			&new_settings,
-			external_tables,
-			processed_stage));
-	}
+		res.emplace_back(new RemoteBlockInputStream{
+			conn_pool, modified_query, &new_settings,
+			external_tables, processed_stage});
	/// Add queries to the local ClickHouse.
	if (cluster.getLocalNodesNum() > 0)

@@ -123,10 +150,10 @@ BlockInputStreams StorageDistributed::read(
			if (!new_context.tryGetExternalTable(it.first))
				new_context.addExternalTable(it.first, it.second);

-		for(size_t i = 0; i < cluster.getLocalNodesNum(); ++i)
+		for (size_t i = 0; i < cluster.getLocalNodesNum(); ++i)
		{
			InterpreterSelectQuery interpreter(modified_query_ast, new_context, processed_stage);
			res.push_back(interpreter.execute());
		}
	}

@@ -134,6 +161,21 @@ BlockInputStreams StorageDistributed::read(
	return res;
}
+BlockOutputStreamPtr StorageDistributed::write(ASTPtr query)
+{
+	if (!write_enabled)
+		throw Exception{
+			"Method write is not supported by storage " + getName() +
+			" with more than one shard and no sharding key provided",
+			ErrorCodes::STORAGE_REQUIRES_PARAMETER
+		};
+
+	return new DistributedBlockOutputStream{
+		*this,
+		rewriteQuery<ASTInsertQuery>(query, remote_database, remote_table)
+	};
+}
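/// A note on the guard above: write_enabled is set in the constructor as
/// cluster.getLocalNodesNum() + cluster.pools.size() < 2 || sharding_key_,
/// i.e. INSERT is allowed either when the cluster has at most one destination
/// or when a sharding key is available to route rows across shards.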
void StorageDistributed::alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context)
{
	auto lock = lockStructureForAlter();

@@ -141,11 +183,15 @@ void StorageDistributed::alter(const AlterCommands & params, const String & data
	InterpreterAlterQuery::updateMetadata(database_name, table_name, *columns, context);
}

+void StorageDistributed::shutdown()
+{
+	directory_monitors.clear();
+}
NameAndTypePair StorageDistributed::getColumn(const String & column_name) const
{
-	auto type = VirtualColumnFactory::tryGetType(column_name);
-	if (type)
-		return NameAndTypePair(column_name, type);
+	if (const auto & type = VirtualColumnFactory::tryGetType(column_name))
+		return { column_name, type };

	return getRealColumn(column_name);
}

@@ -155,4 +201,25 @@ bool StorageDistributed::hasColumn(const String & column_name) const
	return VirtualColumnFactory::hasColumn(column_name) || hasRealColumn(column_name);
}
+void StorageDistributed::createDirectoryMonitor(const std::string & name)
+{
+	directory_monitors.emplace(name, stdext::make_unique<DirectoryMonitor>(*this, name));
+}
+
+void StorageDistributed::createDirectoryMonitors()
+{
+	Poco::File{path}.createDirectory();
+
+	Poco::DirectoryIterator end;
+	for (Poco::DirectoryIterator it{path}; it != end; ++it)
+		if (it->isDirectory())
+			createDirectoryMonitor(it.name());
+}
+
+void StorageDistributed::requireDirectoryMonitor(const std::string & name)
+{
+	if (!directory_monitors.count(name))
+		createDirectoryMonitor(name);
+}

}
@@ -176,16 +176,20 @@ StoragePtr StorageFactory::get(
		ASTs & args = typeid_cast<ASTExpressionList &>(*args_func.at(0)).children;

-		if (args.size() != 3)
-			throw Exception("Storage Distributed requires 3 parameters"
-				" - name of configuration section with list of remote servers, name of remote database, name of remote table.",
+		if (args.size() != 3 && args.size() != 4)
+			throw Exception("Storage Distributed requires 3 or 4 parameters"
+				" - name of configuration section with list of remote servers, name of remote database, name of remote table,"
+				" sharding key expression (optional).",
				ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

		String cluster_name = typeid_cast<ASTIdentifier &>(*args[0]).name;
		String remote_database = typeid_cast<ASTIdentifier &>(*args[1]).name;
		String remote_table = typeid_cast<ASTIdentifier &>(*args[2]).name;

-		return StorageDistributed::create(table_name, columns, remote_database, remote_table, cluster_name, context);
+		const auto & sharding_key = args.size() == 4 ? args[3] : nullptr;
+
+		return StorageDistributed::create(
+			table_name, columns, remote_database, remote_table, cluster_name, context, sharding_key, data_path);
	}
	else if (endsWith(name, "MergeTree"))
	{
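/// Usage note (an illustrative example, not taken from this commit): with the
/// optional fourth argument accepted above, a Distributed table can now be
/// declared with a sharding key, e.g.
///     ENGINE = Distributed(cluster, db, table, rand())
/// which is what enables the INSERT path in StorageDistributed::write.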
@@ -27,6 +27,7 @@ StorageMergeTree::StorageMergeTree(const String & path_, const String & database
{
	increment.fixIfBroken(data.getMaxDataPartIndex());

	data.loadDataParts(false);
+	data.clearOldParts();
}
File diff suppressed because it is too large.
@@ -15,8 +15,14 @@ int main(int argc, char ** argv)
	try
	{
-		DB::MergeTreePartChecker::checkDataPart(argv[1], argc == 4 ? DB::parse<size_t>(argv[3]) : 8192ul, argv[2][0] == '1',
-			DB::DataTypeFactory(), true);
+		DB::MergeTreePartChecker::Settings settings;
+		if (argc == 4)
+			settings.setIndexGranularity(DB::parse<size_t>(argv[3]));
+		settings.setRequireChecksums(argv[2][0] == '1');
+		settings.setRequireColumnFiles(argv[2][0] == '1');
+		settings.setVerbose(true);
+
+		DB::MergeTreePartChecker::checkDataPart(argv[1], settings, DB::DataTypeFactory());
	}
	catch (...)
	{
@@ -1,3 +1,3 @@
-0 1 -1 128 -127 -128 255 -128 255 -127 65535 4294967295 12300 4656 -0 -0 0 18446744073709551615 2.09883e+19 -1.84467e+19 -9223372036854775807 -8.98847e+307 -2.22507e-308 inf -inf nan -nan 1e-302 UInt8 UInt8 Int8 UInt8 Int8 Int8 UInt8 Int8 UInt8 Int8 UInt16 UInt32 Float64 Float64 Float64 Float64 UInt8 UInt64 Float64 Float64 Int64 Float64 Float64 Float64 Float64 Float64 Float32 Float64
+0 1 -1 128 -127 -128 255 -128 255 -127 65535 4294967295 12300 4656 -0 -0 0 18446744073709551615 2.09883e+19 -1.84467e+19 -9223372036854775807 -8.98847e+307 -2.22507e-308 inf -inf nan -nan 1e-302 UInt8 UInt8 Int8 UInt8 Int8 Int8 UInt8 Int8 UInt8 Int8 UInt16 UInt32 Float64 Float64 Float64 Float64 Float64 UInt64 Float64 Float64 Int64 Float64 Float64 Float64 Float64 Float64 Float32 Float64
1e+308
-1e-307
@@ -0,0 +1 @@
-1	Int64

dbms/tests/queries/0_stateless/00064_negate_bug.sql (new file)
@@ -0,0 +1 @@
SELECT -toUInt32(1) AS x, toTypeName(x) AS t
@@ -0,0 +1,2 @@
Float64
Float64
@@ -0,0 +1 @@
SELECT toTypeName(1.0) FROM remote('127.0.0.{1,2}', system, one)
@@ -0,0 +1,2 @@
0
0
Some files were not shown because too many files have changed in this diff.