Merge branch 'master' of github.com:yandex/ClickHouse

This commit is contained in:
alesapin 2018-11-21 12:06:40 +03:00
commit ff5629263b
78 changed files with 1900 additions and 1408 deletions


@ -1,6 +1,6 @@
BasedOnStyle: WebKit
Language: Cpp
AlignAfterOpenBracket: false
AlignAfterOpenBracket: AlwaysBreak
BreakBeforeBraces: Custom
BraceWrapping:
AfterClass: true
@ -25,7 +25,7 @@ Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2
KeepEmptyLinesAtTheStartOfBlocks: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortFunctionsOnASingleLine: InlineOnly
AlwaysBreakTemplateDeclarations: true
IndentCaseLabels: true
SpaceAfterTemplateKeyword: true

.gitignore

@ -11,6 +11,7 @@
/build
/build_*
/build-*
/docs/build
/docs/edit
/docs/tools/venv/

File diff suppressed because it is too large.


@ -1,3 +1,11 @@
## ClickHouse release 18.14.14, 2018-11-20
### Bug fixes:
* Fixed `ON CLUSTER` queries for the case when encryption is enabled in the cluster configuration (the `<secure>` flag). [#3599]
### ClickHouse build improvements:
* Fixed build problems (llvm-7 from the system, macOS). [#3582]
## ClickHouse release 18.14.13, 2018-11-08
### Bug fixes:

contrib/poco

@ -1 +1 @@
Subproject commit 566162b324e0b73eb360a1cd38077c9287cc1106
Subproject commit 20c1d877773b6a672f1bbfe3290dfea42a117ed5


@ -270,7 +270,6 @@ void HTTPHandler::processQuery(
std::string query_id = params.get("query_id", "");
context.setUser(user, password, request.clientAddress(), quota_key);
context.setCurrentQueryId(query_id);
CurrentThread::attachQueryContext(context);
/// The user could specify session identifier and session timeout.
/// It allows modifying settings, creating temporary tables and reusing them in subsequent requests.


@ -12,7 +12,7 @@ namespace
template <typename T>
struct Avg
{
using FieldType = std::conditional_t<IsDecimalNumber<T>, Decimal128, typename NearestFieldType<T>::Type>;
using FieldType = std::conditional_t<IsDecimalNumber<T>, Decimal128, NearestFieldType<T>>;
using Function = AggregateFunctionAvg<T, AggregateFunctionAvgData<FieldType>>;
};


@ -14,7 +14,7 @@ template <typename T>
struct SumSimple
{
/// @note It uses slow Decimal128 (because we need such a variant). sumWithOverflow is faster for Decimal32/64
using ResultType = std::conditional_t<IsDecimalNumber<T>, Decimal128, typename NearestFieldType<T>::Type>;
using ResultType = std::conditional_t<IsDecimalNumber<T>, Decimal128, NearestFieldType<T>>;
using AggregateDataType = AggregateFunctionSumData<ResultType>;
using Function = AggregateFunctionSum<T, ResultType, AggregateDataType>;
};


@ -52,7 +52,7 @@ struct AggregateFunctionSumMapData
template <typename T>
class AggregateFunctionSumMap final : public IAggregateFunctionDataHelper<
AggregateFunctionSumMapData<typename NearestFieldType<T>::Type>, AggregateFunctionSumMap<T>>
AggregateFunctionSumMapData<NearestFieldType<T>>, AggregateFunctionSumMap<T>>
{
private:
using ColVecType = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;


@ -209,7 +209,7 @@ public:
Field getField() const { return getDataColumn()[0]; }
template <typename T>
T getValue() const { return getField().safeGet<typename NearestFieldType<T>::Type>(); }
T getValue() const { return getField().safeGet<NearestFieldType<T>>(); }
};
}


@ -213,8 +213,8 @@ void ColumnDecimal<T>::getExtremes(Field & min, Field & max) const
{
if (data.size() == 0)
{
min = typename NearestFieldType<T>::Type(0, scale);
max = typename NearestFieldType<T>::Type(0, scale);
min = NearestFieldType<T>(0, scale);
max = NearestFieldType<T>(0, scale);
return;
}
@ -229,8 +229,8 @@ void ColumnDecimal<T>::getExtremes(Field & min, Field & max) const
cur_max = x;
}
min = typename NearestFieldType<T>::Type(cur_min, scale);
max = typename NearestFieldType<T>::Type(cur_max, scale);
min = NearestFieldType<T>(cur_min, scale);
max = NearestFieldType<T>(cur_max, scale);
}
template class ColumnDecimal<Decimal32>;


@ -91,7 +91,7 @@ public:
void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
void insertData(const char * pos, size_t /*length*/) override;
void insertDefault() override { data.push_back(T()); }
void insert(const Field & x) override { data.push_back(DB::get<typename NearestFieldType<T>::Type>(x)); }
void insert(const Field & x) override { data.push_back(DB::get<NearestFieldType<T>>(x)); }
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
void popBack(size_t n) override { data.resize_assume_reserved(data.size() - n); }


@ -311,8 +311,8 @@ void ColumnVector<T>::getExtremes(Field & min, Field & max) const
cur_max = x;
}
min = typename NearestFieldType<T>::Type(cur_min);
max = typename NearestFieldType<T>::Type(cur_max);
min = NearestFieldType<T>(cur_min);
max = NearestFieldType<T>(cur_max);
}
/// Explicit template instantiations - to avoid code bloat in headers.


@ -244,7 +244,7 @@ public:
void insert(const Field & x) override
{
data.push_back(DB::get<typename NearestFieldType<T>::Type>(x));
data.push_back(DB::get<NearestFieldType<T>>(x));
}
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;


@ -396,6 +396,7 @@ namespace ErrorCodes
extern const int MULTIPLE_ASSIGNMENTS_TO_COLUMN = 419;
extern const int CANNOT_UPDATE_COLUMN = 420;
extern const int CANNOT_ADD_DIFFERENT_AGGREGATE_STATES = 421;
extern const int UNSUPPORTED_URI_SCHEME = 422;
extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;


@ -62,7 +62,9 @@
#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800
#define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1
#define DEFAULT_HTTP_AMOUNT_CONNECTIONS_PER_ENDPOINT 10
/// Maximum number of HTTP connections between two endpoints.
/// The number is somewhat arbitrary.
#define DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT 15
// more aliases: https://mailman.videolan.org/pipermail/x264-devel/2014-May/010660.html


@ -578,43 +578,54 @@ template <> struct TypeName<Array> { static std::string get() { return "Array";
template <> struct TypeName<Tuple> { static std::string get() { return "Tuple"; } };
template <typename T> struct NearestFieldType;
template <typename T> struct NearestFieldTypeImpl;
template <> struct NearestFieldType<UInt8> { using Type = UInt64; };
template <> struct NearestFieldType<UInt16> { using Type = UInt64; };
template <> struct NearestFieldType<UInt32> { using Type = UInt64; };
template <> struct NearestFieldType<UInt64> { using Type = UInt64; };
#ifdef __APPLE__
template <> struct NearestFieldType<time_t> { using Type = UInt64; };
template <> struct NearestFieldType<size_t> { using Type = UInt64; };
#endif
template <> struct NearestFieldType<DayNum> { using Type = UInt64; };
template <> struct NearestFieldType<UInt128> { using Type = UInt128; };
template <> struct NearestFieldType<UUID> { using Type = UInt128; };
template <> struct NearestFieldType<Int8> { using Type = Int64; };
template <> struct NearestFieldType<Int16> { using Type = Int64; };
template <> struct NearestFieldType<Int32> { using Type = Int64; };
template <> struct NearestFieldType<Int64> { using Type = Int64; };
template <> struct NearestFieldType<Int128> { using Type = Int128; };
template <> struct NearestFieldType<Decimal32> { using Type = DecimalField<Decimal32>; };
template <> struct NearestFieldType<Decimal64> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldType<Decimal128> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldType<DecimalField<Decimal32>> { using Type = DecimalField<Decimal32>; };
template <> struct NearestFieldType<DecimalField<Decimal64>> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldType<DecimalField<Decimal128>> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldType<Float32> { using Type = Float64; };
template <> struct NearestFieldType<Float64> { using Type = Float64; };
template <> struct NearestFieldType<const char*> { using Type = String; };
template <> struct NearestFieldType<String> { using Type = String; };
template <> struct NearestFieldType<Array> { using Type = Array; };
template <> struct NearestFieldType<Tuple> { using Type = Tuple; };
template <> struct NearestFieldType<bool> { using Type = UInt64; };
template <> struct NearestFieldType<Null> { using Type = Null; };
/// char may be signed or unsigned, and behaves identically to signed char or unsigned char,
/// but all three are always distinct types.
/// The signedness of char differs between Linux on x86 and Linux on ARM.
template <> struct NearestFieldTypeImpl<char> { using Type = std::conditional_t<std::is_signed_v<char>, Int64, UInt64>; };
template <> struct NearestFieldTypeImpl<signed char> { using Type = Int64; };
template <> struct NearestFieldTypeImpl<unsigned char> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<UInt16> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<UInt32> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<DayNum> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<UInt128> { using Type = UInt128; };
template <> struct NearestFieldTypeImpl<UUID> { using Type = UInt128; };
template <> struct NearestFieldTypeImpl<Int16> { using Type = Int64; };
template <> struct NearestFieldTypeImpl<Int32> { using Type = Int64; };
/// long and long long are always distinct types that may or may not behave identically.
/// This differs between Linux and Mac.
template <> struct NearestFieldTypeImpl<long> { using Type = Int64; };
template <> struct NearestFieldTypeImpl<long long> { using Type = Int64; };
template <> struct NearestFieldTypeImpl<unsigned long> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<unsigned long long> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<Int128> { using Type = Int128; };
template <> struct NearestFieldTypeImpl<Decimal32> { using Type = DecimalField<Decimal32>; };
template <> struct NearestFieldTypeImpl<Decimal64> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldTypeImpl<Decimal128> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal32>> { using Type = DecimalField<Decimal32>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal64>> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal128>> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldTypeImpl<Float32> { using Type = Float64; };
template <> struct NearestFieldTypeImpl<Float64> { using Type = Float64; };
template <> struct NearestFieldTypeImpl<const char *> { using Type = String; };
template <> struct NearestFieldTypeImpl<String> { using Type = String; };
template <> struct NearestFieldTypeImpl<Array> { using Type = Array; };
template <> struct NearestFieldTypeImpl<Tuple> { using Type = Tuple; };
template <> struct NearestFieldTypeImpl<bool> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<Null> { using Type = Null; };
template <typename T>
using NearestFieldType = typename NearestFieldTypeImpl<T>::Type;
template <typename T>
decltype(auto) nearestFieldType(T && x)
{
using U = typename NearestFieldType<std::decay_t<T>>::Type;
using U = NearestFieldType<std::decay_t<T>>;
if constexpr (std::is_same_v<std::decay_t<T>, U>)
return std::forward<T>(x);
else
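The pattern behind this change, in isolation: the old code exposed the mapping directly on NearestFieldType, forcing every call site to write `typename NearestFieldType<T>::Type`; moving the specializations to NearestFieldTypeImpl and exposing an alias template removes that noise. A minimal self-contained sketch, with standard integer types standing in for the ClickHouse ones:

#include <cstdint>
#include <type_traits>

template <typename T> struct NearestFieldTypeImpl;          /// primary template; one specialization per storage type
template <> struct NearestFieldTypeImpl<uint8_t> { using Type = uint64_t; };
template <> struct NearestFieldTypeImpl<int32_t> { using Type = int64_t; };
template <> struct NearestFieldTypeImpl<float>   { using Type = double; };

/// The alias hides ::Type, so call sites write NearestFieldType<T> directly.
template <typename T>
using NearestFieldType = typename NearestFieldTypeImpl<T>::Type;

static_assert(std::is_same_v<NearestFieldType<uint8_t>, uint64_t>);
static_assert(std::is_same_v<NearestFieldType<float>, double>);

int main() {}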


@ -179,8 +179,9 @@ void MergingAggregatedMemoryEfficientBlockInputStream::start()
auto thread_group = CurrentThread::getGroup();
reading_pool->schedule([&child, thread_group]
{
CurrentThread::attachToIfDetached(thread_group);
setThreadName("MergeAggReadThr");
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
CurrentMetrics::Increment metric_increment{CurrentMetrics::QueryThread};
child->readPrefix();
});
@ -485,8 +486,9 @@ MergingAggregatedMemoryEfficientBlockInputStream::BlocksToMerge MergingAggregate
auto thread_group = CurrentThread::getGroup();
reading_pool->schedule([&input, &read_from_input, thread_group]
{
CurrentThread::attachToIfDetached(thread_group);
setThreadName("MergeAggReadThr");
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
CurrentMetrics::Increment metric_increment{CurrentMetrics::QueryThread};
read_from_input(input);
});
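All of these scheduling sites now share one guard: CurrentThread::getGroup() can return a null pointer when the scheduling thread is not itself attached to a query, so the worker only attaches when a group exists. A runnable standalone analog of the pattern (ThreadGroup here is a stand-in, not the real class):

#include <cstdio>
#include <memory>
#include <thread>

struct ThreadGroup {};                                     /// stand-in for the real per-query state
using ThreadGroupStatusPtr = std::shared_ptr<ThreadGroup>;

static void attachToIfDetached(const ThreadGroupStatusPtr &)
{
    std::puts("attached to the query's thread group");
}

int main()
{
    ThreadGroupStatusPtr thread_group;                     /// null: the caller runs outside any query
    std::thread worker([thread_group]
    {
        if (thread_group)                                  /// the guard added throughout this commit
            attachToIfDetached(thread_group);
        std::puts("worker body runs either way");
    });
    worker.join();
}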


@ -91,7 +91,8 @@ void PushingToViewsBlockOutputStream::write(const Block & block)
pool.schedule([=]
{
setThreadName("PushingToViewsBlockOutputStream");
CurrentThread::attachToIfDetached(thread_group);
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
process(block, view_num);
});
}


@ -259,6 +259,10 @@ void DataTypeArray::deserializeBinaryBulkWithMultipleStreams(
if (last_offset < nested_column.size())
throw Exception("Nested column is longer than last offset", ErrorCodes::LOGICAL_ERROR);
size_t nested_limit = last_offset - nested_column.size();
/// Adjust the value size hint: divide it by the average array size.
settings.avg_value_size_hint = nested_limit ? settings.avg_value_size_hint / nested_limit * offset_values.size() : 0;
nested->deserializeBinaryBulkWithMultipleStreams(nested_column, nested_limit, settings, state);
settings.path.pop_back();
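The hint arrives per whole array value, but the nested column is deserialized per element, so the hint is rescaled by the average array size. A worked example with hypothetical numbers:

#include <cstdio>

int main()
{
    double avg_value_size_hint = 120.0;   /// bytes per whole array value (hypothetical)
    size_t num_arrays = 10;               /// offset_values.size() in the real code
    size_t nested_limit = 60;             /// total nested elements across those arrays
    /// Average array size is 60 / 10 = 6, so the per-element hint is 120 / 6 = 20.
    double nested_hint = nested_limit ? avg_value_size_hint / nested_limit * num_arrays : 0.0;
    std::printf("per-element hint = %.1f bytes\n", nested_hint);
}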


@ -103,7 +103,7 @@ DataTypeEnum<Type>::DataTypeEnum(const Values & values_) : values{values_}
template <typename Type>
void DataTypeEnum<Type>::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const FieldType x = get<typename NearestFieldType<FieldType>::Type>(field);
const FieldType x = get<NearestFieldType<FieldType>>(field);
writeBinary(x, ostr);
}
@ -347,7 +347,7 @@ static DataTypePtr create(const ASTPtr & arguments)
ErrorCodes::UNEXPECTED_AST_STRUCTURE);
const String & field_name = name_literal->value.get<String>();
const auto value = value_literal->value.get<typename NearestFieldType<FieldType>::Type>();
const auto value = value_literal->value.get<NearestFieldType<FieldType>>();
if (value > std::numeric_limits<FieldType>::max() || value < std::numeric_limits<FieldType>::min())
throw Exception{"Value " + toString(value) + " for element '" + field_name + "' exceeds range of " + EnumName<FieldType>::value,


@ -144,14 +144,14 @@ void DataTypeNumberBase<T>::deserializeTextCSV(IColumn & column, ReadBuffer & is
template <typename T>
Field DataTypeNumberBase<T>::getDefault() const
{
return typename NearestFieldType<FieldType>::Type();
return NearestFieldType<FieldType>();
}
template <typename T>
void DataTypeNumberBase<T>::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
/// ColumnVector<T>::value_type is a narrower type. For example, UInt8, when the Field type is UInt64
typename ColumnVector<T>::value_type x = get<typename NearestFieldType<FieldType>::Type>(field);
typename ColumnVector<T>::value_type x = get<NearestFieldType<FieldType>>(field);
writeBinary(x, ostr);
}
@ -160,7 +160,7 @@ void DataTypeNumberBase<T>::deserializeBinary(Field & field, ReadBuffer & istr)
{
typename ColumnVector<T>::value_type x;
readBinary(x, istr);
field = typename NearestFieldType<FieldType>::Type(x);
field = NearestFieldType<FieldType>(x);
}
template <typename T>


@ -187,17 +187,23 @@ void DataTypeString::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr,
avg_chars_size = (avg_value_size_hint - sizeof(offsets[0])) * avg_value_size_hint_reserve_multiplier;
}
try
size_t size_to_reserve = data.size() + std::ceil(limit * avg_chars_size);
/// Never reserve a size that is too big.
if (size_to_reserve < 256 * 1024 * 1024)
{
data.reserve(data.size() + std::ceil(limit * avg_chars_size));
}
catch (Exception & e)
{
e.addMessage(
"avg_value_size_hint = " + toString(avg_value_size_hint)
+ ", avg_chars_size = " + toString(avg_chars_size)
+ ", limit = " + toString(limit));
throw;
try
{
data.reserve(size_to_reserve);
}
catch (Exception & e)
{
e.addMessage(
"(avg_value_size_hint = " + toString(avg_value_size_hint)
+ ", avg_chars_size = " + toString(avg_chars_size)
+ ", limit = " + toString(limit) + ")");
throw;
}
}
offsets.reserve(offsets.size() + limit);
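The rewritten logic computes the projected size first and skips the reservation entirely above a 256 MiB cap, so a bad size hint can no longer trigger a huge speculative allocation. The guard in standalone form, with hypothetical numbers:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<char> data;
    double avg_chars_size = 64.0;   /// hypothetical estimate of bytes per value
    size_t limit = 1000;            /// values about to be read
    size_t size_to_reserve = data.size() + static_cast<size_t>(std::ceil(limit * avg_chars_size));
    if (size_to_reserve < 256 * 1024 * 1024)   /// never reserve an absurd size
        data.reserve(size_to_reserve);
    std::printf("capacity after guard: %zu\n", data.capacity());
}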


@ -397,6 +397,7 @@ void DataTypeTuple::deserializeBinaryBulkWithMultipleStreams(
auto * tuple_state = checkAndGetTupleDeserializeState(state);
settings.path.push_back(Substream::TupleElement);
settings.avg_value_size_hint = 0;
for (const auto i : ext::range(0, ext::size(elems)))
{
settings.path.back().tuple_element_name = names[i];


@ -456,7 +456,7 @@ CacheDictionary::Attribute CacheDictionary::createAttributeWithType(const Attrib
{
#define DISPATCH(TYPE) \
case AttributeUnderlyingType::TYPE: \
attr.null_values = TYPE(null_value.get<typename NearestFieldType<TYPE>::Type>()); \
attr.null_values = TYPE(null_value.get<NearestFieldType<TYPE>>()); \
attr.arrays = std::make_unique<ContainerType<TYPE>>(size); \
bytes_allocated += size * sizeof(TYPE); \
break;


@ -11,7 +11,7 @@ ComplexKeyCacheDictionary::Attribute ComplexKeyCacheDictionary::createAttributeW
{
#define DISPATCH(TYPE) \
case AttributeUnderlyingType::TYPE: \
attr.null_values = TYPE(null_value.get<typename NearestFieldType<TYPE>::Type>()); \
attr.null_values = TYPE(null_value.get<NearestFieldType<TYPE>>()); \
attr.arrays = std::make_unique<ContainerType<TYPE>>(size); \
bytes_allocated += size * sizeof(TYPE); \
break;


@ -421,7 +421,7 @@ void ComplexKeyHashedDictionary::calculateBytesAllocated()
template <typename T>
void ComplexKeyHashedDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value)
{
attribute.null_values = T(null_value.get<typename NearestFieldType<T>::Type>());
attribute.null_values = T(null_value.get<NearestFieldType<T>>());
attribute.maps.emplace<ContainerType<T>>();
}


@ -440,7 +440,7 @@ void FlatDictionary::calculateBytesAllocated()
template <typename T>
void FlatDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value)
{
attribute.null_values = T(null_value.get<typename NearestFieldType<T>::Type>());
attribute.null_values = T(null_value.get<NearestFieldType<T>>());
const auto & null_value_ref = std::get<T>(attribute.null_values);
attribute.arrays.emplace<ContainerType<T>>(initial_array_size, null_value_ref);
}


@ -430,7 +430,7 @@ void HashedDictionary::calculateBytesAllocated()
template <typename T>
void HashedDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value)
{
attribute.null_values = T(null_value.get<typename NearestFieldType<T>::Type>());
attribute.null_values = T(null_value.get<NearestFieldType<T>>());
attribute.maps = std::make_unique<CollectionType<T>>();
}


@ -261,7 +261,7 @@ void RangeHashedDictionary::calculateBytesAllocated()
template <typename T>
void RangeHashedDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value)
{
attribute.null_values = T(null_value.get<typename NearestFieldType<T>::Type>());
attribute.null_values = T(null_value.get<NearestFieldType<T>>());
attribute.maps = std::make_unique<Collection<T>>();
}


@ -352,7 +352,7 @@ void TrieDictionary::validateKeyTypes(const DataTypes & key_types) const
template <typename T>
void TrieDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value)
{
attribute.null_values = T(null_value.get<typename NearestFieldType<T>::Type>());
attribute.null_values = T(null_value.get<NearestFieldType<T>>());
attribute.maps.emplace<ContainerType<T>>();
}


@ -7,9 +7,9 @@
#include <Formats/CapnProtoRowInputStream.h> // Y_IGNORE
#include <Formats/FormatFactory.h>
#include <Formats/BlockInputStreamFromRowInputStream.h>
#include <capnp/serialize.h> // Y_IGNORE
#include <capnp/dynamic.h> // Y_IGNORE
#include <capnp/common.h> // Y_IGNORE
#include <boost/algorithm/string.hpp>
#include <boost/range/join.hpp>
#include <common/logger_useful.h>
@ -214,7 +214,12 @@ bool CapnProtoRowInputStream::read(MutableColumns & columns)
array = heap_array.asPtr();
}
#if CAPNP_VERSION >= 8000
capnp::UnalignedFlatArrayMessageReader msg(array);
#else
capnp::FlatArrayMessageReader msg(array);
#endif
std::vector<capnp::DynamicStruct::Reader> stack;
stack.push_back(msg.getRoot<capnp::DynamicStruct>(root));


@ -73,13 +73,13 @@ const ColumnConst * checkAndGetColumnConstStringOrFixedString(const IColumn * co
template <typename T>
inline std::enable_if_t<!IsDecimalNumber<T>, Field> toField(const T & x)
{
return Field(typename NearestFieldType<T>::Type(x));
return Field(NearestFieldType<T>(x));
}
template <typename T>
inline std::enable_if_t<IsDecimalNumber<T>, Field> toField(const T & x, UInt32 scale)
{
return Field(typename NearestFieldType<T>::Type(x, scale));
return Field(NearestFieldType<T>(x, scale));
}


@ -37,6 +37,8 @@ void registerFunctionsExternalDictionaries(FunctionFactory & factory)
factory.registerFunction<FunctionDictGetDateTimeOrDefault>();
factory.registerFunction<FunctionDictGetUUIDOrDefault>();
factory.registerFunction<FunctionDictGetStringOrDefault>();
factory.registerFunction<FunctionDictGetNoType>();
factory.registerFunction<FunctionDictGetNoTypeOrDefault>();
}
}


@ -1145,6 +1145,219 @@ using FunctionDictGetDateOrDefault = FunctionDictGetOrDefault<DataTypeDate, Name
using FunctionDictGetDateTimeOrDefault = FunctionDictGetOrDefault<DataTypeDateTime, NameDictGetDateTimeOrDefault>;
using FunctionDictGetUUIDOrDefault = FunctionDictGetOrDefault<DataTypeUUID, NameDictGetUUIDOrDefault>;
#define FOR_DICT_TYPES(M) \
M(UInt8) \
M(UInt16) \
M(UInt32) \
M(UInt64) \
M(Int8) \
M(Int16) \
M(Int32) \
M(Int64) \
M(Float32) \
M(Float64) \
M(Date) \
M(DateTime) \
M(UUID)
/// This variant of function derives the result type automatically.
class FunctionDictGetNoType final : public IFunction
{
public:
static constexpr auto name = "dictGet";
static FunctionPtr create(const Context & context)
{
return std::make_shared<FunctionDictGetNoType>(context.getExternalDictionaries(), context);
}
FunctionDictGetNoType(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {}
String getName() const override { return name; }
private:
bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; }
bool useDefaultImplementationForConstants() const final { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const final { return {0, 1}; }
bool isInjective(const Block & sample_block) override
{
return isDictGetFunctionInjective(dictionaries, sample_block);
}
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() != 3 && arguments.size() != 4)
throw Exception{"Function " + getName() + " takes 3 or 4 arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH};
String dict_name;
if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[0].column.get()))
{
dict_name = name_col->getValue<String>();
}
else
throw Exception{"Illegal type " + arguments[0].type->getName() + " of first argument of function " + getName()
+ ", expected a const string.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
String attr_name;
if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[1].column.get()))
{
attr_name = name_col->getValue<String>();
}
else
throw Exception{"Illegal type " + arguments[1].type->getName() + " of second argument of function " + getName()
+ ", expected a const string.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
if (!WhichDataType(arguments[2].type).isUInt64() &&
!isTuple(arguments[2].type))
throw Exception{"Illegal type " + arguments[2].type->getName() + " of third argument of function " + getName()
+ ", must be UInt64 or tuple(...).", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
if (arguments.size() == 4)
{
const auto range_argument = arguments[3].type.get();
if (!(range_argument->isValueRepresentedByInteger() &&
range_argument->getSizeOfValueInMemory() <= sizeof(Int64)))
throw Exception{"Illegal type " + range_argument->getName() + " of fourth argument of function " + getName()
+ ", must be convertible to " + TypeName<Int64>::get() + ".", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
}
auto dict = dictionaries.getDictionary(dict_name);
const DictionaryStructure & structure = dict->getStructure();
for (const auto idx : ext::range(0, structure.attributes.size()))
{
const DictionaryAttribute & attribute = structure.attributes[idx];
if (attribute.name == attr_name)
{
WhichDataType dt = attribute.type;
if (dt.idx == TypeIndex::String)
impl = FunctionDictGetString::create(context);
#define DISPATCH(TYPE) \
else if (dt.idx == TypeIndex::TYPE) \
impl = FunctionDictGet<DataType##TYPE, NameDictGet##TYPE>::create(context);
FOR_DICT_TYPES(DISPATCH)
#undef DISPATCH
else
throw Exception("Unknown dictGet type", ErrorCodes::UNKNOWN_TYPE);
return attribute.type;
}
}
throw Exception{"No such attribute '" + attr_name + "'", ErrorCodes::BAD_ARGUMENTS};
}
bool isDeterministic() const override { return false; }
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override
{
impl->executeImpl(block, arguments, result, input_rows_count);
}
private:
const ExternalDictionaries & dictionaries;
const Context & context;
mutable FunctionPtr impl; // underlying function used by dictGet function without explicit type info
};
class FunctionDictGetNoTypeOrDefault final : public IFunction
{
public:
static constexpr auto name = "dictGetOrDefault";
static FunctionPtr create(const Context & context)
{
return std::make_shared<FunctionDictGetNoTypeOrDefault>(context.getExternalDictionaries(), context);
}
FunctionDictGetNoTypeOrDefault(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {}
String getName() const override { return name; }
private:
size_t getNumberOfArguments() const override { return 4; }
bool useDefaultImplementationForConstants() const final { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const final { return {0, 1}; }
bool isInjective(const Block & sample_block) override
{
return isDictGetFunctionInjective(dictionaries, sample_block);
}
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
String dict_name;
if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[0].column.get()))
{
dict_name = name_col->getValue<String>();
}
else
throw Exception{"Illegal type " + arguments[0].type->getName() + " of first argument of function " + getName()
+ ", expected a const string.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
String attr_name;
if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[1].column.get()))
{
attr_name = name_col->getValue<String>();
}
else
throw Exception{"Illegal type " + arguments[1].type->getName() + " of second argument of function " + getName()
+ ", expected a const string.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
if (!WhichDataType(arguments[2].type).isUInt64() &&
!isTuple(arguments[2].type))
throw Exception{"Illegal type " + arguments[2].type->getName() + " of third argument of function " + getName()
+ ", must be UInt64 or tuple(...).", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
auto dict = dictionaries.getDictionary(dict_name);
const DictionaryStructure & structure = dict->getStructure();
for (const auto idx : ext::range(0, structure.attributes.size()))
{
const DictionaryAttribute & attribute = structure.attributes[idx];
if (attribute.name == attr_name)
{
WhichDataType dt = attribute.type;
if (dt.idx == TypeIndex::String)
{
if (!isString(arguments[3].type))
throw Exception{"Illegal type " + arguments[3].type->getName() + " of fourth argument of function " + getName() +
", must be String.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
impl = FunctionDictGetStringOrDefault::create(context);
}
#define DISPATCH(TYPE) \
else if (dt.idx == TypeIndex::TYPE) \
{ \
if (!checkAndGetDataType<DataType##TYPE>(arguments[3].type.get())) \
throw Exception{"Illegal type " + arguments[3].type->getName() + " of fourth argument of function " + getName() \
+ ", must be " + String(DataType##TYPE{}.getFamilyName()) + ".", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; \
impl = FunctionDictGetOrDefault<DataType##TYPE, NameDictGet##TYPE ## OrDefault>::create(context); \
}
FOR_DICT_TYPES(DISPATCH)
#undef DISPATCH
else
throw Exception("Unknown dictGetOrDefault type", ErrorCodes::UNKNOWN_TYPE);
return attribute.type;
}
}
throw Exception{"No such attribute '" + attr_name + "'", ErrorCodes::BAD_ARGUMENTS};
}
bool isDeterministic() const override { return false; }
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override
{
impl->executeImpl(block, arguments, result, input_rows_count);
}
private:
const ExternalDictionaries & dictionaries;
const Context & context;
mutable FunctionPtr impl; // underlying function used by dictGet function without explicit type info
};
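Both functions pick their typed implementation with the FOR_DICT_TYPES/DISPATCH macro, a classic X-macro that expands one `else if` branch per supported type. A minimal self-contained sketch of the technique (all names here are illustrative, not ClickHouse API):

#include <cstdio>
#include <string>

#define FOR_TYPES(M) M(Int32) M(Float64) M(Date)

static std::string pickImplementation(const std::string & type_name)
{
    if (type_name == "String")
        return "StringImpl";
#define DISPATCH(TYPE) \
    else if (type_name == #TYPE) \
        return "Impl_" #TYPE;
    FOR_TYPES(DISPATCH)
#undef DISPATCH
    else
        return "unknown";
}

int main()
{
    std::printf("%s\n", pickImplementation("Float64").c_str());   /// prints Impl_Float64
}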
/// Functions to work with hierarchies.


@ -185,7 +185,7 @@ protected:
for (size_t k = 0; k < array.size(); ++k)
{
const Field & tmp_field = array[k];
typename NearestFieldType<CentroidsType>::Type value;
NearestFieldType<CentroidsType> value;
if (!tmp_field.tryGet(value))
return false;


@ -202,15 +202,28 @@ struct ExtractFirstSignificantSubdomain
if (!last_3_periods[2])
last_3_periods[2] = begin - 1;
if (!strncmp(last_3_periods[1] + 1, "com.", 4) /// Note that in ColumnString every value has zero byte after it.
|| !strncmp(last_3_periods[1] + 1, "net.", 4)
|| !strncmp(last_3_periods[1] + 1, "org.", 4)
|| !strncmp(last_3_periods[1] + 1, "co.", 3)
|| !strncmp(last_3_periods[1] + 1, "biz.", 4))
size_t size_of_second_subdomain_plus_period = last_3_periods[0] - last_3_periods[1];
if (size_of_second_subdomain_plus_period == 4 || size_of_second_subdomain_plus_period == 3)
{
res_data += last_3_periods[2] + 1 - begin;
res_size = last_3_periods[1] - last_3_periods[2] - 1;
return;
/// We will key by four bytes that are either ".xyz" or ".xy.".
UInt32 key = unalignedLoad<UInt32>(last_3_periods[1]);
/// NOTE: assuming little endian.
/// NOTE: does the compiler generate SIMD code?
/// NOTE: for larger amount of cases we can use a perfect hash table (see 'gperf' as an example).
if (key == '.' + 'c' * 0x100U + 'o' * 0x10000U + 'm' * 0x1000000U
|| key == '.' + 'n' * 0x100U + 'e' * 0x10000U + 't' * 0x1000000U
|| key == '.' + 'o' * 0x100U + 'r' * 0x10000U + 'g' * 0x1000000U
|| key == '.' + 'b' * 0x100U + 'i' * 0x10000U + 'z' * 0x1000000U
|| key == '.' + 'g' * 0x100U + 'o' * 0x10000U + 'v' * 0x1000000U
|| key == '.' + 'm' * 0x100U + 'i' * 0x10000U + 'l' * 0x1000000U
|| key == '.' + 'e' * 0x100U + 'd' * 0x10000U + 'u' * 0x1000000U
|| key == '.' + 'c' * 0x100U + 'o' * 0x10000U + '.' * 0x1000000U)
{
res_data += last_3_periods[2] + 1 - begin;
res_size = last_3_periods[1] - last_3_periods[2] - 1;
return;
}
}
res_data += last_3_periods[1] + 1 - begin;


@ -4,6 +4,7 @@
#include <Common/DNSResolver.h>
#include <Common/Exception.h>
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Poco/Net/AcceptCertificateHandler.h>
#include <Poco/Net/Context.h>
@ -13,6 +14,7 @@
#include <Poco/Net/RejectCertificateHandler.h>
#include <Poco/Net/SSLManager.h>
#endif
#include <tuple>
#include <unordered_map>
#include <Poco/Net/HTTPServerResponse.h>
@ -22,6 +24,8 @@
#include <Common/SipHash.h>
#include <sstream>
namespace ProfileEvents
{
extern const Event CreatedHTTPConnections;
@ -34,6 +38,7 @@ namespace ErrorCodes
extern const int RECEIVED_ERROR_FROM_REMOTE_IO_SERVER;
extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS;
extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME;
extern const int UNSUPPORTED_URI_SCHEME;
}
@ -48,23 +53,34 @@ namespace
#endif
}
HTTPSessionPtr makeHTTPSessionImpl(const std::string & host, UInt16 port, bool https)
bool isHTTPS(const Poco::URI & uri)
{
if (uri.getScheme() == "https")
return true;
else if (uri.getScheme() == "http")
return false;
else
throw Exception("Unsupported scheme in URI '" + uri.toString() + "'", ErrorCodes::UNSUPPORTED_URI_SCHEME);
}
HTTPSessionPtr makeHTTPSessionImpl(const std::string & host, UInt16 port, bool https, bool keep_alive)
{
HTTPSessionPtr session;
if (https)
#if USE_POCO_NETSSL
session = std::make_unique<Poco::Net::HTTPSClientSession>();
session = std::make_shared<Poco::Net::HTTPSClientSession>();
#else
throw Exception("ClickHouse was built without HTTPS support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
#endif
else
session = std::make_unique<Poco::Net::HTTPClientSession>();
session = std::make_shared<Poco::Net::HTTPClientSession>();
ProfileEvents::increment(ProfileEvents::CreatedHTTPConnections);
session->setHost(DNSResolver::instance().resolveHost(host).toString());
session->setPort(port);
session->setKeepAlive(keep_alive);
return session;
}
@ -79,12 +95,12 @@ namespace
ObjectPtr allocObject() override
{
return makeHTTPSessionImpl(host, port, https);
return makeHTTPSessionImpl(host, port, https, true);
}
public:
SingleEndpointHTTPSessionPool(const std::string & host_, UInt16 port_, bool https_, size_t max_pool_size_)
: Base(max_pool_size_, &Poco::Logger::get("HttpSessionsPool")), host(host_), port(port_), https(https_)
: Base(max_pool_size_, &Poco::Logger::get("HTTPSessionPool")), host(host_), port(port_), https(https_)
{
}
};
@ -122,14 +138,16 @@ namespace
std::unique_lock<std::mutex> lock(mutex);
const std::string & host = uri.getHost();
UInt16 port = uri.getPort();
bool https = (uri.getScheme() == "https");
bool https = isHTTPS(uri);
auto key = std::make_tuple(host, port, https);
auto pool_ptr = endpoints_pool.find(key);
if (pool_ptr == endpoints_pool.end())
std::tie(pool_ptr, std::ignore) = endpoints_pool.emplace(
key, std::make_shared<SingleEndpointHTTPSessionPool>(host, port, https, max_connections_per_endpoint));
auto session = pool_ptr->second->get(-1);
auto retry_timeout = timeouts.connection_timeout.totalMicroseconds();
auto session = pool_ptr->second->get(retry_timeout);
setTimeouts(*session, timeouts);
return session;
}
@ -150,9 +168,9 @@ HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts &
{
const std::string & host = uri.getHost();
UInt16 port = uri.getPort();
bool https = (uri.getScheme() == "https");
bool https = isHTTPS(uri);
auto session = makeHTTPSessionImpl(host, port, https);
auto session = makeHTTPSessionImpl(host, port, https, false);
setTimeouts(*session, timeouts);
return session;
}
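Scheme handling is now strict: only http and https are accepted, and anything else raises UNSUPPORTED_URI_SCHEME instead of being silently treated as plain HTTP. The check in standalone form (std::runtime_error stands in for the DB::Exception used in the real code):

#include <cstdio>
#include <stdexcept>
#include <string>

static bool isHTTPS(const std::string & scheme)
{
    if (scheme == "https")
        return true;
    if (scheme == "http")
        return false;
    throw std::runtime_error("Unsupported scheme '" + scheme + "'");   /// UNSUPPORTED_URI_SCHEME in the real code
}

int main()
{
    std::printf("https -> %d, http -> %d\n", isHTTPS("https"), isHTTPS("http"));
}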


@ -40,7 +40,7 @@ public:
SingleEndpointHTTPSessionPool(const std::string & host_, UInt16 port_, bool https_, size_t max_pool_size_);
};
using PooledHTTPSessionPtr = SingleEndpointHTTPSessionPool::Entry;
using HTTPSessionPtr = std::unique_ptr<Poco::Net::HTTPClientSession>;
using HTTPSessionPtr = std::shared_ptr<Poco::Net::HTTPClientSession>;
void setResponseDefaultHeaders(Poco::Net::HTTPServerResponse & response, unsigned keep_alive_timeout);


@ -50,7 +50,7 @@ namespace detail
: ReadBuffer(nullptr, 0)
, uri {uri}
, method {!method.empty() ? method : out_stream_callback ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET}
, session {std::move(session_)}
, session {session_}
{
// With an empty path Poco will send "POST  HTTP/1.1" (a Poco bug).
if (uri.getPath().empty())
@ -117,7 +117,7 @@ public:
const ConnectionTimeouts & timeouts = {},
const Poco::Net::HTTPBasicCredentials & credentials = {},
size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE,
size_t max_connections_per_endpoint = DEFAULT_HTTP_AMOUNT_CONNECTIONS_PER_ENDPOINT)
size_t max_connections_per_endpoint = DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT)
: Parent(makePooledHTTPSession(uri, timeouts, max_connections_per_endpoint),
uri,
method,


@ -1352,7 +1352,8 @@ BlocksList Aggregator::prepareBlocksAndFillTwoLevelImpl(
{
auto converter = [&](size_t bucket, ThreadGroupStatusPtr thread_group)
{
CurrentThread::attachToIfDetached(thread_group);
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
return convertOneBucketToBlock(data_variants, method, final, bucket);
};
@ -1805,7 +1806,8 @@ private:
try
{
setThreadName("MergingAggregtd");
CurrentThread::attachToIfDetached(thread_group);
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
CurrentMetrics::Increment metric_increment{CurrentMetrics::QueryThread};
/// TODO: add no_more_keys support maybe
@ -2127,7 +2129,8 @@ void Aggregator::mergeStream(const BlockInputStreamPtr & stream, AggregatedDataV
auto merge_bucket = [&bucket_to_blocks, &result, this](Int32 bucket, Arena * aggregates_pool, ThreadGroupStatusPtr thread_group)
{
CurrentThread::attachToIfDetached(thread_group);
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
for (Block & block : bucket_to_blocks[bucket])
{


@ -54,41 +54,10 @@ BlockIO InterpreterAlterQuery::execute()
table->mutate(mutation_commands, context);
}
partition_commands.validate(*table);
for (const PartitionCommand & command : partition_commands)
if (!partition_commands.empty())
{
switch (command.type)
{
case PartitionCommand::DROP_PARTITION:
table->checkPartitionCanBeDropped(command.partition);
table->dropPartition(query_ptr, command.partition, command.detach, context);
break;
case PartitionCommand::ATTACH_PARTITION:
table->attachPartition(command.partition, command.part, context);
break;
case PartitionCommand::REPLACE_PARTITION:
{
table->checkPartitionCanBeDropped(command.partition);
String from_database = command.from_database.empty() ? context.getCurrentDatabase() : command.from_database;
auto from_storage = context.getTable(from_database, command.from_table);
table->replacePartitionFrom(from_storage, command.partition, command.replace, context);
}
break;
case PartitionCommand::FETCH_PARTITION:
table->fetchPartition(command.partition, command.from_zookeeper_path, context);
break;
case PartitionCommand::FREEZE_PARTITION:
table->freezePartition(command.partition, command.with_name, context);
break;
case PartitionCommand::CLEAR_COLUMN:
table->clearColumnInPartition(command.partition, command.column_name, context);
break;
}
partition_commands.validate(*table);
table->partition(query_ptr, partition_commands, context);
}
if (!alter_commands.empty())


@ -15,7 +15,7 @@
namespace DB
{
template <> struct NearestFieldType<PartLogElement::Type> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<PartLogElement::Type> { using Type = UInt64; };
Block PartLogElement::createBlock()
{


@ -9,6 +9,7 @@
#include <Parsers/queryToString.h>
#include <Interpreters/QueryNormalizer.h>
#include <Interpreters/QueryAliasesVisitor.h>
#include "TranslateQualifiedNamesVisitor.h"
namespace DB
{
@ -26,6 +27,9 @@ bool PredicateExpressionsOptimizer::optimize()
if (!settings.enable_optimize_predicate_expression || !ast_select || !ast_select->tables || ast_select->tables->children.empty())
return false;
if (!ast_select->where_expression && !ast_select->prewhere_expression)
return false;
SubqueriesProjectionColumns all_subquery_projection_columns;
getAllSubqueryProjectionColumns(all_subquery_projection_columns);
@ -300,15 +304,18 @@ void PredicateExpressionsOptimizer::getSubqueryProjectionColumns(SubqueriesProje
ASTs PredicateExpressionsOptimizer::getSelectQueryProjectionColumns(ASTPtr & ast)
{
ASTs projection_columns;
auto select_query = static_cast<ASTSelectQuery *>(ast.get());
/// first should normalize query tree.
std::unordered_map<String, ASTPtr> aliases;
std::vector<DatabaseAndTableWithAlias> tables = getDatabaseAndTables(*select_query, context.getCurrentDatabase());
TranslateQualifiedNamesVisitor({}, tables).visit(ast);
QueryAliasesVisitor query_aliases_visitor(aliases);
query_aliases_visitor.visit(ast);
QueryNormalizer(ast, aliases, settings, {}, {}).perform();
ASTs projection_columns;
auto select_query = static_cast<ASTSelectQuery *>(ast.get());
for (const auto & projection_column : select_query->select_expression_list->children)
{
if (typeid_cast<ASTAsterisk *>(projection_column.get()) || typeid_cast<ASTQualifiedAsterisk *>(projection_column.get()))


@ -128,6 +128,16 @@ void ASTAlterCommand::formatImpl(
<< " " << std::quoted(with_name, '\'');
}
}
else if (type == ASTAlterCommand::FREEZE_ALL)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "FREEZE";
if (!with_name.empty())
{
settings.ostr << " " << (settings.hilite ? hilite_keyword : "") << "WITH NAME" << (settings.hilite ? hilite_none : "")
<< " " << std::quoted(with_name, '\'');
}
}
else if (type == ASTAlterCommand::DELETE)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "DELETE WHERE " << (settings.hilite ? hilite_none : "");


@ -31,6 +31,7 @@ public:
REPLACE_PARTITION,
FETCH_PARTITION,
FREEZE_PARTITION,
FREEZE_ALL,
DELETE,
UPDATE,


@ -30,7 +30,8 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_drop_partition("DROP PARTITION");
ParserKeyword s_attach_part("ATTACH PART");
ParserKeyword s_fetch_partition("FETCH PARTITION");
ParserKeyword s_freeze_partition("FREEZE PARTITION");
ParserKeyword s_replace_partition("REPLACE PARTITION");
ParserKeyword s_freeze("FREEZE");
ParserKeyword s_partition("PARTITION");
ParserKeyword s_after("AFTER");
@ -121,7 +122,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->type = ASTAlterCommand::ATTACH_PARTITION;
}
}
else if (ParserKeyword{"REPLACE PARTITION"}.ignore(pos, expected))
else if (s_replace_partition.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))
return false;
@ -158,10 +159,19 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->from = typeid_cast<const ASTLiteral &>(*ast_from).value.get<const String &>();
command->type = ASTAlterCommand::FETCH_PARTITION;
}
else if (s_freeze_partition.ignore(pos, expected))
else if (s_freeze.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))
return false;
if (s_partition.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))
return false;
command->type = ASTAlterCommand::FREEZE_PARTITION;
}
else
{
command->type = ASTAlterCommand::FREEZE_ALL;
}
/// WITH NAME 'name' - place local backup to directory with specified name
if (s_with.ignore(pos, expected))
@ -175,8 +185,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->with_name = typeid_cast<const ASTLiteral &>(*ast_with_name).value.get<const String &>();
}
command->type = ASTAlterCommand::FREEZE_PARTITION;
}
else if (s_modify_column.ignore(pos, expected))
{


@ -15,7 +15,7 @@ namespace DB
* [MODIFY PRIMARY KEY (a, b, c...)]
* [DROP|DETACH|ATTACH PARTITION|PART partition, ...]
* [FETCH PARTITION partition FROM ...]
* [FREEZE PARTITION]
* [FREEZE [PARTITION] [WITH NAME name]]
* [DELETE WHERE ...]
* [UPDATE col_name = expr, ... WHERE ...]
*/


@ -12,7 +12,8 @@ namespace DB
class ASTAlterCommand;
/// Operation from the ALTER query (except for manipulation with PART/PARTITION). Adding Nested columns is not expanded to add individual columns.
/// Operation from the ALTER query (except for manipulation with PART/PARTITION).
/// Adding Nested columns is not expanded to add individual columns.
struct AlterCommand
{
enum Type


@ -196,7 +196,8 @@ ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutp
auto thread_group = CurrentThread::getGroup();
return [this, thread_group, &job, &current_block]()
{
CurrentThread::attachToIfDetached(thread_group);
if (thread_group)
CurrentThread::attachToIfDetached(thread_group);
setThreadName("DistrOutStrProc");
++job.blocks_started;


@ -45,6 +45,7 @@ struct Settings;
class AlterCommands;
class MutationCommands;
class PartitionCommands;
/** Does not allow changing the table description (including renaming and deleting the table).
@ -239,44 +240,12 @@ public:
throw Exception("Method alter is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Execute CLEAR COLUMN ... IN PARTITION query which removes column from given partition. */
virtual void clearColumnInPartition(const ASTPtr & /*partition*/, const Field & /*column_name*/, const Context & /*context*/)
{
throw Exception("Method dropColumnFromPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Execute ALTER TABLE dst.table REPLACE(ATTACH) PARTITION partition FROM src.table */
virtual void replacePartitionFrom(const StoragePtr & /*source_table*/, const ASTPtr & /*partition*/, bool /*replace*/, const Context &)
{
throw Exception("Method replacePartitionFrom is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Run the query (DROP|DETACH) PARTITION.
/** ALTER the table with regard to its partitions.
* Should handle locking for each command on its own.
*/
virtual void dropPartition(const ASTPtr & /*query*/, const ASTPtr & /*partition*/, bool /*detach*/, const Context & /*context*/)
virtual void partition(const ASTPtr & /* query */, const PartitionCommands & /* commands */, const Context & /* context */)
{
throw Exception("Method dropPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Run the ATTACH request (PART|PARTITION).
*/
virtual void attachPartition(const ASTPtr & /*partition*/, bool /*part*/, const Context & /*context*/)
{
throw Exception("Method attachPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Run the FETCH PARTITION query.
*/
virtual void fetchPartition(const ASTPtr & /*partition*/, const String & /*from*/, const Context & /*context*/)
{
throw Exception("Method fetchPartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Run the FREEZE PARTITION request. That is, create a local backup (snapshot) of data using the `localBackup` function (see localBackup.h)
*/
virtual void freezePartition(const ASTPtr & /*partition*/, const String & /*with_name*/, const Context & /*context*/)
{
throw Exception("Method freezePartition is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
throw Exception("Partition operations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/** Perform any background work. For example, combining parts in a MergeTree type table.


@ -79,7 +79,7 @@ class ReadBufferFromKafkaConsumer : public ReadBuffer
}
// Process next buffered message
rd_kafka_message_t * msg = rd_kafka_consumer_poll(consumer, READ_POLL_MS);
rd_kafka_message_t * msg = rd_kafka_consumer_poll(consumer, READ_POLL_MS); // XXX: use RAII.
if (msg == nullptr)
return false;


@ -1279,6 +1279,11 @@ MergeTreeData::AlterDataPartTransactionPtr MergeTreeData::alterDataPart(
return transaction;
}
void MergeTreeData::freezeAll(const String & with_name, const Context & context)
{
freezePartitionsByMatcher([] (const DataPartPtr &){ return true; }, with_name, context);
}
void MergeTreeData::AlterDataPartTransaction::commit()
{
if (!data_part)
@ -2064,44 +2069,17 @@ void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const String &
else
LOG_DEBUG(log, "Freezing parts with partition ID " + partition_id);
String clickhouse_path = Poco::Path(context.getPath()).makeAbsolute().toString();
String shadow_path = clickhouse_path + "shadow/";
Poco::File(shadow_path).createDirectories();
String backup_path = shadow_path
+ (!with_name.empty()
? escapeForFileName(with_name)
: toString(Increment(shadow_path + "increment.txt").get(true)))
+ "/";
LOG_DEBUG(log, "Snapshot will be placed at " + backup_path);
/// Acquire a snapshot of active data parts to prevent removing while doing backup.
const auto data_parts = getDataParts();
size_t parts_processed = 0;
for (const auto & part : data_parts)
{
if (prefix)
freezePartitionsByMatcher(
[&prefix, &partition_id](const DataPartPtr & part)
{
if (!startsWith(part->info.partition_id, *prefix))
continue;
}
else if (part->info.partition_id != partition_id)
continue;
LOG_DEBUG(log, "Freezing part " << part->name);
String part_absolute_path = Poco::Path(part->getFullPath()).absolute().toString();
if (!startsWith(part_absolute_path, clickhouse_path))
throw Exception("Part path " + part_absolute_path + " is not inside " + clickhouse_path, ErrorCodes::LOGICAL_ERROR);
String backup_part_absolute_path = part_absolute_path;
backup_part_absolute_path.replace(0, clickhouse_path.size(), backup_path);
localBackup(part_absolute_path, backup_part_absolute_path);
++parts_processed;
}
LOG_DEBUG(log, "Freezed " << parts_processed << " parts");
if (prefix)
return startsWith(part->info.partition_id, *prefix);
else
return part->info.partition_id == partition_id;
},
with_name,
context);
}
size_t MergeTreeData::getPartitionSize(const std::string & partition_id) const
@ -2442,4 +2420,41 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPart(const Merg
return dst_data_part;
}
void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const String & with_name, const Context & context)
{
String clickhouse_path = Poco::Path(context.getPath()).makeAbsolute().toString();
String shadow_path = clickhouse_path + "shadow/";
Poco::File(shadow_path).createDirectories();
String backup_path = shadow_path
+ (!with_name.empty()
? escapeForFileName(with_name)
: toString(Increment(shadow_path + "increment.txt").get(true)))
+ "/";
LOG_DEBUG(log, "Snapshot will be placed at " + backup_path);
/// Acquire a snapshot of active data parts to prevent removing while doing backup.
const auto data_parts = getDataParts();
size_t parts_processed = 0;
for (const auto & part : data_parts)
{
if (!matcher(part))
continue;
LOG_DEBUG(log, "Freezing part " << part->name);
String part_absolute_path = Poco::Path(part->getFullPath()).absolute().toString();
if (!startsWith(part_absolute_path, clickhouse_path))
throw Exception("Part path " + part_absolute_path + " is not inside " + clickhouse_path, ErrorCodes::LOGICAL_ERROR);
String backup_part_absolute_path = part_absolute_path;
backup_part_absolute_path.replace(0, clickhouse_path.size(), backup_path);
localBackup(part_absolute_path, backup_part_absolute_path);
++parts_processed;
}
LOG_DEBUG(log, "Freezed " << parts_processed << " parts");
}
}
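freezePartition and freezeAll now funnel into one routine parameterized by a predicate over parts, so the filtering logic lives at the call sites and the backup loop exists once. The shape of that refactoring in a runnable analog (part names are hypothetical):

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

using MatcherFn = std::function<bool(const std::string &)>;

static void freezeByMatcher(const std::vector<std::string> & parts, MatcherFn matcher)
{
    size_t processed = 0;
    for (const auto & part : parts)
    {
        if (!matcher(part))
            continue;
        ++processed;   /// the real code backs up the part directory here
    }
    std::printf("froze %zu parts\n", processed);
}

int main()
{
    std::vector<std::string> parts{"201810_1_1_0", "201811_2_2_0"};
    freezeByMatcher(parts, [](const std::string &) { return true; });                         /// FREEZE (all partitions)
    freezeByMatcher(parts, [](const std::string & p) { return p.rfind("201811", 0) == 0; });  /// FREEZE PARTITION 201811
}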


@ -480,6 +480,9 @@ public:
const ASTPtr & new_primary_key,
bool skip_sanity_checks);
/// Freezes all parts.
void freezeAll(const String & with_name, const Context & context);
/// Should be called if part data is suspected to be corrupted.
void reportBrokenPart(const String & name) const
{
@ -720,6 +723,10 @@ private:
/// Checks whether the column is in the primary key, possibly wrapped in a chain of functions with single argument.
bool isPrimaryOrMinMaxKeyColumnPossiblyWrappedInFunctions(const ASTPtr & node) const;
/// Common part for |freezePartition()| and |freezeAll()|.
using MatcherFn = std::function<bool(const DataPartPtr &)>;
void freezePartitionsByMatcher(MatcherFn matcher, const String & with_name, const Context & context);
};
}


@ -93,7 +93,7 @@ struct MergeTreeSettings
M(SettingUInt64, replicated_max_parallel_fetches_for_table, 0) \
\
/** Limit parallel fetches from endpoint (actually pool size) */ \
M(SettingUInt64, replicated_max_parallel_fetches_for_host, DEFAULT_HTTP_AMOUNT_CONNECTIONS_PER_ENDPOINT) \
M(SettingUInt64, replicated_max_parallel_fetches_for_host, DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT)\
\
/** Limit parallel sends */ \
M(SettingUInt64, replicated_max_parallel_sends, 0) \


@ -61,7 +61,7 @@ std::optional<PartitionCommand> PartitionCommand::parse(const ASTAlterCommand *
else if (command_ast->type == ASTAlterCommand::DROP_COLUMN && command_ast->partition)
{
if (!command_ast->clear_column)
throw Exception("Can't DROP COLUMN from partition. It is possible only CLEAR COLUMN in partition", ErrorCodes::BAD_ARGUMENTS);
throw Exception("Can't DROP COLUMN from partition. It is possible only to CLEAR COLUMN in partition", ErrorCodes::BAD_ARGUMENTS);
PartitionCommand res;
res.type = CLEAR_COLUMN;
@ -70,6 +70,13 @@ std::optional<PartitionCommand> PartitionCommand::parse(const ASTAlterCommand *
res.column_name = column_name;
return res;
}
else if (command_ast->type == ASTAlterCommand::FREEZE_ALL)
{
PartitionCommand command;
command.type = PartitionCommand::FREEZE_ALL_PARTITIONS;
command.with_name = command_ast->with_name;
return command;
}
else
return {};
}


@ -17,12 +17,13 @@ struct PartitionCommand
{
enum Type
{
DROP_PARTITION,
ATTACH_PARTITION,
REPLACE_PARTITION,
FETCH_PARTITION,
FREEZE_PARTITION,
CLEAR_COLUMN,
DROP_PARTITION,
FETCH_PARTITION,
FREEZE_ALL_PARTITIONS,
FREEZE_PARTITION,
REPLACE_PARTITION,
};
Type type;


@ -1,3 +1,5 @@
#include <Storages/StorageMaterializedView.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTCreateQuery.h>
@ -9,7 +11,6 @@
#include <Interpreters/DatabaseAndTableWithAlias.h>
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Storages/StorageMaterializedView.h>
#include <Storages/StorageFactory.h>
#include <Common/typeid_cast.h>
@ -239,28 +240,10 @@ bool StorageMaterializedView::optimize(const ASTPtr & query, const ASTPtr & part
return getTargetTable()->optimize(query, partition, final, deduplicate, context);
}
void StorageMaterializedView::dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context)
void StorageMaterializedView::partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context)
{
checkStatementCanBeForwarded();
getTargetTable()->dropPartition(query, partition, detach, context);
}
void StorageMaterializedView::clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context)
{
checkStatementCanBeForwarded();
getTargetTable()->clearColumnInPartition(partition, column_name, context);
}
void StorageMaterializedView::attachPartition(const ASTPtr & partition, bool part, const Context & context)
{
checkStatementCanBeForwarded();
getTargetTable()->attachPartition(partition, part, context);
}
void StorageMaterializedView::freezePartition(const ASTPtr & partition, const String & with_name, const Context & context)
{
checkStatementCanBeForwarded();
getTargetTable()->freezePartition(partition, with_name, context);
getTargetTable()->partition(query, commands, context);
}
void StorageMaterializedView::mutate(const MutationCommands & commands, const Context & context)


@ -8,7 +8,7 @@
namespace DB
{
class IAST;
class IAST; // XXX: should include the full class definition, for proper use inside inline methods
using ASTPtr = std::shared_ptr<IAST>;
@ -35,10 +35,8 @@ public:
bool optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context) override;
void dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context) override;
void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context) override;
void attachPartition(const ASTPtr & partition, bool part, const Context & context) override;
void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context) override;
void partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context) override;
void mutate(const MutationCommands & commands, const Context & context) override;
void shutdown() override;


@ -771,8 +771,55 @@ bool StorageMergeTree::optimize(
return true;
}
void StorageMergeTree::partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context)
{
for (const PartitionCommand & command : commands)
{
switch (command.type)
{
case PartitionCommand::DROP_PARTITION:
checkPartitionCanBeDropped(command.partition);
dropPartition(command.partition, command.detach, context);
break;
void StorageMergeTree::dropPartition(const ASTPtr & /*query*/, const ASTPtr & partition, bool detach, const Context & context)
case PartitionCommand::ATTACH_PARTITION:
attachPartition(command.partition, command.part, context);
break;
case PartitionCommand::REPLACE_PARTITION:
{
checkPartitionCanBeDropped(command.partition);
String from_database = command.from_database.empty() ? context.getCurrentDatabase() : command.from_database;
auto from_storage = context.getTable(from_database, command.from_table);
replacePartitionFrom(from_storage, command.partition, command.replace, context);
}
break;
case PartitionCommand::FREEZE_PARTITION:
{
auto lock = lockStructure(false, __PRETTY_FUNCTION__);
data.freezePartition(command.partition, command.with_name, context);
}
break;
case PartitionCommand::CLEAR_COLUMN:
clearColumnInPartition(command.partition, command.column_name, context);
break;
case PartitionCommand::FREEZE_ALL_PARTITIONS:
{
auto lock = lockStructure(false, __PRETTY_FUNCTION__);
data.freezeAll(command.with_name, context);
}
break;
default:
IStorage::partition(query, commands, context); // should throw an exception.
}
}
}
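
The dispatch above reads a handful of fields from PartitionCommand, whose definition is not part of this diff. Below is a standalone approximation, inferred purely from the fields the two switches use — an assumption, not the authoritative struct:

#include <memory>
#include <string>

struct IAST;                          // stand-in forward declaration
using ASTPtr = std::shared_ptr<IAST>; // matches the alias used in the headers
struct Field {};                      // stand-in for DB::Field
using String = std::string;

/// Approximate shape of PartitionCommand, inferred from the switches in this diff.
struct PartitionCommand
{
    enum Type
    {
        DROP_PARTITION,        /// DROP/DETACH PARTITION
        ATTACH_PARTITION,      /// ATTACH PARTITION or ATTACH PART
        REPLACE_PARTITION,     /// REPLACE PARTITION ... FROM
        FETCH_PARTITION,       /// FETCH PARTITION (replicated tables only)
        FREEZE_PARTITION,      /// FREEZE PARTITION
        CLEAR_COLUMN,          /// CLEAR COLUMN ... IN PARTITION
        FREEZE_ALL_PARTITIONS, /// FREEZE without a partition
    };

    Type type;
    ASTPtr partition;            /// partition expression from the ALTER query
    Field column_name;           /// CLEAR_COLUMN target
    bool detach = false;         /// DROP_PARTITION: detach instead of drop
    bool part = false;           /// ATTACH_PARTITION: single part vs whole partition
    bool replace = true;         /// REPLACE_PARTITION semantics
    String from_database;        /// REPLACE_PARTITION source table
    String from_table;
    String with_name;            /// FREEZE backup name
    String from_zookeeper_path;  /// FETCH_PARTITION source
};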
void StorageMergeTree::dropPartition(const ASTPtr & /*query*/, const ASTPtr & partition, bool detach, const Context & context)
void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, const Context & context)
{
{
/// Asks to complete merges and does not allow them to start.
@ -806,6 +853,8 @@ void StorageMergeTree::dropPartition(const ASTPtr & /*query*/, const ASTPtr & pa
void StorageMergeTree::attachPartition(const ASTPtr & partition, bool part, const Context & context)
{
// TODO: we should take locks to prevent a race with 'alter … modify column'
String partition_id;
if (part)
@ -858,11 +907,6 @@ void StorageMergeTree::attachPartition(const ASTPtr & partition, bool part, cons
context.dropCaches();
}
void StorageMergeTree::freezePartition(const ASTPtr & partition, const String & with_name, const Context & context)
{
data.freezePartition(partition, with_name, context);
}
void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context)
{
auto lock1 = lockStructure(false, __PRETTY_FUNCTION__);


@ -43,15 +43,8 @@ public:
const ColumnsDescription & getColumns() const override { return data.getColumns(); }
void setColumns(ColumnsDescription columns_) override { return data.setColumns(std::move(columns_)); }
NameAndTypePair getColumn(const String & column_name) const override
{
return data.getColumn(column_name);
}
bool hasColumn(const String & column_name) const override
{
return data.hasColumn(column_name);
}
NameAndTypePair getColumn(const String & column_name) const override { return data.getColumn(column_name); }
bool hasColumn(const String & column_name) const override { return data.hasColumn(column_name); }
BlockInputStreams read(
const Names & column_names,
@ -67,11 +60,7 @@ public:
*/
bool optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context) override;
void dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context) override;
void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context) override;
void attachPartition(const ASTPtr & partition, bool part, const Context & context) override;
void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context) override;
void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context) override;
void partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context) override;
void mutate(const MutationCommands & commands, const Context & context) override;
@ -160,6 +149,12 @@ private:
void clearOldMutations();
// Partition helpers
void dropPartition(const ASTPtr & partition, bool detach, const Context & context);
void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context);
void attachPartition(const ASTPtr & partition, bool part, const Context & context);
void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context);
friend class MergeTreeBlockOutputStream;
friend class MergeTreeData;
friend struct CurrentlyMergingPartsTagger;
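
Net effect of this header hunk: the per-command virtuals leave the public storage interface and become private helpers behind one public override. A condensed reconstruction, with base classes and unrelated members elided (a sketch of the shape, not the full header):

/// Condensed from the hunks above; not the complete class.
class StorageMergeTree
{
public:
    void partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context); // override

private:
    // Partition helpers, no longer part of the public interface.
    void dropPartition(const ASTPtr & partition, bool detach, const Context & context);
    void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context);
    void attachPartition(const ASTPtr & partition, bool part, const Context & context);
    void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context);
};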


@ -3282,6 +3282,58 @@ void StorageReplicatedMergeTree::alter(const AlterCommands & params,
LOG_DEBUG(log, "ALTER finished");
}
void StorageReplicatedMergeTree::partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context)
{
for (const PartitionCommand & command : commands)
{
switch (command.type)
{
case PartitionCommand::DROP_PARTITION:
checkPartitionCanBeDropped(command.partition);
dropPartition(query, command.partition, command.detach, context);
break;
case PartitionCommand::ATTACH_PARTITION:
attachPartition(command.partition, command.part, context);
break;
case PartitionCommand::REPLACE_PARTITION:
{
checkPartitionCanBeDropped(command.partition);
String from_database = command.from_database.empty() ? context.getCurrentDatabase() : command.from_database;
auto from_storage = context.getTable(from_database, command.from_table);
replacePartitionFrom(from_storage, command.partition, command.replace, context);
}
break;
case PartitionCommand::FETCH_PARTITION:
fetchPartition(command.partition, command.from_zookeeper_path, context);
break;
case PartitionCommand::FREEZE_PARTITION:
{
auto lock = lockStructure(false, __PRETTY_FUNCTION__);
data.freezePartition(command.partition, command.with_name, context);
}
break;
case PartitionCommand::CLEAR_COLUMN:
clearColumnInPartition(command.partition, command.column_name, context);
break;
case PartitionCommand::FREEZE_ALL_PARTITIONS:
{
auto lock = lockStructure(false, __PRETTY_FUNCTION__);
data.freezeAll(command.with_name, context);
}
break;
default:
IStorage::partition(query, commands, context); // should throw an exception.
}
}
}
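
Unlike the non-replicated dispatch, DROP_PARTITION here still passes the original query AST down. The sketch below, condensed from the following hunk, shows why: a non-leader replica cannot execute the drop locally and instead forwards the whole statement to the leader. The leader path is elided.

/// Condensed from the hunk below; the leader path is elided.
void StorageReplicatedMergeTree::dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context)
{
    if (!is_leader)
    {
        /// The only remaining use of |query|: replay the original ALTER on the leader.
        sendRequestToLeaderReplica(query, context.getSettingsRef());
        return;
    }
    /// Leader: log the drop in ZooKeeper and wait for replicas (elided).
}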
/// For the new format version, returns the ordinary part name; otherwise returns a fake part name covering the first and the last day of the month
static String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, const MergeTreePartInfo & part_info)
@ -3380,6 +3432,8 @@ void StorageReplicatedMergeTree::dropPartition(const ASTPtr & query, const ASTPt
if (!is_leader)
{
// TODO: we could reconstruct the query outside of |dropPartition()| and remove the |query| argument from the interface.
// It's the only place where we need this argument.
sendRequestToLeaderReplica(query, context.getSettingsRef());
return;
}
@ -3427,6 +3481,8 @@ void StorageReplicatedMergeTree::truncate(const ASTPtr & query)
void StorageReplicatedMergeTree::attachPartition(const ASTPtr & partition, bool attach_part, const Context & context)
{
// TODO: we should take locks to prevent a race with 'alter … modify column'
assertNotReadonly();
String partition_id;
@ -4154,12 +4210,6 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const
}
void StorageReplicatedMergeTree::freezePartition(const ASTPtr & partition, const String & with_name, const Context & context)
{
data.freezePartition(partition, with_name, context);
}
void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, const Context &)
{
/// Overview of the mutation algorithm.


@ -116,12 +116,7 @@ public:
void alter(const AlterCommands & params, const String & database_name, const String & table_name, const Context & context) override;
void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context) override;
void dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context) override;
void attachPartition(const ASTPtr & partition, bool part, const Context & context) override;
void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context) override;
void fetchPartition(const ASTPtr & partition, const String & from, const Context & context) override;
void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context) override;
void partition(const ASTPtr & query, const PartitionCommands & commands, const Context & context) override;
void mutate(const MutationCommands & commands, const Context & context) override;
@ -520,6 +515,13 @@ private:
bool dropPartsInPartition(zkutil::ZooKeeper & zookeeper, String & partition_id,
StorageReplicatedMergeTree::LogEntry & entry, bool detach);
// Partition helpers
void clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context);
void dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context);
void attachPartition(const ASTPtr & partition, bool part, const Context & context);
void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context);
void fetchPartition(const ASTPtr & partition, const String & from, const Context & context);
protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
*/

View File

@ -277,7 +277,7 @@ def main(args):
result_is_different = subprocess.call(['cmp', '-s', reference_file, stdout_file], stdout = PIPE)
if result_is_different:
(diff, _) = Popen(['diff', '--side-by-side', reference_file, stdout_file], stdout = PIPE).communicate()
(diff, _) = Popen(['diff', '--unified', reference_file, stdout_file], stdout = PIPE).communicate()
diff = unicode(diff, errors='replace', encoding='utf-8')
failure = et.Element("failure", attrib = {"message": "result differs with reference"})


@ -741,8 +741,10 @@ def run_tests(args):
keys = [ 'toUInt64(n)', '(n, n)', '(toString(n), n)', 'toUInt64(n)' ]
dict_get_query_skeleton = "select dictGet{type}('{name}', '{type}_', {key}) from system.one array join range(8) as n;"
dict_get_notype_query_skeleton = "select dictGet('{name}', '{type}_', {key}) from system.one array join range(8) as n;"
dict_has_query_skeleton = "select dictHas('{name}', {key}) from system.one array join range(8) as n;"
dict_get_or_default_query_skeleton = "select dictGet{type}OrDefault('{name}', '{type}_', {key}, to{type}({default})) from system.one array join range(8) as n;"
dict_get_notype_or_default_query_skeleton = "select dictGetOrDefault('{name}', '{type}_', {key}, to{type}({default})) from system.one array join range(8) as n;"
dict_hierarchy_query_skeleton = "select dictGetHierarchy('{name}' as d, key), dictIsIn(d, key, toUInt64(1)), dictIsIn(d, key, key) from system.one array join range(toUInt64(8)) as key;"
# Designed to match 4 rows hit, 4 rows miss pattern of reference file
dict_get_query_range_hashed_skeleton = """
@ -751,6 +753,12 @@ def run_tests(args):
array join range(4) as n
cross join (select r from system.one array join array({hit}, {miss}) as r);
"""
dict_get_notype_query_range_hashed_skeleton = """
select dictGet('{name}', '{type}_', {key}, r)
from system.one
array join range(4) as n
cross join (select r from system.one array join array({hit}, {miss}) as r);
"""
def test_query(dict, query, reference, name):
global failures
@ -877,6 +885,9 @@ def run_tests(args):
test_query(name,
dict_get_query_range_hashed_skeleton.format(**locals()),
type, 'dictGet' + type)
test_query(name,
dict_get_notype_query_range_hashed_skeleton.format(**locals()),
type, 'dictGet' + type)
else:
# query dictHas is not supported for range_hashed dictionaries
@ -889,9 +900,15 @@ def run_tests(args):
test_query(name,
dict_get_query_skeleton.format(**locals()),
type, 'dictGet' + type)
test_query(name,
dict_get_notype_query_skeleton.format(**locals()),
type, 'dictGet' + type)
test_query(name,
dict_get_or_default_query_skeleton.format(**locals()),
type + 'OrDefault', 'dictGet' + type + 'OrDefault')
test_query(name,
dict_get_notype_or_default_query_skeleton.format(**locals()),
type + 'OrDefault', 'dictGet' + type + 'OrDefault')
# query dictGetHierarchy, dictIsIn
if has_parent:


@ -0,0 +1,11 @@
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
</yandex>


@ -22,8 +22,8 @@ def _fill_nodes(nodes, shard, connections_count):
'''.format(shard=shard, replica=node.name, connections=connections_count))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_small_cluster():
@ -52,9 +52,9 @@ def test_single_endpoint_connections_count(start_small_cluster):
assert node2.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '1\n'
node3 = cluster.add_instance('node3', config_dir="configs", main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node5 = cluster.add_instance('node5', config_dir="configs", main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node5 = cluster.add_instance('node5', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_big_cluster():


@ -0,0 +1,29 @@
<test>
<name>first_significant_subdomain</name>
<type>loop</type>
<stop_conditions>
<all_of>
<iterations>3</iterations>
<min_time_not_changing_for_ms>10000</min_time_not_changing_for_ms>
</all_of>
<any_of>
<iterations>5</iterations>
<total_time_ms>60000</total_time_ms>
</any_of>
</stop_conditions>
<main_metric>
<min_time/>
</main_metric>
<preconditions>
<table_exists>test.hits</table_exists>
</preconditions>
<settings>
<max_threads>1</max_threads>
</settings>
<query>SELECT count() FROM test.hits WHERE NOT ignore(firstSignificantSubdomain(URL))</query>
</test>


@ -1,6 +1,44 @@
5
5
082814b5aa5109160d5c0c5aff10d4df shadow/1/data/test/partition_428/19700102_19700102_2_2_0/k.bin
082814b5aa5109160d5c0c5aff10d4df shadow/1/data/test/partition_428/19700201_19700201_1_1_0/v1.bin
38e62ff37e1e5064e9a3f605dfe09d13 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/v1.bin
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/k.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/p.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/v1.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/k.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/p.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/v1.mrk
55a54008ad1ba589aa210d2629c1df41 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/primary.idx
77d5af402ada101574f4da114f242e02 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/columns.txt
77d5af402ada101574f4da114f242e02 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/columns.txt
88cdc31ded355e7572d68d8cde525d3a shadow/1/data/test/partition_428/19700201_19700201_1_1_0/p.bin
9e688c58a5487b8eaf69c9e1005ad0bf shadow/1/data/test/partition_428/19700102_19700102_2_2_0/primary.idx
b01e3d4df40467db3f1c2d029f59f6a2 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/checksums.txt
b026324c6904b2a9cb4b88d6d61c81d1 shadow/increment.txt
cfcb770c3ecd0990dcceb1bde129e6c6 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/p.bin
e2af3bef1fd129aea73a890ede1e7a30 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/k.bin
e6654eba9e88b001280d3bdd21ccc417 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/checksums.txt
5
5
082814b5aa5109160d5c0c5aff10d4df shadow/1/data/test/partition_428/19700102_19700102_2_2_0/k.bin
082814b5aa5109160d5c0c5aff10d4df shadow/1/data/test/partition_428/19700201_19700201_1_1_0/v1.bin
38e62ff37e1e5064e9a3f605dfe09d13 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/v1.bin
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/k.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/p.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/v1.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/k.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/p.mrk
4ae71336e44bf9bf79d2752e234818a5 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/v1.mrk
55a54008ad1ba589aa210d2629c1df41 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/primary.idx
77d5af402ada101574f4da114f242e02 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/columns.txt
77d5af402ada101574f4da114f242e02 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/columns.txt
88cdc31ded355e7572d68d8cde525d3a shadow/1/data/test/partition_428/19700201_19700201_1_1_0/p.bin
9e688c58a5487b8eaf69c9e1005ad0bf shadow/1/data/test/partition_428/19700102_19700102_2_2_0/primary.idx
b01e3d4df40467db3f1c2d029f59f6a2 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/checksums.txt
b026324c6904b2a9cb4b88d6d61c81d1 shadow/increment.txt
cfcb770c3ecd0990dcceb1bde129e6c6 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/p.bin
e2af3bef1fd129aea73a890ede1e7a30 shadow/1/data/test/partition_428/19700201_19700201_1_1_0/k.bin
e6654eba9e88b001280d3bdd21ccc417 shadow/1/data/test/partition_428/19700102_19700102_2_2_0/checksums.txt
31,1,2
1,2,3


@ -23,6 +23,11 @@ for part in `$chl "SELECT name FROM system.parts WHERE database='test' AND table
cat $ch_dir/data/test/partition_428/$part/columns.txt) | wc -l
done
$chl "ALTER TABLE test.partition_428 FREEZE"
# Do `cd` so that the paths in the output match the reference file
cd $ch_dir && find shadow -type f -exec md5sum {} \; | sort
$chl "ALTER TABLE test.partition_428 DETACH PARTITION 197001"
$chl "ALTER TABLE test.partition_428 ATTACH PARTITION 197001"
@ -33,6 +38,10 @@ for part in `$chl "SELECT name FROM system.parts WHERE database='test' AND table
done
$chl "ALTER TABLE test.partition_428 MODIFY COLUMN v1 Int8"
# Check the backup hasn't changed
cd $ch_dir && find shadow -type f -exec md5sum {} \; | sort
$chl "OPTIMIZE TABLE test.partition_428"
$chl "SELECT toUInt16(p), k, v1 FROM test.partition_428 ORDER BY k FORMAT CSV"


@ -1,8 +1,10 @@
SET send_logs_level = 'none';
DROP TABLE IF EXISTS test.perf;
DROP TABLE IF EXISTS test.test;
DROP TABLE IF EXISTS test.test_view;
CREATE TABLE test.perf(site String, user_id UInt64, z Float64)ENGINE = Log;
CREATE TABLE test.test(date Date, id Int8, name String, value Int64) ENGINE = MergeTree(date, (id, date), 8192);
CREATE VIEW test.test_view AS SELECT * FROM test.test;
@ -15,6 +17,7 @@ SELECT '-------Not need optimize predicate, but it works.-------';
SELECT 1;
SELECT 1 AS id WHERE id = 1;
SELECT arrayJoin([1,2,3]) AS id WHERE id = 1;
SELECT * FROM (SELECT perf_1.z AS z_1 FROM test.perf AS perf_1);
SELECT '-------Need push down-------';
SELECT * FROM system.one ANY LEFT JOIN (SELECT 0 AS dummy) USING dummy WHERE 1;
@ -22,6 +25,7 @@ SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1';
SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1;
SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
SELECT * FROM (SELECT perf_1.z AS z_1 FROM test.perf AS perf_1) WHERE z_1 = 1;
SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1;
SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test.test) WHERE a = 3;
@ -72,5 +76,6 @@ SELECT * FROM (SELECT toUInt64(table_alias.b) AS a, sum(id) AS b FROM test.test
SELECT '-------Compatibility test-------';
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test.test) AS b USING date, id WHERE b.date = toDate('2000-01-01');
DROP TABLE IF EXISTS test.perf;
DROP TABLE IF EXISTS test.test;
DROP TABLE IF EXISTS test.test_view;


@ -0,0 +1,3 @@
usa
pentagon
stanford


@ -0,0 +1 @@
SELECT firstSignificantSubdomain(arrayJoin(['http://usa.gov.com/cgi-bin/yabb.pl?password=qwerty', 'https://www2.pentagon.mil.net/index.phtml', 'ftp://stanford.edu.org/~ivanov/phd-thesis.SHTM']));


@ -0,0 +1 @@
10000 10000000


@ -0,0 +1,11 @@
DROP TABLE IF EXISTS test.size_hint;
CREATE TABLE test.size_hint (s Array(String)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 1000;
SET max_block_size = 1000;
SET max_memory_usage = 1000000000;
INSERT INTO test.size_hint SELECT arrayMap(x -> 'Hello', range(1000)) FROM numbers(10000);
SET max_memory_usage = 100000000, max_threads = 2;
SELECT count(), sum(length(s)) FROM test.size_hint;
DROP TABLE test.size_hint;

debian/.pbuilderrc

@ -161,11 +161,11 @@ esac
if [ "$ARCH" != arm64 ]; then
case "$DIST" in
"unstable")
"experimental")
EXTRAPACKAGES+=" liblld-8-dev libclang-8-dev llvm-8-dev liblld-8 "
export CMAKE_FLAGS="-DLLVM_VERSION_POSTFIX=-8 $CMAKE_FLAGS"
;;
"cosmic" | "experimental" | "testing")
"cosmic" | "testing" | "unstable")
EXTRAPACKAGES+=" liblld-7-dev libclang-7-dev llvm-7-dev liblld-7 "
export CMAKE_FLAGS="-DLLVM_VERSION_POSTFIX=-7 $CMAKE_FLAGS"
;;


@ -4887,13 +4887,14 @@ var Result = function () {
/* Append trailing wildcard to all terms for prefix querying */
.query(function (query) {
_this.value_.toLowerCase().split(" ").filter(Boolean).forEach(function (term) {
term = _exposeLoaderLunrLunr2.default.stemmer(new _exposeLoaderLunrLunr2.default.Token(term)).toString();
for (var lang in _this.lang_) {
lang = _this.lang_[lang];
if (typeof(_exposeLoaderLunrLunr2.default[lang]) !== 'undefined' && typeof(_exposeLoaderLunrLunr2.default[lang].stemmer) !== 'undefined') {
term = _exposeLoaderLunrLunr2.default[lang].stemmer(new _exposeLoaderLunrLunr2.default.Token(term)).toString();
}
}
query.term(term, { wildcard: _exposeLoaderLunrLunr2.default.Query.wildcard.TRAILING });
query.term(term, { wildcard: _exposeLoaderLunrLunr2.default.Query.wildcard.TRAILING | _exposeLoaderLunrLunr2.default.Query.wildcard.LEADING });
});
})


@ -22,5 +22,5 @@ env TEST_RUN=1 \
`# Use all possible contrib libs from system` \
`# psmisc - killall` \
`# gdb - symbol test in pbuilder` \
EXTRAPACKAGES="psmisc gdb clang-6.0 libc++abi-dev libc++-dev libboost-program-options-dev libboost-system-dev libboost-filesystem-dev libboost-thread-dev zlib1g-dev liblz4-dev libdouble-conversion-dev libsparsehash-dev librdkafka-dev libpoco-dev unixodbc-dev libsparsehash-dev libgoogle-perftools-dev libzstd-dev libre2-dev libunwind-dev googletest libcctz-dev libcapnp-dev libjemalloc-dev libssl-dev $EXTRAPACKAGES" \
EXTRAPACKAGES="psmisc libboost-program-options-dev libboost-system-dev libboost-filesystem-dev libboost-thread-dev zlib1g-dev liblz4-dev libdouble-conversion-dev libsparsehash-dev librdkafka-dev libpoco-dev unixodbc-dev libsparsehash-dev libgoogle-perftools-dev libzstd-dev libre2-dev libunwind-dev googletest libcctz-dev libcapnp-dev libjemalloc-dev libssl-dev $EXTRAPACKAGES" \
pdebuild --configfile $ROOT_DIR/debian/.pbuilderrc $PDEBUILD_OPT