Merge remote-tracking branch 'origin/master' into yandex-to-clickhouse-in-configs
commit 551c52c236
@@ -22,7 +22,7 @@ mkdocs-macros-plugin==0.4.20
nltk==3.5
nose==1.3.7
protobuf==3.14.0
numpy==1.19.2
numpy==1.21.2
pymdown-extensions==8.0
python-slugify==4.0.1
PyYAML==5.4.1
@@ -230,22 +230,24 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t

- The function searches for the first condition that triggers the chain and sets the event counter to 1. This is the moment the sliding window starts.

- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is interrupted, the counter is not incremented.
- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is interrupted, the counter is no longer incremented.

- If the data has multiple event chains with different completion points, the function outputs only the size of the longest chain.

**Syntax**

``` sql
windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
```

**Parameters**

- `window` — Size of the sliding window, in seconds.
- `mode` - An optional argument.
    - `'strict'` - When `'strict'` is set, windowFunnel() applies the matching conditions only to unique values.
- `timestamp` — Column containing the time. Supported data types: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though `UInt64` is supported as the timestamp type, its value cannot exceed the Int64 maximum, i.e. 2^63 - 1).
- `window` — Size of the sliding window, i.e. the maximum gap between the first and the last event of the chain. The unit depends on `timestamp`. Expressed as a formula: `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` - An optional argument; one or more modes can be set.
    - `'strict_deduplication'` - If the same condition occurs again in the chain of events, further search stops.
    - `'strict_order'` - Does not allow other events to intervene. For example, in the case of `A->B->D->C`, the search for `A->B->C` stops at `D` and the maximum event level is 2.
    - `'strict_increase'` - Timestamps in the event chain must be strictly increasing.
- `timestamp` — Column containing the timestamps. Supported data types: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though `UInt64` is supported as the timestamp type, its value cannot exceed the Int64 maximum, i.e. 2^63 - 1).
- `cond` — Constraint conditions for the chain of events. [UInt8](../../sql-reference/data-types/int-uint.md) type.

**Returned value**
@@ -113,13 +113,9 @@ String getFilesystemName([[maybe_unused]] const String & mount_point)

bool pathStartsWith(const std::filesystem::path & path, const std::filesystem::path & prefix_path)
{
auto absolute_path = std::filesystem::weakly_canonical(path);
auto absolute_prefix_path = std::filesystem::weakly_canonical(prefix_path);

auto [_, prefix_path_mismatch_it] = std::mismatch(absolute_path.begin(), absolute_path.end(), absolute_prefix_path.begin(), absolute_prefix_path.end());

bool path_starts_with_prefix_path = (prefix_path_mismatch_it == absolute_prefix_path.end());
return path_starts_with_prefix_path;
String absolute_path = std::filesystem::weakly_canonical(path);
String absolute_prefix_path = std::filesystem::weakly_canonical(prefix_path);
return absolute_path.starts_with(absolute_prefix_path);
}

bool symlinkStartsWith(const std::filesystem::path & path, const std::filesystem::path & prefix_path)
@@ -129,15 +125,11 @@ bool symlinkStartsWith(const std::filesystem::path & path, const std::filesystem
/// `.` and `..` and extra `/`. Path is not canonized because otherwise path will
/// not be a path of a symlink itself.

auto absolute_path = std::filesystem::absolute(path);
absolute_path = absolute_path.lexically_normal(); /// Normalize path.
auto absolute_prefix_path = std::filesystem::absolute(prefix_path);
absolute_prefix_path = absolute_prefix_path.lexically_normal(); /// Normalize path.

auto [_, prefix_path_mismatch_it] = std::mismatch(absolute_path.begin(), absolute_path.end(), absolute_prefix_path.begin(), absolute_prefix_path.end());

bool path_starts_with_prefix_path = (prefix_path_mismatch_it == absolute_prefix_path.end());
return path_starts_with_prefix_path;
String absolute_path = std::filesystem::absolute(path);
absolute_path = fs::path(absolute_path).lexically_normal(); /// Normalize path.
String absolute_prefix_path = std::filesystem::absolute(prefix_path);
absolute_prefix_path = fs::path(absolute_prefix_path).lexically_normal(); /// Normalize path.
return absolute_path.starts_with(absolute_prefix_path);
}

bool pathStartsWith(const String & path, const String & prefix_path)
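The hunks above replace a component-wise std::mismatch comparison with a plain string prefix check on the canonicalized paths. A minimal standalone sketch of the simplified check, assuming C++20 for std::string::starts_with; the function name and the sample paths are illustrative, not the ClickHouse helpers themselves:

```cpp
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

/// Canonicalize both paths (resolving symlinks and "."/".." where possible),
/// then compare the resulting strings with starts_with.
static bool pathStartsWithSketch(const fs::path & path, const fs::path & prefix_path)
{
    std::string absolute_path = fs::weakly_canonical(path).string();
    std::string absolute_prefix_path = fs::weakly_canonical(prefix_path).string();
    return absolute_path.starts_with(absolute_prefix_path);
}

int main()
{
    std::cout << pathStartsWithSketch("/var/lib/clickhouse/data/../data", "/var/lib/clickhouse") << '\n'; // 1
    std::cout << pathStartsWithSketch("/var/lib/clickhouse", "/etc") << '\n';                             // 0
}
```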
@@ -348,7 +348,7 @@ Chain buildPushingToViewsChain(
/// Do not push to destination table if the flag is set
else if (!no_destination)
{
auto sink = storage->write(query_ptr, storage->getInMemoryMetadataPtr(), context);
auto sink = storage->write(query_ptr, metadata_snapshot, context);
metadata_snapshot->check(sink->getHeader().getColumnsWithTypeAndName());
sink->setRuntimeData(thread_status, elapsed_counter_ms);
result_chain.addSource(std::move(sink));
@@ -130,20 +130,17 @@ public:

Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
{
IFunction::Monotonicity is_monotonic{true};
IFunction::Monotonicity is_not_monotonic;
if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
return { .is_monotonic = true, .is_always_monotonic = true };

if (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
{
is_monotonic.is_always_monotonic = true;
return is_monotonic;
}
const IFunction::Monotonicity is_monotonic = { .is_monotonic = true };
const IFunction::Monotonicity is_not_monotonic;

/// This method is called only if the function has one argument. Therefore, we do not care about the non-local time zone.
const DateLUTImpl & date_lut = DateLUT::instance();

if (left.isNull() || right.isNull())
return is_not_monotonic;
return {};

/// The function is monotonous on the [left, right] segment, if the factor transformation returns the same values for them.
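The recurring change in this commit replaces explicit Monotonicity construction with C++20 designated initializers and, where the branch depends only on a template parameter, evaluates it at compile time with `if constexpr`. A self-contained sketch of the pattern, using an illustrative aggregate rather than the real ClickHouse types:

```cpp
#include <iostream>
#include <type_traits>

/// Plain aggregate with defaults; with no user-declared constructor it can be
/// initialized with designated initializers (C++20).
struct Monotonicity
{
    bool is_monotonic = false;
    bool is_positive = true;
    bool is_always_monotonic = false;
};

struct ZeroTransform {};
struct HourTransform {};

template <typename FactorTransform>
Monotonicity getMonotonicitySketch(bool left_is_null, bool right_is_null)
{
    /// Decided at compile time: no runtime branch is emitted for the ZeroTransform case.
    if constexpr (std::is_same_v<FactorTransform, ZeroTransform>)
        return { .is_monotonic = true, .is_always_monotonic = true };

    if (left_is_null || right_is_null)
        return {}; /// value-initialized: not monotonic

    return { .is_monotonic = true };
}

int main()
{
    std::cout << getMonotonicitySketch<ZeroTransform>(false, false).is_always_monotonic << '\n'; // 1
    std::cout << getMonotonicitySketch<HourTransform>(true, false).is_monotonic << '\n';         // 0
}
```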
@@ -128,14 +128,11 @@ public:

Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
{
IFunction::Monotonicity is_monotonic { true };
IFunction::Monotonicity is_not_monotonic;
if constexpr (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
return { .is_monotonic = true, .is_always_monotonic = true };

if (std::is_same_v<typename Transform::FactorTransform, ZeroTransform>)
{
is_monotonic.is_always_monotonic = true;
return is_monotonic;
}
const IFunction::Monotonicity is_monotonic = { .is_monotonic = true };
const IFunction::Monotonicity is_not_monotonic;

/// This method is called only if the function has one argument. Therefore, we do not care about the non-local time zone.
const DateLUTImpl & date_lut = DateLUT::instance();
@@ -290,7 +290,7 @@ struct PositiveMonotonicity
static bool has() { return true; }
static IFunction::Monotonicity get(const Field &, const Field &)
{
return { true };
return { .is_monotonic = true };
}
};
@@ -1954,7 +1954,7 @@ struct PositiveMonotonicity
static bool has() { return true; }
static IFunction::Monotonicity get(const IDataType &, const Field &, const Field &)
{
return { true };
return { .is_monotonic = true };
}
};
@@ -1963,7 +1963,7 @@ struct UnknownMonotonicity
static bool has() { return false; }
static IFunction::Monotonicity get(const IDataType &, const Field &, const Field &)
{
return { false };
return { };
}
};
@@ -1989,13 +1989,13 @@ struct ToNumberMonotonicity
/// (Enum has separate case, because it is different data type)
if (checkAndGetDataType<DataTypeNumber<T>>(&type) ||
checkAndGetDataType<DataTypeEnum<T>>(&type))
return { true, true, true };
return { .is_monotonic = true, .is_always_monotonic = true };

/// Float cases.

/// When converting to Float, the conversion is always monotonic.
if (std::is_floating_point_v<T>)
return {true, true, true};
if constexpr (std::is_floating_point_v<T>)
return { .is_monotonic = true, .is_always_monotonic = true };

/// If converting from Float, for monotonicity, arguments must fit in range of result type.
if (WhichDataType(type).isFloat())
@@ -2010,7 +2010,7 @@ struct ToNumberMonotonicity
&& left_float <= static_cast<Float64>(std::numeric_limits<T>::max())
&& right_float >= static_cast<Float64>(std::numeric_limits<T>::min())
&& right_float <= static_cast<Float64>(std::numeric_limits<T>::max()))
return { true };
return { .is_monotonic = true };

return {};
}
@@ -2035,10 +2035,10 @@ struct ToNumberMonotonicity
if (size_of_from == size_of_to)
{
if (from_is_unsigned == to_is_unsigned)
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };

if (left_in_first_half == right_in_first_half)
return {true};
return { .is_monotonic = true };

return {};
}
@@ -2047,14 +2047,14 @@ struct ToNumberMonotonicity
if (size_of_from < size_of_to)
{
if (from_is_unsigned == to_is_unsigned)
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };

if (!to_is_unsigned)
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };

/// signed -> unsigned. If arguments from the same half, then function is monotonic.
if (left_in_first_half == right_in_first_half)
return {true};
return { .is_monotonic = true };

return {};
}
@@ -2071,10 +2071,14 @@ struct ToNumberMonotonicity
return {};

if (to_is_unsigned)
return {true};
return { .is_monotonic = true };
else
{
// If To is signed, it's possible that the signedness is different after conversion. So we check it explicitly.
return {(T(left.get<UInt64>()) >= 0) == (T(right.get<UInt64>()) >= 0)};
const bool is_monotonic = (T(left.get<UInt64>()) >= 0) == (T(right.get<UInt64>()) >= 0);

return { .is_monotonic = is_monotonic };
}
}

__builtin_unreachable();
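The last hunk checks, when narrowing to a signed result type, whether both range endpoints keep the same sign after the conversion; only then is the cast monotonic on that range. A small illustration of why the check is needed, assuming C++20 semantics where an out-of-range conversion to a signed integer wraps modulo 2^N (before C++20 the result was implementation-defined); the function name and values are purely illustrative:

```cpp
#include <cstdint>
#include <iostream>

/// Converting UInt64 values to Int8 is monotonic on a range only if both
/// endpoints land on the same side of zero after the narrowing conversion.
static bool sameSignAfterNarrowing(uint64_t left, uint64_t right)
{
    return (static_cast<int8_t>(left) >= 0) == (static_cast<int8_t>(right) >= 0);
}

int main()
{
    /// 100 -> 100 and 120 -> 120: both non-negative, order preserved.
    std::cout << sameSignAfterNarrowing(100, 120) << '\n'; // 1

    /// 100 -> 100 but 200 -> -56: the sign flips, so 100 < 200 maps to 100 > -56.
    std::cout << sameSignAfterNarrowing(100, 200) << '\n';               // 0
    std::cout << static_cast<int>(static_cast<int8_t>(200)) << '\n';     // -56
}
```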
@@ -2089,7 +2093,7 @@ struct ToDateMonotonicity
{
auto which = WhichDataType(type);
if (which.isDateOrDate32() || which.isDateTime() || which.isDateTime64() || which.isInt8() || which.isInt16() || which.isUInt8() || which.isUInt16())
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };
else if (
(which.isUInt() && ((left.isNull() || left.get<UInt64>() < 0xFFFF) && (right.isNull() || right.get<UInt64>() >= 0xFFFF)))
|| (which.isInt() && ((left.isNull() || left.get<Int64>() < 0xFFFF) && (right.isNull() || right.get<Int64>() >= 0xFFFF)))
@@ -2097,7 +2101,7 @@ struct ToDateMonotonicity
|| !type.isValueRepresentedByNumber())
return {};
else
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };
}
};
@@ -2108,7 +2112,7 @@ struct ToDateTimeMonotonicity
static IFunction::Monotonicity get(const IDataType & type, const Field &, const Field &)
{
if (type.isValueRepresentedByNumber())
return {true, true, true};
return { .is_monotonic = true, .is_always_monotonic = true };
else
return {};
}
@@ -2123,7 +2127,7 @@ struct ToStringMonotonicity

static IFunction::Monotonicity get(const IDataType & type, const Field & left, const Field & right)
{
IFunction::Monotonicity positive(true, true);
IFunction::Monotonicity positive{ .is_monotonic = true };
IFunction::Monotonicity not_monotonic;

const auto * type_ptr = &type;
@@ -614,7 +614,7 @@ public:

Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override
{
return { true, true, true };
return { .is_monotonic = true, .is_always_monotonic = true };
}
};
@@ -250,12 +250,9 @@ public:
/// The property of monotonicity for a certain range.
struct Monotonicity
{
bool is_monotonic = false; /// Is the function monotonous (nondecreasing or nonincreasing).
bool is_positive = true; /// true if the function is nondecreasing, false, if notincreasing. If is_monotonic = false, then it does not matter.
bool is_monotonic = false; /// Is the function monotonous (non-decreasing or non-increasing).
bool is_positive = true; /// true if the function is non-decreasing, false if non-increasing. If is_monotonic = false, then it does not matter.
bool is_always_monotonic = false; /// Is true if function is monotonic on the whole input range I

Monotonicity(bool is_monotonic_ = false, bool is_positive_ = true, bool is_always_monotonic_ = false)
: is_monotonic(is_monotonic_), is_positive(is_positive_), is_always_monotonic(is_always_monotonic_) {}
};

/** Get information about monotonicity on a range of values. Call only if hasInformationAboutMonotonicity.
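Dropping the user-declared constructor turns Monotonicity back into a plain aggregate, which is what allows the designated-initializer call sites seen throughout this commit, and lets `return {}` express the all-default "not monotonic" value. A brief sketch of the difference, assuming C++20 and using illustrative type names only:

```cpp
#include <iostream>

struct MonotonicityOld
{
    bool is_monotonic = false;
    bool is_positive = true;
    bool is_always_monotonic = false;

    MonotonicityOld(bool is_monotonic_ = false, bool is_positive_ = true, bool is_always_monotonic_ = false)
        : is_monotonic(is_monotonic_), is_positive(is_positive_), is_always_monotonic(is_always_monotonic_) {}
};

struct MonotonicityNew
{
    bool is_monotonic = false;
    bool is_positive = true;
    bool is_always_monotonic = false;
};

/// Positional braces: the reader has to remember which bool is which,
/// and swapping two arguments still compiles.
MonotonicityOld old_style() { return { true, true, true }; }

/// Designated initializers name each field at the call site and only work
/// because MonotonicityNew has no user-declared constructor.
MonotonicityNew new_style() { return { .is_monotonic = true, .is_always_monotonic = true }; }

int main()
{
    std::cout << new_style().is_monotonic << ' ' << new_style().is_always_monotonic << '\n'; // 1 1
}
```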
@@ -46,7 +46,7 @@ template <> struct FunctionUnaryArithmeticMonotonicity<NameAbs>
if ((left_float < 0 && right_float > 0) || (left_float > 0 && right_float < 0))
return {};

return { true, (left_float > 0) };
return { .is_monotonic = true, .is_positive = left_float > 0 };
}
};
@@ -143,7 +143,7 @@ public:

Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override
{
return { true, true, true };
return { .is_monotonic = true, .is_always_monotonic = true };
}

private:
@@ -139,10 +139,7 @@ namespace DB

Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override
{
return Monotonicity(
true, // is_monotonic
true, // is_positive
true); // is_always_monotonic
return { .is_monotonic = true, .is_always_monotonic = true };
}

private:
@@ -55,7 +55,7 @@ template <> struct FunctionUnaryArithmeticMonotonicity<NameIntExp10>
if (left_float < 0 || right_float > 19)
return {};

return { true };
return { .is_monotonic = true };
}
};
@@ -58,7 +58,7 @@ template <> struct FunctionUnaryArithmeticMonotonicity<NameIntExp2>
if (left_float < 0 || right_float > 63)
return {};

return { true };
return { .is_monotonic = true };
}
};
@@ -42,7 +42,7 @@ template <> struct FunctionUnaryArithmeticMonotonicity<NameNegate>
static bool has() { return true; }
static IFunction::Monotonicity get(const Field &, const Field &)
{
return { true, false };
return { .is_monotonic = true, .is_positive = false };
}
};
@@ -37,7 +37,10 @@ template <>
struct FunctionUnaryArithmeticMonotonicity<NameSign>
{
static bool has() { return true; }
static IFunction::Monotonicity get(const Field &, const Field &) { return {true, true, false}; }
static IFunction::Monotonicity get(const Field &, const Field &)
{
return { .is_monotonic = true };
}
};

void registerFunctionSign(FunctionFactory & factory)
@@ -157,10 +157,7 @@ namespace DB

Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override
{
return Monotonicity(
true, // is_monotonic
true, // is_positive
true); // is_always_monotonic
return { .is_monotonic = true, .is_always_monotonic = true };
}

private:
@@ -311,7 +311,7 @@ public:

Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override
{
return { true, true, true };
return { .is_monotonic = true, .is_always_monotonic = true };
}

private:
@@ -66,7 +66,8 @@ public:

Monotonicity getMonotonicityForRange(const IDataType & /*type*/, const Field & /*left*/, const Field & /*right*/) const override
{
return {is_constant_timezone, is_constant_timezone, is_constant_timezone};
const bool b = is_constant_timezone;
return { .is_monotonic = b, .is_positive = b, .is_always_monotonic = b };
}

private:
@@ -11,6 +11,7 @@
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/DumpASTNode.h>
#include <Parsers/ASTAlterQuery.h>
#include <Interpreters/DatabaseAndTableWithAlias.h>
#include <Interpreters/IdentifierSemantic.h>

@@ -36,7 +37,8 @@ public:
{
visitDDLChildren(ast);

if (!tryVisitDynamicCast<ASTQueryWithTableAndOutput>(ast) &&
if (!tryVisitDynamicCast<ASTAlterQuery>(ast) &&
!tryVisitDynamicCast<ASTQueryWithTableAndOutput>(ast) &&
!tryVisitDynamicCast<ASTRenameQuery>(ast) &&
!tryVisitDynamicCast<ASTFunction>(ast))
{}
@@ -194,6 +196,24 @@ private:
}
}

void visitDDL(ASTAlterQuery & node, ASTPtr &) const
{
if (only_replace_current_database_function)
return;

if (node.database.empty())
node.database = database_name;

for (const auto & child : node.command_list->children)
{
auto * command_ast = child->as<ASTAlterCommand>();
if (command_ast->from_database.empty())
command_ast->from_database = database_name;
if (command_ast->to_database.empty())
command_ast->to_database = database_name;
}
}

void visitDDL(ASTFunction & function, ASTPtr & node) const
{
if (function.name == "currentDatabase")
@@ -239,7 +239,9 @@ public:
const IDataType * type_ptr = &type;
Field left_mut = left;
Field right_mut = right;
Monotonicity result(true, true, true);

Monotonicity result = { .is_monotonic = true, .is_positive = true, .is_always_monotonic = true };

/// monotonicity is only defined for unary functions, so the chain must describe a sequence of nested calls
for (size_t i = 0; i < nested_functions.size(); ++i)
{
@@ -28,7 +28,9 @@ public:
const TablesWithColumns & tables;
ContextPtr context;
const std::unordered_set<String> & group_by_function_hashes;
Monotonicity monotonicity{true, true, true};

Monotonicity monotonicity = { .is_monotonic = true, .is_positive = true, .is_always_monotonic = true };

ASTIdentifier * identifier = nullptr;
DataTypePtr arg_data_type = {};
@@ -87,9 +87,14 @@ void WriteBufferFromHTTPServerResponse::finishSendHeaders()

void WriteBufferFromHTTPServerResponse::nextImpl()
{
if (!initialized)
{
std::lock_guard lock(mutex);

/// Initialize as early as possible since if the code throws,
/// next() should not be called anymore.
initialized = true;

startSendHeaders();

if (!out && !is_http_method_head)
@@ -174,6 +179,8 @@ void WriteBufferFromHTTPServerResponse::finalize()
if (out)
out->finalize();
out.reset();
/// Catch write-after-finalize bugs.
set(nullptr, 0);
}
catch (...)
{
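In the nextImpl() hunk, the `initialized` flag is set before startSendHeaders() is called, so that if header sending throws, a later call does not attempt to start the response again on a stream that is already in an undefined state. A minimal sketch of that ordering concern, with illustrative names rather than the ClickHouse classes:

```cpp
#include <mutex>

/// Illustrative response buffer: headers must be started exactly once, and a
/// failed first attempt must not be retried later.
class ResponseBufferSketch
{
public:
    void next()
    {
        if (!initialized)
        {
            std::lock_guard lock(mutex);

            /// Set the flag *before* the call that may throw: if
            /// startSendHeaders() fails, a later next()/finalize() will not
            /// try to write the headers a second time.
            initialized = true;

            startSendHeaders();
        }

        /// ... send the accumulated body chunk ...
    }

private:
    void startSendHeaders() {} /// writes the status line and headers; may throw

    std::mutex mutex;
    bool initialized = false;
};

int main()
{
    ResponseBufferSketch response;
    response.next(); /// first call sends headers once
    response.next(); /// subsequent calls only send body data
}
```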
@@ -46,6 +46,7 @@ private:
std::shared_ptr<std::ostream> response_header_ostr;

std::unique_ptr<WriteBuffer> out;
bool initialized = false;

bool headers_started_sending = false;
bool headers_finished_sending = false; /// If true, you could not add any headers.
@@ -13,7 +13,6 @@
#include <IO/ReadBufferFromIStream.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromFile.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteBufferFromTemporaryFile.h>
#include <IO/WriteHelpers.h>
@@ -757,76 +756,84 @@ void HTTPHandler::processQuery(
}

/// Send HTTP headers with code 200 if no exception happened and the data is still not sent to the client.
used_output.out->finalize();
used_output.finalize();
}

void HTTPHandler::trySendExceptionToClient(
const std::string & s, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response, Output & used_output)
try
{
response.set("X-ClickHouse-Exception-Code", toString<int>(exception_code));

/// FIXME: make sure that no one else is reading from the same stream at the moment.

/// If HTTP method is POST and Keep-Alive is turned on, we should read the whole request body
/// to avoid reading part of the current request body in the next request.
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST
&& response.getKeepAlive()
&& exception_code != ErrorCodes::HTTP_LENGTH_REQUIRED
&& !request.getStream().eof())
{
request.getStream().ignoreAll();
}

if (exception_code == ErrorCodes::REQUIRED_PASSWORD)
{
response.requireAuthentication("ClickHouse server HTTP API");
}
else
{
response.setStatusAndReason(exceptionCodeToHTTPStatus(exception_code));
}

if (!response.sent() && !used_output.out_maybe_compressed)
{
/// If nothing was sent yet and we don't even know if we must compress the response.
*response.send() << s << std::endl;
}
else if (used_output.out_maybe_compressed)
{
/// Destroy CascadeBuffer to actualize buffers' positions and reset extra references
if (used_output.hasDelayed())
used_output.out_maybe_delayed_and_compressed.reset();

/// Send the error message into already used (and possibly compressed) stream.
/// Note that the error message will possibly be sent after some data.
/// Also HTTP code 200 could have already been sent.

/// If buffer has data, and that data wasn't sent yet, then no need to send that data
bool data_sent = used_output.out->count() != used_output.out->offset();

if (!data_sent)
{
used_output.out_maybe_compressed->position() = used_output.out_maybe_compressed->buffer().begin();
used_output.out->position() = used_output.out->buffer().begin();
}

writeString(s, *used_output.out_maybe_compressed);
writeChar('\n', *used_output.out_maybe_compressed);

used_output.out_maybe_compressed->next();
}
else
{
assert(false);
__builtin_unreachable();
}

used_output.finalize();
}
catch (...)
{
tryLogCurrentException(log, "Cannot send exception to client");

try
{
response.set("X-ClickHouse-Exception-Code", toString<int>(exception_code));

/// FIXME: make sure that no one else is reading from the same stream at the moment.

/// If HTTP method is POST and Keep-Alive is turned on, we should read the whole request body
/// to avoid reading part of the current request body in the next request.
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST
&& response.getKeepAlive()
&& exception_code != ErrorCodes::HTTP_LENGTH_REQUIRED
&& !request.getStream().eof())
{
request.getStream().ignoreAll();
}

if (exception_code == ErrorCodes::REQUIRED_PASSWORD)
{
response.requireAuthentication("ClickHouse server HTTP API");
}
else
{
response.setStatusAndReason(exceptionCodeToHTTPStatus(exception_code));
}

if (!response.sent() && !used_output.out_maybe_compressed)
{
/// If nothing was sent yet and we don't even know if we must compress the response.
*response.send() << s << std::endl;
}
else if (used_output.out_maybe_compressed)
{
/// Destroy CascadeBuffer to actualize buffers' positions and reset extra references
if (used_output.hasDelayed())
used_output.out_maybe_delayed_and_compressed.reset();

/// Send the error message into already used (and possibly compressed) stream.
/// Note that the error message will possibly be sent after some data.
/// Also HTTP code 200 could have already been sent.

/// If buffer has data, and that data wasn't sent yet, then no need to send that data
bool data_sent = used_output.out->count() != used_output.out->offset();

if (!data_sent)
{
used_output.out_maybe_compressed->position() = used_output.out_maybe_compressed->buffer().begin();
used_output.out->position() = used_output.out->buffer().begin();
}

writeString(s, *used_output.out_maybe_compressed);
writeChar('\n', *used_output.out_maybe_compressed);

used_output.out_maybe_compressed->next();
used_output.out->finalize();
}
else
{
assert(false);
__builtin_unreachable();
}
used_output.finalize();
}
catch (...)
{
tryLogCurrentException(log, "Cannot send exception to client");
tryLogCurrentException(log, "Cannot flush data to client (after sending exception)");
}
}
@@ -885,8 +892,7 @@ void HTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
trySendExceptionToClient(exception_message, exception_code, request, response, used_output);
}

if (used_output.out)
used_output.out->finalize();
used_output.finalize();
}

DynamicQueryHandler::DynamicQueryHandler(IServer & server_, const std::string & param_name_)
@@ -3,6 +3,7 @@
#include <Core/Names.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/HTTPRequestHandler.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <Common/CurrentMetrics.h>
#include <Common/CurrentThread.h>
@@ -63,6 +64,16 @@ private:
{
return out_maybe_delayed_and_compressed != out_maybe_compressed;
}

inline void finalize() const
{
if (out_maybe_delayed_and_compressed)
out_maybe_delayed_and_compressed->finalize();
if (out_maybe_compressed)
out_maybe_compressed->finalize();
if (out)
out->finalize();
}
};

IServer & server;
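The new Output::finalize() helper flushes the wrapping buffers from the innermost one outwards, so any compressed tail reaches the underlying HTTP buffer before that buffer itself is finalized. A toy sketch of the same ordering idea, using illustrative types rather than the ClickHouse WriteBuffer API:

```cpp
#include <iostream>
#include <string>

/// Toy write buffer: holds pending bytes and, on finalize(), flushes them
/// into the buffer it wraps (if any).
struct ToyBuffer
{
    std::string pending;
    std::string flushed;
    ToyBuffer * next = nullptr; /// the buffer this one writes into

    void finalize()
    {
        if (next)
            next->pending += pending;
        else
            flushed += pending;
        pending.clear();
    }
};

int main()
{
    ToyBuffer out;                       /// raw HTTP buffer
    ToyBuffer compressed{.next = &out};  /// wrapper that writes into `out`
    compressed.pending = "tail of the response";

    /// Innermost first, mirroring Output::finalize(): the wrapper's tail
    /// reaches `out` before `out` itself is flushed.
    compressed.finalize();
    out.finalize();

    std::cout << out.flushed << '\n'; /// prints the tail; reversing the order would drop it
}
```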
@@ -249,3 +249,22 @@ TEST(TransformQueryForExternalDatabase, Strict)
/// !isCompatible() takes place
EXPECT_THROW(check(state, 1, "SELECT column FROM test.table WHERE left(column, 10) = RIGHT(column, 10) AND SUBSTRING(column FROM 1 FOR 2) = 'Hello'", ""), Exception);
}

TEST(TransformQueryForExternalDatabase, Null)
{
const State & state = State::instance();

check(state, 1,
"SELECT field FROM table WHERE field IS NULL",
R"(SELECT "field" FROM "test"."table" WHERE "field" IS NULL)");
check(state, 1,
"SELECT field FROM table WHERE field IS NOT NULL",
R"(SELECT "field" FROM "test"."table" WHERE "field" IS NOT NULL)");

check(state, 1,
"SELECT field FROM table WHERE isNull(field)",
R"(SELECT "field" FROM "test"."table" WHERE "field" IS NULL)");
check(state, 1,
"SELECT field FROM table WHERE isNotNull(field)",
R"(SELECT "field" FROM "test"."table" WHERE "field" IS NOT NULL)");
}
@@ -132,6 +132,8 @@ bool isCompatible(IAST & node)
|| name == "notLike"
|| name == "in"
|| name == "notIn"
|| name == "isNull"
|| name == "isNotNull"
|| name == "tuple"))
return false;
@@ -449,6 +449,38 @@ def test_mysql_in(started_cluster):
    drop_mysql_table(conn, table_name)
    conn.close()

def test_mysql_null(started_cluster):
    table_name = 'test_mysql_in'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')

    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    with conn.cursor() as cursor:
        cursor.execute("""
            CREATE TABLE `clickhouse`.`{}` (
                `id` int(11) NOT NULL,
                `money` int NULL default NULL,
                PRIMARY KEY (`id`)) ENGINE=InnoDB;
            """.format(table_name))

    node1.query('''
        CREATE TABLE {}
        (
            id UInt32,
            money Nullable(UInt32)
        )
        ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')
        '''.format(table_name, table_name)
    )

    node1.query("INSERT INTO {} (id, money) SELECT number, if(number%2, NULL, 1) from numbers(10) ".format(table_name))

    assert int(node1.query("SELECT count() FROM {} WHERE money IS NULL SETTINGS external_table_strict_query=1".format(table_name))) == 5
    assert int(node1.query("SELECT count() FROM {} WHERE money IS NOT NULL SETTINGS external_table_strict_query=1".format(table_name))) == 5

    drop_mysql_table(conn, table_name)
    conn.close()


if __name__ == '__main__':
    with contextmanager(started_cluster)() as cluster:
@@ -8,6 +8,6 @@ $CLICKHOUSE_CLIENT -q "drop table if exists tab;"
$CLICKHOUSE_CLIENT -q "create table tab(x LowCardinality(String)) engine = MergeTree order by tuple();"

# We should have correct env vars from shell_config.sh to run this test
python3 "$CURDIR"/2010_lc_native.python
python3 "$CURDIR"/02010_lc_native.python

$CLICKHOUSE_CLIENT -q "drop table if exists tab;"
@@ -118,27 +118,27 @@ re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : ReplicatedMergeTree('/clickhouse/2020_alter_table_modify_comment_default', '1') ORDER BY k
engine : ReplicatedMergeTree('/clickhouse/02020_alter_table_modify_comment_default', '1') ORDER BY k
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/02020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment
@@ -0,0 +1,7 @@
localhost 9000 0 0 0
localhost 9000 0 0 0
localhost 9000 0 0 0
localhost 9000 0 0 0
localhost 9000 0 0 0
localhost 9000 0 0 0
localhost 9000 0 0 0
@@ -0,0 +1,19 @@
-- Tags: distributed, no-parallel, no-replicated-database
-- Tag no-replicated-database: ON CLUSTER is not allowed

DROP DATABASE IF EXISTS 02028_db ON CLUSTER test_shard_localhost;
CREATE DATABASE 02028_db ON CLUSTER test_shard_localhost;
USE 02028_db;

CREATE TABLE t1_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int)ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1;
CREATE TABLE t2_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int)ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1;

INSERT INTO t1_local VALUES('partition1', 1,1);
INSERT INTO t1_local VALUES('partition2', 1,1);
INSERT INTO t2_local VALUES('partition1', 3,3);
INSERT INTO t2_local VALUES('partition2', 6,6);

ALTER TABLE t1_local ON CLUSTER test_shard_localhost REPLACE PARTITION 'partition1' FROM t2_local;
ALTER TABLE t1_local ON CLUSTER test_shard_localhost MOVE PARTITION 'partition2' TO TABLE t2_local;

DROP DATABASE 02028_db ON CLUSTER test_shard_localhost;