Merge remote-tracking branch 'origin/master' into pr-local-plan

Igor Nikonov 2024-09-09 09:58:36 +00:00
commit eda6dabe41
23 changed files with 647 additions and 188 deletions

CITATION.cff (new file, 31 lines added)

@ -0,0 +1,31 @@
# This CITATION.cff file was generated with cffinit.
cff-version: 1.2.0
title: "ClickHouse"
message: "If you use this software, please cite it as below."
type: software
authors:
- family-names: "Milovidov"
given-names: "Alexey"
repository-code: 'https://github.com/ClickHouse/ClickHouse'
url: 'https://clickhouse.com'
license: Apache-2.0
preferred-citation:
type: article
authors:
- family-names: "Schulze"
given-names: "Robert"
- family-names: "Schreiber"
given-names: "Tom"
- family-names: "Yatsishin"
given-names: "Ilya"
- family-names: "Dahimene"
given-names: "Ryadh"
- family-names: "Milovidov"
given-names: "Alexey"
journal: "Proceedings of the VLDB Endowment"
title: "ClickHouse - Lightning Fast Analytics for Everyone"
year: 2024
volume: 17
issue: 12
doi: 10.14778/3685800.3685802


@ -42,21 +42,19 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov:
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
@ -64,7 +62,13 @@ Other upcoming meetups
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
Recently completed events
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

contrib/grpc vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e

contrib/libarchive vendored (2 lines changed)

@ -1 +1 @@
Subproject commit ee45796171324519f0c0bfd012018dd099296336
Subproject commit 0c21691b177fac5f4cceca2a1ff2ddfa5d60f51c

contrib/libuv vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e


@ -10,6 +10,7 @@ set(uv_sources
src/random.c
src/strscpy.c
src/strtok.c
src/thread-common.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries rt)
list(APPEND uv_sources
src/unix/epoll.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/linux.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c)

contrib/openssl vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed


@ -1617,8 +1617,19 @@ The calculation is performed relative to specific points in time:
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
**See Also**
**Syntax**
```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```
The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function. For example:
```sql
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
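Assuming `date_bin()`-style semantics (bucket boundaries lie at origin + n * interval, i.e. at 14:35:30, 14:36:30, ..., 14:44:30, ...), the value 14:45:00 falls into the bucket starting at:
```text
2023-01-01 14:44:30
```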
**See Also**
- [date_trunc](#date_trunc)
## toTime
@ -3884,19 +3895,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots
For a time interval starting at StartTime and continuing for Duration seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the Size in seconds. Size is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as the StartTime argument. For DateTime, the Duration and Size arguments must be `UInt32`; for DateTime64, they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (the return type matches the type of StartTime). For DateTime64, the return value's scale can differ from the scale of StartTime: the highest scale among all given arguments is taken.
Example:
**Syntax**
```sql
timeSlots(StartTime, Duration[, Size])
```
**Example**
```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```
Result:
``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │


@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])
**Parameters**
- `input`: A string type [String](../data-types/string.md).
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.
**Returned value**
@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])
**Example**
```sql
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```
Result:
```text
┌─res─────────────┐
│ ClickHouse CORE │
└─────────────────┘
┌─res──────────────────────┐
│ My mother is from Mexico.│
└──────────────────────────┘
```
```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```
Result:
```text
┌─res───────────────────┐
│ My dad is from Mexico.│
└───────────────────────┘
```
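A negative `offset` is counted from the end of `s`. A minimal sketch, assuming `-2` addresses the second-to-last byte (as in `substring`):
```sql
SELECT overlay('ClickHouse', 'DB', -2) AS res; -- expected: 'ClickHouDB'
```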
## overlayUTF8
Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`.
Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.
**Syntax**
@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.
**Returned value**
@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])
**Example**
```sql
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```
Result:
```text
┌─res────────────────────────┐
│ ClickHouse是开源OLAP数据库 │
└────────────────────────────┘
┌─res────────────────────────────┐
│ Mein Vater ist aus der Türkei. │
└────────────────────────────────┘
```
## replaceOne


@ -492,7 +492,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Nanosecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000000)
{
@ -527,7 +527,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Microsecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000)
{
@ -570,7 +570,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Millisecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000)
{
@ -613,7 +613,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Second>
{
return time_zone.toStartOfSecondInterval(t, seconds);
}
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfSecondInterval(t / scale_multiplier, seconds);
}
@ -634,7 +634,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Minute>
{
return time_zone.toStartOfMinuteInterval(t, minutes);
}
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfMinuteInterval(t / scale_multiplier, minutes);
}
@ -655,7 +655,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Hour>
{
return time_zone.toStartOfHourInterval(t, hours);
}
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfHourInterval(t / scale_multiplier, hours);
}
@ -676,7 +676,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Day>
{
return static_cast<UInt32>(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days));
}
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfDayInterval(time_zone.toDayNum(t / scale_multiplier), days);
}
@ -697,9 +697,13 @@ struct ToStartOfInterval<IntervalKind::Kind::Week>
{
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks);
}
static UInt16 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks);
else
return ToStartOfInterval<IntervalKind::Kind::Day>::execute(t, weeks * 7, time_zone, scale_multiplier, origin);
}
};
@ -718,9 +722,23 @@ struct ToStartOfInterval<IntervalKind::Kind::Month>
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months);
}
static UInt16 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t / scale_multiplier), months);
const Int64 scaled_time = t / scale_multiplier;
if (origin == 0)
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(scaled_time), months);
else
{
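/// 't' is the time elapsed since the origin (the caller passes time - origin): reconstruct the
/// absolute time, count the whole months between the origin and it, round down to a multiple
/// of 'months', and return the bucket start as an offset relative to the origin.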
const Int64 scaled_origin = origin / scale_multiplier;
const Int64 days = time_zone.toDayOfMonth(scaled_time + scaled_origin) - time_zone.toDayOfMonth(scaled_origin);
Int64 months_to_add = time_zone.toMonth(scaled_time + scaled_origin) - time_zone.toMonth(scaled_origin);
const Int64 years = time_zone.toYear(scaled_time + scaled_origin) - time_zone.toYear(scaled_origin);
months_to_add = days < 0 ? months_to_add - 1 : months_to_add;
months_to_add += years * 12;
Int64 month_multiplier = (months_to_add / months) * months;
return (time_zone.addMonths(time_zone.toDate(scaled_origin), month_multiplier) - time_zone.toDate(scaled_origin));
}
}
};
@ -739,9 +757,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Quarter>
{
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters);
}
static UInt16 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, quarters * 3, time_zone, scale_multiplier, origin);
}
};
@ -760,9 +781,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Year>
{
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years);
}
static UInt16 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, years * 12, time_zone, scale_multiplier, origin);
}
};
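With an origin, the Week, Quarter and Year cases above delegate to the Day and Month implementations (a week as 7 days, a quarter as 3 months, a year as 12 months), so the month arithmetic carries the logic: it counts whole months elapsed since the origin and rounds down to a multiple of the interval. One query from the tests added in this commit illustrates the behavior:

```sql
-- bucket boundaries are origin + n months: 2023-09-08 09:08:07, 2023-10-08 09:08:07, ...
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMonth(1), toDateTime('2023-09-08 09:08:07'));
-- returns 2023-10-08 09:08:07
```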


@ -6,6 +6,7 @@
# include <Columns/ColumnString.h>
# include <Functions/LowerUpperImpl.h>
# include <base/scope_guard.h>
# include <unicode/ucasemap.h>
# include <unicode/unistr.h>
# include <unicode/urename.h>
@ -49,6 +50,11 @@ struct LowerUpperUTF8Impl
if (U_FAILURE(error_code))
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Error calling ucasemap_open: {}", u_errorName(error_code));
SCOPE_EXIT(
{
ucasemap_close(case_map);
});
size_t curr_offset = 0;
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{


@ -1,12 +1,12 @@
#include <Columns/ColumnConst.h>
#include <Columns/ColumnString.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/GatherUtils/Sources.h>
#include <Functions/IFunction.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
namespace DB
{
@ -16,8 +16,8 @@ namespace
/// If 'is_utf8' - measure offset and length in code points instead of bytes.
/// Syntax:
/// - overlay(input, replace, offset[, length])
/// - overlayUTF8(input, replace, offset[, length]) - measure offset and length in code points instead of bytes
/// - overlay(s, replace, offset[, length])
/// - overlayUTF8(s, replace, offset[, length]) - measure offset and length in code points instead of bytes
template <bool is_utf8>
class FunctionOverlay : public IFunction
{
@ -34,7 +34,7 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
FunctionArgumentDescriptors mandatory_args{
{"input", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"s", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"replace", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"offset", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), nullptr, "(U)Int8/16/32/64"},
};
@ -100,7 +100,6 @@ public:
res_data.reserve(col_input_string->getChars().size());
}
#define OVERLAY_EXECUTE_CASE(HAS_FOUR_ARGS, OFFSET_IS_CONST, LENGTH_IS_CONST) \
if (input_is_const && replace_is_const) \
constantConstant<HAS_FOUR_ARGS, OFFSET_IS_CONST, LENGTH_IS_CONST>( \
@ -186,7 +185,6 @@ public:
return res_col;
}
private:
/// input offset is 1-based, may be negative
/// output result is 0-based valid offset, within [0, input_size]
@ -229,6 +227,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
constantConstant<true, offset_is_const, false>(
@ -343,6 +342,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
vectorConstant<true, offset_is_const, false>(
@ -461,6 +461,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
constantVector<true, offset_is_const, false>(
@ -577,6 +578,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
vectorVector<true, offset_is_const, false>(


@ -10,21 +10,31 @@
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <algorithm>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
class FunctionToStartOfInterval : public IFunction
{
private:
enum class Overload
{
Default, /// toStartOfInterval(time, interval) or toStartOfInterval(time, interval, timezone)
Origin /// toStartOfInterval(time, interval, origin) or toStartOfInterval(time, interval, origin, timezone)
};
mutable Overload overload;
public:
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionToStartOfInterval>(); }
@ -34,7 +44,7 @@ public:
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; }
bool hasInformationAboutMonotonicity() const override { return true; }
Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override { return { .is_monotonic = true, .is_always_monotonic = true }; }
@ -72,6 +82,9 @@ public:
"Illegal type {} of 2nd argument of function {}, expected a time interval",
type_arg2->getName(), getName());
overload = Overload::Default;
/// Determine result type for default overload (no origin)
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
@ -97,13 +110,49 @@ public:
auto check_third_argument = [&]
{
const DataTypePtr & type_arg3 = arguments[2].type;
if (!isString(type_arg3))
if (isString(type_arg3))
{
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of 3rd argument of function {}, expected a constant timezone string",
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
}
else if (isDateOrDate32OrDateTimeOrDateTime64(type_arg3))
{
overload = Overload::Origin;
const DataTypePtr & type_arg1 = arguments[0].type;
if (isDate(type_arg1) && isDate(type_arg3))
result_type = ResultType::Date;
else if (isDate32(type_arg1) && isDate32(type_arg3))
result_type = ResultType::Date32;
else if (isDateTime(type_arg1) && isDateTime(type_arg3))
result_type = ResultType::DateTime;
else if (isDateTime64(type_arg1) && isDateTime64(type_arg3))
result_type = ResultType::DateTime64;
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same type", getName());
}
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"This argument is optional and must be a constant String with timezone name or a Date/Date32/DateTime/DateTime64 with a constant origin",
type_arg3->getName(), getName());
if (value_is_date && result_type == ResultType::Date) /// weird why this is && instead of || but too afraid to change it
};
auto check_fourth_argument = [&]
{
if (overload != Overload::Origin) /// sanity check
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"The third argument must a Date/Date32/DateTime/DateTime64 with a constant origin",
arguments[2].type->getName(), getName());
const DataTypePtr & type_arg4 = arguments[3].type;
if (!isString(type_arg4))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 4th argument of function {}. "
"This argument is optional and must be a constant String with timezone name",
type_arg4->getName(), getName());
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"The timezone argument of function {} with interval type {} is allowed only when the 1st argument has type DateTime or DateTimt64",
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
};
@ -118,10 +167,17 @@ public:
check_second_argument();
check_third_argument();
}
else if (arguments.size() == 4)
{
check_first_argument();
check_second_argument();
check_third_argument();
check_fourth_argument();
}
else
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
"Number of arguments for function {} doesn't match: passed {}, must be 2, 3 or 4",
getName(), arguments.size());
}
@ -132,10 +188,19 @@ public:
case ResultType::Date32:
return std::make_shared<DataTypeDate32>();
case ResultType::DateTime:
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
{
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
case ResultType::DateTime64:
{
UInt32 scale = 0;
if (isDateTime64(arguments[0].type) && overload == Overload::Origin)
{
scale = assert_cast<const DataTypeDateTime64 &>(*arguments[0].type.get()).getScale();
if (assert_cast<const DataTypeDateTime64 &>(*arguments[2].type.get()).getScale() != scale)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same scale", getName());
}
if (interval_type->getKind() == IntervalKind::Kind::Nanosecond)
scale = 9;
else if (interval_type->getKind() == IntervalKind::Kind::Microsecond)
@ -143,69 +208,103 @@ public:
else if (interval_type->getKind() == IntervalKind::Kind::Millisecond)
scale = 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
}
std::unreachable();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override
{
const auto & time_column = arguments[0];
const auto & interval_column = arguments[1];
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0);
auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone, input_rows_count);
ColumnWithTypeAndName origin_column;
if (overload == Overload::Origin)
origin_column = arguments[2];
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_arg_num, 0);
ColumnPtr result_column;
if (isDate(result_type))
result_column = dispatchForTimeColumn<DataTypeDate>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDate32(result_type))
result_column = dispatchForTimeColumn<DataTypeDate32>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime64(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime64>(time_column, interval_column, origin_column, result_type, time_zone);
return result_column;
}
private:
template <typename ReturnType>
ColumnPtr dispatchForTimeColumn(
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone,
size_t input_rows_count) const
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const
{
const auto & time_column_type = *time_column.type.get();
const auto & time_column_col = *time_column.column.get();
if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count, scale);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
}
else if (isDate(time_column_type))
if (isDate(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate, ColumnDate>(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDate32(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate32, ColumnDate32>(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime, ColumnDateTime>(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime64, ColumnDateTime64>(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone, scale);
}
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
}
template <typename TimeDataType, typename TimeColumnType>
template <typename ReturnType, typename TimeDataType, typename TimeColumnType>
ColumnPtr dispatchForIntervalColumn(
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale = 1) const
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const
{
const auto * interval_type = checkAndGetDataType<DataTypeInterval>(interval_column.type.get());
if (!interval_type)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a time interval", getName());
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
case IntervalKind::Kind::Microsecond:
case IntervalKind::Kind::Millisecond:
if (isDateOrDate32(time_data_type) || isDateTime(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type {}", isDate(time_data_type) ? "Date" : "DateTime");
break;
case IntervalKind::Kind::Second:
case IntervalKind::Kind::Minute:
case IntervalKind::Kind::Hour:
if (isDateOrDate32(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type Date");
break;
default:
break;
}
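/// Example (from the tests below): toStartOfInterval(toDate('2023-10-09'), toIntervalHour(1), toDate('2023-10-09'))
/// throws ILLEGAL_TYPE_OF_ARGUMENT, since sub-day intervals require a DateTime or DateTime64 argument.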
const auto * interval_column_const_int64 = checkAndGetColumnConst<ColumnInt64>(interval_column.column.get());
if (!interval_column_const_int64)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a const time interval", getName());
@ -217,51 +316,102 @@ private:
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Microsecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Millisecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Second:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Minute:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Hour:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Day:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Week:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Month:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Quarter:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Year:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
}
std::unreachable();
}
template <typename TimeDataType, typename TimeColumnType, typename ResultDataType, IntervalKind::Kind unit>
ColumnPtr execute(
const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale) const
template <typename ResultDataType, typename TimeDataType, typename TimeColumnType, IntervalKind::Kind unit>
ColumnPtr execute(const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const
{
using ResultColumnType = typename ResultDataType::ColumnType;
using ResultFieldType = typename ResultDataType::FieldType;
const auto & time_data = time_column_type.getData();
size_t size = time_data.size();
auto result_col = result_type->createColumn();
auto * col_to = assert_cast<ResultColumnType *>(result_col.get());
auto & result_data = col_to->getData();
result_data.resize(input_rows_count);
result_data.resize(size);
Int64 scale_multiplier = DecimalUtils::scaleMultiplier<DateTime64>(scale);
for (size_t i = 0; i != input_rows_count; ++i)
result_data[i] = static_cast<ResultFieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
if (origin_column.column) // Overload: Origin
{
const bool is_small_interval = (unit == IntervalKind::Kind::Nanosecond || unit == IntervalKind::Kind::Microsecond || unit == IntervalKind::Kind::Millisecond);
const bool is_result_date = isDateOrDate32(result_type);
Int64 result_scale = scale_multiplier;
Int64 origin_scale = 1;
if (isDateTime64(result_type)) /// We only have an origin scale if the arguments are DateTime64.
origin_scale = assert_cast<const DataTypeDateTime64 &>(*origin_column.type).getScaleMultiplier();
else if (!is_small_interval) /// For a large interval with non-DateTime64 arguments, the result has no scale.
result_scale = 1;
if (is_small_interval)
result_scale = assert_cast<const DataTypeDateTime64 &>(*result_type).getScaleMultiplier();
/// If the origin and the result differ in precision, we need the scale difference between them
/// to get the right precision for the result. For large intervals, there is no scale difference.
Int64 scale_diff = is_small_interval ? std::max(result_scale / origin_scale, origin_scale / result_scale) : 1;
static constexpr Int64 SECONDS_PER_DAY = 86'400;
UInt64 origin = origin_column.column->get64(0);
for (size_t i = 0; i != size; ++i)
{
UInt64 time_arg = time_data[i];
if (origin > static_cast<size_t>(time_arg))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The origin must not be after the given date / date with time");
if (is_result_date) /// All internal calculations of ToStartOfInterval<...> expect arguments to be seconds or milli-, micro-, nanoseconds.
{
time_arg *= SECONDS_PER_DAY;
origin *= SECONDS_PER_DAY;
}
Int64 offset = ToStartOfInterval<unit>::execute(time_arg - origin, num_units, time_zone, result_scale, origin);
/// If the arguments are DateTime64 and the interval is large, apply the scale to the offset.
offset *= (!is_small_interval) ? result_scale : 1;
if (is_result_date) /// Convert back to date after calculations.
{
offset /= SECONDS_PER_DAY;
origin /= SECONDS_PER_DAY;
}
result_data[i] = 0;
result_data[i] += (result_scale < origin_scale) ? (origin + offset) / scale_diff : (origin + offset) * scale_diff;
}
}
else // Overload: Default
{
for (size_t i = 0; i != size; ++i)
result_data[i] = static_cast<typename ResultDataType::FieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
}
return result_col;
}


@ -303,6 +303,7 @@ class _NetworkManager:
destination_port=None,
action=None,
probability=None,
protocol=None,
custom_args=None,
):
ret = []
@ -317,7 +318,7 @@ class _NetworkManager:
str(probability),
]
)
ret.extend(["-p", "tcp"])
ret.extend(["-p", "tcp" if protocol is None else protocol])
if source is not None:
ret.extend(["-s", source])
if destination is not None:


@ -8,6 +8,7 @@ import logging
from helpers.cluster import ClickHouseCluster, is_arm
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
from helpers.network import PartitionManager
import json
import subprocess
@ -138,7 +139,7 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster)
kafka_produce(
kafka_cluster,
"kafka_json_as_string",
"kafka_json_as_string_after_expiration",
[
'{"t": 123, "e": {"x": "woof"} }',
"",
@ -152,9 +153,9 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster)
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kerberized_kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_topic_list = 'kafka_json_as_string_after_expiration',
kafka_commit_on_select = 1,
kafka_group_name = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string_after_expiration',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
"""
@ -170,7 +171,7 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster)
"""
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows"
"Parsing of message (topic: kafka_json_as_string_after_expiration, partition: 0, offset: 1) return no rows"
)
@ -204,7 +205,22 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster):
],
)
kafka_cluster.pause_container("kafka_kerberos")
# temporarily prevent CH - KDC communication
with PartitionManager() as pm:
other_node = "kafka_kerberos"
for node in kafka_cluster.instances.values():
source = node.ip_address
destination = kafka_cluster.get_instance_ip(other_node)
logging.debug(f"partitioning source {source}, destination {destination}")
pm._add_rule(
{
"source": source,
"destination": destination,
"action": "REJECT",
"protocol": "all",
}
)
time.sleep(45) # wait for ticket expiration
instance.query(
@ -223,8 +239,6 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster):
result = instance.query("SELECT * FROM test.kafka_no_kdc;")
expected = ""
kafka_cluster.unpause_container("kafka_kerberos")
assert TSV(result) == TSV(expected)
assert instance.contains_in_log("StorageKafka (kafka_no_kdc): Nothing to commit")
assert instance.contains_in_log("Ticket expired")
@ -234,7 +248,7 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster):
def test_kafka_config_from_sql_named_collection(kafka_cluster):
kafka_produce(
kafka_cluster,
"kafka_json_as_string",
"kafka_json_as_string_named_collection",
[
'{"t": 123, "e": {"x": "woof"} }',
"",
@ -245,6 +259,7 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster):
instance.query(
"""
DROP NAMED COLLECTION IF EXISTS kafka_config;
CREATE NAMED COLLECTION kafka_config AS
kafka.security_protocol = 'SASL_PLAINTEXT',
kafka.sasl_mechanism = 'GSSAPI',
@ -255,9 +270,9 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster):
kafka.api_version_request = 'false',
kafka_broker_list = 'kerberized_kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_topic_list = 'kafka_json_as_string_named_collection',
kafka_commit_on_select = 1,
kafka_group_name = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string_named_collection',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
"""
@ -279,7 +294,7 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster):
"""
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows"
"Parsing of message (topic: kafka_json_as_string_named_collection, partition: 0, offset: 1) return no rows"
)


@ -0,0 +1 @@
españa


@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# no-fasttest: upper/lowerUTF8 use ICU
# Test for issue #69336
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_LOCAL --query "SELECT lowerUTF8('ESPAÑA')"


@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Tags: no-shared-merge-tree
# Tag no-shared-merge-tree: depend on events with local disk
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --query "
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt64, val UInt64) engine = MergeTree Order by key PARTITION BY key >= 128;
SET max_block_size = 64, max_insert_block_size = 64, min_insert_block_size_rows = 64;
INSERT INTO test SELECT number AS key, sipHash64(number) AS val FROM numbers(512);
"
${CLICKHOUSE_CLIENT} --query "
SYSTEM FLUSH LOGS;
SELECT
if(count(DISTINCT query_id) == 1, 'Ok', 'Error: ' || toString(count(DISTINCT query_id))),
if(count() == 512 / 64, 'Ok', 'Error: ' || toString(count())), -- 512 rows inserted, 64 rows per block
if(SUM(ProfileEvents['MergeTreeDataWriterRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterRows']))),
if(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']))),
if(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']))),
if(SUM(ProfileEvents['MergeTreeDataWriterBlocks']) >= 8, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterBlocks'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'NewPart';
"
${CLICKHOUSE_CLIENT} --query "OPTIMIZE TABLE test FINAL;"
${CLICKHOUSE_CLIENT} --query "
SYSTEM FLUSH LOGS;
SELECT
if(count() > 2, 'Ok', 'Error: ' || toString(count())),
if(SUM(ProfileEvents['MergedRows']) >= 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'MergeParts';
"
${CLICKHOUSE_CLIENT} --query "
ALTER TABLE test UPDATE val = 0 WHERE key % 2 == 0 SETTINGS mutations_sync = 2
"
# The mutation query may return before the entry is added to the system.part_log table.
# Retry SYSTEM FLUSH LOGS until all entries are fully flushed.
for _ in {1..10}; do
${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS"
res=$(${CLICKHOUSE_CLIENT} --query "
SELECT count() FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'MutatePart';"
)
if [[ $res -eq 2 ]]; then
break
fi
sleep 2.0
done
${CLICKHOUSE_CLIENT} --query "
SELECT
if(count() == 2, 'Ok', 'Error: ' || toString(count())),
if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))),
if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'MutatePart';
"
${CLICKHOUSE_CLIENT} --query "DROP TABLE test"


@ -1,50 +0,0 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt64, val UInt64) engine = MergeTree Order by key PARTITION BY key >= 128;
SET max_block_size = 64, max_insert_block_size = 64, min_insert_block_size_rows = 64;
INSERT INTO test SELECT number AS key, sipHash64(number) AS val FROM numbers(512);
SYSTEM FLUSH LOGS;
SELECT
if(count(DISTINCT query_id) == 1, 'Ok', 'Error: ' || toString(count(DISTINCT query_id))),
if(count() == 512 / 64, 'Ok', 'Error: ' || toString(count())), -- 512 rows inserted, 64 rows per block
if(SUM(ProfileEvents['MergeTreeDataWriterRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterRows']))),
if(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']))),
if(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']))),
if(SUM(ProfileEvents['MergeTreeDataWriterBlocks']) >= 8, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterBlocks'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'NewPart'
;
OPTIMIZE TABLE test FINAL;
SYSTEM FLUSH LOGS;
SELECT
if(count() > 2, 'Ok', 'Error: ' || toString(count())),
if(SUM(ProfileEvents['MergedRows']) >= 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'MergeParts'
;
ALTER TABLE test UPDATE val = 0 WHERE key % 2 == 0 SETTINGS mutations_sync = 2;
SYSTEM FLUSH LOGS;
SELECT
if(count() == 2, 'Ok', 'Error: ' || toString(count())),
if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))),
if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen'])))
FROM system.part_log
WHERE event_time > now() - INTERVAL 10 MINUTE
AND database == currentDatabase() AND table == 'test'
AND event_type == 'MutatePart'
;
DROP TABLE test;


@ -0,0 +1,59 @@
-- Negative tests
Time and origin as Date
2023-02-01
2023-08-01
2023-10-08
2023-10-08
2023-10-09
Time and origin as Date32
2023-02-01
2023-08-01
2023-10-08
2023-10-08
2023-10-09
Time and origin as DateTime
2023-02-01 09:08:07
2023-08-01 09:08:07
2023-10-08 09:08:07
2023-10-08 09:08:07
2023-10-09 09:08:07
2023-10-09 10:10:07
2023-10-09 10:11:07
2023-10-09 10:11:12
Time and origin as DateTime64(9)
2023-02-01 09:08:07.123456789
2023-08-01 09:08:07.123456789
2023-09-10 09:08:07.123456789
2023-10-08 09:08:07.123456789
2023-10-09 09:08:07.123456789
2023-10-09 10:10:07.123456789
2023-10-09 10:11:11.123456789
2023-10-09 10:11:12.123456789
2023-10-09 10:11:12.987
2023-10-09 10:11:12.987654
2023-10-09 10:11:12.987654321
Time and origin as DateTime64(3)
2023-02-01 09:08:07.123
2023-08-01 09:08:07.123
2023-10-08 09:08:07.123
2023-10-08 09:08:07.123
2023-10-09 09:08:07.123
2023-10-09 10:10:07.123
2023-10-09 10:11:11.123
2023-10-09 10:11:12.123
2023-10-09 10:11:12.987
2023-10-09 10:11:12.987000
2023-10-09 10:11:12.987000000
Non-const arguments
2023-03-01 16:55:00.00
2023-02-01 16:55:00.00
2023-03-01 16:55:00.00
2023-02-01 16:55:00.00
2023-03-01 16:55:00.00
2023-03-01 16:55:00
2023-02-01 16:55:00
2023-03-01 16:55:00
2023-02-01 16:55:00
2023-03-01 16:55:00
2023-01-02 15:44:30
2023-02-01 16:44:30.00


@ -0,0 +1,95 @@
set session_timezone = 'UTC';
SELECT '-- Negative tests';
-- time and origin arguments must have the same type
SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalSecond(5), toDate32('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalMillisecond(12), toDateTime('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-01-02 14:45:50'), toIntervalHour(5), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), toDateTime64('2023-01-02 14:44:30', 2)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDateTime64('2023-01-02 14:45:50', 2), toIntervalMinute(1), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
-- the origin must be before the time
SELECT toStartOfInterval(toDateTime('2023-01-02 14:42:50'), toIntervalMinute(1), toDateTime('2023-01-02 14:44:30')); -- { serverError BAD_ARGUMENTS }
-- the origin must be constant
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), number % 2 == 0 ? toDateTime('2023-02-01 15:55:00') : toDateTime('2023-01-01 15:55:00')) from numbers(1); -- { serverError ILLEGAL_COLUMN }
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalHour(1), materialize(toDateTime('2023-01-02 14:44:30')), 'Europe/Amsterdam'); -- { serverError ILLEGAL_COLUMN }
-- with 4 arguments, the 3rd one must not be a string or an integer
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 'Europe/Amsterdam', 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 5, 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
-- too many arguments
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), toDateTime('2020-01-02 14:44:30'), 'Europe/Amsterdam', 5); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT 'Time and origin as Date';
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalYear(1), toDate('2022-02-01'));
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalQuarter(1), toDate('2022-02-01'));
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMonth(1), toDate('2023-09-08'));
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalWeek(1), toDate('2023-10-01'));
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalDay(1), toDate('2023-10-08'));
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalHour(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMinute(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalSecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMillisecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMicrosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalNanosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT 'Time and origin as Date32';
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalYear(1), toDate32('2022-02-01'));
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalQuarter(1), toDate32('2022-02-01'));
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMonth(1), toDate32('2023-09-08'));
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalWeek(1), toDate32('2023-10-01'));
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalDay(1), toDate32('2023-10-08'));
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalHour(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMinute(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalSecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMillisecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMicrosecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalNanosecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT 'Time and origin as DateTime';
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 09:08:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalQuarter(1), toDateTime('2022-02-01 09:08:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMonth(1), toDateTime('2023-09-08 09:08:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalWeek(1), toDateTime('2023-10-01 09:08:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalDay(1), toDateTime('2023-10-08 09:08:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalHour(1), toDateTime('2023-10-09 09:10:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMinute(1), toDateTime('2023-10-09 09:10:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalSecond(1), toDateTime('2023-10-09 09:10:07'));
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMillisecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMicrosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalNanosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT 'Time and origin as DateTime64(9)';
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMonth(1), toDateTime64('2023-09-10 09:08:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMinute(1), toDateTime64('2023-10-09 09:10:11.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9));
SELECT 'Time and origin as DateTime64(3)';
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMonth(1), toDateTime64('2023-09-08 09:08:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMinute(1), toDateTime64('2023-10-09 10:10:11.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3));
SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3));
SELECT 'Non-const arguments';
SELECT toStartOfInterval(number % 2 == 0 ? toDateTime64('2023-03-01 15:55:00', 2) : toDateTime64('2023-02-01 15:55:00', 2), toIntervalMinute(1), toDateTime64('2023-01-01 13:55:00', 2), 'Europe/Amsterdam') from numbers(5);
SELECT toStartOfInterval(number % 2 == 0 ? toDateTime('2023-03-01 15:55:00') : toDateTime('2023-02-01 15:55:00'), toIntervalHour(1), toDateTime('2023-01-01 13:55:00'), 'Europe/Amsterdam') from numbers(5);
SELECT toStartOfInterval(materialize(toDateTime('2023-01-02 14:45:50')), toIntervalHour(1), toDateTime('2023-01-02 14:44:30'), 'Europe/Amsterdam');
SELECT toStartOfInterval(materialize(toDateTime64('2023-02-01 15:45:50', 2)), toIntervalHour(1), toDateTime64('2023-01-02 14:44:30', 2), 'Europe/Amsterdam');
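-- Illustrative sketch, not part of the original test file: the optional 4th argument is assumed to
-- set the time zone in which the result is rendered. With session_timezone = 'UTC', the bucket start
-- 2023-01-02 14:44:30 UTC is shown as 15:44:30 in Europe/Amsterdam (UTC+1 in winter):
SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalHour(1), toDateTime('2023-01-02 14:44:30'), 'Europe/Amsterdam'); -- expected: 2023-01-02 15:44:30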


@ -35,10 +35,10 @@ SELECT overlay('Spark SQL', materialize('ANSI '), materialize(7), materialize(0)
SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), materialize(7), materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), materialize(7), materialize(0));
SELECT 'Test with special offset values';
WITH number - 12 AS offset SELECT offset, overlay('Spark SQL', '__', offset), overlayUTF8('Spark SQL和CH', '', offset) FROM numbers(26);
WITH number - 12 AS offset SELECT offset, overlay('Spark SQL', '__', offset), overlayUTF8('Spark SQL和CH', '', offset) FROM numbers(26) ORDER BY number;
SELECT 'Test with special length values';
WITH number - 1 AS length SELECT length, overlay('Spark SQL', 'ANSI ', 7, length), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, length) FROM numbers(8);
WITH number - 1 AS length SELECT length, overlay('Spark SQL', 'ANSI ', 7, length), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, length) FROM numbers(8) ORDER BY number;
SELECT 'Test with special input and replace values';
SELECT overlay('', '_', 6), overlayUTF8('', '_', 6);
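-- Illustrative sketch, not part of the original test file, assuming overlay(input, replace, offset[, length])
-- substitutes `length` characters of `input` starting at 1-based `offset` with `replace`; a length of 0 makes
-- it a pure insertion, and the length defaults to the length of `replace` when omitted:
SELECT overlay('Spark SQL', 'ANSI ', 7, 0); -- expected: 'Spark ANSI SQL' (inserted before position 7)
SELECT overlay('Spark SQL', '_', 6, 1);     -- expected: 'Spark_SQL' (the space at position 6 is replaced)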


@ -975,6 +975,7 @@ ThreadPoolRemoteFSReaderThreads
ThreadPoolRemoteFSReaderThreadsActive
ThreadsActive
ThreadsInOvercommitTracker
TimescaleDB's
TimeSeries
Timeunit
TinyLog