mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-09-19 16:20:50 +00:00
Merge branch 'master' into builtin-skim
This commit is contained in:
commit
c93b7741b3
1
.gitignore
vendored
1
.gitignore
vendored
@ -158,6 +158,7 @@ website/package-lock.json
|
||||
# temporary test files
|
||||
tests/queries/0_stateless/test_*
|
||||
tests/queries/0_stateless/*.binary
|
||||
tests/queries/0_stateless/*.generated-expect
|
||||
|
||||
# rust
|
||||
/rust/**/target
|
||||
|
@ -16,6 +16,6 @@ ClickHouse® is an open-source column-oriented database management system that a
|
||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||
|
||||
## Upcoming events
|
||||
* [**v22.12 Release Webinar**](https://clickhouse.com/company/events/v22-12-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
|
||||
* [**v22.12 Release Webinar**](https://clickhouse.com/company/events/v22-12-release-webinar) 22.12 is the ClickHouse Christmas release. There are plenty of gifts (a new JOIN algorithm among them) and we adopted something from MongoDB. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
|
||||
* [**ClickHouse Meetup at the CHEQ office in Tel Aviv**](https://www.meetup.com/clickhouse-tel-aviv-user-group/events/289599423/) - Jan 16 - We are very excited to be holding our next in-person ClickHouse meetup at the CHEQ office in Tel Aviv! Hear from CHEQ, ServiceNow and Contentsquare, as well as a deep dive presentation from ClickHouse CTO Alexey Milovidov. Join us for a fun evening of talks, food and discussion!
|
||||
* **ClickHouse Meetup in Seattle** - Keep an eye on this space as we will be announcing a January meetup in Seattle soon!
|
||||
* [**ClickHouse Meetup at Microsoft Office in Seattle**](https://www.meetup.com/clickhouse-seattle-user-group/events/290310025/) - Jan 18 - Keep an eye on this space as we will be announcing speakers soon!
|
||||
|
@ -497,6 +497,7 @@ else
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
@ -13,6 +13,7 @@ Columns:
|
||||
- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — Metadata path.
|
||||
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID.
|
||||
- `comment` ([String](../../sql-reference/data-types/string.md)) — Database comment.
|
||||
- `engine_full` ([String](../../sql-reference/data-types/string.md)) — Parameters of the database engine.
|
||||
|
||||
The `name` column from this system table is used for implementing the `SHOW DATABASES` query.
|
||||
|
||||
@ -31,10 +32,12 @@ SELECT * FROM system.databases;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name───────────────┬─engine─┬─data_path──────────────────┬─metadata_path───────────────────────────────────────────────────────┬─uuid─────────────────────────────────┬─comment─┐
|
||||
│ INFORMATION_SCHEMA │ Memory │ /var/lib/clickhouse/ │ │ 00000000-0000-0000-0000-000000000000 │ │
|
||||
│ default │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/d31/d317b4bd-3595-4386-81ee-c2334694128a/ │ 24363899-31d7-42a0-a436-389931d752a0 │ │
|
||||
│ information_schema │ Memory │ /var/lib/clickhouse/ │ │ 00000000-0000-0000-0000-000000000000 │ │
|
||||
│ system │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/1d1/1d1c869d-e465-4b1b-a51f-be033436ebf9/ │ 03e9f3d1-cc88-4a49-83e9-f3d1cc881a49 │ │
|
||||
└────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┴─────────┘
|
||||
┌─name────────────────┬─engine─────┬─data_path────────────────────┬─metadata_path─────────────────────────────────────────────────────────┬─uuid─────────────────────────────────┬─engine_full────────────────────────────────────────────┬─comment─┐
|
||||
│ INFORMATION_SCHEMA │ Memory │ /data/clickhouse_data/ │ │ 00000000-0000-0000-0000-000000000000 │ Memory │ │
|
||||
│ default │ Atomic │ /data/clickhouse_data/store/ │ /data/clickhouse_data/store/f97/f97a3ceb-2e8a-4912-a043-c536e826a4d4/ │ f97a3ceb-2e8a-4912-a043-c536e826a4d4 │ Atomic │ │
|
||||
│ information_schema │ Memory │ /data/clickhouse_data/ │ │ 00000000-0000-0000-0000-000000000000 │ Memory │ │
|
||||
│ replicated_database │ Replicated │ /data/clickhouse_data/store/ │ /data/clickhouse_data/store/da8/da85bb71-102b-4f69-9aad-f8d6c403905e/ │ da85bb71-102b-4f69-9aad-f8d6c403905e │ Replicated('some/path/database', 'shard1', 'replica1') │ │
|
||||
│ system │ Atomic │ /data/clickhouse_data/store/ │ /data/clickhouse_data/store/b57/b5770419-ac7a-4b67-8229-524122024076/ │ b5770419-ac7a-4b67-8229-524122024076 │ Atomic │ │
|
||||
└─────────────────────┴────────────┴──────────────────────────────┴───────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┴────────────────────────────────────────────────────────┴─────────┘
|
||||
|
||||
```
|
||||
|
@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.
|
||||
|
||||
## toRelativeYearNum
|
||||
|
||||
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeQuarterNum
|
||||
|
||||
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMonthNum
|
||||
|
||||
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeWeekNum
|
||||
|
||||
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeDayNum
|
||||
|
||||
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeHourNum
|
||||
|
||||
Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMinuteNum
|
||||
|
||||
Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeSecondNum
|
||||
|
||||
Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.
|
||||
Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
|
||||
|
||||
## toISOYear
|
||||
|
||||
@ -517,154 +517,6 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
|
||||
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
|
||||
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
|
||||
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_trunc
|
||||
|
||||
Truncates date and time data to the specified part of date.
|
||||
@ -785,6 +637,80 @@ Result:
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the difference between two dates or dates with time values.
|
||||
The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_sub
|
||||
|
||||
Subtracts the time interval or date interval from the provided date or date with time.
|
||||
|
@ -1159,4 +1159,40 @@ If s is empty, the result is 0. If the first character is not an ASCII character
|
||||
|
||||
|
||||
|
||||
## concatWithSeparator
|
||||
|
||||
Returns the concatenation of the given strings, separated by the specified separator string. If any of the argument values is `NULL`, the function returns `NULL`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
concatWithSeparator(sep, expr1, expr2, expr3...)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
- sep — separator. Const [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
- exprN — expression to be concatenated. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
|
||||
**Returned values**
|
||||
- The concatenated String.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT concatWithSeparator('a', '1', '2', '3', '4')
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─concatWithSeparator('a', '1', '2', '3', '4')─┐
|
||||
│ 1a2a3a4 │
|
||||
└──────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## concatWithSeparatorAssumeInjective
|
||||
Same as concatWithSeparator, with the difference that you need to ensure that concatWithSeparator(sep, expr1, expr2, expr3...) → result is injective; it will be used for optimization of GROUP BY.
|
||||
|
||||
A function is called “injective” if it always returns different results for different values of its arguments. In other words: different arguments never yield an identical result.
|
||||
|
@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toRelativeYearNum {#torelativeyearnum}
|
||||
|
||||
Переводит дату или дату-с-временем в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату-с-временем или дату в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeQuarterNum {#torelativequarternum}
|
||||
|
||||
Переводит дату или дату-с-временем в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату-с-временем или дату в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeMonthNum {#torelativemonthnum}
|
||||
|
||||
Переводит дату или дату-с-временем в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату-с-временем или дату в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeWeekNum {#torelativeweeknum}
|
||||
|
||||
Переводит дату или дату-с-временем в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату-с-временем или дату в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeDayNum {#torelativedaynum}
|
||||
|
||||
Переводит дату или дату-с-временем в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату-с-временем или дату в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeHourNum {#torelativehournum}
|
||||
|
||||
@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toISOYear {#toisoyear}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
Переводит дату-с-временем или дату в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -479,7 +479,7 @@ SELECT
|
||||
|
||||
## toISOWeek {#toisoweek}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер ISO недели.
|
||||
Переводит дату-с-временем или дату в число типа UInt8, содержащее номер ISO недели.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это последняя неделя 2016 года.
|
||||
@ -503,7 +503,7 @@ SELECT
|
||||
```
|
||||
|
||||
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}
|
||||
Переводит дату или дату-с-временем в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
Переводит дату-с-временем или дату в число UInt8, содержащее номер недели. Второй аргумент mode задает режим: начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
`toISOWeek() ` эквивалентно `toWeek(date,3)`.
|
||||
|
||||
@ -569,132 +569,6 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
|
||||
Например, разница между `2021-12-29` и `2022-01-01` 3 дня для единицы `day`, 0 месяцев для единицы `month`, 0 лет для единицы `year`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет количество границ указанной единицы `unit`, пересекаемых между `startdate` и `enddate`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date_trunc {#date_trunc}
|
||||
|
||||
Отсекает от даты и времени части, меньшие чем указанная часть.
|
||||
@ -815,6 +689,60 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет разницу между двумя значениями дат или дат со временем.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_sub {#date_sub}
|
||||
|
||||
Вычитает интервал времени или даты из указанной даты или даты со временем.
|
||||
|
@ -16,6 +16,8 @@
|
||||
|
||||
#include <base/find_symbols.h>
|
||||
|
||||
#include <Access/AccessControl.h>
|
||||
|
||||
#include "config_version.h"
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/formatReadable.h>
|
||||
@ -258,6 +260,10 @@ try
|
||||
if (is_interactive && !config().has("no-warnings"))
|
||||
showWarnings();
|
||||
|
||||
/// Set user password complexity rules
|
||||
auto & access_control = global_context->getAccessControl();
|
||||
access_control.setPasswordComplexityRules(connection->getPasswordComplexityRules());
|
||||
|
||||
if (is_interactive && !delayed_interactive)
|
||||
{
|
||||
runInteractive();
|
||||
|
@ -466,6 +466,30 @@
|
||||
<allow_no_password>1</allow_no_password>
|
||||
<allow_implicit_no_password>1</allow_implicit_no_password>
|
||||
|
||||
<!-- Complexity requirements for user passwords. -->
|
||||
<!-- <password_complexity>
|
||||
<rule>
|
||||
<pattern>.{12}</pattern>
|
||||
<message>be at least 12 characters long</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{N}</pattern>
|
||||
<message>contain at least 1 numeric character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{Ll}</pattern>
|
||||
<message>contain at least 1 lowercase character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{Lu}</pattern>
|
||||
<message>contain at least 1 uppercase character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>[^\p{L}\p{N}]</pattern>
|
||||
<message>contain at least 1 special character</message>
|
||||
</rule>
|
||||
</password_complexity> -->
|
||||
|
||||
<!-- Policy from the <storage_configuration> for the temporary files.
|
||||
If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
|
||||
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <re2/re2.h>
|
||||
#include <filesystem>
|
||||
#include <mutex>
|
||||
|
||||
@ -38,6 +39,8 @@ namespace ErrorCodes
|
||||
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
|
||||
extern const int UNKNOWN_SETTING;
|
||||
extern const int AUTHENTICATION_FAILED;
|
||||
extern const int CANNOT_COMPILE_REGEXP;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
namespace
|
||||
@ -140,6 +143,109 @@ private:
|
||||
};
|
||||
|
||||
|
||||
class AccessControl::PasswordComplexityRules
|
||||
{
|
||||
public:
|
||||
void setPasswordComplexityRulesFromConfig(const Poco::Util::AbstractConfiguration & config_)
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
|
||||
rules.clear();
|
||||
|
||||
if (config_.has("password_complexity"))
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys password_complexity;
|
||||
config_.keys("password_complexity", password_complexity);
|
||||
|
||||
for (const auto & key : password_complexity)
|
||||
{
|
||||
if (key == "rule" || key.starts_with("rule["))
|
||||
{
|
||||
String pattern(config_.getString("password_complexity." + key + ".pattern"));
|
||||
String message(config_.getString("password_complexity." + key + ".message"));
|
||||
|
||||
auto matcher = std::make_unique<RE2>(pattern, RE2::Quiet);
|
||||
if (!matcher->ok())
|
||||
throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP,
|
||||
"Password complexity pattern {} cannot be compiled: {}",
|
||||
pattern, matcher->error());
|
||||
|
||||
rules.push_back({std::move(matcher), std::move(pattern), std::move(message)});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void setPasswordComplexityRules(const std::vector<std::pair<String, String>> & rules_)
|
||||
{
|
||||
Rules new_rules;
|
||||
|
||||
for (const auto & [original_pattern, exception_message] : rules_)
|
||||
{
|
||||
auto matcher = std::make_unique<RE2>(original_pattern, RE2::Quiet);
|
||||
if (!matcher->ok())
|
||||
throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP,
|
||||
"Password complexity pattern {} cannot be compiled: {}",
|
||||
original_pattern, matcher->error());
|
||||
|
||||
new_rules.push_back({std::move(matcher), original_pattern, exception_message});
|
||||
}
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
rules = std::move(new_rules);
|
||||
}
|
||||
|
||||
void checkPasswordComplexityRules(const String & password_) const
|
||||
{
|
||||
String exception_text;
|
||||
bool failed = false;
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
for (const auto & rule : rules)
|
||||
{
|
||||
if (!RE2::PartialMatch(password_, *rule.matcher))
|
||||
{
|
||||
failed = true;
|
||||
|
||||
if (!exception_text.empty())
|
||||
exception_text += ", ";
|
||||
|
||||
exception_text += rule.exception_message;
|
||||
}
|
||||
}
|
||||
|
||||
if (failed)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid password. The password should: {}", exception_text);
|
||||
}
|
||||
|
||||
std::vector<std::pair<String, String>> getPasswordComplexityRules()
|
||||
{
|
||||
std::vector<std::pair<String, String>> result;
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
result.reserve(rules.size());
|
||||
|
||||
for (const auto & rule : rules)
|
||||
result.push_back({rule.original_pattern, rule.exception_message});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
private:
|
||||
struct Rule
|
||||
{
|
||||
std::unique_ptr<RE2> matcher;
|
||||
String original_pattern;
|
||||
String exception_message;
|
||||
};
|
||||
|
||||
using Rules = std::vector<Rule>;
|
||||
|
||||
Rules rules TSA_GUARDED_BY(mutex);
|
||||
mutable std::mutex mutex;
|
||||
};
|
||||
|
||||
|
||||
AccessControl::AccessControl()
|
||||
: MultipleAccessStorage("user directories"),
|
||||
context_access_cache(std::make_unique<ContextAccessCache>(*this)),
|
||||
@ -149,7 +255,8 @@ AccessControl::AccessControl()
|
||||
settings_profiles_cache(std::make_unique<SettingsProfilesCache>(*this)),
|
||||
external_authenticators(std::make_unique<ExternalAuthenticators>()),
|
||||
custom_settings_prefixes(std::make_unique<CustomSettingsPrefixes>()),
|
||||
changes_notifier(std::make_unique<AccessChangesNotifier>())
|
||||
changes_notifier(std::make_unique<AccessChangesNotifier>()),
|
||||
password_rules(std::make_unique<PasswordComplexityRules>())
|
||||
{
|
||||
}
|
||||
|
||||
@ -166,6 +273,7 @@ void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration
|
||||
setImplicitNoPasswordAllowed(config_.getBool("allow_implicit_no_password", true));
|
||||
setNoPasswordAllowed(config_.getBool("allow_no_password", true));
|
||||
setPlaintextPasswordAllowed(config_.getBool("allow_plaintext_password", true));
|
||||
setPasswordComplexityRulesFromConfig(config_);
|
||||
|
||||
/// Optional improvements in access control system.
|
||||
/// The default values are false because we need to be compatible with earlier access configurations
|
||||
@ -543,6 +651,26 @@ bool AccessControl::isPlaintextPasswordAllowed() const
|
||||
return allow_plaintext_password;
|
||||
}
|
||||
|
||||
void AccessControl::setPasswordComplexityRulesFromConfig(const Poco::Util::AbstractConfiguration & config_)
|
||||
{
|
||||
password_rules->setPasswordComplexityRulesFromConfig(config_);
|
||||
}
|
||||
|
||||
void AccessControl::setPasswordComplexityRules(const std::vector<std::pair<String, String>> & rules_)
|
||||
{
|
||||
password_rules->setPasswordComplexityRules(rules_);
|
||||
}
|
||||
|
||||
void AccessControl::checkPasswordComplexityRules(const String & password_) const
|
||||
{
|
||||
password_rules->checkPasswordComplexityRules(password_);
|
||||
}
|
||||
|
||||
std::vector<std::pair<String, String>> AccessControl::getPasswordComplexityRules() const
|
||||
{
|
||||
return password_rules->getPasswordComplexityRules();
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<const ContextAccess> AccessControl::getContextAccess(
|
||||
const UUID & user_id,
|
||||
|
@ -147,6 +147,13 @@ public:
|
||||
void setPlaintextPasswordAllowed(const bool allow_plaintext_password_);
|
||||
bool isPlaintextPasswordAllowed() const;
|
||||
|
||||
/// Check complexity requirements for plaintext passwords
|
||||
|
||||
void setPasswordComplexityRulesFromConfig(const Poco::Util::AbstractConfiguration & config_);
|
||||
void setPasswordComplexityRules(const std::vector<std::pair<String, String>> & rules_);
|
||||
void checkPasswordComplexityRules(const String & password_) const;
|
||||
std::vector<std::pair<String, String>> getPasswordComplexityRules() const;
|
||||
|
||||
/// Enables logic that users without permissive row policies can still read rows using a SELECT query.
|
||||
/// For example, if there two users A, B and a row policy is defined only for A, then
|
||||
/// if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
|
||||
@ -212,6 +219,7 @@ public:
|
||||
private:
|
||||
class ContextAccessCache;
|
||||
class CustomSettingsPrefixes;
|
||||
class PasswordComplexityRules;
|
||||
|
||||
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
|
||||
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
|
||||
@ -225,6 +233,7 @@ private:
|
||||
std::unique_ptr<ExternalAuthenticators> external_authenticators;
|
||||
std::unique_ptr<CustomSettingsPrefixes> custom_settings_prefixes;
|
||||
std::unique_ptr<AccessChangesNotifier> changes_notifier;
|
||||
std::unique_ptr<PasswordComplexityRules> password_rules;
|
||||
std::atomic_bool allow_plaintext_password = true;
|
||||
std::atomic_bool allow_no_password = true;
|
||||
std::atomic_bool allow_implicit_no_password = true;
|
||||
|
@ -29,6 +29,7 @@ public:
|
||||
virtual UInt64 getFileSize(const String & file_name) = 0;
|
||||
virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0;
|
||||
virtual std::unique_ptr<WriteBuffer> writeFile(const String & file_name) = 0;
|
||||
virtual void removeFile(const String & file_name) = 0;
|
||||
virtual void removeFiles(const Strings & file_names) = 0;
|
||||
virtual DataSourceDescription getDataSourceDescription() const = 0;
|
||||
virtual void copyFileThroughBuffer(std::unique_ptr<SeekableReadBuffer> && source, const String & file_name);
|
||||
|
@ -75,6 +75,13 @@ std::unique_ptr<WriteBuffer> BackupWriterDisk::writeFile(const String & file_nam
|
||||
return disk->writeFile(file_path);
|
||||
}
|
||||
|
||||
void BackupWriterDisk::removeFile(const String & file_name)
|
||||
{
|
||||
disk->removeFileIfExists(path / file_name);
|
||||
if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
|
||||
disk->removeDirectory(path);
|
||||
}
|
||||
|
||||
void BackupWriterDisk::removeFiles(const Strings & file_names)
|
||||
{
|
||||
for (const auto & file_name : file_names)
|
||||
|
@ -34,6 +34,7 @@ public:
|
||||
UInt64 getFileSize(const String & file_name) override;
|
||||
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
||||
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
||||
void removeFile(const String & file_name) override;
|
||||
void removeFiles(const Strings & file_names) override;
|
||||
DataSourceDescription getDataSourceDescription() const override;
|
||||
|
||||
|
@ -72,6 +72,13 @@ std::unique_ptr<WriteBuffer> BackupWriterFile::writeFile(const String & file_nam
|
||||
return std::make_unique<WriteBufferFromFile>(file_path);
|
||||
}
|
||||
|
||||
void BackupWriterFile::removeFile(const String & file_name)
|
||||
{
|
||||
fs::remove(path / file_name);
|
||||
if (fs::is_directory(path) && fs::is_empty(path))
|
||||
fs::remove(path);
|
||||
}
|
||||
|
||||
void BackupWriterFile::removeFiles(const Strings & file_names)
|
||||
{
|
||||
for (const auto & file_name : file_names)
|
||||
|
@ -31,6 +31,7 @@ public:
|
||||
UInt64 getFileSize(const String & file_name) override;
|
||||
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
||||
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
||||
void removeFile(const String & file_name) override;
|
||||
void removeFiles(const Strings & file_names) override;
|
||||
DataSourceDescription getDataSourceDescription() const override;
|
||||
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
|
||||
|
@ -372,7 +372,48 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
|
||||
threadPoolCallbackRunner<void>(IOThreadPool::get(), "BackupWriterS3"));
|
||||
}
|
||||
|
||||
void BackupWriterS3::removeFile(const String & file_name)
|
||||
{
|
||||
Aws::S3::Model::DeleteObjectRequest request;
|
||||
request.SetBucket(s3_uri.bucket);
|
||||
request.SetKey(fs::path(s3_uri.key) / file_name);
|
||||
auto outcome = client->DeleteObject(request);
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
}
|
||||
|
||||
void BackupWriterS3::removeFiles(const Strings & file_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!supports_batch_delete.has_value() || supports_batch_delete.value() == true)
|
||||
{
|
||||
removeFilesBatch(file_names);
|
||||
supports_batch_delete = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (const auto & file_name : file_names)
|
||||
removeFile(file_name);
|
||||
}
|
||||
}
|
||||
catch (const Exception &)
|
||||
{
|
||||
if (!supports_batch_delete.has_value())
|
||||
{
|
||||
supports_batch_delete = false;
|
||||
LOG_TRACE(log, "DeleteObjects is not supported. Retrying with plain DeleteObject.");
|
||||
|
||||
for (const auto & file_name : file_names)
|
||||
removeFile(file_name);
|
||||
}
|
||||
else
|
||||
throw;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void BackupWriterS3::removeFilesBatch(const Strings & file_names)
|
||||
{
|
||||
/// One call of DeleteObjects() cannot remove more than 1000 keys.
|
||||
size_t chunk_size_limit = 1000;
|
||||
|
@ -54,6 +54,7 @@ public:
|
||||
UInt64 getFileSize(const String & file_name) override;
|
||||
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
||||
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
||||
void removeFile(const String & file_name) override;
|
||||
void removeFiles(const Strings & file_names) override;
|
||||
|
||||
DataSourceDescription getDataSourceDescription() const override;
|
||||
@ -79,11 +80,14 @@ private:
|
||||
const Aws::S3::Model::HeadObjectResult & head,
|
||||
const std::optional<ObjectAttributes> & metadata = std::nullopt) const;
|
||||
|
||||
void removeFilesBatch(const Strings & file_names);
|
||||
|
||||
S3::URI s3_uri;
|
||||
std::shared_ptr<Aws::S3::S3Client> client;
|
||||
ReadSettings read_settings;
|
||||
S3Settings::RequestSettings request_settings;
|
||||
Poco::Logger * log;
|
||||
std::optional<bool> supports_batch_delete;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -506,7 +506,7 @@ void BackupImpl::removeLockFile()
|
||||
return; /// Internal backup must not remove the lock file (it's still used by the initiator).
|
||||
|
||||
if (checkLockFile(false))
|
||||
writer->removeFiles({lock_file_name});
|
||||
writer->removeFile(lock_file_name);
|
||||
}
|
||||
|
||||
Strings BackupImpl::listFiles(const String & directory, bool recursive) const
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <Core/Block.h>
|
||||
#include <Core/Protocol.h>
|
||||
#include <Formats/FormatFactory.h>
|
||||
#include <Access/AccessControl.h>
|
||||
|
||||
#include "config_version.h"
|
||||
|
||||
@ -43,6 +44,7 @@
|
||||
#include <Parsers/ASTInsertQuery.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
#include <Parsers/Access/ASTCreateUserQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
#include <Parsers/ASTUseQuery.h>
|
||||
@ -1562,6 +1564,15 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
|
||||
updateLoggerLevel(logs_level_field->safeGet<String>());
|
||||
}
|
||||
|
||||
if (const auto * create_user_query = parsed_query->as<ASTCreateUserQuery>())
|
||||
{
|
||||
if (!create_user_query->attach && create_user_query->temporary_password_for_checks)
|
||||
{
|
||||
global_context->getAccessControl().checkPasswordComplexityRules(create_user_query->temporary_password_for_checks.value());
|
||||
create_user_query->temporary_password_for_checks.reset();
|
||||
}
|
||||
}
|
||||
|
||||
processed_rows = 0;
|
||||
written_first_block = false;
|
||||
progress_indication.resetProgress();
|
||||
|
@ -309,6 +309,21 @@ void Connection::receiveHello()
|
||||
readVarUInt(server_version_patch, *in);
|
||||
else
|
||||
server_version_patch = server_revision;
|
||||
|
||||
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES)
|
||||
{
|
||||
UInt64 rules_size;
|
||||
readVarUInt(rules_size, *in);
|
||||
password_complexity_rules.reserve(rules_size);
|
||||
|
||||
for (size_t i = 0; i < rules_size; ++i)
|
||||
{
|
||||
String original_pattern, exception_message;
|
||||
readStringBinary(original_pattern, *in);
|
||||
readStringBinary(exception_message, *in);
|
||||
password_complexity_rules.push_back({std::move(original_pattern), std::move(exception_message)});
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (packet_type == Protocol::Server::Exception)
|
||||
receiveException()->rethrow();
|
||||
|
@ -93,6 +93,8 @@ public:
|
||||
|
||||
Protocol::Compression getCompression() const { return compression; }
|
||||
|
||||
std::vector<std::pair<String, String>> getPasswordComplexityRules() const override { return password_complexity_rules; }
|
||||
|
||||
void sendQuery(
|
||||
const ConnectionTimeouts & timeouts,
|
||||
const String & query,
|
||||
@ -207,6 +209,8 @@ private:
|
||||
*/
|
||||
ThrottlerPtr throttler;
|
||||
|
||||
std::vector<std::pair<String, String>> password_complexity_rules;
|
||||
|
||||
/// From where to read query execution result.
|
||||
std::shared_ptr<ReadBuffer> maybe_compressed_in;
|
||||
std::unique_ptr<NativeReader> block_in;
|
||||
|
@ -82,6 +82,8 @@ public:
|
||||
|
||||
virtual const String & getDescription() const = 0;
|
||||
|
||||
virtual std::vector<std::pair<String, String>> getPasswordComplexityRules() const = 0;
|
||||
|
||||
/// If last flag is true, you need to call sendExternalTablesData after.
|
||||
virtual void sendQuery(
|
||||
const ConnectionTimeouts & timeouts,
|
||||
|
@ -91,6 +91,8 @@ public:
|
||||
|
||||
const String & getDescription() const override { return description; }
|
||||
|
||||
std::vector<std::pair<String, String>> getPasswordComplexityRules() const override { return {}; }
|
||||
|
||||
void sendQuery(
|
||||
const ConnectionTimeouts & timeouts,
|
||||
const String & query,
|
||||
|
@ -1204,11 +1204,6 @@ public:
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename DateOrTime>
|
||||
inline DateTimeComponents toDateTimeComponents(DateOrTime v) const
|
||||
{
|
||||
return toDateTimeComponents(lut[toLUTIndex(v)].date);
|
||||
}
|
||||
|
||||
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
|
||||
{
|
||||
|
@ -22,17 +22,29 @@ struct StringKey24
|
||||
inline StringRef ALWAYS_INLINE toStringRef(const StringKey8 & n)
|
||||
{
|
||||
assert(n != 0);
|
||||
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
return {reinterpret_cast<const char *>(&n), 8ul - (std::countr_zero(n) >> 3)};
|
||||
#else
|
||||
return {reinterpret_cast<const char *>(&n), 8ul - (std::countl_zero(n) >> 3)};
|
||||
#endif
|
||||
}
|
||||
inline StringRef ALWAYS_INLINE toStringRef(const StringKey16 & n)
|
||||
{
|
||||
assert(n.items[1] != 0);
|
||||
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
return {reinterpret_cast<const char *>(&n), 16ul - (std::countr_zero(n.items[1]) >> 3)};
|
||||
#else
|
||||
return {reinterpret_cast<const char *>(&n), 16ul - (std::countl_zero(n.items[1]) >> 3)};
|
||||
#endif
|
||||
}
|
||||
inline StringRef ALWAYS_INLINE toStringRef(const StringKey24 & n)
|
||||
{
|
||||
assert(n.c != 0);
|
||||
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
return {reinterpret_cast<const char *>(&n), 24ul - (std::countr_zero(n.c) >> 3)};
|
||||
#else
|
||||
return {reinterpret_cast<const char *>(&n), 24ul - (std::countl_zero(n.c) >> 3)};
|
||||
#endif
|
||||
}
|
||||
|
||||
struct StringHashTableHash
|
||||
@ -238,7 +250,6 @@ public:
|
||||
// 2. Use switch case extension to generate fast dispatching table
|
||||
// 3. Funcs are named callables that can be force_inlined
|
||||
//
|
||||
// NOTE: It relies on Little Endianness
|
||||
//
|
||||
// NOTE: It requires padded to 8 bytes keys (IOW you cannot pass
|
||||
// std::string here, but you can pass i.e. ColumnString::getDataAt()),
|
||||
@ -280,13 +291,19 @@ public:
|
||||
if ((reinterpret_cast<uintptr_t>(p) & 2048) == 0)
|
||||
{
|
||||
memcpy(&n[0], p, 8);
|
||||
n[0] &= -1ULL >> s;
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
n[0] &= -1ULL >> s;
|
||||
else
|
||||
n[0] &= -1ULL << s;
|
||||
}
|
||||
else
|
||||
{
|
||||
const char * lp = x.data + x.size - 8;
|
||||
memcpy(&n[0], lp, 8);
|
||||
n[0] >>= s;
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
n[0] >>= s;
|
||||
else
|
||||
n[0] <<= s;
|
||||
}
|
||||
keyHolderDiscardKey(key_holder);
|
||||
return func(self.m1, k8, hash(k8));
|
||||
@ -296,7 +313,10 @@ public:
|
||||
memcpy(&n[0], p, 8);
|
||||
const char * lp = x.data + x.size - 8;
|
||||
memcpy(&n[1], lp, 8);
|
||||
n[1] >>= s;
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
n[1] >>= s;
|
||||
else
|
||||
n[1] <<= s;
|
||||
keyHolderDiscardKey(key_holder);
|
||||
return func(self.m2, k16, hash(k16));
|
||||
}
|
||||
@ -305,7 +325,10 @@ public:
|
||||
memcpy(&n[0], p, 16);
|
||||
const char * lp = x.data + x.size - 8;
|
||||
memcpy(&n[2], lp, 8);
|
||||
n[2] >>= s;
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
n[2] >>= s;
|
||||
else
|
||||
n[2] <<= s;
|
||||
keyHolderDiscardKey(key_holder);
|
||||
return func(self.m3, k24, hash(k24));
|
||||
}
|
||||
|
@ -437,7 +437,7 @@ public:
|
||||
this->reserveForNextSize(std::forward<TAllocatorParams>(allocator_params)...);
|
||||
|
||||
new (t_end()) T(std::forward<U>(x));
|
||||
this->c_end += this->byte_size(1);
|
||||
this->c_end += sizeof(T);
|
||||
}
|
||||
|
||||
/** This method doesn't allow to pass parameters for Allocator,
|
||||
@ -450,12 +450,12 @@ public:
|
||||
this->reserveForNextSize();
|
||||
|
||||
new (t_end()) T(std::forward<Args>(args)...);
|
||||
this->c_end += this->byte_size(1);
|
||||
this->c_end += sizeof(T);
|
||||
}
|
||||
|
||||
void pop_back() /// NOLINT
|
||||
{
|
||||
this->c_end -= this->byte_size(1);
|
||||
this->c_end -= sizeof(T);
|
||||
}
|
||||
|
||||
/// Do not insert into the array a piece of itself. Because with the resize, the iterators on themselves can be invalidated.
|
||||
|
@ -156,6 +156,20 @@ inline bool isValidIdentifier(std::string_view str)
|
||||
&& !(str.size() == strlen("null") && 0 == strncasecmp(str.data(), "null", strlen("null")));
|
||||
}
|
||||
|
||||
|
||||
inline bool isNumberSeparator(bool is_start_of_block, bool is_hex, const char * pos, const char * end)
|
||||
{
|
||||
if (*pos != '_')
|
||||
return false;
|
||||
if (is_start_of_block && *pos == '_')
|
||||
return false; // e.g. _123, 12e_3
|
||||
if (pos + 1 < end && !(is_hex ? isHexDigit(pos[1]) : isNumericASCII(pos[1])))
|
||||
return false; // e.g. 1__2, 1_., 1_e, 1_p, 1_;
|
||||
if (pos + 1 == end)
|
||||
return false; // e.g. 12_
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Works assuming isAlphaASCII.
|
||||
inline char toLowerIfAlphaASCII(char c)
|
||||
{
|
||||
|
@ -52,7 +52,7 @@
|
||||
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION,
|
||||
/// later is just a number for server version (one number instead of commit SHA)
|
||||
/// for simplicity (sometimes it may be more convenient in some use cases).
|
||||
#define DBMS_TCP_PROTOCOL_VERSION 54460
|
||||
#define DBMS_TCP_PROTOCOL_VERSION 54461
|
||||
|
||||
#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
|
||||
|
||||
@ -68,3 +68,5 @@
|
||||
|
||||
/// The server will send query elapsed run time in the Progress packet.
|
||||
#define DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS 54460
|
||||
|
||||
#define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461
|
||||
|
@ -456,7 +456,8 @@ void buildConfigurationFromFunctionWithKeyValueArguments(
|
||||
/// It's not possible to have a function in a dictionary definition since 22.10,
|
||||
/// because query must be normalized on dictionary creation. It's possible only when we load old metadata.
|
||||
/// For debug builds allow it only during server startup to avoid crash in BC check in Stress Tests.
|
||||
assert(!Context::getGlobalContextInstance()->isServerCompletelyStarted());
|
||||
assert(Context::getGlobalContextInstance()->getApplicationType() != Context::ApplicationType::SERVER
|
||||
|| !Context::getGlobalContextInstance()->isServerCompletelyStarted());
|
||||
auto builder = FunctionFactory::instance().tryGet(func->name, context);
|
||||
auto function = builder->build({});
|
||||
function->prepare({});
|
||||
|
@ -1343,30 +1343,6 @@ struct ToYYYYMMDDhhmmssImpl
|
||||
using FactorTransform = ZeroTransform;
|
||||
};
|
||||
|
||||
struct ToDateTimeComponentsImpl
|
||||
{
|
||||
static constexpr auto name = "toDateTimeComponents";
|
||||
|
||||
static inline DateLUTImpl::DateTimeComponents execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDateTimeComponents(t);
|
||||
}
|
||||
static inline DateLUTImpl::DateTimeComponents execute(UInt32 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t));
|
||||
}
|
||||
static inline DateLUTImpl::DateTimeComponents execute(Int32 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDateTimeComponents(ExtendedDayNum(d));
|
||||
}
|
||||
static inline DateLUTImpl::DateTimeComponents execute(UInt16 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDateTimeComponents(DayNum(d));
|
||||
}
|
||||
|
||||
using FactorTransform = ZeroTransform;
|
||||
};
|
||||
|
||||
|
||||
template <typename FromType, typename ToType, typename Transform, bool is_extended_result = false>
|
||||
struct Transformer
|
||||
|
@ -1,17 +0,0 @@
|
||||
#include <Functions/FunctionsDecimalArithmetics.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
REGISTER_FUNCTION(DivideDecimals)
|
||||
{
|
||||
factory.registerFunction<FunctionsDecimalArithmetics<DivideDecimalsImpl>>(Documentation(
|
||||
"Decimal division with given precision. Slower than simple `divide`, but has controlled precision and no sound overflows"));
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(MultiplyDecimals)
|
||||
{
|
||||
factory.registerFunction<FunctionsDecimalArithmetics<MultiplyDecimalsImpl>>(Documentation(
|
||||
"Decimal multiplication with given precision. Slower than simple `divide`, but has controlled precision and no sound overflows"));
|
||||
}
|
||||
}
|
@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
#include <Core/AccurateComparison.h>
|
||||
|
||||
@ -23,7 +24,6 @@ namespace ErrorCodes
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_DIVISION;
|
||||
}
|
||||
|
||||
|
||||
@ -140,91 +140,6 @@ struct DecimalOpHelpers
|
||||
};
|
||||
|
||||
|
||||
struct DivideDecimalsImpl
|
||||
{
|
||||
static constexpr auto name = "divideDecimal";
|
||||
|
||||
template <typename FirstType, typename SecondType>
|
||||
static inline Decimal256
|
||||
execute(FirstType a, SecondType b, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale)
|
||||
{
|
||||
if (b.value == 0)
|
||||
throw DB::Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);
|
||||
if (a.value == 0)
|
||||
return Decimal256(0);
|
||||
|
||||
Int256 sign_a = a.value < 0 ? -1 : 1;
|
||||
Int256 sign_b = b.value < 0 ? -1 : 1;
|
||||
|
||||
std::vector<UInt8> a_digits = DecimalOpHelpers::toDigits(a.value * sign_a);
|
||||
|
||||
while (scale_a < scale_b + result_scale)
|
||||
{
|
||||
a_digits.push_back(0);
|
||||
++scale_a;
|
||||
}
|
||||
|
||||
while (scale_a > scale_b + result_scale && !a_digits.empty())
|
||||
{
|
||||
a_digits.pop_back();
|
||||
--scale_a;
|
||||
}
|
||||
|
||||
if (a_digits.empty())
|
||||
return Decimal256(0);
|
||||
|
||||
std::vector<UInt8> divided = DecimalOpHelpers::divide(a_digits, b.value * sign_b);
|
||||
|
||||
if (divided.size() > DecimalUtils::max_precision<Decimal256>)
|
||||
throw DB::Exception("Numeric overflow: result bigger that Decimal256", ErrorCodes::DECIMAL_OVERFLOW);
|
||||
return Decimal256(sign_a * sign_b * DecimalOpHelpers::fromDigits(divided));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct MultiplyDecimalsImpl
|
||||
{
|
||||
static constexpr auto name = "multiplyDecimal";
|
||||
|
||||
template <typename FirstType, typename SecondType>
|
||||
static inline Decimal256
|
||||
execute(FirstType a, SecondType b, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale)
|
||||
{
|
||||
if (a.value == 0 || b.value == 0)
|
||||
return Decimal256(0);
|
||||
|
||||
Int256 sign_a = a.value < 0 ? -1 : 1;
|
||||
Int256 sign_b = b.value < 0 ? -1 : 1;
|
||||
|
||||
std::vector<UInt8> a_digits = DecimalOpHelpers::toDigits(a.value * sign_a);
|
||||
std::vector<UInt8> b_digits = DecimalOpHelpers::toDigits(b.value * sign_b);
|
||||
|
||||
std::vector<UInt8> multiplied = DecimalOpHelpers::multiply(a_digits, b_digits);
|
||||
|
||||
UInt16 product_scale = scale_a + scale_b;
|
||||
while (product_scale < result_scale)
|
||||
{
|
||||
multiplied.push_back(0);
|
||||
++product_scale;
|
||||
}
|
||||
|
||||
while (product_scale > result_scale&& !multiplied.empty())
|
||||
{
|
||||
multiplied.pop_back();
|
||||
--product_scale;
|
||||
}
|
||||
|
||||
if (multiplied.empty())
|
||||
return Decimal256(0);
|
||||
|
||||
if (multiplied.size() > DecimalUtils::max_precision<Decimal256>)
|
||||
throw DB::Exception("Numeric overflow: result bigger that Decimal256", ErrorCodes::DECIMAL_OVERFLOW);
|
||||
|
||||
return Decimal256(sign_a * sign_b * DecimalOpHelpers::fromDigits(multiplied));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template <typename ResultType, typename Transform>
|
||||
struct Processor
|
||||
{
|
||||
@ -388,11 +303,12 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
//long resolver to call proper templated func
|
||||
// long resolver to call proper templated func
|
||||
ColumnPtr resolveOverload(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const
|
||||
{
|
||||
WhichDataType which_dividend(arguments[0].type.get());
|
||||
WhichDataType which_divisor(arguments[1].type.get());
|
||||
|
||||
if (which_dividend.isDecimal32())
|
||||
{
|
||||
using DividendType = DataTypeDecimal32;
|
||||
@ -454,4 +370,3 @@ private:
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
@ -48,10 +48,6 @@ public:
|
||||
: scale_multiplier(DecimalUtils::scaleMultiplier<DateTime64::NativeType>(scale_))
|
||||
{}
|
||||
|
||||
TransformDateTime64(DateTime64::NativeType scale_multiplier_ = 1) /// NOLINT(google-explicit-constructor)
|
||||
: scale_multiplier(scale_multiplier_)
|
||||
{}
|
||||
|
||||
template <typename ... Args>
|
||||
inline auto NO_SANITIZE_UNDEFINED execute(const DateTime64 & t, Args && ... args) const
|
||||
{
|
||||
@ -131,8 +127,6 @@ public:
|
||||
return wrapped_transform.executeExtendedResult(t, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
DateTime64::NativeType getScaleMultiplier() const { return scale_multiplier; }
|
||||
|
||||
private:
|
||||
DateTime64::NativeType scale_multiplier = 1;
|
||||
Transform wrapped_transform = {};
|
||||
|
178
src/Functions/concatWithSeparator.cpp
Normal file
178
src/Functions/concatWithSeparator.cpp
Normal file
@ -0,0 +1,178 @@
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnFixedString.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <base/map.h>
|
||||
#include <base/range.h>
|
||||
|
||||
#include "formatString.h"
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
template <typename Name, bool is_injective>
|
||||
class ConcatWithSeparatorImpl : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = Name::name;
|
||||
explicit ConcatWithSeparatorImpl(ContextPtr context_) : context(context_) {}
|
||||
|
||||
static FunctionPtr create(ContextPtr context) { return std::make_shared<ConcatWithSeparatorImpl>(context); }
|
||||
|
||||
String getName() const override { return name; }
|
||||
|
||||
bool isVariadic() const override { return true; }
|
||||
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
|
||||
bool isInjective(const ColumnsWithTypeAndName &) const override { return is_injective; }
|
||||
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
{
|
||||
if (arguments.empty())
|
||||
throw Exception(
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Number of arguments for function {} doesn't match: passed {}, should be at least 1",
|
||||
getName(),
|
||||
arguments.size());
|
||||
|
||||
for (const auto arg_idx : collections::range(0, arguments.size()))
|
||||
{
|
||||
const auto * arg = arguments[arg_idx].get();
|
||||
if (!isStringOrFixedString(arg))
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of argument {} of function {}",
|
||||
arg->getName(),
|
||||
arg_idx + 1,
|
||||
getName());
|
||||
}
|
||||
|
||||
return std::make_shared<DataTypeString>();
|
||||
}
|
||||
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
|
||||
{
|
||||
assert(!arguments.empty());
|
||||
if (arguments.size() == 1)
|
||||
return result_type->createColumnConstWithDefaultValue(input_rows_count);
|
||||
|
||||
auto c_res = ColumnString::create();
|
||||
c_res->reserve(input_rows_count);
|
||||
const ColumnConst * col_sep = checkAndGetColumnConstStringOrFixedString(arguments[0].column.get());
|
||||
if (!col_sep)
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of first argument of function {}. Must be a constant String.",
|
||||
arguments[0].column->getName(),
|
||||
getName());
|
||||
String sep_str = col_sep->getValue<String>();
|
||||
|
||||
const size_t num_exprs = arguments.size() - 1;
|
||||
const size_t num_args = 2 * num_exprs - 1;
|
||||
|
||||
std::vector<const ColumnString::Chars *> data(num_args);
|
||||
std::vector<const ColumnString::Offsets *> offsets(num_args);
|
||||
std::vector<size_t> fixed_string_sizes(num_args);
|
||||
std::vector<std::optional<String>> constant_strings(num_args);
|
||||
|
||||
bool has_column_string = false;
|
||||
bool has_column_fixed_string = false;
|
||||
|
||||
for (size_t i = 0; i < num_exprs; ++i)
|
||||
{
|
||||
if (i != 0)
|
||||
constant_strings[2 * i - 1] = sep_str;
|
||||
|
||||
const ColumnPtr & column = arguments[i + 1].column;
|
||||
if (const ColumnString * col = checkAndGetColumn<ColumnString>(column.get()))
|
||||
{
|
||||
has_column_string = true;
|
||||
data[2 * i] = &col->getChars();
|
||||
offsets[2 * i] = &col->getOffsets();
|
||||
}
|
||||
else if (const ColumnFixedString * fixed_col = checkAndGetColumn<ColumnFixedString>(column.get()))
|
||||
{
|
||||
has_column_fixed_string = true;
|
||||
data[2 * i] = &fixed_col->getChars();
|
||||
fixed_string_sizes[2 * i] = fixed_col->getN();
|
||||
}
|
||||
else if (const ColumnConst * const_col = checkAndGetColumnConstStringOrFixedString(column.get()))
|
||||
constant_strings[2 * i] = const_col->getValue<String>();
|
||||
else
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of argument of function {}", column->getName(), getName());
|
||||
}
|
||||
|
||||
String pattern;
|
||||
pattern.reserve(num_args * 2);
|
||||
for (size_t i = 0; i < num_args; ++i)
|
||||
pattern += "{}";
|
||||
|
||||
FormatImpl::formatExecute(
|
||||
has_column_string,
|
||||
has_column_fixed_string,
|
||||
std::move(pattern),
|
||||
data,
|
||||
offsets,
|
||||
fixed_string_sizes,
|
||||
constant_strings,
|
||||
c_res->getChars(),
|
||||
c_res->getOffsets(),
|
||||
input_rows_count);
|
||||
return std::move(c_res);
|
||||
}
|
||||
|
||||
private:
|
||||
ContextWeakPtr context;
|
||||
};
|
||||
|
||||
struct NameConcatWithSeparator
|
||||
{
|
||||
static constexpr auto name = "concatWithSeparator";
|
||||
};
|
||||
struct NameConcatWithSeparatorAssumeInjective
|
||||
{
|
||||
static constexpr auto name = "concatWithSeparatorAssumeInjective";
|
||||
};
|
||||
|
||||
using FunctionConcatWithSeparator = ConcatWithSeparatorImpl<NameConcatWithSeparator, false>;
|
||||
using FunctionConcatWithSeparatorAssumeInjective = ConcatWithSeparatorImpl<NameConcatWithSeparatorAssumeInjective, true>;
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(ConcatWithSeparator)
|
||||
{
|
||||
factory.registerFunction<FunctionConcatWithSeparator>({
|
||||
R"(
|
||||
Returns the concatenation strings separated by string separator. Syntax: concatWithSeparator(sep, expr1, expr2, expr3...)
|
||||
)",
|
||||
Documentation::Examples{{"concatWithSeparator", "SELECT concatWithSeparator('a', '1', '2', '3')"}},
|
||||
Documentation::Categories{"String"}});
|
||||
|
||||
factory.registerFunction<FunctionConcatWithSeparatorAssumeInjective>({
|
||||
R"(
|
||||
Same as concatWithSeparator, the difference is that you need to ensure that concatWithSeparator(sep, expr1, expr2, expr3...) → result is injective, it will be used for optimization of GROUP BY.
|
||||
|
||||
The function is named “injective” if it always returns different result for different values of arguments. In other words: different arguments never yield identical result.
|
||||
)",
|
||||
Documentation::Examples{{"concatWithSeparatorAssumeInjective", "SELECT concatWithSeparatorAssumeInjective('a', '1', '2', '3')"}},
|
||||
Documentation::Categories{"String"}});
|
||||
|
||||
/// Compatibility with Spark:
|
||||
factory.registerAlias("concat_ws", "concatWithSeparator", FunctionFactory::CaseInsensitive);
|
||||
}
|
||||
|
||||
}
|
@ -1,7 +1,6 @@
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeDateTime64.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Common/IntervalKind.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsDateTime.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
@ -35,7 +34,6 @@ namespace ErrorCodes
|
||||
namespace
|
||||
{
|
||||
|
||||
template <bool is_diff>
|
||||
class DateDiffImpl
|
||||
{
|
||||
public:
|
||||
@ -167,92 +165,8 @@ public:
|
||||
template <typename TransformX, typename TransformY, typename T1, typename T2>
|
||||
Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const
|
||||
{
|
||||
if constexpr (is_diff)
|
||||
return static_cast<Int64>(transform_y.execute(y, timezone_y))
|
||||
return static_cast<Int64>(transform_y.execute(y, timezone_y))
|
||||
- static_cast<Int64>(transform_x.execute(x, timezone_x));
|
||||
else
|
||||
{
|
||||
auto res = static_cast<Int64>(transform_y.execute(y, timezone_y))
|
||||
- static_cast<Int64>(transform_x.execute(x, timezone_x));
|
||||
DateLUTImpl::DateTimeComponents a_comp;
|
||||
DateLUTImpl::DateTimeComponents b_comp;
|
||||
Int64 adjust_value;
|
||||
auto x_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
|
||||
auto y_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
|
||||
if (x_seconds <= y_seconds)
|
||||
{
|
||||
a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
|
||||
b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
|
||||
adjust_value = -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
|
||||
b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
|
||||
adjust_value = 1;
|
||||
}
|
||||
|
||||
if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeYearNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
if ((a_comp.date.month > b_comp.date.month)
|
||||
|| ((a_comp.date.month == b_comp.date.month) && ((a_comp.date.day > b_comp.date.day)
|
||||
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|
||||
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
|
||||
)))))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
auto x_month_in_quarter = (a_comp.date.month - 1) % 3;
|
||||
auto y_month_in_quarter = (b_comp.date.month - 1) % 3;
|
||||
if ((x_month_in_quarter > y_month_in_quarter)
|
||||
|| ((x_month_in_quarter == y_month_in_quarter) && ((a_comp.date.day > b_comp.date.day)
|
||||
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|
||||
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
|
||||
)))))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMonthNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
if ((a_comp.date.day > b_comp.date.day)
|
||||
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|
||||
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
|
||||
)))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeWeekNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
auto x_day_of_week = TransformDateTime64<ToDayOfWeekImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
|
||||
auto y_day_of_week = TransformDateTime64<ToDayOfWeekImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
|
||||
if ((x_day_of_week > y_day_of_week)
|
||||
|| ((x_day_of_week == y_day_of_week) && (a_comp.time.hour > b_comp.time.hour))
|
||||
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeDayNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
if ((a_comp.time.hour > b_comp.time.hour)
|
||||
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeHourNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
if ((a_comp.time.minute > b_comp.time.minute)
|
||||
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))
|
||||
res += adjust_value;
|
||||
}
|
||||
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>>)
|
||||
{
|
||||
if (a_comp.time.second > b_comp.time.second)
|
||||
res += adjust_value;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
@ -279,8 +193,7 @@ private:
|
||||
|
||||
|
||||
/** dateDiff('unit', t1, t2, [timezone])
|
||||
* age('unit', t1, t2, [timezone])
|
||||
* t1 and t2 can be Date, Date32, DateTime or DateTime64
|
||||
* t1 and t2 can be Date or DateTime
|
||||
*
|
||||
* If timezone is specified, it applied to both arguments.
|
||||
* If not, timezones from datatypes t1 and t2 are used.
|
||||
@ -288,11 +201,10 @@ private:
|
||||
*
|
||||
* Timezone matters because days can have different length.
|
||||
*/
|
||||
template <bool is_relative>
|
||||
class FunctionDateDiff : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = is_relative ? "dateDiff" : "age";
|
||||
static constexpr auto name = "dateDiff";
|
||||
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionDateDiff>(); }
|
||||
|
||||
String getName() const override
|
||||
@ -358,21 +270,21 @@ public:
|
||||
const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2);
|
||||
|
||||
if (unit == "year" || unit == "yy" || unit == "yyyy")
|
||||
impl.template dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "quarter" || unit == "qq" || unit == "q")
|
||||
impl.template dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "month" || unit == "mm" || unit == "m")
|
||||
impl.template dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "week" || unit == "wk" || unit == "ww")
|
||||
impl.template dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "day" || unit == "dd" || unit == "d")
|
||||
impl.template dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "hour" || unit == "hh" || unit == "h")
|
||||
impl.template dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "minute" || unit == "mi" || unit == "n")
|
||||
impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else if (unit == "second" || unit == "ss" || unit == "s")
|
||||
impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
impl.dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Function {} does not support '{}' unit", getName(), unit);
|
||||
@ -380,7 +292,7 @@ public:
|
||||
return res;
|
||||
}
|
||||
private:
|
||||
DateDiffImpl<is_relative> impl{name};
|
||||
DateDiffImpl impl{name};
|
||||
};
|
||||
|
||||
|
||||
@ -440,14 +352,14 @@ public:
|
||||
return res;
|
||||
}
|
||||
private:
|
||||
DateDiffImpl<true> impl{name};
|
||||
DateDiffImpl impl{name};
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(DateDiff)
|
||||
{
|
||||
factory.registerFunction<FunctionDateDiff<true>>({}, FunctionFactory::CaseInsensitive);
|
||||
factory.registerFunction<FunctionDateDiff>({}, FunctionFactory::CaseInsensitive);
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(TimeDiff)
|
||||
@ -464,9 +376,4 @@ Example:
|
||||
Documentation::Categories{"Dates and Times"}}, FunctionFactory::CaseInsensitive);
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(Age)
|
||||
{
|
||||
factory.registerFunction<FunctionDateDiff<false>>({}, FunctionFactory::CaseInsensitive);
|
||||
}
|
||||
|
||||
}
|
||||
|
126
src/Functions/divideDecimal.cpp
Normal file
126
src/Functions/divideDecimal.cpp
Normal file
@ -0,0 +1,126 @@
|
||||
#include <Functions/FunctionsDecimalArithmetics.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int DECIMAL_OVERFLOW;
|
||||
extern const int ILLEGAL_DIVISION;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct DivideDecimalsImpl
|
||||
{
|
||||
static constexpr auto name = "divideDecimal";
|
||||
|
||||
template <typename FirstType, typename SecondType>
|
||||
static inline Decimal256
|
||||
execute(FirstType a, SecondType b, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale)
|
||||
{
|
||||
if (b.value == 0)
|
||||
throw DB::Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);
|
||||
if (a.value == 0)
|
||||
return Decimal256(0);
|
||||
|
||||
Int256 sign_a = a.value < 0 ? -1 : 1;
|
||||
Int256 sign_b = b.value < 0 ? -1 : 1;
|
||||
|
||||
std::vector<UInt8> a_digits = DecimalOpHelpers::toDigits(a.value * sign_a);
|
||||
|
||||
while (scale_a < scale_b + result_scale)
|
||||
{
|
||||
a_digits.push_back(0);
|
||||
++scale_a;
|
||||
}
|
||||
|
||||
while (scale_a > scale_b + result_scale && !a_digits.empty())
|
||||
{
|
||||
a_digits.pop_back();
|
||||
--scale_a;
|
||||
}
|
||||
|
||||
if (a_digits.empty())
|
||||
return Decimal256(0);
|
||||
|
||||
std::vector<UInt8> divided = DecimalOpHelpers::divide(a_digits, b.value * sign_b);
|
||||
|
||||
if (divided.size() > DecimalUtils::max_precision<Decimal256>)
|
||||
throw DB::Exception("Numeric overflow: result bigger that Decimal256", ErrorCodes::DECIMAL_OVERFLOW);
|
||||
return Decimal256(sign_a * sign_b * DecimalOpHelpers::fromDigits(divided));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(DivideDecimals)
|
||||
{
|
||||
factory.registerFunction<FunctionsDecimalArithmetics<DivideDecimalsImpl>>(Documentation(
|
||||
R"(
|
||||
Performs division on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||
Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
|
||||
|
||||
:::note
|
||||
These function work significantly slower than usual `divide`.
|
||||
In case you don't really need controlled precision and/or need fast computation, consider using [divide](#divide).
|
||||
:::
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
divideDecimal(a, b[, result_scale])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The result of division with given scale.
|
||||
|
||||
Type: [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Example**
|
||||
|
||||
```text
|
||||
┌─divideDecimal(toDecimal256(-12, 0), toDecimal32(2.1, 1), 10)─┐
|
||||
│ -5.7142857142 │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Difference from regular division:**
|
||||
```sql
|
||||
SELECT toDecimal64(-12, 1) / toDecimal32(2.1, 1);
|
||||
SELECT toDecimal64(-12, 1) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
|
||||
```
|
||||
|
||||
```text
|
||||
┌─divide(toDecimal64(-12, 1), toDecimal32(2.1, 1))─┐
|
||||
│ -5.7 │
|
||||
└──────────────────────────────────────────────────┘
|
||||
┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 5)─┐
|
||||
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
|
||||
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT toDecimal64(-12, 0) / toDecimal32(2.1, 1);
|
||||
SELECT toDecimal64(-12, 0) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
|
||||
```
|
||||
|
||||
```text
|
||||
DB::Exception: Decimal result's scale is less than argument's one: While processing toDecimal64(-12, 0) / toDecimal32(2.1, 1). (ARGUMENT_OUT_OF_BOUND)
|
||||
┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 5)─┐
|
||||
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
|
||||
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
)"));
|
||||
}
|
||||
|
||||
}
|
134
src/Functions/multiplyDecimal.cpp
Normal file
134
src/Functions/multiplyDecimal.cpp
Normal file
@ -0,0 +1,134 @@
|
||||
#include <Functions/FunctionsDecimalArithmetics.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int DECIMAL_OVERFLOW;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct MultiplyDecimalsImpl
|
||||
{
|
||||
static constexpr auto name = "multiplyDecimal";
|
||||
|
||||
template <typename FirstType, typename SecondType>
|
||||
static inline Decimal256
|
||||
execute(FirstType a, SecondType b, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale)
|
||||
{
|
||||
if (a.value == 0 || b.value == 0)
|
||||
return Decimal256(0);
|
||||
|
||||
Int256 sign_a = a.value < 0 ? -1 : 1;
|
||||
Int256 sign_b = b.value < 0 ? -1 : 1;
|
||||
|
||||
std::vector<UInt8> a_digits = DecimalOpHelpers::toDigits(a.value * sign_a);
|
||||
std::vector<UInt8> b_digits = DecimalOpHelpers::toDigits(b.value * sign_b);
|
||||
|
||||
std::vector<UInt8> multiplied = DecimalOpHelpers::multiply(a_digits, b_digits);
|
||||
|
||||
UInt16 product_scale = scale_a + scale_b;
|
||||
while (product_scale < result_scale)
|
||||
{
|
||||
multiplied.push_back(0);
|
||||
++product_scale;
|
||||
}
|
||||
|
||||
while (product_scale > result_scale&& !multiplied.empty())
|
||||
{
|
||||
multiplied.pop_back();
|
||||
--product_scale;
|
||||
}
|
||||
|
||||
if (multiplied.empty())
|
||||
return Decimal256(0);
|
||||
|
||||
if (multiplied.size() > DecimalUtils::max_precision<Decimal256>)
|
||||
throw DB::Exception("Numeric overflow: result bigger that Decimal256", ErrorCodes::DECIMAL_OVERFLOW);
|
||||
|
||||
return Decimal256(sign_a * sign_b * DecimalOpHelpers::fromDigits(multiplied));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(MultiplyDecimals)
|
||||
{
|
||||
factory.registerFunction<FunctionsDecimalArithmetics<MultiplyDecimalsImpl>>(Documentation(
|
||||
R"(
|
||||
Performs multiplication on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||
Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
|
||||
|
||||
:::note
|
||||
These functions work significantly slower than usual `multiply`.
|
||||
In case you don't really need controlled precision and/or need fast computation, consider using [multiply](#multiply)
|
||||
:::
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
multiplyDecimal(a, b[, result_scale])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The result of multiplication with given scale.
|
||||
|
||||
Type: [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Example**
|
||||
|
||||
```text
|
||||
┌─multiplyDecimal(toDecimal256(-12, 0), toDecimal32(-2.1, 1), 1)─┐
|
||||
│ 25.2 │
|
||||
└────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Difference from regular multiplication:**
|
||||
```sql
|
||||
SELECT toDecimal64(-12.647, 3) * toDecimal32(2.1239, 4);
|
||||
SELECT toDecimal64(-12.647, 3) as a, toDecimal32(2.1239, 4) as b, multiplyDecimal(a, b);
|
||||
```
|
||||
|
||||
```text
|
||||
┌─multiply(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
|
||||
│ -26.8609633 │
|
||||
└───────────────────────────────────────────────────────────┘
|
||||
┌─multiplyDecimal(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
|
||||
│ -26.8609 │
|
||||
└──────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toDecimal64(-12.647987876, 9) AS a,
|
||||
toDecimal64(123.967645643, 9) AS b,
|
||||
multiplyDecimal(a, b);
|
||||
SELECT
|
||||
toDecimal64(-12.647987876, 9) AS a,
|
||||
toDecimal64(123.967645643, 9) AS b,
|
||||
a * b;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─────────────a─┬─────────────b─┬─multiplyDecimal(toDecimal64(-12.647987876, 9), toDecimal64(123.967645643, 9))─┐
|
||||
│ -12.647987876 │ 123.967645643 │ -1567.941279108 │
|
||||
└───────────────┴───────────────┴───────────────────────────────────────────────────────────────────────────────┘
|
||||
Received exception from server (version 22.11.1):
|
||||
Code: 407. DB::Exception: Received from localhost:9000. DB::Exception: Decimal math overflow: While processing toDecimal64(-12.647987876, 9) AS a, toDecimal64(123.967645643, 9) AS b, a * b. (DECIMAL_OVERFLOW)
|
||||
```
|
||||
)"));
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -250,7 +250,7 @@ size_t ReadBufferFromS3::getFileSize()
|
||||
if (file_size)
|
||||
return *file_size;
|
||||
|
||||
auto object_size = S3::getObjectSize(client_ptr, bucket, key, version_id, true, read_settings.for_object_storage);
|
||||
auto object_size = S3::getObjectSize(*client_ptr, bucket, key, version_id, true, read_settings.for_object_storage);
|
||||
|
||||
file_size = object_size;
|
||||
return *file_size;
|
||||
|
@ -852,7 +852,7 @@ namespace S3
|
||||
}
|
||||
|
||||
|
||||
S3::ObjectInfo getObjectInfo(std::shared_ptr<const Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3)
|
||||
S3::ObjectInfo getObjectInfo(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::S3HeadObject);
|
||||
if (for_disk_s3)
|
||||
@ -865,7 +865,7 @@ namespace S3
|
||||
if (!version_id.empty())
|
||||
req.SetVersionId(version_id);
|
||||
|
||||
Aws::S3::Model::HeadObjectOutcome outcome = client_ptr->HeadObject(req);
|
||||
Aws::S3::Model::HeadObjectOutcome outcome = client.HeadObject(req);
|
||||
|
||||
if (outcome.IsSuccess())
|
||||
{
|
||||
@ -879,9 +879,9 @@ namespace S3
|
||||
return {};
|
||||
}
|
||||
|
||||
size_t getObjectSize(std::shared_ptr<const Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3)
|
||||
size_t getObjectSize(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3)
|
||||
{
|
||||
return getObjectInfo(client_ptr, bucket, key, version_id, throw_on_error, for_disk_s3).size;
|
||||
return getObjectInfo(client, bucket, key, version_id, throw_on_error, for_disk_s3).size;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -130,9 +130,9 @@ struct ObjectInfo
|
||||
time_t last_modification_time = 0;
|
||||
};
|
||||
|
||||
S3::ObjectInfo getObjectInfo(std::shared_ptr<const Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3);
|
||||
S3::ObjectInfo getObjectInfo(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3);
|
||||
|
||||
size_t getObjectSize(std::shared_ptr<const Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3);
|
||||
size_t getObjectSize(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
@ -108,6 +108,12 @@ BlockIO InterpreterCreateUserQuery::execute()
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Authentication type NO_PASSWORD must be explicitly specified, check the setting allow_implicit_no_password in the server configuration");
|
||||
|
||||
if (!query.attach && query.temporary_password_for_checks)
|
||||
{
|
||||
access_control.checkPasswordComplexityRules(query.temporary_password_for_checks.value());
|
||||
query.temporary_password_for_checks.reset();
|
||||
}
|
||||
|
||||
std::optional<RolesOrUsersSet> default_roles_from_query;
|
||||
if (query.default_roles)
|
||||
{
|
||||
|
@ -46,6 +46,8 @@ public:
|
||||
|
||||
std::optional<AuthenticationData> auth_data;
|
||||
|
||||
mutable std::optional<String> temporary_password_for_checks;
|
||||
|
||||
std::optional<AllowedClientHosts> hosts;
|
||||
std::optional<AllowedClientHosts> add_hosts;
|
||||
std::optional<AllowedClientHosts> remove_hosts;
|
||||
|
@ -51,7 +51,7 @@ namespace
|
||||
}
|
||||
|
||||
|
||||
bool parseAuthenticationData(IParserBase::Pos & pos, Expected & expected, AuthenticationData & auth_data)
|
||||
bool parseAuthenticationData(IParserBase::Pos & pos, Expected & expected, AuthenticationData & auth_data, std::optional<String> & temporary_password_for_checks)
|
||||
{
|
||||
return IParserBase::wrapParseImpl(pos, [&]
|
||||
{
|
||||
@ -165,6 +165,10 @@ namespace
|
||||
common_names.insert(ast_child->as<const ASTLiteral &>().value.safeGet<String>());
|
||||
}
|
||||
|
||||
/// Save password separately for future complexity rules check
|
||||
if (expect_password)
|
||||
temporary_password_for_checks = value;
|
||||
|
||||
auth_data = AuthenticationData{*type};
|
||||
if (auth_data.getType() == AuthenticationType::SHA256_PASSWORD)
|
||||
{
|
||||
@ -438,6 +442,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
|
||||
|
||||
std::optional<String> new_name;
|
||||
std::optional<AuthenticationData> auth_data;
|
||||
std::optional<String> temporary_password_for_checks;
|
||||
std::optional<AllowedClientHosts> hosts;
|
||||
std::optional<AllowedClientHosts> add_hosts;
|
||||
std::optional<AllowedClientHosts> remove_hosts;
|
||||
@ -452,9 +457,11 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
|
||||
if (!auth_data)
|
||||
{
|
||||
AuthenticationData new_auth_data;
|
||||
if (parseAuthenticationData(pos, expected, new_auth_data))
|
||||
std::optional<String> new_temporary_password_for_checks;
|
||||
if (parseAuthenticationData(pos, expected, new_auth_data, new_temporary_password_for_checks))
|
||||
{
|
||||
auth_data = std::move(new_auth_data);
|
||||
temporary_password_for_checks = std::move(new_temporary_password_for_checks);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -539,6 +546,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
|
||||
query->names = std::move(names);
|
||||
query->new_name = std::move(new_name);
|
||||
query->auth_data = std::move(auth_data);
|
||||
query->temporary_password_for_checks = std::move(temporary_password_for_checks);
|
||||
query->hosts = std::move(hosts);
|
||||
query->add_hosts = std::move(add_hosts);
|
||||
query->remove_hosts = std::move(remove_hosts);
|
||||
|
@ -830,21 +830,65 @@ bool ParserNumber::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
if (!pos.isValid())
|
||||
return false;
|
||||
|
||||
/** Maximum length of number. 319 symbols is enough to write maximum double in decimal form.
|
||||
* Copy is needed to use strto* functions, which require 0-terminated string.
|
||||
*/
|
||||
static constexpr size_t MAX_LENGTH_OF_NUMBER = 319;
|
||||
auto try_read_float = [&](const char * it, const char * end)
|
||||
{
|
||||
char * str_end;
|
||||
errno = 0; /// Functions strto* don't clear errno.
|
||||
Float64 float_value = std::strtod(it, &str_end);
|
||||
if (str_end == end && errno != ERANGE)
|
||||
{
|
||||
if (float_value < 0)
|
||||
throw Exception("Logical error: token number cannot begin with minus, but parsed float number is less than zero.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (pos->size() > MAX_LENGTH_OF_NUMBER)
|
||||
if (negative)
|
||||
float_value = -float_value;
|
||||
|
||||
res = float_value;
|
||||
|
||||
auto literal = std::make_shared<ASTLiteral>(res);
|
||||
literal->begin = literal_begin;
|
||||
literal->end = ++pos;
|
||||
node = literal;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
expected.add(pos, "number");
|
||||
return false;
|
||||
};
|
||||
|
||||
/// NaN and Inf
|
||||
if (pos->type == TokenType::BareWord)
|
||||
{
|
||||
return try_read_float(pos->begin, pos->end);
|
||||
}
|
||||
|
||||
if (pos->type != TokenType::Number)
|
||||
{
|
||||
expected.add(pos, "number");
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Maximum length of number. 319 symbols is enough to write maximum double in decimal form.
|
||||
* Copy is needed to use strto* functions, which require 0-terminated string.
|
||||
*/
|
||||
static constexpr size_t MAX_LENGTH_OF_NUMBER = 319;
|
||||
|
||||
char buf[MAX_LENGTH_OF_NUMBER + 1];
|
||||
|
||||
size_t size = pos->size();
|
||||
memcpy(buf, pos->begin, size);
|
||||
size_t buf_size = 0;
|
||||
for (const auto * it = pos->begin; it != pos->end; ++it)
|
||||
{
|
||||
if (*it != '_')
|
||||
buf[buf_size++] = *it;
|
||||
if (unlikely(buf_size > MAX_LENGTH_OF_NUMBER))
|
||||
{
|
||||
expected.add(pos, "number");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
size_t size = buf_size;
|
||||
buf[size] = 0;
|
||||
char * start_pos = buf;
|
||||
|
||||
@ -915,29 +959,7 @@ bool ParserNumber::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
return true;
|
||||
}
|
||||
|
||||
char * pos_double = buf;
|
||||
errno = 0; /// Functions strto* don't clear errno.
|
||||
Float64 float_value = std::strtod(buf, &pos_double);
|
||||
if (pos_double == buf + pos->size() && errno != ERANGE)
|
||||
{
|
||||
if (float_value < 0)
|
||||
throw Exception("Logical error: token number cannot begin with minus, but parsed float number is less than zero.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (negative)
|
||||
float_value = -float_value;
|
||||
|
||||
res = float_value;
|
||||
|
||||
auto literal = std::make_shared<ASTLiteral>(res);
|
||||
literal->begin = literal_begin;
|
||||
literal->end = ++pos;
|
||||
node = literal;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
expected.add(pos, "number");
|
||||
return false;
|
||||
return try_read_float(buf, buf + buf_size);
|
||||
}
|
||||
|
||||
|
||||
|
@ -105,44 +105,71 @@ Token Lexer::nextTokenImpl()
|
||||
if (prev_significant_token_type == TokenType::Dot)
|
||||
{
|
||||
++pos;
|
||||
while (pos < end && isNumericASCII(*pos))
|
||||
while (pos < end && (isNumericASCII(*pos) || isNumberSeparator(false, false, pos, end)))
|
||||
++pos;
|
||||
}
|
||||
else
|
||||
{
|
||||
bool start_of_block = false;
|
||||
/// 0x, 0b
|
||||
bool hex = false;
|
||||
if (pos + 2 < end && *pos == '0' && (pos[1] == 'x' || pos[1] == 'b' || pos[1] == 'X' || pos[1] == 'B'))
|
||||
{
|
||||
bool is_valid = false;
|
||||
if (pos[1] == 'x' || pos[1] == 'X')
|
||||
hex = true;
|
||||
pos += 2;
|
||||
{
|
||||
if (isHexDigit(pos[2]))
|
||||
{
|
||||
hex = true;
|
||||
is_valid = true; // hex
|
||||
}
|
||||
}
|
||||
else if (pos[2] == '0' || pos[2] == '1')
|
||||
is_valid = true; // bin
|
||||
if (is_valid)
|
||||
{
|
||||
pos += 2;
|
||||
start_of_block = true;
|
||||
}
|
||||
else
|
||||
++pos; // consume the leading zero - could be an identifier
|
||||
}
|
||||
else
|
||||
++pos;
|
||||
|
||||
while (pos < end && (hex ? isHexDigit(*pos) : isNumericASCII(*pos)))
|
||||
while (pos < end && ((hex ? isHexDigit(*pos) : isNumericASCII(*pos)) || isNumberSeparator(start_of_block, hex, pos, end)))
|
||||
{
|
||||
++pos;
|
||||
start_of_block = false;
|
||||
}
|
||||
|
||||
/// decimal point
|
||||
if (pos < end && *pos == '.')
|
||||
{
|
||||
start_of_block = true;
|
||||
++pos;
|
||||
while (pos < end && (hex ? isHexDigit(*pos) : isNumericASCII(*pos)))
|
||||
while (pos < end && ((hex ? isHexDigit(*pos) : isNumericASCII(*pos)) || isNumberSeparator(start_of_block, hex, pos, end)))
|
||||
{
|
||||
++pos;
|
||||
start_of_block = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// exponentiation (base 10 or base 2)
|
||||
if (pos + 1 < end && (hex ? (*pos == 'p' || *pos == 'P') : (*pos == 'e' || *pos == 'E')))
|
||||
{
|
||||
start_of_block = true;
|
||||
++pos;
|
||||
|
||||
/// sign of exponent. It is always decimal.
|
||||
if (pos + 1 < end && (*pos == '-' || *pos == '+'))
|
||||
++pos;
|
||||
|
||||
while (pos < end && isNumericASCII(*pos))
|
||||
while (pos < end && (isNumericASCII(*pos) || isNumberSeparator(start_of_block, false, pos, end)))
|
||||
{
|
||||
++pos;
|
||||
start_of_block = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -201,21 +228,29 @@ Token Lexer::nextTokenImpl()
|
||||
|| prev_significant_token_type == TokenType::Number))
|
||||
return Token(TokenType::Dot, token_begin, ++pos);
|
||||
|
||||
bool start_of_block = true;
|
||||
++pos;
|
||||
while (pos < end && isNumericASCII(*pos))
|
||||
while (pos < end && (isNumericASCII(*pos) || isNumberSeparator(start_of_block, false, pos, end)))
|
||||
{
|
||||
++pos;
|
||||
start_of_block = false;
|
||||
}
|
||||
|
||||
/// exponentiation
|
||||
if (pos + 1 < end && (*pos == 'e' || *pos == 'E'))
|
||||
{
|
||||
start_of_block = true;
|
||||
++pos;
|
||||
|
||||
/// sign of exponent
|
||||
if (pos + 1 < end && (*pos == '-' || *pos == '+'))
|
||||
++pos;
|
||||
|
||||
while (pos < end && isNumericASCII(*pos))
|
||||
while (pos < end && (isNumericASCII(*pos) || isNumberSeparator(start_of_block, false, pos, end)))
|
||||
{
|
||||
++pos;
|
||||
start_of_block = false;
|
||||
}
|
||||
}
|
||||
|
||||
return Token(TokenType::Number, token_begin, pos);
|
||||
|
@ -3,7 +3,7 @@
|
||||
#include <Core/Names.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Columns/IColumn.h>
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
#include <QueryPipeline/QueryPlanResourceHolder.h>
|
||||
|
||||
#include <list>
|
||||
#include <memory>
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
#include <QueryPipeline/QueryPlanResourceHolder.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -1,7 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
#include <QueryPipeline/QueryPlanResourceHolder.h>
|
||||
#include <QueryPipeline/Chain.h>
|
||||
#include <QueryPipeline/SizeLimits.h>
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
#pragma once
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
#include <QueryPipeline/QueryPlanResourceHolder.h>
|
||||
#include <QueryPipeline/SizeLimits.h>
|
||||
#include <QueryPipeline/StreamLocalLimits.h>
|
||||
#include <functional>
|
||||
|
@ -1,4 +1,4 @@
|
||||
#include <QueryPipeline/PipelineResourcesHolder.h>
|
||||
#include <QueryPipeline/QueryPlanResourceHolder.h>
|
||||
#include <Processors/QueryPlan/QueryPlan.h>
|
||||
#include <Processors/QueryPlan/QueryIdHolder.h>
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>
|
||||
#include <Storages/StorageS3Cluster.h>
|
||||
#include <Core/ExternalTable.h>
|
||||
#include <Access/AccessControl.h>
|
||||
#include <Access/Credentials.h>
|
||||
#include <Storages/ColumnDefault.h>
|
||||
#include <DataTypes/DataTypeLowCardinality.h>
|
||||
@ -1193,6 +1194,17 @@ void TCPHandler::sendHello()
|
||||
writeStringBinary(server_display_name, *out);
|
||||
if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSION_PATCH)
|
||||
writeVarUInt(DBMS_VERSION_PATCH, *out);
|
||||
if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES)
|
||||
{
|
||||
auto rules = server.context()->getAccessControl().getPasswordComplexityRules();
|
||||
|
||||
writeVarUInt(rules.size(), *out);
|
||||
for (const auto & [original_pattern, exception_message] : rules)
|
||||
{
|
||||
writeStringBinary(original_pattern, *out);
|
||||
writeStringBinary(exception_message, *out);
|
||||
}
|
||||
}
|
||||
out->next();
|
||||
}
|
||||
|
||||
|
@ -7,11 +7,6 @@
|
||||
#include <fstream>
|
||||
#include <mutex>
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
class Logger;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
class ReadBufferFromFileLog : public ReadBuffer
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include <DataTypes/DataTypeLowCardinality.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Disks/StoragePolicy.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
@ -17,11 +18,11 @@
|
||||
#include <Storages/StorageFactory.h>
|
||||
#include <Storages/StorageMaterializedView.h>
|
||||
#include <Storages/checkAndGetLiteralArgument.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
#include <sys/stat.h>
|
||||
|
||||
@ -37,7 +38,6 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_READ_ALL_DATA;
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int TABLE_METADATA_ALREADY_EXISTS;
|
||||
extern const int DIRECTORY_DOESNT_EXIST;
|
||||
extern const int CANNOT_SELECT;
|
||||
extern const int QUERY_NOT_ALLOWED;
|
||||
}
|
||||
@ -64,6 +64,7 @@ StorageFileLog::StorageFileLog(
|
||||
, metadata_base_path(std::filesystem::path(metadata_base_path_) / "metadata")
|
||||
, format_name(format_name_)
|
||||
, log(&Poco::Logger::get("StorageFileLog (" + table_id_.table_name + ")"))
|
||||
, disk(getContext()->getStoragePolicy("default")->getDisks().at(0))
|
||||
, milliseconds_to_wait(filelog_settings->poll_directory_watch_events_backoff_init.totalMilliseconds())
|
||||
{
|
||||
StorageInMemoryMetadata storage_metadata;
|
||||
@ -75,21 +76,14 @@ StorageFileLog::StorageFileLog(
|
||||
{
|
||||
if (!attach)
|
||||
{
|
||||
std::error_code ec;
|
||||
std::filesystem::create_directories(metadata_base_path, ec);
|
||||
|
||||
if (ec)
|
||||
if (disk->exists(metadata_base_path))
|
||||
{
|
||||
if (ec == std::make_error_code(std::errc::file_exists))
|
||||
{
|
||||
throw Exception(ErrorCodes::TABLE_METADATA_ALREADY_EXISTS,
|
||||
"Metadata files already exist by path: {}, remove them manually if it is intended",
|
||||
metadata_base_path);
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::DIRECTORY_DOESNT_EXIST,
|
||||
"Could not create directory {}, reason: {}", metadata_base_path, ec.message());
|
||||
throw Exception(
|
||||
ErrorCodes::TABLE_METADATA_ALREADY_EXISTS,
|
||||
"Metadata files already exist by path: {}, remove them manually if it is intended",
|
||||
metadata_base_path);
|
||||
}
|
||||
disk->createDirectories(metadata_base_path);
|
||||
}
|
||||
|
||||
loadMetaFiles(attach);
|
||||
@ -117,19 +111,8 @@ void StorageFileLog::loadMetaFiles(bool attach)
|
||||
/// Attach table
|
||||
if (attach)
|
||||
{
|
||||
const auto & storage = getStorageID();
|
||||
|
||||
auto metadata_path_exist = std::filesystem::exists(metadata_base_path);
|
||||
auto previous_path = std::filesystem::path(getContext()->getPath()) / ".filelog_storage_metadata" / storage.getDatabaseName() / storage.getTableName();
|
||||
|
||||
/// For compatibility with the previous path version.
|
||||
if (std::filesystem::exists(previous_path) && !metadata_path_exist)
|
||||
{
|
||||
std::filesystem::copy(previous_path, metadata_base_path, std::filesystem::copy_options::recursive);
|
||||
std::filesystem::remove_all(previous_path);
|
||||
}
|
||||
/// Meta file may lost, log and create directory
|
||||
else if (!metadata_path_exist)
|
||||
if (!disk->exists(metadata_base_path))
|
||||
{
|
||||
/// Create metadata_base_path directory when store meta data
|
||||
LOG_ERROR(log, "Metadata files of table {} are lost.", getStorageID().getTableName());
|
||||
@ -189,7 +172,7 @@ void StorageFileLog::loadFiles()
|
||||
/// data file have been renamed, need update meta file's name
|
||||
if (it->second.file_name != file)
|
||||
{
|
||||
std::filesystem::rename(getFullMetaPath(it->second.file_name), getFullMetaPath(file));
|
||||
disk->replaceFile(getFullMetaPath(it->second.file_name), getFullMetaPath(file));
|
||||
it->second.file_name = file;
|
||||
}
|
||||
}
|
||||
@ -217,7 +200,7 @@ void StorageFileLog::loadFiles()
|
||||
valid_metas.emplace(inode, meta);
|
||||
/// Delete meta file from filesystem
|
||||
else
|
||||
std::filesystem::remove(getFullMetaPath(meta.file_name));
|
||||
disk->removeFileIfExists(getFullMetaPath(meta.file_name));
|
||||
}
|
||||
file_infos.meta_by_inode.swap(valid_metas);
|
||||
}
|
||||
@ -228,70 +211,71 @@ void StorageFileLog::serialize() const
|
||||
for (const auto & [inode, meta] : file_infos.meta_by_inode)
|
||||
{
|
||||
auto full_name = getFullMetaPath(meta.file_name);
|
||||
if (!std::filesystem::exists(full_name))
|
||||
if (!disk->exists(full_name))
|
||||
{
|
||||
FS::createFile(full_name);
|
||||
disk->createFile(full_name);
|
||||
}
|
||||
else
|
||||
{
|
||||
checkOffsetIsValid(full_name, meta.last_writen_position);
|
||||
}
|
||||
WriteBufferFromFile out(full_name);
|
||||
writeIntText(inode, out);
|
||||
writeChar('\n', out);
|
||||
writeIntText(meta.last_writen_position, out);
|
||||
auto out = disk->writeFile(full_name);
|
||||
writeIntText(inode, *out);
|
||||
writeChar('\n', *out);
|
||||
writeIntText(meta.last_writen_position, *out);
|
||||
}
|
||||
}
|
||||
|
||||
void StorageFileLog::serialize(UInt64 inode, const FileMeta & file_meta) const
|
||||
{
|
||||
auto full_name = getFullMetaPath(file_meta.file_name);
|
||||
if (!std::filesystem::exists(full_name))
|
||||
if (!disk->exists(full_name))
|
||||
{
|
||||
FS::createFile(full_name);
|
||||
disk->createFile(full_name);
|
||||
}
|
||||
else
|
||||
{
|
||||
checkOffsetIsValid(full_name, file_meta.last_writen_position);
|
||||
}
|
||||
WriteBufferFromFile out(full_name);
|
||||
writeIntText(inode, out);
|
||||
writeChar('\n', out);
|
||||
writeIntText(file_meta.last_writen_position, out);
|
||||
auto out = disk->writeFile(full_name);
|
||||
writeIntText(inode, *out);
|
||||
writeChar('\n', *out);
|
||||
writeIntText(file_meta.last_writen_position, *out);
|
||||
}
|
||||
|
||||
void StorageFileLog::deserialize()
|
||||
{
|
||||
if (!std::filesystem::exists(metadata_base_path))
|
||||
if (!disk->exists(metadata_base_path))
|
||||
return;
|
||||
/// In case of single file (not a watched directory),
|
||||
/// iterated directory always has one file inside.
|
||||
for (const auto & dir_entry : std::filesystem::directory_iterator{metadata_base_path})
|
||||
for (const auto dir_iter = disk->iterateDirectory(metadata_base_path); dir_iter->isValid(); dir_iter->next())
|
||||
{
|
||||
if (!dir_entry.is_regular_file())
|
||||
auto full_name = getFullMetaPath(dir_iter->name());
|
||||
if (!disk->isFile(full_name))
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_FILE_TYPE,
|
||||
"The file {} under {} is not a regular file when deserializing meta files",
|
||||
dir_entry.path().c_str(),
|
||||
dir_iter->name(),
|
||||
metadata_base_path);
|
||||
}
|
||||
|
||||
ReadBufferFromFile in(dir_entry.path().c_str());
|
||||
auto in = disk->readFile(full_name);
|
||||
FileMeta meta;
|
||||
UInt64 inode, last_written_pos;
|
||||
|
||||
if (!tryReadIntText(inode, in))
|
||||
if (!tryReadIntText(inode, *in))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", dir_entry.path().c_str());
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", dir_iter->path());
|
||||
}
|
||||
assertChar('\n', in);
|
||||
if (!tryReadIntText(last_written_pos, in))
|
||||
assertChar('\n', *in);
|
||||
if (!tryReadIntText(last_written_pos, *in))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", dir_entry.path().c_str());
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", dir_iter->path());
|
||||
}
|
||||
|
||||
meta.file_name = dir_entry.path().filename();
|
||||
meta.file_name = dir_iter->name();
|
||||
meta.last_writen_position = last_written_pos;
|
||||
|
||||
file_infos.meta_by_inode.emplace(inode, meta);
|
||||
@ -506,17 +490,17 @@ void StorageFileLog::storeMetas(size_t start, size_t end)
|
||||
}
|
||||
}
|
||||
|
||||
void StorageFileLog::checkOffsetIsValid(const String & full_name, UInt64 offset)
|
||||
void StorageFileLog::checkOffsetIsValid(const String & full_name, UInt64 offset) const
|
||||
{
|
||||
ReadBufferFromFile in(full_name);
|
||||
auto in = disk->readFile(full_name);
|
||||
UInt64 _, last_written_pos;
|
||||
|
||||
if (!tryReadIntText(_, in))
|
||||
if (!tryReadIntText(_, *in))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", full_name);
|
||||
}
|
||||
assertChar('\n', in);
|
||||
if (!tryReadIntText(last_written_pos, in))
|
||||
assertChar('\n', *in);
|
||||
if (!tryReadIntText(last_written_pos, *in))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Read meta file {} failed", full_name);
|
||||
}
|
||||
|
@ -1,5 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Disks/IDisk.h>
|
||||
|
||||
#include <Storages/FileLog/Buffer_fwd.h>
|
||||
#include <Storages/FileLog/FileLogDirectoryWatcher.h>
|
||||
#include <Storages/FileLog/FileLogSettings.h>
|
||||
@ -147,6 +149,8 @@ private:
|
||||
const String format_name;
|
||||
Poco::Logger * log;
|
||||
|
||||
DiskPtr disk;
|
||||
|
||||
uint64_t milliseconds_to_wait;
|
||||
|
||||
/// In order to avoid data race, using a naive trick to forbid execute two select
|
||||
@ -198,7 +202,7 @@ private:
|
||||
void serialize(UInt64 inode, const FileMeta & file_meta) const;
|
||||
|
||||
void deserialize();
|
||||
static void checkOffsetIsValid(const String & full_name, UInt64 offset);
|
||||
void checkOffsetIsValid(const String & full_name, UInt64 offset) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -21,16 +21,12 @@ limitations under the License. */
|
||||
namespace DB
|
||||
{
|
||||
|
||||
using Time = std::chrono::time_point<std::chrono::system_clock>;
|
||||
using Seconds = std::chrono::seconds;
|
||||
using MilliSeconds = std::chrono::milliseconds;
|
||||
|
||||
|
||||
struct BlocksMetadata
|
||||
{
|
||||
String hash;
|
||||
UInt64 version;
|
||||
Time time;
|
||||
std::chrono::time_point<std::chrono::system_clock> time;
|
||||
};
|
||||
|
||||
struct MergeableBlocks
|
||||
@ -54,6 +50,10 @@ friend class LiveViewSource;
|
||||
friend class LiveViewEventsSource;
|
||||
friend class LiveViewSink;
|
||||
|
||||
using Time = std::chrono::time_point<std::chrono::system_clock>;
|
||||
using Seconds = std::chrono::seconds;
|
||||
using MilliSeconds = std::chrono::milliseconds;
|
||||
|
||||
public:
|
||||
StorageLiveView(
|
||||
const StorageID & table_id_,
|
||||
|
@ -142,6 +142,9 @@ void ReplicatedMergeTreeAttachThread::runImpl()
|
||||
|
||||
checkHasReplicaMetadataInZooKeeper(zookeeper, replica_path);
|
||||
|
||||
/// Just in case it was not removed earlier due to connection loss
|
||||
zookeeper->tryRemove(replica_path + "/flags/force_restore_data");
|
||||
|
||||
String replica_metadata_version;
|
||||
const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version);
|
||||
if (replica_metadata_version_exists)
|
||||
|
@ -1193,7 +1193,7 @@ bool ReplicatedMergeTreeQueue::isCoveredByFuturePartsImpl(const LogEntry & entry
|
||||
const LogEntry & another_entry = *entry_for_same_part_it->second;
|
||||
out_reason = fmt::format(
|
||||
"Not executing log entry {} of type {} for part {} "
|
||||
"because another log entry {} of type {} for the same part ({}) is being processed. This shouldn't happen often.",
|
||||
"because another log entry {} of type {} for the same part ({}) is being processed.",
|
||||
entry.znode_name, entry.type, entry.new_part_name,
|
||||
another_entry.znode_name, another_entry.type, another_entry.new_part_name);
|
||||
LOG_INFO(log, fmt::runtime(out_reason));
|
||||
|
53
src/Storages/ReadFromStorageProgress.cpp
Normal file
53
src/Storages/ReadFromStorageProgress.cpp
Normal file
@ -0,0 +1,53 @@
|
||||
#include <Storages/ReadFromStorageProgress.h>
|
||||
#include <Processors/ISource.h>
|
||||
#include <QueryPipeline/StreamLocalLimits.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void updateRowsProgressApprox(
|
||||
ISource & source,
|
||||
const Chunk & chunk,
|
||||
UInt64 total_result_size,
|
||||
UInt64 & total_rows_approx_accumulated,
|
||||
size_t & total_rows_count_times,
|
||||
UInt64 & total_rows_approx_max)
|
||||
{
|
||||
if (!total_result_size)
|
||||
return;
|
||||
|
||||
const size_t num_rows = chunk.getNumRows();
|
||||
|
||||
if (!num_rows)
|
||||
return;
|
||||
|
||||
const auto progress = source.getReadProgress();
|
||||
if (progress && !progress->limits.empty())
|
||||
{
|
||||
for (const auto & limit : progress->limits)
|
||||
{
|
||||
if (limit.leaf_limits.max_rows || limit.leaf_limits.max_bytes
|
||||
|| limit.local_limits.size_limits.max_rows || limit.local_limits.size_limits.max_bytes)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const auto bytes_per_row = std::ceil(static_cast<double>(chunk.bytes()) / num_rows);
|
||||
size_t total_rows_approx = static_cast<size_t>(std::ceil(static_cast<double>(total_result_size) / bytes_per_row));
|
||||
total_rows_approx_accumulated += total_rows_approx;
|
||||
++total_rows_count_times;
|
||||
total_rows_approx = total_rows_approx_accumulated / total_rows_count_times;
|
||||
|
||||
/// We need to add diff, because total_rows_approx is incremental value.
|
||||
/// It would be more correct to send total_rows_approx as is (not a diff),
|
||||
/// but incrementation of total_rows_to_read does not allow that.
|
||||
/// A new counter can be introduced for that to be sent to client, but it does not worth it.
|
||||
if (total_rows_approx > total_rows_approx_max)
|
||||
{
|
||||
size_t diff = total_rows_approx - total_rows_approx_max;
|
||||
source.addTotalRowsApprox(diff);
|
||||
total_rows_approx_max = total_rows_approx;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
18
src/Storages/ReadFromStorageProgress.h
Normal file
18
src/Storages/ReadFromStorageProgress.h
Normal file
@ -0,0 +1,18 @@
|
||||
#pragma once
|
||||
#include <Core/Types.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ISource;
|
||||
class Chunk;
|
||||
|
||||
void updateRowsProgressApprox(
|
||||
ISource & source,
|
||||
const Chunk & chunk,
|
||||
UInt64 total_result_size,
|
||||
UInt64 & total_rows_approx_accumulated,
|
||||
size_t & total_rows_count_times,
|
||||
UInt64 & total_rows_approx_max);
|
||||
|
||||
}
|
@ -5,6 +5,7 @@
|
||||
#include <Storages/PartitionedSink.h>
|
||||
#include <Storages/Distributed/DirectoryMonitor.h>
|
||||
#include <Storages/checkAndGetLiteralArgument.h>
|
||||
#include <Storages/ReadFromStorageProgress.h>
|
||||
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
@ -592,22 +593,8 @@ public:
|
||||
|
||||
if (num_rows)
|
||||
{
|
||||
auto bytes_per_row = std::ceil(static_cast<double>(chunk.bytes()) / num_rows);
|
||||
size_t total_rows_approx = static_cast<size_t>(std::ceil(static_cast<double>(files_info->total_bytes_to_read) / bytes_per_row));
|
||||
total_rows_approx_accumulated += total_rows_approx;
|
||||
++total_rows_count_times;
|
||||
total_rows_approx = total_rows_approx_accumulated / total_rows_count_times;
|
||||
|
||||
/// We need to add diff, because total_rows_approx is incremental value.
|
||||
/// It would be more correct to send total_rows_approx as is (not a diff),
|
||||
/// but incrementation of total_rows_to_read does not allow that.
|
||||
/// A new field can be introduces for that to be sent to client, but it does not worth it.
|
||||
if (total_rows_approx > total_rows_approx_prev)
|
||||
{
|
||||
size_t diff = total_rows_approx - total_rows_approx_prev;
|
||||
addTotalRowsApprox(diff);
|
||||
total_rows_approx_prev = total_rows_approx;
|
||||
}
|
||||
updateRowsProgressApprox(
|
||||
*this, chunk, files_info->total_bytes_to_read, total_rows_approx_accumulated, total_rows_count_times, total_rows_approx_max);
|
||||
}
|
||||
return chunk;
|
||||
}
|
||||
@ -648,7 +635,7 @@ private:
|
||||
|
||||
UInt64 total_rows_approx_accumulated = 0;
|
||||
size_t total_rows_count_times = 0;
|
||||
UInt64 total_rows_approx_prev = 0;
|
||||
UInt64 total_rows_approx_max = 0;
|
||||
};
|
||||
|
||||
|
||||
|
@ -357,25 +357,37 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
|
||||
/// It does not make sense for CREATE query
|
||||
if (attach)
|
||||
{
|
||||
if (current_zookeeper && current_zookeeper->exists(replica_path + "/host"))
|
||||
try
|
||||
{
|
||||
/// Check it earlier if we can (we don't want incompatible version to start).
|
||||
/// If "/host" doesn't exist, then replica is probably dropped and there's nothing to check.
|
||||
ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(current_zookeeper, replica_path);
|
||||
if (current_zookeeper && current_zookeeper->exists(replica_path + "/host"))
|
||||
{
|
||||
/// Check it earlier if we can (we don't want incompatible version to start).
|
||||
/// If "/host" doesn't exist, then replica is probably dropped and there's nothing to check.
|
||||
ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(current_zookeeper, replica_path);
|
||||
}
|
||||
|
||||
if (current_zookeeper && current_zookeeper->exists(replica_path + "/flags/force_restore_data"))
|
||||
{
|
||||
skip_sanity_checks = true;
|
||||
current_zookeeper->remove(replica_path + "/flags/force_restore_data");
|
||||
|
||||
LOG_WARNING(
|
||||
log,
|
||||
"Skipping the limits on severity of changes to data parts and columns (flag {}/flags/force_restore_data).",
|
||||
replica_path);
|
||||
}
|
||||
else if (has_force_restore_data_flag)
|
||||
{
|
||||
skip_sanity_checks = true;
|
||||
|
||||
LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag force_restore_data).");
|
||||
}
|
||||
}
|
||||
|
||||
if (current_zookeeper && current_zookeeper->exists(replica_path + "/flags/force_restore_data"))
|
||||
catch (const Coordination::Exception & e)
|
||||
{
|
||||
skip_sanity_checks = true;
|
||||
current_zookeeper->remove(replica_path + "/flags/force_restore_data");
|
||||
|
||||
LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag {}/flags/force_restore_data).", replica_path);
|
||||
}
|
||||
else if (has_force_restore_data_flag)
|
||||
{
|
||||
skip_sanity_checks = true;
|
||||
|
||||
LOG_WARNING(log, "Skipping the limits on severity of changes to data parts and columns (flag force_restore_data).");
|
||||
if (!Coordination::isHardwareError(e.code))
|
||||
throw;
|
||||
LOG_ERROR(log, "Caught exception while checking table metadata in ZooKeeper, will recheck later: {}", e.displayText());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <Storages/getVirtualsForStorage.h>
|
||||
#include <Storages/checkAndGetLiteralArgument.h>
|
||||
#include <Storages/StorageURL.h>
|
||||
#include <Storages/ReadFromStorageProgress.h>
|
||||
|
||||
#include <IO/ReadBufferFromS3.h>
|
||||
#include <IO/WriteBufferFromS3.h>
|
||||
@ -153,6 +154,11 @@ public:
|
||||
return nextAssumeLocked();
|
||||
}
|
||||
|
||||
size_t getTotalSize() const
|
||||
{
|
||||
return total_size;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
String nextAssumeLocked()
|
||||
@ -198,19 +204,27 @@ private:
|
||||
if (block.has("_file"))
|
||||
file_column = block.getByName("_file").column->assumeMutable();
|
||||
|
||||
for (const auto & row : result_batch)
|
||||
std::unordered_map<String, S3::ObjectInfo> all_object_infos;
|
||||
for (const auto & key_info : result_batch)
|
||||
{
|
||||
const String & key = row.GetKey();
|
||||
const String & key = key_info.GetKey();
|
||||
if (recursive || re2::RE2::FullMatch(key, *matcher))
|
||||
{
|
||||
String path = fs::path(globbed_uri.bucket) / key;
|
||||
if (object_infos)
|
||||
(*object_infos)[path] = {.size = size_t(row.GetSize()), .last_modification_time = row.GetLastModified().Millis() / 1000};
|
||||
String file = path.substr(path.find_last_of('/') + 1);
|
||||
const size_t key_size = key_info.GetSize();
|
||||
|
||||
all_object_infos.emplace(path, S3::ObjectInfo{.size = key_size, .last_modification_time = key_info.GetLastModified().Millis() / 1000});
|
||||
|
||||
if (path_column)
|
||||
{
|
||||
path_column->insert(path);
|
||||
}
|
||||
if (file_column)
|
||||
{
|
||||
String file = path.substr(path.find_last_of('/') + 1);
|
||||
file_column->insert(file);
|
||||
}
|
||||
|
||||
key_column->insert(key);
|
||||
}
|
||||
}
|
||||
@ -220,16 +234,35 @@ private:
|
||||
size_t rows = block.rows();
|
||||
buffer.reserve(rows);
|
||||
for (size_t i = 0; i < rows; ++i)
|
||||
buffer.emplace_back(keys.getDataAt(i).toString());
|
||||
{
|
||||
auto key = keys.getDataAt(i).toString();
|
||||
std::string path = fs::path(globbed_uri.bucket) / key;
|
||||
|
||||
const auto & object_info = all_object_infos.at(path);
|
||||
total_size += object_info.size;
|
||||
if (object_infos)
|
||||
object_infos->emplace(path, object_info);
|
||||
|
||||
buffer.emplace_back(key);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
buffer.reserve(result_batch.size());
|
||||
for (const auto & row : result_batch)
|
||||
for (const auto & key_info : result_batch)
|
||||
{
|
||||
String key = row.GetKey();
|
||||
String key = key_info.GetKey();
|
||||
if (recursive || re2::RE2::FullMatch(key, *matcher))
|
||||
{
|
||||
const size_t key_size = key_info.GetSize();
|
||||
total_size += key_size;
|
||||
if (object_infos)
|
||||
{
|
||||
const std::string path = fs::path(globbed_uri.bucket) / key;
|
||||
(*object_infos)[path] = {.size = key_size, .last_modification_time = key_info.GetLastModified().Millis() / 1000};
|
||||
}
|
||||
buffer.emplace_back(std::move(key));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -261,6 +294,7 @@ private:
|
||||
std::unordered_map<String, S3::ObjectInfo> * object_infos;
|
||||
Strings * read_keys;
|
||||
S3Settings::RequestSettings request_settings;
|
||||
size_t total_size = 0;
|
||||
};
|
||||
|
||||
StorageS3Source::DisclosedGlobIterator::DisclosedGlobIterator(
|
||||
@ -281,12 +315,28 @@ String StorageS3Source::DisclosedGlobIterator::next()
|
||||
return pimpl->next();
|
||||
}
|
||||
|
||||
size_t StorageS3Source::DisclosedGlobIterator::getTotalSize() const
|
||||
{
|
||||
return pimpl->getTotalSize();
|
||||
}
|
||||
|
||||
class StorageS3Source::KeysIterator::Impl : WithContext
|
||||
{
|
||||
public:
|
||||
explicit Impl(
|
||||
const std::vector<String> & keys_, const String & bucket_, ASTPtr query_, const Block & virtual_header_, ContextPtr context_)
|
||||
: WithContext(context_), keys(keys_), bucket(bucket_), query(query_), virtual_header(virtual_header_)
|
||||
const Aws::S3::S3Client & client_,
|
||||
const std::string & version_id_,
|
||||
const std::vector<String> & keys_,
|
||||
const String & bucket_,
|
||||
ASTPtr query_,
|
||||
const Block & virtual_header_,
|
||||
ContextPtr context_,
|
||||
std::unordered_map<String, S3::ObjectInfo> * object_infos_)
|
||||
: WithContext(context_)
|
||||
, keys(keys_)
|
||||
, bucket(bucket_)
|
||||
, query(query_)
|
||||
, virtual_header(virtual_header_)
|
||||
{
|
||||
/// Create a virtual block with one row to construct filter
|
||||
if (query && virtual_header)
|
||||
@ -316,14 +366,28 @@ public:
|
||||
if (block.has("_file"))
|
||||
file_column = block.getByName("_file").column->assumeMutable();
|
||||
|
||||
std::unordered_map<String, S3::ObjectInfo> all_object_infos;
|
||||
for (const auto & key : keys)
|
||||
{
|
||||
String path = fs::path(bucket) / key;
|
||||
String file = path.substr(path.find_last_of('/') + 1);
|
||||
const String path = fs::path(bucket) / key;
|
||||
|
||||
/// To avoid extra requests update total_size only if object_infos != nullptr
|
||||
/// (which means we eventually need this info anyway, so it should be ok to do it now).
|
||||
if (object_infos_)
|
||||
{
|
||||
auto key_info = S3::getObjectInfo(client_, bucket, key, version_id_, true, false);
|
||||
all_object_infos.emplace(path, S3::ObjectInfo{.size = key_info.size, .last_modification_time = key_info.last_modification_time});
|
||||
}
|
||||
|
||||
if (path_column)
|
||||
{
|
||||
path_column->insert(path);
|
||||
}
|
||||
if (file_column)
|
||||
{
|
||||
const String file = path.substr(path.find_last_of('/') + 1);
|
||||
file_column->insert(file);
|
||||
}
|
||||
key_column->insert(key);
|
||||
}
|
||||
|
||||
@ -333,7 +397,19 @@ public:
|
||||
Strings filtered_keys;
|
||||
filtered_keys.reserve(rows);
|
||||
for (size_t i = 0; i < rows; ++i)
|
||||
filtered_keys.emplace_back(keys_col.getDataAt(i).toString());
|
||||
{
|
||||
auto key = keys_col.getDataAt(i).toString();
|
||||
|
||||
if (object_infos_)
|
||||
{
|
||||
std::string path = fs::path(bucket) / key;
|
||||
const auto & object_info = all_object_infos.at(path);
|
||||
total_size += object_info.size;
|
||||
object_infos_->emplace(path, object_info);
|
||||
}
|
||||
|
||||
filtered_keys.emplace_back(key);
|
||||
}
|
||||
|
||||
keys = std::move(filtered_keys);
|
||||
}
|
||||
@ -348,6 +424,11 @@ public:
|
||||
return keys[current_index];
|
||||
}
|
||||
|
||||
size_t getTotalSize() const
|
||||
{
|
||||
return total_size;
|
||||
}
|
||||
|
||||
private:
|
||||
Strings keys;
|
||||
std::atomic_size_t index = 0;
|
||||
@ -355,11 +436,21 @@ private:
|
||||
String bucket;
|
||||
ASTPtr query;
|
||||
Block virtual_header;
|
||||
|
||||
size_t total_size = 0;
|
||||
};
|
||||
|
||||
StorageS3Source::KeysIterator::KeysIterator(
|
||||
const std::vector<String> & keys_, const String & bucket_, ASTPtr query, const Block & virtual_header, ContextPtr context)
|
||||
: pimpl(std::make_shared<StorageS3Source::KeysIterator::Impl>(keys_, bucket_, query, virtual_header, context))
|
||||
const Aws::S3::S3Client & client_,
|
||||
const std::string & version_id_,
|
||||
const std::vector<String> & keys_,
|
||||
const String & bucket_,
|
||||
ASTPtr query,
|
||||
const Block & virtual_header,
|
||||
ContextPtr context,
|
||||
std::unordered_map<String, S3::ObjectInfo> * object_infos_)
|
||||
: pimpl(std::make_shared<StorageS3Source::KeysIterator::Impl>(
|
||||
client_, version_id_, keys_, bucket_, query, virtual_header, context, object_infos_))
|
||||
{
|
||||
}
|
||||
|
||||
@ -368,6 +459,11 @@ String StorageS3Source::KeysIterator::next()
|
||||
return pimpl->next();
|
||||
}
|
||||
|
||||
size_t StorageS3Source::KeysIterator::getTotalSize() const
|
||||
{
|
||||
return pimpl->getTotalSize();
|
||||
}
|
||||
|
||||
Block StorageS3Source::getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns)
|
||||
{
|
||||
for (const auto & virtual_column : requested_virtual_columns)
|
||||
@ -390,7 +486,7 @@ StorageS3Source::StorageS3Source(
|
||||
const std::shared_ptr<const Aws::S3::S3Client> & client_,
|
||||
const String & bucket_,
|
||||
const String & version_id_,
|
||||
std::shared_ptr<IteratorWrapper> file_iterator_,
|
||||
std::shared_ptr<IIterator> file_iterator_,
|
||||
const size_t download_thread_num_,
|
||||
const std::unordered_map<String, S3::ObjectInfo> & object_infos_)
|
||||
: ISource(getHeader(sample_block_, requested_virtual_columns_))
|
||||
@ -459,7 +555,7 @@ std::unique_ptr<ReadBuffer> StorageS3Source::createS3ReadBuffer(const String & k
|
||||
if (it != object_infos.end())
|
||||
object_size = it->second.size;
|
||||
else
|
||||
object_size = DB::S3::getObjectSize(client, bucket, key, version_id, false, false);
|
||||
object_size = DB::S3::getObjectSize(*client, bucket, key, version_id, false, false);
|
||||
|
||||
auto download_buffer_size = getContext()->getSettings().max_download_buffer_size;
|
||||
const bool use_parallel_download = download_buffer_size > 0 && download_thread_num > 1;
|
||||
@ -503,6 +599,13 @@ Chunk StorageS3Source::generate()
|
||||
{
|
||||
UInt64 num_rows = chunk.getNumRows();
|
||||
|
||||
auto it = object_infos.find(file_path);
|
||||
if (num_rows && it != object_infos.end())
|
||||
{
|
||||
updateRowsProgressApprox(
|
||||
*this, chunk, file_iterator->getTotalSize(), total_rows_approx_accumulated, total_rows_count_times, total_rows_approx_max);
|
||||
}
|
||||
|
||||
for (const auto & virtual_column : requested_virtual_columns)
|
||||
{
|
||||
if (virtual_column.name == "_path")
|
||||
@ -797,7 +900,7 @@ StorageS3::StorageS3(
|
||||
virtual_block.insert({column.type->createColumn(), column.type, column.name});
|
||||
}
|
||||
|
||||
std::shared_ptr<StorageS3Source::IteratorWrapper> StorageS3::createFileIterator(
|
||||
std::shared_ptr<StorageS3Source::IIterator> StorageS3::createFileIterator(
|
||||
const S3Configuration & s3_configuration,
|
||||
const std::vector<String> & keys,
|
||||
bool is_key_with_globs,
|
||||
@ -810,25 +913,22 @@ std::shared_ptr<StorageS3Source::IteratorWrapper> StorageS3::createFileIterator(
|
||||
{
|
||||
if (distributed_processing)
|
||||
{
|
||||
return std::make_shared<StorageS3Source::IteratorWrapper>(
|
||||
[callback = local_context->getReadTaskCallback()]() -> String {
|
||||
return callback();
|
||||
});
|
||||
return std::make_shared<StorageS3Source::ReadTaskIterator>(local_context->getReadTaskCallback());
|
||||
}
|
||||
else if (is_key_with_globs)
|
||||
{
|
||||
/// Iterate through disclosed globs and make a source for each file
|
||||
auto glob_iterator = std::make_shared<StorageS3Source::DisclosedGlobIterator>(
|
||||
*s3_configuration.client, s3_configuration.uri, query, virtual_block, local_context, object_infos, read_keys, s3_configuration.request_settings);
|
||||
return std::make_shared<StorageS3Source::IteratorWrapper>([glob_iterator]() { return glob_iterator->next(); });
|
||||
return std::make_shared<StorageS3Source::DisclosedGlobIterator>(
|
||||
*s3_configuration.client, s3_configuration.uri, query, virtual_block,
|
||||
local_context, object_infos, read_keys, s3_configuration.request_settings);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto keys_iterator
|
||||
= std::make_shared<StorageS3Source::KeysIterator>(keys, s3_configuration.uri.bucket, query, virtual_block, local_context);
|
||||
if (read_keys)
|
||||
*read_keys = keys;
|
||||
return std::make_shared<StorageS3Source::IteratorWrapper>([keys_iterator]() { return keys_iterator->next(); });
|
||||
|
||||
return std::make_shared<StorageS3Source::KeysIterator>(
|
||||
*s3_configuration.client, s3_configuration.uri.version_id, keys, s3_configuration.uri.bucket, query, virtual_block, local_context, object_infos);
|
||||
}
|
||||
}
|
||||
|
||||
@ -869,7 +969,7 @@ Pipe StorageS3::read(
|
||||
requested_virtual_columns.push_back(virtual_column);
|
||||
}
|
||||
|
||||
std::shared_ptr<StorageS3Source::IteratorWrapper> iterator_wrapper = createFileIterator(
|
||||
std::shared_ptr<StorageS3Source::IIterator> iterator_wrapper = createFileIterator(
|
||||
s3_configuration,
|
||||
keys,
|
||||
is_key_with_globs,
|
||||
@ -1369,7 +1469,7 @@ std::optional<ColumnsDescription> StorageS3::tryGetColumnsFromCache(
|
||||
/// Note that in case of exception in getObjectInfo returned info will be empty,
|
||||
/// but schema cache will handle this case and won't return columns from cache
|
||||
/// because we can't say that it's valid without last modification time.
|
||||
info = S3::getObjectInfo(s3_configuration.client, s3_configuration.uri.bucket, *it, s3_configuration.uri.version_id, false, false);
|
||||
info = S3::getObjectInfo(*s3_configuration.client, s3_configuration.uri.bucket, *it, s3_configuration.uri.version_id, false, false);
|
||||
if (object_infos)
|
||||
(*object_infos)[path] = info;
|
||||
}
|
||||
|
@ -33,7 +33,17 @@ class StorageS3SequentialSource;
|
||||
class StorageS3Source : public ISource, WithContext
|
||||
{
|
||||
public:
|
||||
class DisclosedGlobIterator
|
||||
class IIterator
|
||||
{
|
||||
public:
|
||||
virtual ~IIterator() = default;
|
||||
virtual String next() = 0;
|
||||
virtual size_t getTotalSize() const = 0;
|
||||
|
||||
String operator ()() { return next(); }
|
||||
};
|
||||
|
||||
class DisclosedGlobIterator : public IIterator
|
||||
{
|
||||
public:
|
||||
DisclosedGlobIterator(
|
||||
@ -46,7 +56,9 @@ public:
|
||||
Strings * read_keys_ = nullptr,
|
||||
const S3Settings::RequestSettings & request_settings_ = {});
|
||||
|
||||
String next();
|
||||
String next() override;
|
||||
|
||||
size_t getTotalSize() const override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
@ -54,12 +66,22 @@ public:
|
||||
std::shared_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
class KeysIterator
|
||||
class KeysIterator : public IIterator
|
||||
{
|
||||
public:
|
||||
explicit KeysIterator(
|
||||
const std::vector<String> & keys_, const String & bucket_, ASTPtr query, const Block & virtual_header, ContextPtr context);
|
||||
String next();
|
||||
const Aws::S3::S3Client & client_,
|
||||
const std::string & version_id_,
|
||||
const std::vector<String> & keys_,
|
||||
const String & bucket_,
|
||||
ASTPtr query,
|
||||
const Block & virtual_header,
|
||||
ContextPtr context,
|
||||
std::unordered_map<String, S3::ObjectInfo> * object_infos = nullptr);
|
||||
|
||||
String next() override;
|
||||
|
||||
size_t getTotalSize() const override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
@ -67,7 +89,18 @@ public:
|
||||
std::shared_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
using IteratorWrapper = std::function<String()>;
|
||||
class ReadTaskIterator : public IIterator
|
||||
{
|
||||
public:
|
||||
explicit ReadTaskIterator(const ReadTaskCallback & callback_) : callback(callback_) {}
|
||||
|
||||
String next() override { return callback(); }
|
||||
|
||||
size_t getTotalSize() const override { return 0; }
|
||||
|
||||
private:
|
||||
ReadTaskCallback callback;
|
||||
};
|
||||
|
||||
static Block getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns);
|
||||
|
||||
@ -85,7 +118,7 @@ public:
|
||||
const std::shared_ptr<const Aws::S3::S3Client> & client_,
|
||||
const String & bucket,
|
||||
const String & version_id,
|
||||
std::shared_ptr<IteratorWrapper> file_iterator_,
|
||||
std::shared_ptr<IIterator> file_iterator_,
|
||||
size_t download_thread_num,
|
||||
const std::unordered_map<String, S3::ObjectInfo> & object_infos_);
|
||||
|
||||
@ -116,11 +149,15 @@ private:
|
||||
/// onCancel and generate can be called concurrently
|
||||
std::mutex reader_mutex;
|
||||
std::vector<NameAndTypePair> requested_virtual_columns;
|
||||
std::shared_ptr<IteratorWrapper> file_iterator;
|
||||
std::shared_ptr<IIterator> file_iterator;
|
||||
size_t download_thread_num = 1;
|
||||
|
||||
Poco::Logger * log = &Poco::Logger::get("StorageS3Source");
|
||||
|
||||
UInt64 total_rows_approx_max = 0;
|
||||
size_t total_rows_count_times = 0;
|
||||
UInt64 total_rows_approx_accumulated = 0;
|
||||
|
||||
std::unordered_map<String, S3::ObjectInfo> object_infos;
|
||||
|
||||
/// Recreate ReadBuffer and Pipeline for each file.
|
||||
@ -233,7 +270,7 @@ private:
|
||||
|
||||
static void updateS3Configuration(ContextPtr, S3Configuration &);
|
||||
|
||||
static std::shared_ptr<StorageS3Source::IteratorWrapper> createFileIterator(
|
||||
static std::shared_ptr<StorageS3Source::IIterator> createFileIterator(
|
||||
const S3Configuration & s3_configuration,
|
||||
const std::vector<String> & keys,
|
||||
bool is_key_with_globs,
|
||||
|
@ -102,7 +102,7 @@ Pipe StorageS3Cluster::read(
|
||||
|
||||
auto iterator = std::make_shared<StorageS3Source::DisclosedGlobIterator>(
|
||||
*s3_configuration.client, s3_configuration.uri, query_info.query, virtual_block, context);
|
||||
auto callback = std::make_shared<StorageS3Source::IteratorWrapper>([iterator]() mutable -> String { return iterator->next(); });
|
||||
auto callback = std::make_shared<std::function<String()>>([iterator]() mutable -> String { return iterator->next(); });
|
||||
|
||||
/// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*)
|
||||
auto interpreter = InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage).analyze());
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Access/ContextAccess.h>
|
||||
#include <Storages/System/StorageSystemDatabases.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -17,6 +18,7 @@ NamesAndTypesList StorageSystemDatabases::getNamesAndTypes()
|
||||
{"data_path", std::make_shared<DataTypeString>()},
|
||||
{"metadata_path", std::make_shared<DataTypeString>()},
|
||||
{"uuid", std::make_shared<DataTypeUUID>()},
|
||||
{"engine_full", std::make_shared<DataTypeString>()},
|
||||
{"comment", std::make_shared<DataTypeString>()}
|
||||
};
|
||||
}
|
||||
@ -28,6 +30,42 @@ NamesAndAliases StorageSystemDatabases::getNamesAndAliases()
|
||||
};
|
||||
}
|
||||
|
||||
static String getEngineFull(const DatabasePtr & database)
|
||||
{
|
||||
DDLGuardPtr guard;
|
||||
while (true)
|
||||
{
|
||||
String name = database->getDatabaseName();
|
||||
guard = DatabaseCatalog::instance().getDDLGuard(name, "");
|
||||
|
||||
/// Ensure that the database was not renamed before we acquired the lock
|
||||
auto locked_database = DatabaseCatalog::instance().tryGetDatabase(name);
|
||||
|
||||
if (locked_database.get() == database.get())
|
||||
break;
|
||||
|
||||
/// Database was dropped
|
||||
if (!locked_database && name == database->getDatabaseName())
|
||||
return {};
|
||||
|
||||
guard.reset();
|
||||
}
|
||||
|
||||
ASTPtr ast = database->getCreateDatabaseQuery();
|
||||
auto * ast_create = ast->as<ASTCreateQuery>();
|
||||
|
||||
if (!ast_create || !ast_create->storage)
|
||||
return {};
|
||||
|
||||
String engine_full = ast_create->storage->formatWithSecretsHidden();
|
||||
static const char * const extra_head = " ENGINE = ";
|
||||
|
||||
if (startsWith(engine_full, extra_head))
|
||||
engine_full = engine_full.substr(strlen(extra_head));
|
||||
|
||||
return engine_full;
|
||||
}
|
||||
|
||||
void StorageSystemDatabases::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
|
||||
{
|
||||
const auto access = context->getAccess();
|
||||
@ -47,7 +85,8 @@ void StorageSystemDatabases::fillData(MutableColumns & res_columns, ContextPtr c
|
||||
res_columns[2]->insert(context->getPath() + database->getDataPath());
|
||||
res_columns[3]->insert(database->getMetadataPath());
|
||||
res_columns[4]->insert(database->getUUID());
|
||||
res_columns[5]->insert(database->getDatabaseComment());
|
||||
res_columns[5]->insert(getEngineFull(database));
|
||||
res_columns[6]->insert(database->getDatabaseComment());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -0,0 +1,25 @@
|
||||
<clickhouse>
|
||||
<password_complexity>
|
||||
<rule>
|
||||
<pattern>.{12}</pattern>
|
||||
<message>be at least 12 characters long</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{N}</pattern>
|
||||
<message>contain at least 1 numeric character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{Ll}</pattern>
|
||||
<message>contain at least 1 lowercase character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>\p{Lu}</pattern>
|
||||
<message>contain at least 1 uppercase character</message>
|
||||
</rule>
|
||||
<rule>
|
||||
<pattern>[^\p{L}\p{N}]</pattern>
|
||||
<message>contain at least 1 special character</message>
|
||||
</rule>
|
||||
</password_complexity>
|
||||
</clickhouse>
|
||||
|
42
tests/integration/test_password_constraints/test.py
Normal file
42
tests/integration/test_password_constraints/test.py
Normal file
@ -0,0 +1,42 @@
|
||||
import pytest
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
node = cluster.add_instance("node", main_configs=["configs/complexity_rules.xml"])
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def start_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
yield cluster
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test_complexity_rules(start_cluster):
|
||||
|
||||
error_message = "DB::Exception: Invalid password. The password should: be at least 12 characters long, contain at least 1 numeric character, contain at least 1 lowercase character, contain at least 1 uppercase character, contain at least 1 special character"
|
||||
assert error_message in node.query_and_get_error(
|
||||
"CREATE USER u_1 IDENTIFIED WITH plaintext_password BY ''"
|
||||
)
|
||||
|
||||
error_message = "DB::Exception: Invalid password. The password should: contain at least 1 lowercase character, contain at least 1 uppercase character, contain at least 1 special character"
|
||||
assert error_message in node.query_and_get_error(
|
||||
"CREATE USER u_2 IDENTIFIED WITH sha256_password BY '000000000000'"
|
||||
)
|
||||
|
||||
error_message = "DB::Exception: Invalid password. The password should: contain at least 1 uppercase character, contain at least 1 special character"
|
||||
assert error_message in node.query_and_get_error(
|
||||
"CREATE USER u_3 IDENTIFIED WITH double_sha1_password BY 'a00000000000'"
|
||||
)
|
||||
|
||||
error_message = "DB::Exception: Invalid password. The password should: contain at least 1 special character"
|
||||
assert error_message in node.query_and_get_error(
|
||||
"CREATE USER u_4 IDENTIFIED WITH plaintext_password BY 'aA0000000000'"
|
||||
)
|
||||
|
||||
node.query("CREATE USER u_5 IDENTIFIED WITH plaintext_password BY 'aA!000000000'")
|
||||
node.query("DROP USER u_5")
|
@ -20,23 +20,25 @@ done
|
||||
${CLICKHOUSE_CLIENT} --query "drop table if exists file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "create table file_log(k UInt8, v UInt8) engine=FileLog('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}.txt', 'CSV');"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
for i in {100..120}
|
||||
do
|
||||
echo $i, $i >> ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}.txt
|
||||
done
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
# touch does not change file content, no event
|
||||
touch ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}.txt
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "detach table file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "attach table file_log;"
|
||||
|
||||
# should no records return
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "drop table file_log;"
|
||||
|
||||
rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}.txt
|
||||
|
@ -23,11 +23,11 @@ done
|
||||
${CLICKHOUSE_CLIENT} --query "drop table if exists file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "create table file_log(k UInt8, v UInt8) engine=FileLog('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/', 'CSV');"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
cp ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/a.txt ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/b.txt
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
for i in {100..120}
|
||||
do
|
||||
@ -44,7 +44,7 @@ mv ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/b.txt ${user_files_path}/${
|
||||
|
||||
rm ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/d.txt
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "detach table file_log;"
|
||||
cp ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/e.txt ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/f.txt
|
||||
@ -60,18 +60,18 @@ do
|
||||
done
|
||||
${CLICKHOUSE_CLIENT} --query "attach table file_log;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "detach table file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "attach table file_log;"
|
||||
|
||||
# should no records return
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;"
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
truncate ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/a.txt --size 0
|
||||
|
||||
# exception happend
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;" 2>&1 | grep -q "Code: 33" && echo 'OK' || echo 'FAIL'
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;" 2>&1 | grep -q "Code: 33" && echo 'OK' || echo 'FAIL'
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "drop table file_log;"
|
||||
|
||||
|
@ -24,11 +24,11 @@ done
|
||||
${CLICKHOUSE_CLIENT} --query "drop table if exists file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "create table file_log(k UInt8, v UInt8) engine=FileLog('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/', 'CSV');"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset;"
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
cp ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/a.txt ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/b.txt
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset;"
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
for i in {100..120}
|
||||
do
|
||||
@ -44,18 +44,18 @@ cp ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/a.txt ${user_files_path}/${
|
||||
|
||||
rm ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/d.txt
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset;"
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "detach table file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "attach table file_log;"
|
||||
|
||||
# should no records return
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset;"
|
||||
${CLICKHOUSE_CLIENT} --query "select *, _filename, _offset from file_log order by _filename, _offset settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
truncate ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/a.txt --size 0
|
||||
|
||||
# exception happend
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k;" 2>&1 | grep -q "Code: 33" && echo 'OK' || echo 'FAIL'
|
||||
${CLICKHOUSE_CLIENT} --query "select * from file_log order by k settings stream_like_engine_allow_direct_select=1;" 2>&1 | grep -q "Code: 33" && echo 'OK' || echo 'FAIL'
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "drop table file_log;"
|
||||
|
||||
|
@ -26,14 +26,14 @@ done
|
||||
${CLICKHOUSE_CLIENT} --query "drop table if exists file_log;"
|
||||
${CLICKHOUSE_CLIENT} --query "create table file_log(k UInt32, v UInt32) engine=FileLog('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/', 'CSV');"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select count() from file_log "
|
||||
${CLICKHOUSE_CLIENT} --query "select count() from file_log settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
for i in {11..20}
|
||||
do
|
||||
${CLICKHOUSE_CLIENT} --query "insert into function file('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/test$i.csv', 'CSV', 'k UInt32, v UInt32') select number, number from numbers(10000);"
|
||||
done
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "select count() from file_log "
|
||||
${CLICKHOUSE_CLIENT} --query "select count() from file_log settings stream_like_engine_allow_direct_select=1;"
|
||||
|
||||
${CLICKHOUSE_CLIENT} --query "drop table file_log;"
|
||||
|
||||
|
@ -128,6 +128,7 @@ CREATE TABLE system.databases
|
||||
`data_path` String,
|
||||
`metadata_path` String,
|
||||
`uuid` UUID,
|
||||
`engine_full` String,
|
||||
`comment` String,
|
||||
`database` String
|
||||
)
|
||||
|
@ -82,7 +82,6 @@ addYears
|
||||
addressToLine
|
||||
addressToLineWithInlines
|
||||
addressToSymbol
|
||||
age
|
||||
alphaTokens
|
||||
and
|
||||
appendTrailingCharIfAbsent
|
||||
|
@ -1,76 +0,0 @@
|
||||
Various intervals
|
||||
-1
|
||||
0
|
||||
0
|
||||
-7
|
||||
-3
|
||||
0
|
||||
-23
|
||||
-11
|
||||
0
|
||||
-103
|
||||
-52
|
||||
0
|
||||
-730
|
||||
-364
|
||||
1
|
||||
-17520
|
||||
-8736
|
||||
24
|
||||
-1051200
|
||||
-524160
|
||||
1440
|
||||
-63072000
|
||||
-31449600
|
||||
86400
|
||||
DateTime arguments
|
||||
0
|
||||
23
|
||||
1439
|
||||
86399
|
||||
Date and DateTime arguments
|
||||
-63072000
|
||||
-31449600
|
||||
86400
|
||||
Constant and non-constant arguments
|
||||
-1051200
|
||||
-524160
|
||||
1440
|
||||
Case insensitive
|
||||
-10
|
||||
Dependance of timezones
|
||||
0
|
||||
0
|
||||
1
|
||||
25
|
||||
1500
|
||||
90000
|
||||
0
|
||||
0
|
||||
1
|
||||
24
|
||||
1440
|
||||
86400
|
||||
0
|
||||
0
|
||||
1
|
||||
25
|
||||
1500
|
||||
90000
|
||||
0
|
||||
0
|
||||
1
|
||||
24
|
||||
1440
|
||||
86400
|
||||
Additional test
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
@ -1,82 +0,0 @@
|
||||
SELECT 'Various intervals';
|
||||
|
||||
SELECT age('year', toDate('2017-12-31'), toDate('2016-01-01'));
|
||||
SELECT age('year', toDate('2017-12-31'), toDate('2017-01-01'));
|
||||
SELECT age('year', toDate('2017-12-31'), toDate('2018-01-01'));
|
||||
SELECT age('quarter', toDate('2017-12-31'), toDate('2016-01-01'));
|
||||
SELECT age('quarter', toDate('2017-12-31'), toDate('2017-01-01'));
|
||||
SELECT age('quarter', toDate('2017-12-31'), toDate('2018-01-01'));
|
||||
SELECT age('month', toDate('2017-12-31'), toDate('2016-01-01'));
|
||||
SELECT age('month', toDate('2017-12-31'), toDate('2017-01-01'));
|
||||
SELECT age('month', toDate('2017-12-31'), toDate('2018-01-01'));
|
||||
SELECT age('week', toDate('2017-12-31'), toDate('2016-01-01'));
|
||||
SELECT age('week', toDate('2017-12-31'), toDate('2017-01-01'));
|
||||
SELECT age('week', toDate('2017-12-31'), toDate('2018-01-01'));
|
||||
SELECT age('day', toDate('2017-12-31'), toDate('2016-01-01'));
|
||||
SELECT age('day', toDate('2017-12-31'), toDate('2017-01-01'));
|
||||
SELECT age('day', toDate('2017-12-31'), toDate('2018-01-01'));
|
||||
SELECT age('hour', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
|
||||
SELECT age('hour', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
|
||||
SELECT age('hour', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
|
||||
SELECT age('minute', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
|
||||
SELECT age('minute', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
|
||||
SELECT age('minute', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
|
||||
SELECT age('second', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
|
||||
SELECT age('second', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
|
||||
SELECT age('second', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
|
||||
|
||||
SELECT 'DateTime arguments';
|
||||
SELECT age('day', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('second', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
|
||||
|
||||
SELECT 'Date and DateTime arguments';
|
||||
|
||||
SELECT age('second', toDate('2017-12-31'), toDateTime('2016-01-01 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDate('2017-01-01'), 'UTC');
|
||||
SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDateTime('2018-01-01 00:00:00', 'UTC'));
|
||||
|
||||
SELECT 'Constant and non-constant arguments';
|
||||
|
||||
SELECT age('minute', materialize(toDate('2017-12-31')), toDate('2016-01-01'), 'UTC');
|
||||
SELECT age('minute', toDate('2017-12-31'), materialize(toDate('2017-01-01')), 'UTC');
|
||||
SELECT age('minute', materialize(toDate('2017-12-31')), materialize(toDate('2018-01-01')), 'UTC');
|
||||
|
||||
SELECT 'Case insensitive';
|
||||
|
||||
SELECT age('year', today(), today() - INTERVAL 10 YEAR);
|
||||
|
||||
SELECT 'Dependance of timezones';
|
||||
|
||||
SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
|
||||
|
||||
SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
|
||||
|
||||
SELECT age('month', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
SELECT age('week', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
SELECT age('day', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
SELECT age('second', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
|
||||
|
||||
SELECT age('month', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
SELECT age('week', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
SELECT age('day', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
SELECT age('second', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
|
||||
|
||||
SELECT 'Additional test';
|
||||
|
||||
SELECT number = age('month', now() - INTERVAL number MONTH, now()) FROM system.numbers LIMIT 10;
|
@ -1,169 +0,0 @@
|
||||
-- { echo }
|
||||
|
||||
-- Date32 vs Date32
|
||||
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
|
||||
1
|
||||
-- With DateTime64
|
||||
-- Date32 vs DateTime64
|
||||
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
-- DateTime64 vs Date32
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
|
||||
1
|
||||
-- With DateTime
|
||||
-- Date32 vs DateTime
|
||||
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
-- DateTime vs Date32
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
|
||||
1
|
||||
-- With Date
|
||||
-- Date32 vs Date
|
||||
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC');
|
||||
1
|
||||
-- Date vs Date32
|
||||
SELECT age('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
86400
|
||||
SELECT age('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1440
|
||||
SELECT age('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
24
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
|
||||
1
|
||||
-- Const vs non-const columns
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
||||
-- Non-const vs const columns
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
1
|
||||
-- Non-const vs non-const columns
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
1
|
@ -1,101 +0,0 @@
|
||||
-- { echo }
|
||||
|
||||
-- Date32 vs Date32
|
||||
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
|
||||
|
||||
-- With DateTime64
|
||||
-- Date32 vs DateTime64
|
||||
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC');
|
||||
|
||||
-- DateTime64 vs Date32
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
|
||||
|
||||
-- With DateTime
|
||||
-- Date32 vs DateTime
|
||||
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC');
|
||||
|
||||
-- DateTime vs Date32
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
|
||||
|
||||
-- With Date
|
||||
-- Date32 vs Date
|
||||
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC');
|
||||
|
||||
-- Date vs Date32
|
||||
SELECT age('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
|
||||
SELECT age('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
|
||||
SELECT age('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
|
||||
SELECT age('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
|
||||
|
||||
-- Const vs non-const columns
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
|
||||
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
|
||||
-- Non-const vs const columns
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC');
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
|
||||
|
||||
-- Non-const vs non-const columns
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
|
@ -1,113 +0,0 @@
|
||||
-- { echo }
|
||||
|
||||
-- DateTime64 vs DateTime64 same scale
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
|
||||
10
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
|
||||
600
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC'));
|
||||
3600
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC'));
|
||||
4210
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
|
||||
10
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
|
||||
600
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
|
||||
10
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC'));
|
||||
1
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC'));
|
||||
1
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC'));
|
||||
1
|
||||
-- DateTime64 vs DateTime64 different scale
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
|
||||
600
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC'));
|
||||
3600
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC'));
|
||||
4210
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
|
||||
600
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'));
|
||||
1
|
||||
-- With DateTime
|
||||
-- DateTime64 vs DateTime
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC'));
|
||||
0
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC'));
|
||||
10
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC'));
|
||||
600
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC'));
|
||||
3600
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC'));
|
||||
4210
|
||||
-- DateTime vs DateTime64
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC'));
|
||||
0
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC'));
|
||||
600
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC'));
|
||||
3600
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC'));
|
||||
4210
|
||||
-- With Date
|
||||
-- DateTime64 vs Date
|
||||
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC'));
|
||||
1
|
||||
-- Date vs DateTime64
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
|
||||
1
|
||||
-- Same thing but const vs non-const columns
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
|
||||
10
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
|
||||
10
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
|
||||
10
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
|
||||
10
|
||||
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC')));
|
||||
1
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
||||
1
|
||||
-- Same thing but non-const vs const columns
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC'));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
|
||||
10
|
||||
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC'));
|
||||
1
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
|
||||
1
|
||||
-- Same thing but non-const vs non-const columns
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
|
||||
10
|
||||
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
|
||||
10
|
||||
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC')));
|
||||
1
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
||||
1
|
@ -1,77 +0,0 @@
|
||||
-- { echo }
|
||||
|
||||
-- DateTime64 vs DateTime64 same scale
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC'));
|
||||
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
|
||||
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
|
||||
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC'));
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC'));
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC'));
|
||||
|
||||
-- DateTime64 vs DateTime64 different scale
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC'));
|
||||
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
|
||||
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
|
||||
|
||||
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
|
||||
|
||||
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'));
|
||||
|
||||
-- With DateTime
|
||||
-- DateTime64 vs DateTime
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC'));
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC'));
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC'));
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC'));
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC'));
|
||||
|
||||
-- DateTime vs DateTime64
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC'));
|
||||
|
||||
-- With Date
|
||||
-- DateTime64 vs Date
|
||||
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC'));
|
||||
|
||||
-- Date vs DateTime64
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
|
||||
|
||||
-- Same thing but const vs non-const columns
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
|
||||
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
|
||||
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
|
||||
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
|
||||
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC')));
|
||||
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
||||
|
||||
-- Same thing but non-const vs const columns
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
|
||||
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC'));
|
||||
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
|
||||
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC'));
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
|
||||
|
||||
-- Same thing but non-const vs non-const columns
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
|
||||
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
|
||||
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
|
||||
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
|
||||
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC')));
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
@ -0,0 +1 @@
|
||||
Replicated(\'some/path/default/replicated_database_test\', \'shard_1\', \'replica_1\') SETTINGS max_broken_tables_ratio = 1
|
@ -0,0 +1,8 @@
|
||||
-- Tags: no-parallel
|
||||
|
||||
DROP DATABASE IF EXISTS replicated_database_test;
|
||||
SET allow_experimental_database_replicated=1;
|
||||
CREATE DATABASE IF NOT EXISTS replicated_database_test ENGINE = Replicated('some/path/' || currentDatabase() || '/replicated_database_test', 'shard_1', 'replica_1') SETTINGS max_broken_tables_ratio=1;
|
||||
SELECT engine_full FROM system.databases WHERE name = 'replicated_database_test';
|
||||
DROP DATABASE IF EXISTS replicated_database_test;
|
||||
|
40
tests/queries/0_stateless/02493_inconsistent_hex_and_binary_number.expect
Executable file
40
tests/queries/0_stateless/02493_inconsistent_hex_and_binary_number.expect
Executable file
@ -0,0 +1,40 @@
|
||||
#!/usr/bin/expect -f
|
||||
|
||||
set basedir [file dirname $argv0]
|
||||
set basename [file tail $argv0]
|
||||
exp_internal -f $env(CLICKHOUSE_TMP)/$basename.debuglog 0
|
||||
|
||||
log_user 0
|
||||
set timeout 60
|
||||
match_max 100000
|
||||
set stty_init "rows 25 cols 120"
|
||||
|
||||
expect_after {
|
||||
eof { exp_continue }
|
||||
timeout { exit 1 }
|
||||
}
|
||||
|
||||
spawn bash
|
||||
send "source $basedir/../shell_config.sh\r"
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0b'\r"
|
||||
expect "DB::Exception: Missing columns: '0b' while processing query: 'SELECT `0b`', required columns: '0b'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0b;'\r"
|
||||
expect "DB::Exception: Missing columns: '0b' while processing query: 'SELECT `0b`', required columns: '0b'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0b ;'\r"
|
||||
expect "DB::Exception: Missing columns: '0b' while processing query: 'SELECT `0b`', required columns: '0b'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0x'\r"
|
||||
expect "DB::Exception: Missing columns: '0x' while processing query: 'SELECT `0x`', required columns: '0x'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0x;'\r"
|
||||
expect "DB::Exception: Missing columns: '0x' while processing query: 'SELECT `0x`', required columns: '0x'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
send "\$CLICKHOUSE_CLIENT --query 'select 0x ;'\r"
|
||||
expect "DB::Exception: Missing columns: '0x' while processing query: 'SELECT `0x`', required columns: '0x'. (UNKNOWN_IDENTIFIER)"
|
||||
|
||||
send "exit\r"
|
||||
expect eof
|
@ -0,0 +1,126 @@
|
||||
1234
|
||||
1234
|
||||
1234
|
||||
1234
|
||||
1234
|
||||
1234
|
||||
-1234
|
||||
-1234
|
||||
-1234
|
||||
12.34
|
||||
12.34
|
||||
12.34
|
||||
12.34
|
||||
-12.34
|
||||
-12.34
|
||||
-12.34
|
||||
-12.34
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e22
|
||||
3.4e-20
|
||||
3.4e-20
|
||||
3.4e-20
|
||||
3.4e-20
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e22
|
||||
-3.4e-20
|
||||
-3.4e-20
|
||||
-3.4e-20
|
||||
-3.4e-20
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e21
|
||||
1.34e-21
|
||||
1.34e-21
|
||||
1.34e-21
|
||||
1.34e-21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e21
|
||||
-1.34e-21
|
||||
-1.34e-21
|
||||
-1.34e-21
|
||||
-1.34e-21
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-340000000000000000000
|
||||
-3.4e-22
|
||||
-3.4e-22
|
||||
-3.4e-22
|
||||
-3.4e-22
|
||||
nan
|
||||
nan
|
||||
inf
|
||||
inf
|
||||
-inf
|
||||
inf
|
||||
inf
|
||||
-inf
|
||||
inf
|
||||
inf
|
||||
-inf
|
||||
15
|
||||
15
|
||||
15
|
||||
-15
|
||||
-15
|
||||
-15
|
||||
4660
|
||||
4660
|
||||
4660
|
||||
-4660
|
||||
-4660
|
||||
-4660
|
||||
238
|
||||
238
|
||||
1.1376953125
|
||||
1.1376953125
|
||||
-1.1376953125
|
||||
-1.1376953125
|
||||
0.9296875
|
||||
0.9296875
|
||||
2.275390625
|
||||
2.275390625
|
||||
2.275390625
|
||||
2.275390625
|
||||
2.275390625
|
||||
2.275390625
|
||||
0.56884765625
|
||||
0.56884765625
|
||||
0.56884765625
|
||||
-2.275390625
|
||||
-2.275390625
|
||||
-2.275390625
|
||||
-2.275390625
|
||||
-2.275390625
|
||||
-2.275390625
|
||||
-0.56884765625
|
||||
-0.56884765625
|
||||
-0.56884765625
|
@ -0,0 +1,154 @@
|
||||
SELECT 1234; -- Positive integer (+ implied)
|
||||
SELECT 1_234;
|
||||
SELECT 1_2_3_4;
|
||||
SELECT +1234; -- Positive integer (+ explicit)
|
||||
SELECT +1_234;
|
||||
SELECT +1_2_3_4;
|
||||
SELECT -1234; -- Negative integer
|
||||
SELECT -1_234;
|
||||
SELECT -1_2_3_4;
|
||||
SELECT 12.34; -- Positive floating point with . notation
|
||||
SELECT 12.3_4;
|
||||
SELECT 1_2.34;
|
||||
SELECT 1_2.3_4;
|
||||
SELECT -12.34; -- Negative floating point with . notation
|
||||
SELECT -12.3_4;
|
||||
SELECT -1_2.34;
|
||||
SELECT -1_2.3_4;
|
||||
SELECT 34e21; -- Positive floating point with positive scientific notation (+ implied)
|
||||
SELECT 3_4e21;
|
||||
SELECT 34e2_1;
|
||||
SELECT 3_4e2_1;
|
||||
SELECT 34e+21; -- Positive floating point with positive scientific notation (+ explicit)
|
||||
SELECT 3_4e+21;
|
||||
SELECT 34e+2_1;
|
||||
SELECT 3_4e+2_1;
|
||||
SELECT 34e-21; -- Positive floating point with negative scientific notation
|
||||
SELECT 3_4e-21;
|
||||
SELECT 34e-2_1;
|
||||
SELECT 3_4e-2_1;
|
||||
SELECT -34e21; -- Negative floating point with positive scientific notation (+ implied)
|
||||
SELECT -3_4e21;
|
||||
SELECT -34e2_1;
|
||||
SELECT -3_4e2_1;
|
||||
SELECT -34e+21; -- Negative floating point with positive scientific notation (+ explicit)
|
||||
SELECT -3_4e+21;
|
||||
SELECT -34e+2_1;
|
||||
SELECT -3_4e+2_1;
|
||||
SELECT -34e-21; -- Negative floating point with negative scientific notation
|
||||
SELECT -3_4e-21;
|
||||
SELECT -34e-2_1;
|
||||
SELECT -3_4e-2_1;
|
||||
SELECT 1.34e21; -- Positive floating point (with .) with positive scientific notation (+ implied)
|
||||
SELECT 1.3_4e21;
|
||||
SELECT 1.34e2_1;
|
||||
SELECT 1.3_4e2_1;
|
||||
SELECT 1.34e+21; -- Positive floating point (with .) with positive scientific notation (+ explicit)
|
||||
SELECT 1.3_4e+21;
|
||||
SELECT 1.34e+2_1;
|
||||
SELECT 1.3_4e+2_1;
|
||||
SELECT 1.34e-21; -- Positive floating point (with .) with negative scientific notation
|
||||
SELECT 1.3_4e-21;
|
||||
SELECT 1.34e-2_1;
|
||||
SELECT 1.3_4e-2_1;
|
||||
SELECT -1.34e21; -- Negative floating point (with .) with positive scientific notation (+ implied)
|
||||
SELECT -1.3_4e21;
|
||||
SELECT -1.34e2_1;
|
||||
SELECT -1.3_4e2_1;
|
||||
SELECT -1.34e+21; -- Negative floating point (with .) with positive scientific notation (+ explicit)
|
||||
SELECT -1.3_4e+21;
|
||||
SELECT -1.34e+2_1;
|
||||
SELECT -1.3_4e+2_1;
|
||||
SELECT -1.34e-21; -- Negative floating point (with .) with negative scientific notation
|
||||
SELECT -1.3_4e-21;
|
||||
SELECT -1.34e-2_1;
|
||||
SELECT -1.3_4e-2_1;
|
||||
SELECT -.34e21; -- Negative floating point (with .) with positive scientific notation (+ implied)
|
||||
SELECT -.3_4e21;
|
||||
SELECT -.34e2_1;
|
||||
SELECT -.3_4e2_1;
|
||||
SELECT -.34e+21; -- Negative floating point (with .) with positive scientific notation (+ explicit)
|
||||
SELECT -.3_4e+21;
|
||||
SELECT -.34e+2_1;
|
||||
SELECT -.3_4e+2_1;
|
||||
SELECT -.34e-21; -- Negative floating point (with .) with negative scientific notation
|
||||
SELECT -.3_4e-21;
|
||||
SELECT -.34e-2_1;
|
||||
SELECT -.3_4e-2_1;
|
||||
SELECT NaN; -- Specials
|
||||
SELECT nan;
|
||||
SELECT inf;
|
||||
SELECT +inf;
|
||||
SELECT -inf;
|
||||
SELECT Inf;
|
||||
SELECT +Inf;
|
||||
SELECT -Inf;
|
||||
SELECT INF;
|
||||
SELECT +INF;
|
||||
SELECT -INF;
|
||||
SELECT 0b1111; -- Binary
|
||||
SELECT 0b1_111;
|
||||
SELECT 0b1_1_1_1;
|
||||
SELECT -0b1111;
|
||||
SELECT -0b1_111;
|
||||
SELECT -0b1_1_1_1;
|
||||
SELECT 0x1234; -- Hex
|
||||
SELECT 0x1_234;
|
||||
SELECT 0x1_2_3_4;
|
||||
SELECT -0x1234;
|
||||
SELECT -0x1_234;
|
||||
SELECT -0x1_2_3_4;
|
||||
SELECT 0xee;
|
||||
SELECT 0xe_e;
|
||||
SELECT 0x1.234; -- Hex fractions
|
||||
SELECT 0x1.2_3_4;
|
||||
SELECT -0x1.234;
|
||||
SELECT -0x1.2_3_4;
|
||||
SELECT 0x0.ee;
|
||||
SELECT 0x0.e_e;
|
||||
SELECT 0x1.234p01; -- Hex scientific notation
|
||||
SELECT 0x1.2_34p01;
|
||||
SELECT 0x1.234p0_1;
|
||||
SELECT 0x1.234p+01;
|
||||
SELECT 0x1.2_34p+01;
|
||||
SELECT 0x1.2_34p+0_1;
|
||||
SELECT 0x1.234p-01;
|
||||
SELECT 0x1.2_34p-01;
|
||||
SELECT 0x1.2_34p-0_1;
|
||||
SELECT -0x1.234p01;
|
||||
SELECT -0x1.2_34p01;
|
||||
SELECT -0x1.2_34p0_1;
|
||||
SELECT -0x1.234p+01;
|
||||
SELECT -0x1.2_34p+01;
|
||||
SELECT -0x1.2_34p+0_1;
|
||||
SELECT -0x1.234p-01;
|
||||
SELECT -0x1.2_34p-01;
|
||||
SELECT -0x1.2_34p-0_1;
|
||||
|
||||
-- Things that are not a number
|
||||
|
||||
select _1000; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select _1000 FROM (SELECT 1 AS _1000) FORMAT Null;
|
||||
select -_1; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select -_1 FROM (SELECT -1 AS _1) FORMAT Null;
|
||||
select +_1; -- { clientError SYNTAX_ERROR }
|
||||
select 1__0; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1_ ; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 10_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1_e5; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1e_5; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1e5_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1e_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 1_.; -- { clientError SYNTAX_ERROR }
|
||||
select 1e_1; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0_x2; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x2_p2; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x2p_2; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x2p2_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0b; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0b ; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x ; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x_; -- { serverError UNKNOWN_IDENTIFIER }
|
||||
select 0x_1; -- { serverError UNKNOWN_IDENTIFIER }
|
@ -0,0 +1,19 @@
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
\N
|
||||
\N
|
||||
\N
|
27
tests/queries/0_stateless/02495_concat_with_separator.sql
Normal file
27
tests/queries/0_stateless/02495_concat_with_separator.sql
Normal file
@ -0,0 +1,27 @@
|
||||
select concatWithSeparator('|', 'a', 'b') == 'a|b';
|
||||
select concatWithSeparator('|', 'a', materialize('b')) == 'a|b';
|
||||
select concatWithSeparator('|', materialize('a'), 'b') == 'a|b';
|
||||
select concatWithSeparator('|', materialize('a'), materialize('b')) == 'a|b';
|
||||
|
||||
select concatWithSeparator('|', 'a', toFixedString('b', 1)) == 'a|b';
|
||||
select concatWithSeparator('|', 'a', materialize(toFixedString('b', 1))) == 'a|b';
|
||||
select concatWithSeparator('|', materialize('a'), toFixedString('b', 1)) == 'a|b';
|
||||
select concatWithSeparator('|', materialize('a'), materialize(toFixedString('b', 1))) == 'a|b';
|
||||
|
||||
select concatWithSeparator('|', toFixedString('a', 1), 'b') == 'a|b';
|
||||
select concatWithSeparator('|', toFixedString('a', 1), materialize('b')) == 'a|b';
|
||||
select concatWithSeparator('|', materialize(toFixedString('a', 1)), 'b') == 'a|b';
|
||||
select concatWithSeparator('|', materialize(toFixedString('a', 1)), materialize('b')) == 'a|b';
|
||||
|
||||
select concatWithSeparator('|', toFixedString('a', 1), toFixedString('b', 1)) == 'a|b';
|
||||
select concatWithSeparator('|', toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'a|b';
|
||||
select concatWithSeparator('|', materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'a|b';
|
||||
select concatWithSeparator('|', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'a|b';
|
||||
|
||||
select concatWithSeparator(null, 'a', 'b') == null;
|
||||
select concatWithSeparator('1', null, 'b') == null;
|
||||
select concatWithSeparator('1', 'a', null) == null;
|
||||
|
||||
select concatWithSeparator(materialize('|'), 'a', 'b'); -- { serverError 44 }
|
||||
select concatWithSeparator(); -- { serverError 42 }
|
||||
select concatWithSeparator('|', 'a', 100); -- { serverError 43 }
|
Loading…
Reference in New Issue
Block a user