Merge branch 'master' into wv-engine

This commit is contained in:
mergify[bot] 2022-05-18 01:56:56 +00:00 committed by GitHub
commit 37d6da7506
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
55 changed files with 23048 additions and 94 deletions

View File

@ -88,7 +88,8 @@ RUN python3 -m pip install \
urllib3 \
requests-kerberos \
pyhdfs \
azure-storage-blob
azure-storage-blob \
meilisearch
COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/

View File

@ -0,0 +1,16 @@
version: '2.3'
services:
  # Plain MeiliSearch instance used by integration tests (no authentication).
  meili1:
    image: getmeili/meilisearch:latest
    restart: always
    ports:
      # Host/container ports are injected by the test harness via environment.
      - ${MEILI_EXTERNAL_PORT}:${MEILI_INTERNAL_PORT}
  # Second instance with a master key set, for testing authenticated access.
  meili_secure:
    image: getmeili/meilisearch:latest
    restart: always
    ports:
      - ${MEILI_SECURE_EXTERNAL_PORT}:${MEILI_SECURE_INTERNAL_PORT}
    environment:
      # Test-only credential; matches the key the integration tests send.
      MEILI_MASTER_KEY: "password"

View File

@ -3,10 +3,38 @@ sidebar_position: 43
sidebar_label: Boolean
---
# Boolean Values {#boolean-values}
# Boolean Values bool (boolean) {#boolean-values}
Since https://github.com/ClickHouse/ClickHouse/commit/4076ae77b46794e73594a9f400200088ed1e7a6e there is a separate type for boolean values.
Type `bool` is stored as UInt8. Possible values `true` (1), `false` (0).
For versions before that, there is no separate type for boolean values. Use UInt8 type, restricted to the values 0 or 1.
```sql
select true as col, toTypeName(col);
┌─col──┬─toTypeName(true)─┐
│ true │ Bool │
└──────┴──────────────────┘
select true == 1 as col, toTypeName(col);
┌─col─┬─toTypeName(equals(true, 1))─┐
│ 1 │ UInt8 │
└─────┴─────────────────────────────┘
```
```sql
CREATE TABLE test_bool
(
`A` Int64,
`B` Bool
)
ENGINE = Memory;
INSERT INTO test_bool VALUES (1, true),(2,0);
SELECT * FROM test_bool;
┌─A─┬─B─────┐
│ 1 │ true │
│ 2 │ false │
└───┴───────┘
```
[Original article](https://clickhouse.com/docs/en/data_types/boolean/) <!--hide-->

View File

@ -1026,4 +1026,119 @@ Result:
│ 41162 │
└─────────────┘
```
## h3Line {#h3line}
Returns the line of indices between the two indices that are provided.
**Syntax**
``` sql
h3Line(start,end)
```
**Parameter**
- `start` — Hexagon index number that represents a starting point. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `end` — Hexagon index number that represents an ending point. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned value**
Array of h3 indexes representing the line of indices between the two provided indices:
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT h3Line(590080540275638271,590103561300344831) as indexes;
```
Result:
``` text
┌─indexes────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ [590080540275638271,590080471556161535,590080883873021951,590106516237844479,590104385934065663,590103630019821567,590103561300344831] │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## h3Distance {#h3distance}
Returns the distance in grid cells between the two indices that are provided.
**Syntax**
``` sql
h3Distance(start,end)
```
**Parameter**
- `start` — Hexagon index number that represents a starting point. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `end` — Hexagon index number that represents an ending point. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned value**
- Number of grid cells.
Type: [Int64](../../../sql-reference/data-types/int-uint.md).
Returns a negative number if finding the distance fails.
**Example**
Query:
``` sql
SELECT h3Distance(590080540275638271,590103561300344831) as distance;
```
Result:
``` text
┌─distance─┐
│ 7 │
└──────────┘
```
## h3HexRing {#h3hexring}
Returns the indexes of the hexagonal ring centered at the provided origin h3Index and length k.
Returns 0 if no pentagonal distortion was encountered.
**Syntax**
``` sql
h3HexRing(index, k)
```
**Parameter**
- `index` — Hexagon index number that represents the origin. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `k` — Distance. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned values**
- Array of H3 indexes.
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT h3HexRing(590080540275638271, toUInt16(1)) AS hexRing;
```
Result:
``` text
┌─hexRing─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ [590080815153545215,590080471556161535,590080677714591743,590077585338138623,590077447899185151,590079509483487231] │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/sql-reference/functions/geo/h3) <!--hide-->

View File

@ -3,7 +3,35 @@ sidebar_position: 43
sidebar_label: "Булевы значения"
---
# Булевы значения {#bulevy-znacheniia}
# Булевы значения bool (boolean) {#bulevy-znacheniia}
Отдельного типа для булевых значений нет. Для них используется тип UInt8, в котором используются только значения 0 и 1.
Тип `bool` хранится как UInt8. Значения `true` (1), `false` (0).
```sql
select true as col, toTypeName(col);
┌─col──┬─toTypeName(true)─┐
│ true │ Bool │
└──────┴──────────────────┘
select true == 1 as col, toTypeName(col);
┌─col─┬─toTypeName(equals(true, 1))─┐
│ 1 │ UInt8 │
└─────┴─────────────────────────────┘
```
```sql
CREATE TABLE test_bool
(
`A` Int64,
`B` Bool
)
ENGINE = Memory;
INSERT INTO test_bool VALUES (1, true),(2,0);
SELECT * FROM test_bool;
┌─A─┬─B─────┐
│ 1 │ true │
│ 2 │ false │
└───┴───────┘
```

View File

@ -12,6 +12,8 @@ SELECT if(cond, then, else)
如果条件 `cond` 的计算结果为非零值,则返回表达式 `then` 的结果,并且跳过表达式 `else` 的结果(如果存在)。 如果 `cond` 为零或 `NULL`,则将跳过 `then` 表达式的结果,并返回 `else` 表达式的结果(如果存在)。
您可以使用[short_circuit_function_evaluation](../../operations/settings/settings.md#short-circuit-function-evaluation) 设置,来根据短路方案计算 `if` 函数。如果启用此设置,则仅在`cond`为真的时,加载`then`表达式,此时不加载`else`表达式。仅在`cond`为假时,加载`else`表达式,此时不加载`then`表达式。例如,执行查询`SELECT if(number = 0, 0, intDiv(42, number)) FROM numbers(10)`时不会抛出除以零的异常,因为`intDiv(42, number)`会仅对不满足条件`number = 0`的数字进行处理。
**参数**
- `cond` 条件结果可以为零或不为零。 类型是 UInt8Nullable(UInt8) 或 NULL。
@ -102,11 +104,21 @@ WHERE isNotNull(left) AND isNotNull(right)
- `then`和`else`可以是`NULL`
**参考**
- [ifNotFinite](../../sql-reference/functions/other-functions.md#ifnotfinite)。
## multiIf {#multiif}
允许您在查询中更紧凑地编写[CASE](../operators/index.md#operator_case)运算符。
multiIf(cond_1, then_1, cond_2, then_2...else)
**语法**
``` sql
multiIf(cond_1, then_1, cond_2, then_2, ..., else)
```
您可以使用[short_circuit_function_evaluation](../../operations/settings/settings.md#short-circuit-function-evaluation) 设置,根据短路方案计算 `multiIf` 函数。如果启用此设置,则 `then_i` 表达式仅在 `((NOT cond_1) AND (NOT cond_2) AND ... AND (NOT cond_{i-1}) AND cond_i)` 为真,`cond_i ` 将仅对 `((NOT cond_1) AND (NOT cond_2) AND ... AND (NOT cond_{i-1}))` 为真的行进行执行。例如执行查询“SELECT multiIf(number = 2, intDiv(1, number), number = 5) FROM numbers(10)”时不会抛出除以零的异常。
**参数:**

View File

@ -12,16 +12,61 @@ SELECT
toString(time, 'US/Samoa') AS time_samoa
```
┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐
│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │
└─────────────────────┴────────────┴────────────┴─────────────────────┘
``` text
┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐
│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
仅支持与UTC相差一整小时的时区。
## timeZone {#timezone}
返回服务器的时区。
如果它在分布式表的上下文中执行,那么它会生成一个普通列,其中包含与每个分片相关的值。否则它会产生一个常数值。
**语法**
``` sql
timeZone()
```
别名:`timezone`。
**返回值**
- 时区。
类型为: [String](../../sql-reference/data-types/string.md)。
## toTimeZone {#totimezone}
将Date或DateTime转换为指定的时区。 时区是Date/DateTime类型的属性。 表字段或结果集的列的内部值(秒数)不会更改,列的类型会更改,并且其字符串表示形式也会相应更改。
**语法**
``` sql
toTimezone(value, timezone)
```
别名:`toTimezone`。
**参数**
- `value` — 时间或日期和时间。类型为[DateTime64](../../sql-reference/data-types/datetime64.md)。
- `timezone` — 返回值的时区。类型为 [String](../../sql-reference/data-types/string.md)。 这个参数是一个常量,因为 `toTimezone` 改变了列的时区(时区是 `DateTime` 类型的属性)。
**返回值**
- 日期和时间。
类型为: [DateTime](../../sql-reference/data-types/datetime.md)。
**示例**
查询语句:
```sql
SELECT
toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
@ -52,44 +97,138 @@ int32samoa: 1546300800
`toTimeZone(time_utc, 'Asia/Yekaterinburg')` 将 `DateTime('UTC')` 类型转换为 `DateTime('Asia/Yekaterinburg')`. 内部值 (Unixtimestamp) 1546300800 保持不变, 但是字符串表示(toString() 函数的结果值) 由 `time_utc: 2019-01-01 00:00:00` 转换为 `time_yekat: 2019-01-01 05:00:00`.
## timeZoneOf {#timezoneof}
返回[DateTime](../../sql-reference/data-types/datetime.md)或者[DateTime64](../../sql-reference/data-types/datetime64.md)数据类型的时区名称。
**语法**
``` sql
timeZoneOf(value)
```
别名: `timezoneOf`
**参数**
- `value` — 日期和时间。类型为[DateTime](../../sql-reference/data-types/datetime.md)或者[DateTime64](../../sql-reference/data-types/datetime64.md)。
**返回值**
- 时区名称。
类型为:[String](../../sql-reference/data-types/string.md)。
**示例**
查询语句:
``` sql
SELECT timezoneOf(now());
```
结果:
``` text
┌─timezoneOf(now())─┐
│ Etc/UTC │
└───────────────────┘
```
## timeZoneOffset {#timezoneoffset}
返回从[UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time)开始到现在以秒为单位的时区偏移量。该函数考虑到[夏时令](https://en.wikipedia.org/wiki/Daylight_saving_time)并在指定日期和时间更改历史时区。
[IANA timezone database](https://www.iana.org/time-zones)用于计算偏移量。
**语法**
``` sql
timeZoneOffset(value)
```
别名: `timezoneOffset`
**参数**
- `value` — 日期和时间。类型为[DateTime](../../sql-reference/data-types/datetime.md)或者[DateTime64](../../sql-reference/data-types/datetime64.md)。
**返回值**
- 以秒为单位的UTC偏移量。
类型为: [Int32](../../sql-reference/data-types/int-uint.md)。
**示例**
查询语句:
``` sql
SELECT toDateTime('2021-04-21 10:20:30', 'America/New_York') AS Time, toTypeName(Time) AS Type,
timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours;
```
结果:
``` text
┌────────────────Time─┬─Type─────────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐
│ 2021-04-21 10:20:30 │ DateTime('America/New_York') │ -14400 │ -4 │
└─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘
```
## toYear {#toyear}
将Date或DateTime转换为包含年份编号AD的UInt16类型的数字。
别名为:`YEAR`。
## toQuarter {#toquarter}
将Date或DateTime转换为包含季度编号的UInt8类型的数字。
别名为:`QUARTER`。
## toMonth {#tomonth}
将Date或DateTime转换为包含月份编号1-12的UInt8类型的数字。
别名为:`MONTH`。
## toDayOfYear {#todayofyear}
将Date或DateTime转换为包含一年中的某一天的编号的UInt161-366类型的数字。
别名为: `DAYOFYEAR`
## toDayOfMonth {#todayofmonth}
将Date或DateTime转换为包含一月中的某一天的编号的UInt81-31类型的数字。
别名为:`DAYOFMONTH``DAY`。
## toDayOfWeek {#todayofweek}
将Date或DateTime转换为包含一周中的某一天的编号的UInt8周一是1, 周日是7类型的数字。
别名为:`DAYOFWEEK`。
## toHour {#tohour}
将DateTime转换为包含24小时制0-23小时数的UInt8数字。
这个函数假设如果时钟向前移动它是一个小时发生在凌晨2点如果时钟被移回它是一个小时发生在凌晨3点这并非总是如此 - 即使在莫斯科时钟在不同的时间两次改变)。
别名为: `HOUR`
## toMinute {#tominute}
将DateTime转换为包含一小时中分钟数0-59的UInt8数字。
别名为: `MINUTE`
## toSecond {#tosecond}
将DateTime转换为包含一分钟中秒数0-59的UInt8数字。
闰秒不计算在内。
别名为: `SECOND`
## toUnixTimestamp {#to-unix-timestamp}
对于DateTime参数将值转换为UInt32类型的数字-Unix时间戳https://en.wikipedia.org/wiki/Unix_time
@ -124,6 +263,10 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
└────────────────┘
```
:::note
下面描述的返回类型 `toStartOf` 函数是 `Date``DateTime`。尽管这些函数可以将 `DateTime64` 作为参数但将超出正常范围1925年-2283年`DateTime64` 传递给它们会给出不正确的结果。
:::
## toStartOfYear {#tostartofyear}
将Date或DateTime向前取整到本年的第一天。
@ -429,6 +572,263 @@ SELECT now(), date_trunc('hour', now(), 'Asia/Istanbul');
- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)
## date_add {#date_add}
将时间间隔或日期间隔添加到提供的日期或带时间的日期。
**语法**
``` sql
date_add(unit, value, date)
```
别名为:`dateAdd`, `DATE_ADD`
**参数**
- `unit``value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — 要添加的间隔值。类型为[Int](../../sql-reference/data-types/int-uint.md)。
- `date` — 添加`value`的日期或日期。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
**返回值**
通过将 `value` 以`unit` 表示,添加到`date` 获得的日期或带时间的日期。
类型为: [Date](../../sql-reference/data-types/date.md)或[DateTime](../../sql-reference/data-types/datetime.md)。
**示例**
查询语句:
```sql
SELECT date_add(YEAR, 3, toDate('2018-01-01'));
```
结果:
```text
┌─plus(toDate('2018-01-01'), toIntervalYear(3))─┐
│ 2021-01-01 │
└───────────────────────────────────────────────┘
```
## date_diff {#date_diff}
返回两个日期或具有时间值的日期之间的差值。
**语法**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
别名为: `dateDiff`, `DATE_DIFF`
**参数**
- `unit``value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `startdate` — 要减去的第一个时间值(减数)。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
- `enddate` — 要减去的第二个时间值(被减数)。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (可选项)。如果指定,它适用于 `startdate``enddate`。如果未指定,则使用 `startdate``enddate` 的时区。如果它们不相同,则结果未指定。类型为[String](../../sql-reference/data-types/string.md)。
**返回值**
`unit` 表示的 `enddate``startdate` 之间的区别。
类型为: [Int](../../sql-reference/data-types/int-uint.md)。
**示例**
查询语句:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
结果:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
## date_sub {#date_sub}
从提供的日期或带时间的日期中减去时间间隔或日期间隔。
**语法**
``` sql
date_sub(unit, value, date)
```
别名为: `dateSub`, `DATE_SUB`.
**参数**
- `unit``value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — 要减去的时间。类型为[Int](../../sql-reference/data-types/int-uint.md)。
- `date` — 被减去`value`的日期或日期。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
**返回值**
`date` 中减去以`unit` 表示的`value` 得到的日期或带时间的日期。
类型为:[Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md)。
**示例**
查询语句:
``` sql
SELECT date_sub(YEAR, 3, toDate('2018-01-01'));
```
结果:
``` text
┌─minus(toDate('2018-01-01'), toIntervalYear(3))─┐
│ 2015-01-01 │
└────────────────────────────────────────────────┘
```
## timestamp_add {#timestamp_add}
将指定的时间值与提供的日期或日期时间值相加。
**语法**
``` sql
timestamp_add(date, INTERVAL value unit)
```
别名为: `timeStampAdd`, `TIMESTAMP_ADD`.
**参数**
- `date` — 日期或日期与时间。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
- `value` — 要添加的间隔值。类型为[Int](../../sql-reference/data-types/int-uint.md)。
- `unit``value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
**返回值**
以`unit`表示的指定`value`的日期或带时间的日期添加到`date`。
类型为:[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
**示例**
查询语句:
```sql
select timestamp_add(toDate('2018-01-01'), INTERVAL 3 MONTH);
```
结果:
```text
┌─plus(toDate('2018-01-01'), toIntervalMonth(3))─┐
│ 2018-04-01 │
└────────────────────────────────────────────────┘
```
## timestamp_sub {#timestamp_sub}
从提供的日期或带时间的日期中减去时间间隔。
**语法**
``` sql
timestamp_sub(unit, value, date)
```
别名为: `timeStampSub`, `TIMESTAMP_SUB`
**参数**
- `unit``value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — 要减去的间隔值。类型为[Int](../../sql-reference/data-types/int-uint.md)。
- `date` — 日期或日期与时间。类型为[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
**返回值**
`date` 中减去以`unit` 表示的`value` 得到的日期或带时间的日期。
类型为: [Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
**示例**
查询语句:
```sql
select timestamp_sub(MONTH, 5, toDateTime('2018-12-18 01:02:03'));
```
结果:
```text
┌─minus(toDateTime('2018-12-18 01:02:03'), toIntervalMonth(5))─┐
│ 2018-07-18 01:02:03 │
└──────────────────────────────────────────────────────────────┘
```
# now {#now}
返回当前日期和时间。
@ -540,50 +940,6 @@ SELECT
│ 2018-01-01 │ 2018-01-01 00:00:00 │
└──────────────────────────┴───────────────────────────────┘
## dateDiff {#datediff}
返回两个Date或DateTime类型之间的时差。
**语法**
``` sql
dateDiff('unit', startdate, enddate, [timezone])
```
**参数**
- `unit` — 返回结果的时间单位。 [String](../../sql-reference/syntax.md#syntax-string-literal).
支持的时间单位: second, minute, hour, day, week, month, quarter, year.
- `startdate` — 第一个待比较值。 [Date](../../sql-reference/data-types/date.md) 或 [DateTime](../../sql-reference/data-types/datetime.md).
- `enddate` — 第二个待比较值。 [Date](../../sql-reference/data-types/date.md) 或 [DateTime](../../sql-reference/data-types/datetime.md).
- `timezone` — 可选参数。 如果指定了,则同时适用于`startdate`和`enddate`。如果不指定,则使用`startdate`和`enddate`的时区。如果两个时区不一致,则结果不可预料。
**返回值**
以`unit`为单位的`startdate`和`enddate`之间的时差。
类型: `int`.
**示例**
查询:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
结果:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
它返回一个时间数组其中包括从从«StartTime»开始到«StartTime + Duration 秒»内的所有符合«size»以秒为单位步长的时间点。其中«size»是一个可选参数默认为1800。
@ -652,7 +1008,44 @@ SELECT formatDateTime(toDate('2010-01-04'), '%g')
└────────────────────────────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->
## dateName {#dataname}
返回日期的指定部分。
**语法**
``` sql
dateName(date_part, date)
```
**参数**
- `date_part` — 日期部分。可能的值为:'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'。类型为[String](../../sql-reference/data-types/string.md)。
- `date` — 日期。类型为[Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md)或者[DateTime64](../../sql-reference/data-types/datetime64.md)。
- `timezone` — 时区(可选项)。类型为[String](../../sql-reference/data-types/string.md)。
**返回值**
- 日期的指定部分。
类型为: [String](../../sql-reference/data-types/string.md#string)。
**示例**
查询语句:
```sql
WITH toDateTime('2021-04-14 11:22:33') AS date_value
SELECT dateName('year', date_value), dateName('month', date_value), dateName('day', date_value);
```
结果:
```text
┌─dateName('year', date_value)─┬─dateName('month', date_value)─┬─dateName('day', date_value)─┐
│ 2021 │ April │ 14 │
└──────────────────────────────┴───────────────────────────────┴─────────────────────────────┘
```
## FROM_UNIXTIME
@ -683,3 +1076,149 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```
## toModifiedJulianDay {#tomodifiedjulianday}
将文本形式 `YYYY-MM-DD` 的 [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) 日期转换为 Int32 中的 [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) 数字。此功能支持从`0000-01-01`到`9999-12-31`的日期。如果无法将参数解析为日期或日期无效,则会引发异常。
**语法**
``` sql
toModifiedJulianDay(date)
```
**参数**
- `date` — 文本形式的日期。类型为[String](../../sql-reference/data-types/string.md)或者[FixedString](../../sql-reference/data-types/fixedstring.md)。
**返回值**
- 转换的儒略日数。
类型为: [Int32](../../sql-reference/data-types/int-uint.md)。
**示例**
查询语句:
``` sql
SELECT toModifiedJulianDay('2020-01-01');
```
结果:
``` text
┌─toModifiedJulianDay('2020-01-01')─┐
│ 58849 │
└───────────────────────────────────┘
```
## toModifiedJulianDayOrNull {#tomodifiedjuliandayornull}
类似于[toModifiedJulianDay()](#tomodifiedjulianday),但它不会引发异常,而是返回 `NULL`
**语法**
``` sql
toModifiedJulianDayOrNull(date)
```
**参数**
- `date` — 文本形式的日期。类型为[String](../../sql-reference/data-types/string.md)或者[FixedString](../../sql-reference/data-types/fixedstring.md)。
**返回值**
- 转换的儒略日数。
类型为: [Nullable(Int32)](../../sql-reference/data-types/int-uint.md)。
**示例**
查询语句:
``` sql
SELECT toModifiedJulianDayOrNull('2020-01-01');
```
结果:
``` text
┌─toModifiedJulianDayOrNull('2020-01-01')─┐
│ 58849 │
└─────────────────────────────────────────┘
```
## fromModifiedJulianDay {#frommodifiedjulianday}
将 [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) 数字转换为 `YYYY-MM-DD` 文本格式的 [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) 日期。该函数支持从 `-678941``2973119` 的天数(分别代表 0000-01-01 和 9999-12-31。如果天数超出支持范围则会引发异常。
**语法**
``` sql
fromModifiedJulianDay(day)
```
**参数**
- `day` — 需要转换的儒略日数。类型为[Any integral types](../../sql-reference/data-types/int-uint.md)。
**返回值**
- 文本形式的日期。
类型为: [String](../../sql-reference/data-types/string.md)。
**示例**
查询语句:
``` sql
SELECT fromModifiedJulianDay(58849);
```
结果:
``` text
┌─fromModifiedJulianDay(58849)─┐
│ 2020-01-01 │
└──────────────────────────────┘
```
## fromModifiedJulianDayOrNull {#frommodifiedjuliandayornull}
类似于[fromModifiedJulianDay()](#frommodifiedjulianday),但它不会引发异常,而是返回 `NULL`。
**语法**
``` sql
fromModifiedJulianDayOrNull(day)
```
**参数**
- `day` — 需要转换的儒略日数。类型为[Any integral types](../../sql-reference/data-types/int-uint.md)。
**返回值**
- 文本形式的日期。
类型为: [Nullable(String)](../../sql-reference/data-types/string.md)。
**示例**
查询语句:
``` sql
SELECT fromModifiedJulianDayOrNull(58849);
```
结果:
``` text
┌─fromModifiedJulianDayOrNull(58849)─┐
│ 2020-01-01 │
└────────────────────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->

View File

@ -402,8 +402,10 @@ void Server::createServer(
/// If we already have an active server for this listen_host/port_name, don't create it again
for (const auto & server : servers)
{
if (!server.isStopping() && server.getListenHost() == listen_host && server.getPortName() == port_name)
return;
}
auto port = config.getInt(port_name);
try
@ -2011,11 +2013,28 @@ void Server::updateServers(
std::vector<ProtocolServerAdapter> & servers)
{
Poco::Logger * log = &logger();
/// Gracefully shutdown servers when their port is removed from config
const auto listen_hosts = getListenHosts(config);
const auto listen_try = getListenTry(config);
/// Remove servers once all their connections are closed
auto check_server = [&log](const char prefix[], auto & server)
{
if (!server.isStopping())
return false;
size_t current_connections = server.currentConnections();
LOG_DEBUG(log, "Server {}{}: {} ({} connections)",
server.getDescription(),
prefix,
!current_connections ? "finished" : "waiting",
current_connections);
return !current_connections;
};
std::erase_if(servers, std::bind_front(check_server, " (from one of previous reload)"));
for (auto & server : servers)
{
if (!server.isStopping())
{
bool has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server.getListenHost()) != listen_hosts.end();
@ -2026,25 +2045,11 @@ void Server::updateServers(
LOG_INFO(log, "Stopped listening for {}", server.getDescription());
}
}
createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers: */ true);
/// Remove servers once all their connections are closed
while (std::any_of(servers.begin(), servers.end(), [](const auto & server) { return server.isStopping(); }))
{
std::this_thread::sleep_for(std::chrono::milliseconds(100));
std::erase_if(servers, [&log](auto & server)
{
if (!server.isStopping())
return false;
auto is_finished = server.currentConnections() == 0;
if (is_finished)
LOG_DEBUG(log, "Server finished: {}", server.getDescription());
else
LOG_TRACE(log, "Waiting server to finish: {}", server.getDescription());
return is_finished;
});
}
createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
std::erase_if(servers, std::bind_front(check_server, ""));
}
}

View File

@ -177,6 +177,7 @@ enum class AccessType
M(URL, "", GLOBAL, SOURCES) \
M(REMOTE, "", GLOBAL, SOURCES) \
M(MONGO, "", GLOBAL, SOURCES) \
M(MEILISEARCH, "", GLOBAL, SOURCES) \
M(MYSQL, "", GLOBAL, SOURCES) \
M(POSTGRES, "", GLOBAL, SOURCES) \
M(SQLITE, "", GLOBAL, SOURCES) \

View File

@ -366,7 +366,7 @@ namespace
{
if (info.zk_path.empty())
{
for (auto & [relative_path, backup_entry] : info.data)
for (const auto & [relative_path, backup_entry] : info.data)
res.emplace_back(info.data_path + relative_path, backup_entry);
return;
}
@ -374,7 +374,7 @@ namespace
Strings data_paths = backup_coordination->getReplicatedTableDataPaths(info.zk_path);
Strings part_names = backup_coordination->getReplicatedTablePartNames(backup_settings.host_id, info.table_name, info.zk_path);
std::unordered_set<std::string_view> part_names_set{part_names.begin(), part_names.end()};
for (auto & [relative_path, backup_entry] : info.data)
for (const auto & [relative_path, backup_entry] : info.data)
{
size_t slash_pos = relative_path.find('/');
if (slash_pos != String::npos)

View File

@ -107,6 +107,8 @@ if (TARGET ch_contrib::rdkafka)
add_headers_and_sources(dbms Storages/Kafka)
endif()
add_headers_and_sources(dbms Storages/MeiliSearch)
if (TARGET ch_contrib::amqp_cpp)
add_headers_and_sources(dbms Storages/RabbitMQ)
endif()

View File

@ -624,6 +624,9 @@
M(653, CANNOT_PARSE_BACKUP_SETTINGS) \
M(654, WRONG_BACKUP_SETTINGS) \
M(655, FAILED_TO_SYNC_BACKUP_OR_RESTORE) \
M(656, MEILISEARCH_EXCEPTION) \
M(657, UNSUPPORTED_MEILISEARCH_TYPE) \
M(658, MEILISEARCH_MISSING_SOME_COLUMNS) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

View File

@ -47,7 +47,6 @@ AsynchronousReadIndirectBufferFromRemoteFS::AsynchronousReadIndirectBufferFromRe
, impl(impl_)
, prefetch_buffer(settings_.remote_fs_buffer_size)
, min_bytes_for_seek(min_bytes_for_seek_)
, must_read_until_position(settings_.must_read_until_position)
#ifndef NDEBUG
, log(&Poco::Logger::get("AsynchronousBufferFromRemoteFS"))
#else
@ -93,9 +92,6 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::hasPendingDataToRead()
throw Exception(ErrorCodes::LOGICAL_ERROR, "Read beyond last offset ({} > {}, info: {})",
file_offset_of_buffer_end, *read_until_position, impl->getInfoForLog());
}
else if (must_read_until_position)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Reading for MergeTree family tables must be done with last position boundary");
return true;
}

View File

@ -80,8 +80,6 @@ private:
std::optional<size_t> read_until_position;
bool must_read_until_position;
Poco::Logger * log;
};

View File

@ -0,0 +1,117 @@
#include "config_functions.h"
#if USE_H3
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <IO/WriteHelpers.h>
#include <base/range.h>
#include <constants.h>
#include <h3api.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
namespace
{
/// SQL function h3Distance(start, end): the distance in grid cells between
/// two H3 indexes. Delegates to h3's gridPathCellsSize(); per the H3 docs the
/// result is negative when the distance cannot be computed.
class FunctionH3Distance : public IFunction
{
public:
    static constexpr auto name = "h3Distance";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3Distance>(); }

    std::string getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 2; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        /// Both the start and the end cell index must be UInt64.
        for (size_t i = 0; i < 2; ++i)
        {
            const auto * arg = arguments[i].get();
            if (!WhichDataType(arg).isUInt64())
                throw Exception(
                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                    "Illegal type {} of argument {} of function {}. Must be UInt64",
                    arg->getName(), i + 1, getName());
        }
        return std::make_shared<DataTypeInt64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        /// Materialize constant columns so raw data can be accessed uniformly.
        auto materialized = arguments;
        for (auto & arg : materialized)
            arg.column = arg.column->convertToFullColumnIfConst();

        const auto * start_col = checkAndGetColumn<ColumnUInt64>(materialized[0].column.get());
        if (!start_col)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64.",
                arguments[0].type->getName(),
                1,
                getName());
        const auto & starts = start_col->getData();

        const auto * end_col = checkAndGetColumn<ColumnUInt64>(materialized[1].column.get());
        if (!end_col)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64.",
                arguments[1].type->getName(),
                2,
                getName());
        const auto & ends = end_col->getData();

        auto result = ColumnVector<Int64>::create();
        auto & result_data = result->getData();
        result_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
            result_data[row] = gridPathCellsSize(starts[row], ends[row]);

        return result;
    }
};
}
/// Registers h3Distance in the function factory so it can be resolved
/// by name from SQL queries.
void registerFunctionH3Distance(FunctionFactory & factory)
{
factory.registerFunction<FunctionH3Distance>();
}
}
#endif

153
src/Functions/h3HexRing.cpp Normal file
View File

@ -0,0 +1,153 @@
#include "config_functions.h"
#if USE_H3
#include <vector>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/IDataType.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <h3api.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int PARAMETER_OUT_OF_BOUND;
extern const int ILLEGAL_COLUMN;
extern const int INCORRECT_DATA;
}
namespace
{
class FunctionH3HexRing : public IFunction
{
public:
static constexpr auto name = "h3HexRing";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3HexRing>(); }
std::string getName() const override { return name; }
size_t getNumberOfArguments() const override { return 2; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
/// Validates argument types for h3HexRing:
///   argument 1 — UInt64 H3 cell index (the ring origin),
///   argument 2 — UInt16 ring distance k.
/// Return type is Array(UInt64): the H3 indexes that form the ring.
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
const auto * arg = arguments[0].get();
if (!WhichDataType(arg).isUInt64())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arg->getName(), 1, getName());
/// Reuse the same pointer for the second argument's check.
arg = arguments[1].get();
if (!WhichDataType(arg).isUInt16())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be UInt16",
arg->getName(),
2,
getName());
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>());
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
auto non_const_arguments = arguments;
for (auto & argument : non_const_arguments)
argument.column = argument.column->convertToFullColumnIfConst();
const auto * col_hindex = checkAndGetColumn<ColumnUInt64>(non_const_arguments[0].column.get());
if (!col_hindex)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64.",
arguments[0].type->getName(),
1,
getName());
const auto & data_hindex = col_hindex->getData();
/// ColumnUInt16 is sufficient as the max value of 2nd arg is checked (arg > 0 < 10000) in implementation below
const auto * col_k = checkAndGetColumn<ColumnUInt16>(non_const_arguments[1].column.get());
if (!col_k)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt16.",
arguments[1].type->getName(),
2,
getName());
const auto & data_k = col_k->getData();
auto dst = ColumnArray::create(ColumnUInt64::create());
auto & dst_data = typeid_cast<ColumnUInt64 &>(dst->getData());
auto & dst_offsets = dst->getOffsets();
dst_offsets.resize(input_rows_count);
/// First calculate array sizes for all rows and save them in Offsets
UInt64 current_offset = 0;
for (size_t row = 0; row < input_rows_count; ++row)
{
const int k = data_k[row];
/// The result size is 6*k. We should not allow to generate too large arrays nevertheless.
constexpr auto max_k = 10000;
if (k > max_k)
throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Too large 'k' argument for {} function, maximum {}", getName(), max_k);
/// Check is already made while fetching the argument for k (to determine if it's an unsigned integer). Nevertheless, it's checked again here.
if (k < 0)
throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Argument 'k' for {} function must be non negative", getName());
const auto vec_size = (k == 0 ? 1 : 6 * k); /// Required size according to comments in gridRingUnsafe() source code
current_offset += vec_size;
dst_offsets[row] = current_offset;
}
/// Allocate based on total size of arrays for all rows
dst_data.getData().resize(current_offset);
/// Fill the array for each row with known size
auto* ptr = dst_data.getData().data();
current_offset = 0;
for (size_t row = 0; row < input_rows_count; ++row)
{
const H3Index origin_hindex = data_hindex[row];
const int k = data_k[row];
H3Error err = gridRingUnsafe(origin_hindex, k, ptr + current_offset);
if (err)
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect arguments h3Index: {}, k: {}, error: {}", origin_hindex, k, err);
const auto size = dst_offsets[row] - current_offset;
current_offset += size;
}
return dst;
}
};
}
void registerFunctionH3HexRing(FunctionFactory & factory)
{
factory.registerFunction<FunctionH3HexRing>();
}
}
#endif

143
src/Functions/h3Line.cpp Normal file
View File

@ -0,0 +1,143 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <IO/WriteHelpers.h>
#include <base/range.h>
#include <constants.h>
#include <h3api.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
    extern const int INCORRECT_DATA;
}

namespace
{

/// h3Line(start, end) - returns the line of H3 indices between the two given
/// indices (inclusive), as computed by gridPathCells() from libh3.
/// Result type: Array(UInt64).
class FunctionH3Line : public IFunction
{
public:
    static constexpr auto name = "h3Line";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3Line>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 2; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    /// Validates argument types: (UInt64 start, UInt64 end) -> Array(UInt64).
    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isUInt64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt64",
                arg->getName(), 1, getName());

        arg = arguments[1].get();
        if (!WhichDataType(arg).isUInt64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt64",
                arg->getName(), 2, getName());

        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>());
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        auto non_const_arguments = arguments;
        for (auto & argument : non_const_arguments)
            argument.column = argument.column->convertToFullColumnIfConst();

        const auto * col_start_index = checkAndGetColumn<ColumnUInt64>(non_const_arguments[0].column.get());
        if (!col_start_index)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64.",
                arguments[0].type->getName(),
                1,
                getName());

        const auto & data_start_index = col_start_index->getData();

        const auto * col_end_index = checkAndGetColumn<ColumnUInt64>(non_const_arguments[1].column.get());
        if (!col_end_index)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64.",
                arguments[1].type->getName(),
                2,
                getName());

        const auto & data_end_index = col_end_index->getData();

        auto dst = ColumnArray::create(ColumnUInt64::create());
        auto & dst_data = typeid_cast<ColumnUInt64 &>(dst->getData());
        auto & dst_offsets = dst->getOffsets();
        dst_offsets.resize(input_rows_count);

        /// First calculate array sizes for all rows and save them in Offsets
        UInt64 current_offset = 0;
        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const UInt64 start = data_start_index[row];
            const UInt64 end = data_end_index[row];

            /// A negative size means no path can be computed between the two cells.
            auto size = gridPathCellsSize(start, end);
            if (size < 0)
                throw Exception(
                    ErrorCodes::INCORRECT_DATA,
                    "Line cannot be computed between start H3 index {} and end H3 index {}",
                    start, end);

            current_offset += size;
            dst_offsets[row] = current_offset;
        }

        /// Allocate based on total size of arrays for all rows
        dst_data.getData().resize(current_offset);

        /// Fill the array for each row with known size
        auto* ptr = dst_data.getData().data();
        current_offset = 0;
        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const UInt64 start = data_start_index[row];
            const UInt64 end = data_end_index[row];
            const auto size = dst_offsets[row] - current_offset;

            /// NOTE(review): the return code of gridPathCells is ignored here, unlike
            /// gridRingUnsafe in h3HexRing.cpp. Presumably failing inputs were already
            /// rejected by the size computation above - confirm against the bundled H3 version.
            gridPathCells(start, end, ptr + current_offset);
            current_offset += size;
        }

        return dst;
    }
};

}

void registerFunctionH3Line(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3Line>();
}

}

#endif

View File

@ -0,0 +1,49 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Interpreters/Context_fwd.h>

namespace DB
{

namespace
{

/// Placeholder function used only inside the WHERE clause of queries against the
/// MeiliSearch storage engine. The engine extracts the arguments of meiliMatch(...)
/// directly from the AST, so at execution time this function simply evaluates to a
/// constant "true" for every row.
class FunctionMeiliMatch : public IFunction
{
public:
    static constexpr auto name = "meiliMatch";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionMeiliMatch>(); }

    /// Get the function name.
    String getName() const override { return name; }

    /// Accepts any number of arguments: they are interpreted by the storage, not here.
    bool isVariadic() const override { return true; }
    size_t getNumberOfArguments() const override { return 0; }

    bool isStateful() const override { return false; }
    bool isDeterministic() const override { return false; }
    bool isDeterministicInScopeOfQuery() const override { return false; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override { return std::make_shared<DataTypeUInt8>(); }

    /// Every row "matches": produce a UInt8 column of ones of the requested size.
    ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t input_rows_count) const override
    {
        auto all_matched = ColumnUInt8::create(input_rows_count, 1u);
        return all_matched;
    }
};

}

void registerFunctionMeiliMatch(FunctionFactory & factory)
{
    factory.registerFunction<FunctionMeiliMatch>();
}

}

View File

@ -59,6 +59,9 @@ void registerFunctionsTimeWindow(FunctionFactory &);
void registerFunctionToBool(FunctionFactory &);
void registerFunctionMinSampleSize(FunctionFactory &);
// meilisearch
void registerFunctionMeiliMatch(FunctionFactory & factory);
#if USE_SSL
void registerFunctionEncrypt(FunctionFactory & factory);
void registerFunctionDecrypt(FunctionFactory & factory);
@ -123,6 +126,9 @@ void registerFunctions()
registerFunctionToBool(factory);
registerFunctionMinSampleSize(factory);
//meilisearch
registerFunctionMeiliMatch(factory);
#if USE_SSL
registerFunctionEncrypt(factory);
registerFunctionDecrypt(factory);

View File

@ -57,6 +57,9 @@ void registerFunctionH3PointDistKm(FunctionFactory &);
void registerFunctionH3PointDistRads(FunctionFactory &);
void registerFunctionH3GetRes0Indexes(FunctionFactory &);
void registerFunctionH3GetPentagonIndexes(FunctionFactory &);
void registerFunctionH3Line(FunctionFactory &);
void registerFunctionH3Distance(FunctionFactory &);
void registerFunctionH3HexRing(FunctionFactory &);
#endif
@ -128,6 +131,9 @@ void registerFunctionsGeo(FunctionFactory & factory)
registerFunctionH3PointDistRads(factory);
registerFunctionH3GetRes0Indexes(factory);
registerFunctionH3GetPentagonIndexes(factory);
registerFunctionH3Line(factory);
registerFunctionH3Distance(factory);
registerFunctionH3HexRing(factory);
#endif
#if USE_S2_GEOMETRY

View File

@ -26,6 +26,10 @@ public:
std::string getFileName() const override { return in->getFileName(); }
void setReadUntilPosition(size_t position) override { in->setReadUntilPosition(position + FileEncryption::Header::kSize); }
void setReadUntilEnd() override { in->setReadUntilEnd(); }
private:
bool nextImpl() override;

View File

@ -91,12 +91,6 @@ struct ReadSettings
size_t http_retry_max_backoff_ms = 1600;
bool http_skip_not_found_url_for_globs = true;
/// Set to true for MergeTree tables to make sure
/// that last position (offset in compressed file) is always passed.
/// (Otherwise asynchronous reading from remote fs is not efficient).
/// If reading is done without final position set, throw logical_error.
bool must_read_until_position = false;
ReadSettings adjustBufferSize(size_t file_size) const
{
ReadSettings res = *this;

View File

@ -0,0 +1,87 @@
#include <memory>
#include <string>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/Serializations/ISerialization.h>
#include <IO/ReadHelpers.h>
#include <Storages/MeiliSearch/MeiliSearchColumnDescriptionFetcher.h>
#include <base/JSON.h>
#include <base/types.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int CANNOT_READ_ALL_DATA;
    extern const int MEILISEARCH_EXCEPTION;
}

MeiliSearchColumnDescriptionFetcher::MeiliSearchColumnDescriptionFetcher(const MeiliSearchConfiguration & config) : connection(config)
{
}

/// Remembers a key-value pair to be sent as a parameter of the search request.
void MeiliSearchColumnDescriptionFetcher::addParam(const String & key, const String & val)
{
    query_params[key] = val;
}

/// Heuristic: a JSON number without a decimal point or an exponent is an integer.
/// (Previously only '.' was checked, so "1e5" was misclassified as Int64.)
bool checkIfInteger(const String & s)
{
    return s.find_first_of(".eE") == String::npos;
}

/// Infers a ClickHouse data type from a sample JSON value returned by MeiliSearch.
/// Values that cannot be classified fall back to String.
DataTypePtr parseTypeOfField(JSON ptr)
{
    if (ptr.isString())
    {
        return std::make_shared<DataTypeString>();
    }
    if (ptr.isArray())
    {
        /// NOTE(review): the element type is inferred from the first element only;
        /// assumes the sample array is non-empty and homogeneous - confirm.
        auto nested_type = parseTypeOfField(ptr.begin());
        return std::make_shared<DataTypeArray>(nested_type);
    }
    if (ptr.isBool())
    {
        return std::make_shared<DataTypeUInt8>();
    }
    if (ptr.isNull())
    {
        /// A null sample carries no type information. The previous code wrapped an
        /// uninitialized pointer in DataTypeNullable (self-initialized `res`), which is
        /// undefined behavior. Fall back to Nullable(String), consistent with the
        /// String fallback at the end of this function.
        return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeString>());
    }
    if (ptr.isNumber())
    {
        if (checkIfInteger(ptr.toString()))
        {
            return std::make_shared<DataTypeInt64>();
        }
        return std::make_shared<DataTypeFloat64>();
    }
    return std::make_shared<DataTypeString>();
}

/// Issues one search request and derives the table structure from the first hit.
ColumnsDescription MeiliSearchColumnDescriptionFetcher::fetchColumnsDescription() const
{
    auto response = connection.searchQuery(query_params);
    JSON jres = JSON(response).begin();
    /// MeiliSearch reports errors as {"message": ...}.
    if (jres.getName() == "message")
        throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, jres.getValue().toString());

    NamesAndTypesList list;
    for (const JSON kv_pair : jres.getValue().begin())
    {
        if (!kv_pair.isNameValuePair())
            throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Bad response data");
        list.emplace_back(kv_pair.getName(), parseTypeOfField(kv_pair.getValue()));
    }
    return ColumnsDescription(list);
}

};

View File

@ -0,0 +1,24 @@
#pragma once

#include <unordered_map>
#include <Storages/ColumnsDescription.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
#include <base/types.h>

namespace DB
{

/// Infers a ClickHouse table structure from a MeiliSearch index by issuing a search
/// request and inspecting the JSON value types of the returned document(s).
class MeiliSearchColumnDescriptionFetcher
{
public:
    explicit MeiliSearchColumnDescriptionFetcher(const MeiliSearchConfiguration & config);

    /// Adds a query parameter to be sent with the search request.
    void addParam(const String & key, const String & val);

    /// Performs the request and builds the columns description from the response.
    ColumnsDescription fetchColumnsDescription() const;

private:
    /// Parameters of the search request (keys and values are raw JSON tokens).
    std::unordered_map<String, String> query_params;
    MeiliSearchConnection connection;
};

};

View File

@ -0,0 +1,126 @@
#include <sstream>
#include <string_view>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
#include <Common/Exception.h>
#include <Poco/StreamCopier.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int NETWORK_ERROR;
}

namespace
{

/// Reads the whole response body into a string. Both 2xx (success) and 4xx
/// (MeiliSearch application error, reported as a JSON body) are returned to the
/// caller, which distinguishes them by inspecting the body; anything else is
/// treated as a transport-level failure. This logic was previously duplicated
/// in execPostQuery and execGetQuery.
String readResponse(std::istream & is, const Poco::Net::HTTPResponse & res)
{
    const auto status_class = res.getStatus() / 100;
    if (status_class == 2 || status_class == 4)
    {
        String response_buffer;
        Poco::StreamCopier::copyToString(is, response_buffer);
        return response_buffer;
    }
    throw Exception(ErrorCodes::NETWORK_ERROR, res.getReason());
}

}

MeiliSearchConnection::MeiliSearchConnection(const MeiliConfig & conf) : config{conf}
{
    Poco::URI uri(config.connection_string);
    session.setHost(uri.getHost());
    session.setPort(uri.getPort());
}

/// Sends a POST request with a JSON body and returns the raw response body.
String MeiliSearchConnection::execPostQuery(const String & url, std::string_view post_fields) const
{
    Poco::URI uri(url);
    String path(uri.getPathAndQuery());
    if (path.empty())
        path = "/";

    Poco::Net::HTTPRequest req(Poco::Net::HTTPRequest::HTTP_POST, path, Poco::Net::HTTPMessage::HTTP_1_1);
    req.setContentType("application/json");
    if (!config.key.empty())
        req.add("Authorization", "Bearer " + config.key);
    req.setContentLength(post_fields.length());

    std::ostream & os = session.sendRequest(req);
    os << post_fields;

    Poco::Net::HTTPResponse res;
    std::istream & is = session.receiveResponse(res);
    return readResponse(is, res);
}

/// Sends a GET request with the given URL query parameters and returns the raw body.
String MeiliSearchConnection::execGetQuery(const String & url, const std::unordered_map<String, String> & query_params) const
{
    Poco::URI uri(url);
    for (const auto & kv : query_params)
    {
        uri.addQueryParameter(kv.first, kv.second);
    }
    String path(uri.getPathAndQuery());
    if (path.empty())
        path = "/";

    Poco::Net::HTTPRequest req(Poco::Net::HTTPRequest::HTTP_GET, path, Poco::Net::HTTPMessage::HTTP_1_1);
    if (!config.key.empty())
        req.add("Authorization", "Bearer " + config.key);
    session.sendRequest(req);

    Poco::Net::HTTPResponse res;
    std::istream & is = session.receiveResponse(res);
    return readResponse(is, res);
}

/// Issues a search request. Keys and values in query_params are expected to be
/// pre-serialized JSON tokens (callers quote them as needed), so the body is
/// assembled by plain concatenation.
String MeiliSearchConnection::searchQuery(const std::unordered_map<String, String> & query_params) const
{
    WriteBufferFromOwnString post_fields;
    post_fields << "{";
    auto it = query_params.begin();
    while (it != query_params.end())
    {
        post_fields << it->first << ":" << it->second;
        ++it;
        if (it != query_params.end())
            post_fields << ",";
    }
    post_fields << "}";
    String url = config.connection_string + "search";
    return execPostQuery(url, post_fields.str());
}

/// Adds or replaces documents in the index (POST <index>/documents).
String MeiliSearchConnection::updateQuery(std::string_view data) const
{
    String url = config.connection_string + "documents";
    return execPostQuery(url, data);
}

/// Retrieves documents through the GET <index>/documents route.
String MeiliSearchConnection::getDocumentsQuery(const std::unordered_map<String, String> & query_params) const
{
    String url = config.connection_string + "documents";
    return execGetQuery(url, query_params);
}

}

View File

@ -0,0 +1,51 @@
#pragma once

#include <string>
#include <string_view>
#include <unordered_map>
#include <base/types.h>
#include <Poco/Exception.h>
#include <Poco/Net/HTTPClientSession.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPResponse.h>
#include <Poco/Path.h>
#include <Poco/URI.h>

namespace DB
{

/// Connection settings for a single MeiliSearch index.
struct MeiliSearchConfiguration
{
    String key;                 /// API key; empty means no Authorization header is sent.
    String index;               /// Index name.
    String connection_string;   /// Base URL of the index: <url>/indexes/<index>/.

    MeiliSearchConfiguration(const String & url_, const String & index_, const String & key_) : key{key_}, index{index_}
    {
        connection_string = url_ + "/indexes/" + index_ + "/";
    }
};

using MeiliConfig = MeiliSearchConfiguration;

/// Thin HTTP client over the MeiliSearch REST API (search / documents routes).
/// NOTE(review): the underlying HTTPClientSession is mutated by const queries and is
/// not synchronized, so an instance is presumably not meant to be shared between
/// threads - confirm with callers.
class MeiliSearchConnection
{
public:
    explicit MeiliSearchConnection(const MeiliConfig & config);

    /// POST <index>/search with raw JSON key-value pairs; returns the response body.
    String searchQuery(const std::unordered_map<String, String> & query_params) const;

    /// GET <index>/documents with the given URL query parameters; returns the body.
    String getDocumentsQuery(const std::unordered_map<String, String> & query_params) const;

    /// POST <index>/documents with a JSON array of documents; returns the body.
    String updateQuery(std::string_view data) const;

private:
    String execPostQuery(const String & url, std::string_view post_fields) const;
    String execGetQuery(const String & url, const std::unordered_map<String, String> & query_params) const;

    MeiliConfig config;

    /// Reused between requests; mutable because the query methods are logically const.
    mutable Poco::Net::HTTPClientSession session;
};

}

View File

@ -0,0 +1,65 @@
#include <Formats/FormatFactory.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Formats/Impl/JSONRowOutputFormat.h>
#include <Storages/MeiliSearch/SinkMeiliSearch.h>
#include <base/JSON.h>
#include <base/types.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int MEILISEARCH_EXCEPTION;
}

SinkMeiliSearch::SinkMeiliSearch(const MeiliSearchConfiguration & config_, const Block & sample_block_, ContextPtr local_context_)
    : SinkToStorage(sample_block_), connection(config_), local_context{local_context_}, sample_block{sample_block_}
{
}

/// Trims `view` down to the contents of the json "data" section produced by the JSON
/// output format: "data": [{...}, {...}, {...}] -> [{...}, {...}, {...}].
/// Throws on malformed input instead of indexing past the end of the buffer
/// (the previous implementation had unchecked `view[ind]` accesses, which is
/// undefined behavior when the marker is missing or brackets are unbalanced).
void extractData(std::string_view & view)
{
    size_t begin = view.find("\"data\":");
    if (begin == std::string_view::npos)
        throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, "Malformed output of JSON format: no \"data\" field");

    begin = view.find('[', begin);
    if (begin == std::string_view::npos)
        throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, "Malformed output of JSON format: \"data\" is not an array");
    view.remove_prefix(begin);

    /// Find the matching closing bracket of the array: view[0] == '[' opens it.
    size_t balance = 1;
    size_t pos = 1;
    while (balance > 0 && pos < view.size())
    {
        if (view[pos] == '[')
            ++balance;
        else if (view[pos] == ']')
            --balance;
        ++pos;
    }
    if (balance != 0)
        throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, "Malformed output of JSON format: unbalanced brackets in \"data\"");

    view.remove_suffix(view.size() - pos);
}

/// Serializes the block with the JSON output format, extracts the "data" array and
/// sends it to MeiliSearch as a documents update.
void SinkMeiliSearch::writeBlockData(const Block & block) const
{
    FormatSettings settings = getFormatSettings(local_context);
    /// MeiliSearch expects plain JSON numbers, not quoted 64-bit integers.
    settings.json.quote_64bit_integers = false;
    WriteBufferFromOwnString buf;
    auto writer = FormatFactory::instance().getOutputFormat("JSON", buf, sample_block, local_context, {}, settings);
    writer->write(block);
    writer->flush();
    writer->finalize();

    std::string_view vbuf(buf.str());
    extractData(vbuf);

    auto response = connection.updateQuery(vbuf);
    auto jres = JSON(response).begin();
    /// MeiliSearch reports errors as {"message": ...}.
    if (jres.getName() == "message")
        throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, jres.getValue().toString());
}

void SinkMeiliSearch::consume(Chunk chunk)
{
    auto block = getHeader().cloneWithColumns(chunk.detachColumns());
    writeBlockData(block);
}

}

View File

@ -0,0 +1,28 @@
#pragma once

#include <Core/ExternalResultDescription.h>
#include <Interpreters/Context.h>
#include <Interpreters/Context_fwd.h>
#include <Processors/Sinks/SinkToStorage.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>

namespace DB
{

/// Sink that sends inserted blocks to a MeiliSearch index: each block is serialized
/// with the JSON output format and POSTed to the documents route.
class SinkMeiliSearch : public SinkToStorage
{
public:
    SinkMeiliSearch(const MeiliSearchConfiguration & config_, const Block & sample_block_, ContextPtr local_context_);

    String getName() const override { return "SinkMeiliSearch"; }

    void consume(Chunk chunk) override;

    /// Serializes the block and sends it to MeiliSearch; throws on engine errors.
    void writeBlockData(const Block & block) const;

private:
    MeiliSearchConnection connection;
    ContextPtr local_context;
    Block sample_block;
};

}

View File

@ -0,0 +1,232 @@
#include <Columns/ColumnString.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/IColumn.h>
#include <Core/ExternalResultDescription.h>
#include <Core/Field.h>
#include <Core/Types.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/Serializations/ISerialization.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <Storages/MeiliSearch/SourceMeiliSearch.h>
#include <base/JSON.h>
#include <base/range.h>
#include <base/types.h>
#include <magic_enum.hpp>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include "Interpreters/ProcessList.h"

namespace DB
{

namespace ErrorCodes
{
    extern const int MEILISEARCH_EXCEPTION;
    extern const int UNSUPPORTED_MEILISEARCH_TYPE;
    extern const int MEILISEARCH_MISSING_SOME_COLUMNS;
}

/// Search-route parameters go into a JSON body, so string tokens must be quoted;
/// documents-route parameters go into the URL and are passed through verbatim.
String MeiliSearchSource::doubleQuoteIfNeed(const String & param) const
{
    if (route == QueryRoute::search)
        return doubleQuoteString(param);
    return param;
}

/// Builds the value of "attributesToRetrieve": a JSON array of quoted column names
/// for the search route, a plain comma-separated list for the documents route.
String MeiliSearchSource::constructAttributesToRetrieve() const
{
    WriteBufferFromOwnString columns_to_get;

    if (route == QueryRoute::search)
        columns_to_get << "[";

    auto it = description.sample_block.begin();
    while (it != description.sample_block.end())
    {
        columns_to_get << doubleQuoteIfNeed(it->name);
        ++it;
        if (it != description.sample_block.end())
            columns_to_get << ",";
    }

    if (route == QueryRoute::search)
        columns_to_get << "]";

    return columns_to_get.str();
}

MeiliSearchSource::MeiliSearchSource(
    const MeiliSearchConfiguration & config,
    const Block & sample_block,
    UInt64 max_block_size_,
    QueryRoute route_,
    std::unordered_map<String, String> query_params_)
    : SourceWithProgress(sample_block.cloneEmpty())
    , connection(config)
    , max_block_size{max_block_size_}
    , route{route_}
    , query_params{query_params_}
    , offset{0}
{
    description.init(sample_block);

    auto attributes_to_retrieve = constructAttributesToRetrieve();

    /// Fetch only the requested columns; the page size equals the block size.
    query_params[doubleQuoteIfNeed("attributesToRetrieve")] = attributes_to_retrieve;
    query_params[doubleQuoteIfNeed("limit")] = std::to_string(max_block_size);
}

MeiliSearchSource::~MeiliSearchSource() = default;

/// Converts a JSON value from a MeiliSearch response into a Field matching the
/// requested ClickHouse type; recurses for Nullable and Array. Throws
/// UNSUPPORTED_MEILISEARCH_TYPE for types that cannot be produced from JSON.
Field getField(JSON value, DataTypePtr type_ptr)
{
    TypeIndex type_id = type_ptr->getTypeId();

    if (type_id == TypeIndex::UInt64 || type_id == TypeIndex::UInt32 || type_id == TypeIndex::UInt16 || type_id == TypeIndex::UInt8)
    {
        /// JSON booleans are accepted for unsigned integer columns (Bool maps to UInt8).
        if (value.isBool())
            return value.getBool();
        else
            return value.get<UInt64>();
    }
    else if (type_id == TypeIndex::Int64 || type_id == TypeIndex::Int32 || type_id == TypeIndex::Int16 || type_id == TypeIndex::Int8)
    {
        return value.get<Int64>();
    }
    else if (type_id == TypeIndex::String)
    {
        /// Nested JSON objects are stored as their raw JSON text.
        if (value.isObject())
            return value.toString();
        else
            return value.get<String>();
    }
    else if (type_id == TypeIndex::Float64 || type_id == TypeIndex::Float32)
    {
        return value.get<Float64>();
    }
    else if (type_id == TypeIndex::Date)
    {
        return UInt16{LocalDate{String(value.toString())}.getDayNum()};
    }
    else if (type_id == TypeIndex::Date32)
    {
        return Int32{LocalDate{String(value.toString())}.getExtenedDayNum()};
    }
    else if (type_id == TypeIndex::DateTime)
    {
        ReadBufferFromString in(value.toString());
        time_t time = 0;
        readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(type_ptr.get())->getTimeZone());
        /// Clamp pre-epoch values to 0.
        if (time < 0)
            time = 0;
        return time;
    }
    else if (type_id == TypeIndex::Nullable)
    {
        if (value.isNull())
            return Null();
        const auto * null_type = typeid_cast<const DataTypeNullable *>(type_ptr.get());
        DataTypePtr nested = null_type->getNestedType();
        return getField(value, nested);
    }
    else if (type_id == TypeIndex::Array)
    {
        const auto * array_type = typeid_cast<const DataTypeArray *>(type_ptr.get());
        DataTypePtr nested = array_type->getNestedType();
        Array array;
        for (const auto el : value)
            array.push_back(getField(el, nested));
        return array;
    }
    else
    {
        const std::string_view type_name = magic_enum::enum_name(type_id);
        const String err_msg = "MeiliSearch storage doesn't support type: ";
        throw Exception(ErrorCodes::UNSUPPORTED_MEILISEARCH_TYPE, err_msg + type_name.data());
    }
}

void insertWithTypeId(MutableColumnPtr & column, JSON value, DataTypePtr type_ptr)
{
    column->insert(getField(value, type_ptr));
}

/// Appends every document of the JSON array `jres` to `columns`.
/// Returns the number of parsed documents; throws if a document does not carry a
/// value for every column of the sample block.
size_t MeiliSearchSource::parseJSON(MutableColumns & columns, const JSON & jres) const
{
    size_t cnt_match = 0;

    for (const auto json : jres)
    {
        ++cnt_match;
        size_t cnt_fields = 0;
        for (const auto kv_pair : json)
        {
            ++cnt_fields;
            const auto & name = kv_pair.getName();
            int pos = description.sample_block.getPositionByName(name);
            MutableColumnPtr & col = columns[pos];
            DataTypePtr type_ptr = description.sample_block.getByPosition(pos).type;
            insertWithTypeId(col, kv_pair.getValue(), type_ptr);
        }
        if (cnt_fields != columns.size())
            throw Exception(
                ErrorCodes::MEILISEARCH_MISSING_SOME_COLUMNS, "Some columns were not found in the table, json = " + json.toString());
    }
    return cnt_match;
}

/// Fetches the next page of results (via the search or documents route) and converts
/// it into a Chunk. An empty page ends the stream.
Chunk MeiliSearchSource::generate()
{
    if (all_read)
        return {};

    MutableColumns columns = description.sample_block.cloneEmptyColumns();
    query_params[doubleQuoteIfNeed("offset")] = std::to_string(offset);

    size_t cnt_match = 0;

    if (route == QueryRoute::search)
    {
        auto response = connection.searchQuery(query_params);
        JSON jres = JSON(response).begin();
        /// MeiliSearch reports errors as {"message": ...}.
        if (jres.getName() == "message")
            throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, jres.toString());

        cnt_match = parseJSON(columns, jres.getValue());
    }
    else
    {
        auto response = connection.getDocumentsQuery(query_params);
        JSON jres(response);
        if (!jres.isArray())
        {
            auto error = jres.getWithDefault<String>("message");
            throw Exception(ErrorCodes::MEILISEARCH_EXCEPTION, error);
        }
        cnt_match = parseJSON(columns, jres);
    }

    offset += cnt_match;

    if (cnt_match == 0)
    {
        all_read = true;
        return {};
    }

    return Chunk(std::move(columns), cnt_match);
}

}

View File

@ -0,0 +1,53 @@
#pragma once

#include <cstddef>
#include <unordered_map>
#include <Core/ColumnsWithTypeAndName.h>
#include <Core/ExternalResultDescription.h>
#include <Processors/Chunk.h>
#include <Processors/Sources/SourceWithProgress.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
#include <base/JSON.h>

namespace DB
{

/// Source that reads documents from a MeiliSearch index page by page, either through
/// the search route (when a meiliMatch(...) condition was supplied) or through the
/// plain documents route.
class MeiliSearchSource final : public SourceWithProgress
{
public:
    /// Which REST route the query parameters are meant for.
    enum QueryRoute
    {
        search,
        documents
    };

    MeiliSearchSource(
        const MeiliSearchConfiguration & config,
        const Block & sample_block,
        UInt64 max_block_size_,
        QueryRoute route,
        std::unordered_map<String, String> query_params_);

    ~MeiliSearchSource() override;

    String getName() const override { return "MeiliSearchSource"; }

private:
    /// Quotes a token for the JSON body of the search route; identity for documents.
    String doubleQuoteIfNeed(const String & param) const;

    String constructAttributesToRetrieve() const;

    size_t parseJSON(MutableColumns & columns, const JSON & jres) const;

    Chunk generate() override;

    MeiliSearchConnection connection;
    const UInt64 max_block_size;
    const QueryRoute route;
    ExternalResultDescription description;
    std::unordered_map<String, String> query_params;

    /// Number of documents already fetched; used for paging.
    UInt64 offset;
    bool all_read = false;
};

}

View File

@ -0,0 +1,183 @@
#include <memory>
#include <Core/Types.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/IAST_fwd.h>
#include <Processors/Formats/IOutputFormat.h>
#include <QueryPipeline/Pipe.h>
#include <Storages/IStorage.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
#include <Storages/MeiliSearch/SinkMeiliSearch.h>
#include <Storages/MeiliSearch/SourceMeiliSearch.h>
#include <Storages/MeiliSearch/StorageMeiliSearch.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/StorageFactory.h>
#include <Storages/StorageInMemoryMetadata.h>
#include <Storages/transformQueryForExternalDatabase.h>
#include <Common/logger_useful.h>
#include <Common/parseAddress.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int BAD_QUERY_PARAMETER;
extern const int BAD_ARGUMENTS;
}
StorageMeiliSearch::StorageMeiliSearch(
const StorageID & table_id,
const MeiliSearchConfiguration & config_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment)
: IStorage(table_id), config{config_}, log(&Poco::Logger::get("StorageMeiliSearch (" + table_id.table_name + ")"))
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
setInMemoryMetadata(storage_metadata);
}
String convertASTtoStr(ASTPtr ptr)
{
WriteBufferFromOwnString out;
IAST::FormatSettings settings(out, true);
settings.identifier_quoting_style = IdentifierQuotingStyle::BackticksMySQL;
settings.always_quote_identifiers = IdentifierQuotingStyle::BackticksMySQL != IdentifierQuotingStyle::None;
ptr->format(settings);
return out.str();
}
ASTPtr getFunctionParams(ASTPtr node, const String & name)
{
if (!node)
return nullptr;
const auto * ptr = node->as<ASTFunction>();
if (ptr && ptr->name == name)
{
if (node->children.size() == 1)
return node->children[0];
else
return nullptr;
}
for (const auto & next : node->children)
{
auto res = getFunctionParams(next, name);
if (res != nullptr)
return res;
}
return nullptr;
}
Pipe StorageMeiliSearch::read(
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
ContextPtr /*context*/,
QueryProcessingStage::Enum /*processed_stage*/,
size_t max_block_size,
unsigned)
{
storage_snapshot->check(column_names);
ASTPtr original_where = query_info.query->clone()->as<ASTSelectQuery &>().where();
ASTPtr query_params = getFunctionParams(original_where, "meiliMatch");
MeiliSearchSource::QueryRoute route = MeiliSearchSource::QueryRoute::documents;
std::unordered_map<String, String> kv_pairs_params;
if (query_params)
{
route = MeiliSearchSource::QueryRoute::search;
LOG_TRACE(log, "Query params: {}", convertASTtoStr(query_params));
for (const auto & el : query_params->children)
{
auto str = el->getColumnName();
auto it = find(str.begin(), str.end(), '=');
if (it == str.end())
throw Exception("meiliMatch function must have parameters of the form \'key=value\'", ErrorCodes::BAD_QUERY_PARAMETER);
String key(str.begin() + 1, it);
String value(it + 1, str.end() - 1);
kv_pairs_params[key] = value;
}
}
else
{
LOG_TRACE(log, "Query params: none");
}
for (const auto & el : kv_pairs_params)
LOG_TRACE(log, "Parsed parameter: key = {}, value = {}", el.first, el.second);
auto sample_block = storage_snapshot->getSampleBlockForColumns(column_names);
return Pipe(std::make_shared<MeiliSearchSource>(config, sample_block, max_block_size, route, kv_pairs_params));
}
SinkToStoragePtr StorageMeiliSearch::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context)
{
LOG_TRACE(log, "Trying update index: {}", config.index);
return std::make_shared<SinkMeiliSearch>(config, metadata_snapshot->getSampleBlock(), local_context);
}
MeiliSearchConfiguration StorageMeiliSearch::getConfiguration(ASTs engine_args, ContextPtr context)
{
if (auto named_collection = getExternalDataSourceConfiguration(engine_args, context))
{
auto [common_configuration, storage_specific_args, _] = named_collection.value();
String url = common_configuration.addresses_expr;
String index = common_configuration.table;
String key = common_configuration.password;
if (url.empty() || index.empty())
{
throw Exception(
"Storage MeiliSearch requires 3 parameters: MeiliSearch('url', 'index', 'key'= \"\")", ErrorCodes::BAD_ARGUMENTS);
}
return MeiliSearchConfiguration(url, index, key);
}
else
{
if (engine_args.size() < 2 || 3 < engine_args.size())
{
throw Exception(
"Storage MeiliSearch requires 3 parameters: MeiliSearch('url', 'index', 'key'= \"\")",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
}
for (auto & engine_arg : engine_args)
engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context);
String url = engine_args[0]->as<ASTLiteral &>().value.safeGet<String>();
String index = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
String key;
if (engine_args.size() == 3)
key = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
return MeiliSearchConfiguration(url, index, key);
}
}
/// Registers the "MeiliSearch" table engine in the storage factory.
void registerStorageMeiliSearch(StorageFactory & factory)
{
    auto create_storage = [](const StorageFactory::Arguments & args)
    {
        auto config = StorageMeiliSearch::getConfiguration(args.engine_args, args.getLocalContext());
        return std::make_shared<StorageMeiliSearch>(args.table_id, config, args.columns, args.constraints, args.comment);
    };
    factory.registerStorage(
        "MeiliSearch",
        create_storage,
        {
            .source_access_type = AccessType::MEILISEARCH,
        });
}
}

View File

@ -0,0 +1,40 @@
#pragma once
#include <Storages/ExternalDataSourceConfiguration.h>
#include <Storages/IStorage.h>
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
namespace DB
{
/// Table engine that reads from / writes to a remote MeiliSearch index over HTTP.
class StorageMeiliSearch final : public IStorage
{
public:
    StorageMeiliSearch(
        const StorageID & table_id,
        const MeiliSearchConfiguration & config_,
        const ColumnsDescription & columns_,
        const ConstraintsDescription & constraints_,
        const String & comment);

    String getName() const override { return "MeiliSearch"; }

    /// Builds a pipe that streams search results from the remote index.
    Pipe read(
        const Names & column_names,
        const StorageSnapshotPtr & storage_snapshot,
        SelectQueryInfo & query_info,
        ContextPtr context,
        QueryProcessingStage::Enum processed_stage,
        size_t max_block_size,
        unsigned num_streams) override;

    /// Returns a sink that adds inserted rows as documents to the index.
    SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override;

    /// Parses engine arguments (named collection or 'url', 'index'[, 'key']).
    MeiliSearchConfiguration static getConfiguration(ASTs engine_args, ContextPtr context);

private:
    MeiliSearchConfiguration config;
    Poco::Logger * log;
};
}

View File

@ -58,7 +58,6 @@ MergeTreeReaderStream::MergeTreeReaderStream(
/// Avoid empty buffer. May happen while reading dictionary for DataTypeLowCardinality.
/// For example: part has single dictionary and all marks point to the same position.
ReadSettings read_settings = settings.read_settings;
read_settings.must_read_until_position = true;
if (max_mark_range_bytes != 0)
read_settings = read_settings.adjustBufferSize(max_mark_range_bytes);

View File

@ -28,6 +28,9 @@ void registerStorageGenerateRandom(StorageFactory & factory);
void registerStorageExecutable(StorageFactory & factory);
void registerStorageWindowView(StorageFactory & factory);
// MEILISEARCH
void registerStorageMeiliSearch(StorageFactory& factory);
#if USE_AWS_S3
void registerStorageS3(StorageFactory & factory);
void registerStorageCOS(StorageFactory & factory);
@ -105,6 +108,9 @@ void registerStorages()
registerStorageExecutable(factory);
registerStorageWindowView(factory);
// MEILISEARCH
registerStorageMeiliSearch(factory);
#if USE_AWS_S3
registerStorageS3(factory);
registerStorageCOS(factory);

View File

@ -0,0 +1,39 @@
#include <memory>
#include <Parsers/ASTFunction.h>
#include <Storages/MeiliSearch/MeiliSearchColumnDescriptionFetcher.h>
#include <Storages/MeiliSearch/StorageMeiliSearch.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <TableFunctions/TableFunctionMeiliSearch.h>
#include <Common/Exception.h>
namespace DB
{
/// Creates a StorageMeiliSearch for a table-function invocation.
/// The column list is inferred from the remote index (see getActualTableStructure).
StoragePtr TableFunctionMeiliSearch::executeImpl(
    const ASTPtr & /* ast_function */, ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/) const
{
    auto columns = getActualTableStructure(context);
    return std::make_shared<StorageMeiliSearch>(
        StorageID(getDatabaseName(), table_name), configuration.value(), columns, ConstraintsDescription{}, String{});
}
/// Infers the table structure by fetching a single document from the remote
/// index and deriving column descriptions from it.
ColumnsDescription TableFunctionMeiliSearch::getActualTableStructure(ContextPtr /* context */) const
{
    MeiliSearchColumnDescriptionFetcher fetcher(configuration.value());
    // One document is enough to derive the schema.
    fetcher.addParam(doubleQuoteString("limit"), "1");
    return fetcher.fetchColumnsDescription();
}
/// Parses MeiliSearch('url', 'index'[, 'key']) arguments by delegating to the
/// storage-level parser, which also handles named collections.
void TableFunctionMeiliSearch::parseArguments(const ASTPtr & ast_function, ContextPtr context)
{
    const auto & func_args = ast_function->as<ASTFunction &>();
    configuration = StorageMeiliSearch::getConfiguration(func_args.arguments->children, context);
}
/// Registers the "MeiliSearch" table function in the factory.
void registerTableFunctionMeiliSearch(TableFunctionFactory & factory)
{
    factory.registerFunction<TableFunctionMeiliSearch>();
}
}

View File

@ -0,0 +1,25 @@
#pragma once
#include <Storages/MeiliSearch/MeiliSearchConnection.h>
#include <TableFunctions/ITableFunction.h>
namespace DB
{
/// Table function MeiliSearch('url', 'index'[, 'key']) — exposes a remote
/// MeiliSearch index as a table, inferring the schema from its documents.
class TableFunctionMeiliSearch : public ITableFunction
{
public:
    static constexpr auto name = "MeiliSearch";
    String getName() const override { return name; }

private:
    StoragePtr executeImpl(
        const ASTPtr & ast_function, ContextPtr context, const String & table_name, ColumnsDescription cached_columns) const override;

    const char * getStorageTypeName() const override { return "MeiliSearch"; }

    ColumnsDescription getActualTableStructure(ContextPtr context) const override;
    void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;

    /// Filled by parseArguments; empty until then.
    std::optional<MeiliSearchConfiguration> configuration;
};
}

View File

@ -20,6 +20,8 @@ void registerTableFunctions()
registerTableFunctionInput(factory);
registerTableFunctionGenerate(factory);
registerTableFunctionMeiliSearch(factory);
#if USE_AWS_S3
registerTableFunctionS3(factory);
registerTableFunctionS3Cluster(factory);

View File

@ -18,6 +18,8 @@ void registerTableFunctionValues(TableFunctionFactory & factory);
void registerTableFunctionInput(TableFunctionFactory & factory);
void registerTableFunctionGenerate(TableFunctionFactory & factory);
void registerTableFunctionMeiliSearch(TableFunctionFactory & factory);
#if USE_AWS_S3
void registerTableFunctionS3(TableFunctionFactory & factory);
void registerTableFunctionS3Cluster(TableFunctionFactory & factory);

View File

@ -16,6 +16,12 @@ import traceback
import urllib.parse
import shlex
import urllib3
from cassandra.policies import RoundRobinPolicy
import cassandra.cluster
import psycopg2
import pymongo
import meilisearch
import pymysql
import requests
try:
@ -356,6 +362,7 @@ class ClickHouseCluster:
self.with_kerberized_hdfs = False
self.with_mongo = False
self.with_mongo_secure = False
self.with_meili = False
self.with_net_trics = False
self.with_redis = False
self.with_cassandra = False
@ -418,6 +425,12 @@ class ClickHouseCluster:
self.mongo_no_cred_host = "mongo2"
self.mongo_no_cred_port = get_free_port()
# available when with_meili == True
self.meili_host = "meili1"
self.meili_port = get_free_port()
self.meili_secure_host = "meili_secure"
self.meili_secure_port = get_free_port()
# available when with_cassandra == True
self.cassandra_host = "cassandra1"
self.cassandra_port = 9042
@ -1048,6 +1061,30 @@ class ClickHouseCluster:
]
return self.base_mongo_cmd
def setup_meili_cmd(self, instance, env_variables, docker_compose_yml_dir):
    """Enable the MeiliSearch containers and build their docker-compose command.

    Exposes host/port env vars for both the plain and the secured instance,
    appends the compose file to the base command, and returns the standalone
    compose command for the MeiliSearch services.
    """
    self.with_meili = True
    compose_file = p.join(docker_compose_yml_dir, "docker_compose_meili.yml")
    env_variables.update(
        {
            "MEILI_HOST": self.meili_host,
            "MEILI_EXTERNAL_PORT": str(self.meili_port),
            "MEILI_INTERNAL_PORT": "7700",
            "MEILI_SECURE_HOST": self.meili_secure_host,
            "MEILI_SECURE_EXTERNAL_PORT": str(self.meili_secure_port),
            "MEILI_SECURE_INTERNAL_PORT": "7700",
        }
    )
    self.base_cmd.extend(["--file", compose_file])
    self.base_meili_cmd = [
        "docker-compose",
        "--env-file",
        instance.env_file,
        "--project-name",
        self.project_name,
        "--file",
        compose_file,
    ]
    return self.base_meili_cmd
def setup_minio_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_minio = True
cert_d = p.join(self.minio_dir, "certs")
@ -1178,6 +1215,7 @@ class ClickHouseCluster:
with_kerberized_hdfs=False,
with_mongo=False,
with_mongo_secure=False,
with_meili=False,
with_nginx=False,
with_redis=False,
with_minio=False,
@ -1261,6 +1299,7 @@ class ClickHouseCluster:
with_nginx=with_nginx,
with_kerberized_hdfs=with_kerberized_hdfs,
with_mongo=with_mongo or with_mongo_secure,
with_meili=with_meili,
with_redis=with_redis,
with_minio=with_minio,
with_azurite=with_azurite,
@ -1426,6 +1465,11 @@ class ClickHouseCluster:
)
)
if with_meili and not self.with_meili:
cmds.append(
self.setup_meili_cmd(instance, env_variables, docker_compose_yml_dir)
)
if self.with_net_trics:
for cmd in cmds:
cmd.extend(
@ -1971,6 +2015,30 @@ class ClickHouseCluster:
logging.debug("Can't connect to Mongo " + str(ex))
time.sleep(1)
def wait_meili_to_start(self, timeout=30):
    """Block until both MeiliSearch containers answer their stats endpoint.

    NOTE(review): if the timeout elapses this returns silently instead of
    raising — confirm that a later failure is acceptable in that case.
    """
    connection_str = "http://{host}:{port}".format(
        host="localhost", port=self.meili_port
    )
    client = meilisearch.Client(connection_str)
    # The secured instance requires the master key set in docker-compose.
    connection_str_secure = "http://{host}:{port}".format(
        host="localhost", port=self.meili_secure_port
    )
    client_secure = meilisearch.Client(connection_str_secure, "password")
    start = time.time()
    while time.time() - start < timeout:
        try:
            # Both instances must be reachable before tests proceed.
            client.get_all_stats()
            client_secure.get_all_stats()
            logging.debug(
                f"Connected to MeiliSearch dbs: {client.get_all_stats()}\n{client_secure.get_all_stats()}"
            )
            return
        except Exception as ex:
            logging.debug("Can't connect to MeiliSearch " + str(ex))
            time.sleep(1)
def wait_minio_to_start(self, timeout=180, secure=False):
self.minio_ip = self.get_instance_ip(self.minio_host)
self.minio_redirect_ip = self.get_instance_ip(self.minio_redirect_host)
@ -2317,6 +2385,12 @@ class ClickHouseCluster:
self.up_called = True
self.wait_mongo_to_start(30, secure=self.with_mongo_secure)
if self.with_meili and self.base_meili_cmd:
logging.debug("Setup MeiliSearch")
run_and_check(self.base_meili_cmd + common_opts)
self.up_called = True
self.wait_meili_to_start()
if self.with_redis and self.base_redis_cmd:
logging.debug("Setup Redis")
subprocess_check_call(self.base_redis_cmd + common_opts)
@ -2642,6 +2716,7 @@ class ClickHouseInstance:
with_nginx,
with_kerberized_hdfs,
with_mongo,
with_meili,
with_redis,
with_minio,
with_azurite,
@ -2722,6 +2797,7 @@ class ClickHouseInstance:
self.with_nginx = with_nginx
self.with_kerberized_hdfs = with_kerberized_hdfs
self.with_mongo = with_mongo
self.with_meili = with_meili
self.with_redis = with_redis
self.with_minio = with_minio
self.with_azurite = with_azurite

View File

@ -1,3 +1,10 @@
# pylint: disable=wrong-import-order
# pylint: disable=line-too-long
# pylint: disable=redefined-builtin
# pylint: disable=redefined-outer-name
# pylint: disable=protected-access
# pylint: disable=broad-except
import contextlib
import grpc
import psycopg2
@ -6,6 +13,7 @@ import pymysql.err
import pytest
import sys
import time
import logging
from helpers.cluster import ClickHouseCluster, run_and_check
from helpers.client import Client, QueryRuntimeException
from kazoo.exceptions import NodeExistsError
@ -196,7 +204,7 @@ def test_change_http_port(cluster, zk):
zk.set("/clickhouse/ports/http", b"9090")
with pytest.raises(ConnectionError, match="Connection refused"):
instance.http_query("SELECT 1")
instance.http_query("SELECT 1", port=9090) == "1\n"
assert instance.http_query("SELECT 1", port=9090) == "1\n"
def test_change_mysql_port(cluster, zk):
@ -224,7 +232,7 @@ def test_change_postgresql_port(cluster, zk):
pgsql_client_on_new_port = get_pgsql_client(cluster, port=9090)
cursor = pgsql_client_on_new_port.cursor()
cursor.execute("SELECT 1")
cursor.fetchall() == [(1,)]
assert cursor.fetchall() == [(1,)]
def test_change_grpc_port(cluster, zk):
@ -313,3 +321,61 @@ def test_change_listen_host(cluster, zk):
finally:
with sync_loaded_config(localhost_client.query):
configure_ports_from_zk(zk)
# This is a regression test for the case when the clickhouse-server was waiting
# for the connection that had been issued "SYSTEM RELOAD CONFIG" indefinitely.
#
# Configuration reload directly from the query,
# "directly from the query" means that reload was done from the query context
# over periodic config reload (that is done each 2 seconds).
def test_reload_via_client(cluster, zk):
    """Regression test: "SYSTEM RELOAD CONFIG" issued from a client connection
    must not hang the server (the reload runs in the query context rather than
    the periodic 2-second config reload)."""
    exception = None

    # Client that reaches the server from inside the container via docker exec,
    # so it keeps working while the external listen host is being switched.
    localhost_client = Client(
        host="127.0.0.1", port=9000, command="/usr/bin/clickhouse"
    )
    localhost_client.command = [
        "docker",
        "exec",
        "-i",
        instance.docker_id,
    ] + localhost_client.command

    # NOTE: reload via zookeeper is too fast, but 100 iterations was enough, even for debug build.
    for i in range(0, 100):
        try:
            client = get_client(cluster, port=9000)
            zk.set("/clickhouse/listen_hosts", b"<listen_host>127.0.0.1</listen_host>")
            query_id = f"reload_config_{i}"
            client.query("SYSTEM RELOAD CONFIG", query_id=query_id)
            assert int(localhost_client.query("SELECT 1")) == 1
            localhost_client.query("SYSTEM FLUSH LOGS")
            # Exactly one config reload must have been triggered by the query.
            MainConfigLoads = int(
                localhost_client.query(
                    f"""
            SELECT ProfileEvents['MainConfigLoads']
            FROM system.query_log
            WHERE query_id = '{query_id}' AND type = 'QueryFinish'
            """
                )
            )
            assert MainConfigLoads == 1
            logging.info("MainConfigLoads = %s (retry %s)", MainConfigLoads, i)
            exception = None
            break
        except Exception as e:
            logging.exception("Retry %s", i)
            exception = e
        finally:
            # Restore the original listen configuration before the next
            # attempt; retry until the new socket is actually bound.
            while True:
                try:
                    with sync_loaded_config(localhost_client.query):
                        configure_ports_from_zk(zk)
                    break
                except QueryRuntimeException:
                    logging.exception("The new socket is not binded yet")
                    time.sleep(0.1)
    if exception:
        raise exception

View File

@ -0,0 +1,24 @@
<clickhouse>
    <named_collections>
        <!-- Insecure MeiliSearch instance: no master key required. -->
        <named_collection_for_meili>
            <database>MeiliSearch</database>
            <addresses_expr>http://meili1:7700</addresses_expr>
            <table>new_table</table>
        </named_collection_for_meili>
        <!-- Secured instance, authenticated with the master key from docker-compose. -->
        <named_collection_for_meili_secure>
            <database>MeiliSearch</database>
            <addresses_expr>http://meili_secure:7700</addresses_expr>
            <table>new_table</table>
            <password>password</password>
        </named_collection_for_meili_secure>
        <!-- Secured instance WITHOUT the password: used by tests that expect
             authentication failures. -->
        <named_collection_for_meili_secure_no_password>
            <database>MeiliSearch</database>
            <addresses_expr>http://meili_secure:7700</addresses_expr>
            <table>new_table</table>
        </named_collection_for_meili_secure_no_password>
    </named_collections>
</clickhouse>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,855 @@
import json
import os
from time import sleep
import meilisearch
from pymysql import NULL
import pytest
from helpers.cluster import ClickHouseCluster
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope="module")
def started_cluster(request):
    """Module-scoped cluster: one node wired to both MeiliSearch containers
    (plain and secured) plus the named-collection config."""
    try:
        cluster = ClickHouseCluster(__file__)
        node = cluster.add_instance(
            "meili", main_configs=["configs/named_collection.xml"], with_meili=True
        )
        cluster.start()
        yield cluster
    finally:
        # Always tear the containers down, even if startup failed mid-way.
        cluster.shutdown()
def get_meili_client(started_cluster):
    """Client for the insecure MeiliSearch instance (no master key)."""
    return meilisearch.Client(f"http://localhost:{started_cluster.meili_port}")
def get_meili_secure_client(started_cluster):
    """Client for the secured MeiliSearch instance, authenticated with the master key."""
    return meilisearch.Client(
        f"http://localhost:{started_cluster.meili_secure_port}", "password"
    )
def push_data(client, table, documents):
    """Add documents to a MeiliSearch index and wait until the task is applied."""
    ans = table.add_documents(documents)
    client.wait_for_task(ans["uid"])
def push_movies(client):
    """Load the movies.json fixture into the "movies" index and wait for
    indexing to finish (large dataset, hence the generous task timeout)."""
    # Use a context manager so the fixture file is always closed (the original
    # leaked the file handle and left a debug print of the path).
    with open(SCRIPT_DIR + "/movies.json") as json_file:
        movies = json.load(json_file)
    ans = client.index("movies").add_documents(movies)
    client.wait_for_task(ans["uid"], 100000)
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select(started_cluster):
    """SELECT through the MeiliSearch engine (count, aggregate, point lookup)
    against the insecure instance."""
    client = get_meili_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        data.append({"id": i, "data": hex(i * i)})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE simple_meili_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili1:7700', 'new_table', '')"
    )

    assert node.query("SELECT COUNT() FROM simple_meili_table") == "100\n"
    assert (
        node.query("SELECT sum(id) FROM simple_meili_table")
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query("SELECT data FROM simple_meili_table WHERE id = 42")
        == hex(42 * 42) + "\n"
    )

    # Clean up both the ClickHouse table and the remote index.
    node.query("DROP TABLE simple_meili_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_insert(started_cluster):
    """INSERT through the MeiliSearch engine: a small batch and a 40k-row batch."""

    def wait_for_doc_count(index, expected, retries=120):
        # MeiliSearch applies additions asynchronously; poll instead of a fixed
        # sleep (the original slept 1s/5s, which is flaky on slow machines).
        for _ in range(retries):
            if len(index.get_documents({"limit": expected + 10})) == expected:
                return
            sleep(0.5)
        raise AssertionError(f"index did not reach {expected} documents")

    client = get_meili_client(started_cluster)
    new_table = client.index("new_table")
    big_table = client.index("big_table")

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE new_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili1:7700', 'new_table', '')"
    )
    node.query(
        "INSERT INTO new_table (id, data) VALUES (1, '1') (2, '2') (3, '3') (4, '4') (5, '5') (6, '6') (7, '7')"
    )
    wait_for_doc_count(new_table, 7)
    assert len(new_table.get_documents()) == 7

    node.query(
        "CREATE TABLE big_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili1:7700', 'big_table', '')"
    )
    values = ""
    for i in range(1, 40001):
        values += "(" + str(i) + ", " + "'" + str(i) + "'" + ") "
    node.query("INSERT INTO big_table (id, data) VALUES " + values)
    wait_for_doc_count(big_table, 40000)
    ans = big_table.update_sortable_attributes(["id"])
    client.wait_for_task(ans["uid"])
    docs = big_table.get_documents({"limit": 40010})
    assert len(docs) == 40000

    node.query("DROP TABLE new_table")
    node.query("DROP TABLE big_table")
    new_table.delete()
    big_table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_meilimatch(started_cluster):
    """meiliMatch() pushdown: plain search, search+sort, search+sort+filter must
    return exactly the hits the MeiliSearch SDK returns."""
    client = get_meili_client(started_cluster)
    table = client.index("movies")
    table.update_sortable_attributes(["release_date"])
    table.update_filterable_attributes(["release_date"])
    push_movies(client)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE movies_table(id String, title String, release_date Int64) ENGINE = MeiliSearch('http://meili1:7700', 'movies', '')"
    )

    assert node.query("SELECT COUNT() FROM movies_table") == "19546\n"

    # 1) plain full-text search
    real_json = table.search(
        "abaca",
        {"attributesToRetrieve": ["id", "title", "release_date"], "limit": 20000},
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    # 2) search with ascending sort
    real_json = table.search(
        "abaca",
        {
            "attributesToRetrieve": ["id", "title", "release_date"],
            "limit": 20000,
            "sort": ["release_date:asc"],
        },
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\', \'"sort"=["release_date:asc"]\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    # 3) search with sort + filter.
    # NOTE(review): the SDK request sorts by release_date:desc while the
    # meiliMatch call passes "release_date:asc" — confirm this mismatch is
    # intentional rather than a copy-paste slip.
    real_json = table.search(
        "abaca",
        {
            "attributesToRetrieve": ["id", "title", "release_date"],
            "limit": 20000,
            "sort": ["release_date:desc"],
            "filter": "release_date < 700000000",
        },
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\', \'"sort"=["release_date:asc"]\', \'"filter"="release_date < 700000000"\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    node.query("DROP TABLE movies_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_incorrect_data_type(started_cluster):
    """Selecting a column that does not exist in the remote documents must fail
    with MEILISEARCH_MISSING_SOME_COLUMNS."""
    client = get_meili_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        # Documents carry "aaaa", while the table declares "bbbb".
        data.append({"id": i, "data": hex(i * i), "aaaa": "Hello"})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE strange_meili_table(id UInt64, data String, bbbb String) ENGINE = MeiliSearch('http://meili1:7700', 'new_table', '')"
    )

    error = node.query_and_get_error("SELECT bbbb FROM strange_meili_table")
    assert "MEILISEARCH_MISSING_SOME_COLUMNS" in error

    node.query("DROP TABLE strange_meili_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select_secure(started_cluster):
    """SELECT against the secured instance: correct password works, a wrong
    password yields MEILISEARCH_EXCEPTION on every query."""
    client = get_meili_secure_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        data.append({"id": i, "data": hex(i * i)})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE simple_meili_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', 'password')"
    )
    # Same index, wrong key: table creation succeeds, queries must fail.
    node.query(
        "CREATE TABLE wrong_meili_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', 'wrong_password')"
    )

    assert node.query("SELECT COUNT() FROM simple_meili_table") == "100\n"
    assert (
        node.query("SELECT sum(id) FROM simple_meili_table")
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query("SELECT data FROM simple_meili_table WHERE id = 42")
        == hex(42 * 42) + "\n"
    )

    error = node.query_and_get_error("SELECT COUNT() FROM wrong_meili_table")
    assert "MEILISEARCH_EXCEPTION" in error
    error = node.query_and_get_error("SELECT sum(id) FROM wrong_meili_table")
    assert "MEILISEARCH_EXCEPTION" in error
    error = node.query_and_get_error("SELECT data FROM wrong_meili_table WHERE id = 42")
    assert "MEILISEARCH_EXCEPTION" in error

    node.query("DROP TABLE simple_meili_table")
    node.query("DROP TABLE wrong_meili_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_meilimatch_secure(started_cluster):
    """Same as test_meilimatch, but against the secured instance with the
    master key supplied in the engine arguments."""
    client = get_meili_secure_client(started_cluster)
    table = client.index("movies")
    table.update_sortable_attributes(["release_date"])
    table.update_filterable_attributes(["release_date"])
    push_movies(client)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE movies_table(id String, title String, release_date Int64) ENGINE = MeiliSearch('http://meili_secure:7700', 'movies', 'password')"
    )

    assert node.query("SELECT COUNT() FROM movies_table") == "19546\n"

    # 1) plain full-text search
    real_json = table.search(
        "abaca",
        {"attributesToRetrieve": ["id", "title", "release_date"], "limit": 20000},
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    # 2) search with ascending sort
    real_json = table.search(
        "abaca",
        {
            "attributesToRetrieve": ["id", "title", "release_date"],
            "limit": 20000,
            "sort": ["release_date:asc"],
        },
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\', \'"sort"=["release_date:asc"]\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    # 3) search with sort + filter.
    # NOTE(review): the SDK request sorts by release_date:desc while the
    # meiliMatch call passes "release_date:asc" — confirm this mismatch is
    # intentional rather than a copy-paste slip.
    real_json = table.search(
        "abaca",
        {
            "attributesToRetrieve": ["id", "title", "release_date"],
            "limit": 20000,
            "sort": ["release_date:desc"],
            "filter": "release_date < 700000000",
        },
    )["hits"]
    click_ans = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM movies_table WHERE \
                meiliMatch(\'"q"="abaca"\', \'"sort"=["release_date:asc"]\', \'"filter"="release_date < 700000000"\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    click_json = json.loads(click_ans)
    assert real_json == click_json

    node.query("DROP TABLE movies_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_incorrect_data_type_secure(started_cluster):
    """Missing-column error also surfaces through the secured instance."""
    client = get_meili_secure_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        # Documents carry "aaaa", while the table declares "bbbb".
        data.append({"id": i, "data": hex(i * i), "aaaa": "Hello"})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE strange_meili_table(id UInt64, data String, bbbb String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', 'password')"
    )

    error = node.query_and_get_error("SELECT bbbb FROM strange_meili_table")
    assert "MEILISEARCH_MISSING_SOME_COLUMNS" in error

    node.query("DROP TABLE strange_meili_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_insert_secure(started_cluster):
    """INSERT through the MeiliSearch engine against the secured instance."""

    def wait_for_doc_count(index, expected, retries=120):
        # MeiliSearch applies additions asynchronously; poll instead of a fixed
        # sleep (the original slept 1s/5s, which is flaky on slow machines).
        for _ in range(retries):
            if len(index.get_documents({"limit": expected + 10})) == expected:
                return
            sleep(0.5)
        raise AssertionError(f"index did not reach {expected} documents")

    client = get_meili_secure_client(started_cluster)
    new_table = client.index("new_table")
    big_table = client.index("big_table")

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE new_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', 'password')"
    )
    node.query(
        "INSERT INTO new_table (id, data) VALUES (1, '1') (2, '2') (3, '3') (4, '4') (5, '5') (6, '6') (7, '7')"
    )
    wait_for_doc_count(new_table, 7)
    assert len(new_table.get_documents()) == 7

    node.query(
        "CREATE TABLE big_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'big_table', 'password')"
    )
    values = ""
    for i in range(1, 40001):
        values += "(" + str(i) + ", " + "'" + str(i) + "'" + ") "
    node.query("INSERT INTO big_table (id, data) VALUES " + values)
    wait_for_doc_count(big_table, 40000)
    ans = big_table.update_sortable_attributes(["id"])
    client.wait_for_task(ans["uid"])
    docs = big_table.get_documents({"limit": 40010})
    assert len(docs) == 40000

    node.query("DROP TABLE new_table")
    node.query("DROP TABLE big_table")
    new_table.delete()
    big_table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_security_levels(started_cluster):
    """MeiliSearch API keys: the search-only key may read but not write, the
    admin key may do both, and both keys see the same data."""
    client = get_meili_secure_client(started_cluster)
    new_table = client.index("new_table")
    # NOTE(review): assumes get_keys() returns the search key first and the
    # admin key second — confirm this ordering is guaranteed by the server.
    search_key = client.get_keys()["results"][0]["key"]
    admin_key = client.get_keys()["results"][1]["key"]

    values = ""
    for i in range(1, 101):
        values += "(" + str(i) + ", " + "'" + str(i) + "'" + ") "

    node = started_cluster.instances["meili"]
    node.query(
        f"CREATE TABLE read_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', '{search_key}')"
    )
    node.query(
        f"CREATE TABLE write_table(id UInt64, data String) ENGINE = MeiliSearch('http://meili_secure:7700', 'new_table', '{admin_key}')"
    )

    # The search-only key must not be able to insert.
    error = node.query_and_get_error(
        "INSERT INTO read_table (id, data) VALUES " + values
    )
    assert "MEILISEARCH_EXCEPTION" in error

    node.query("INSERT INTO write_table (id, data) VALUES " + values)
    sleep(1)
    assert len(new_table.get_documents({"limit": 40010})) == 100

    # Both keys must observe identical data.
    ans1 = (
        "["
        + ", ".join(
            node.query(
                'SELECT * FROM read_table where meiliMatch(\'"q"=""\') \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0'
            ).split("\n")[:-1]
        )
        + "]"
    )
    ans2 = (
        "["
        + ", ".join(
            node.query(
                "SELECT * FROM write_table \
                format JSONEachRow settings output_format_json_quote_64bit_integers=0"
            ).split("\n")[:-1]
        )
        + "]"
    )
    assert ans1 == ans2
    docs = json.loads(ans1)
    assert len(docs) == 100

    node.query("DROP TABLE read_table")
    node.query("DROP TABLE write_table")
    client.index("new_table").delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_types(started_cluster):
    """Round-trips every supported column type through the MeiliSearch engine.

    Fix: the original asserted Float32_test twice and never checked
    Float64_test; the second assertion now reads Float64_test.
    """
    client = get_meili_client(started_cluster)
    table = client.index("types_table")
    data = {
        "id": 1,
        "UInt8_test": 128,
        "UInt16_test": 32768,
        "UInt32_test": 2147483648,
        "UInt64_test": 9223372036854775808,
        "Int8_test": -128,
        "Int16_test": -32768,
        "Int32_test": -2147483648,
        "Int64_test": -9223372036854775808,
        "String_test": "abacaba",
        "Float32_test": 42.42,
        "Float64_test": 42.42,
        "Array_test": [["aba", "caba"], ["2d", "array"]],
        "Null_test1": "value",
        "Null_test2": NULL,
        "Bool_test1": True,
        "Bool_test2": False,
        "Json_test": {"a": 1, "b": {"in_json": "qwerty"}},
    }
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE types_table(\
        id UInt64,\
        UInt8_test UInt8,\
        UInt16_test UInt16,\
        UInt32_test UInt32,\
        UInt64_test UInt64,\
        Int8_test Int8,\
        Int16_test Int16,\
        Int32_test Int32,\
        Int64_test Int64,\
        String_test String,\
        Float32_test Float32,\
        Float64_test Float64,\
        Array_test Array(Array(String)),\
        Null_test1 Nullable(String),\
        Null_test2 Nullable(String),\
        Bool_test1 Boolean,\
        Bool_test2 Boolean,\
        Json_test String\
        ) ENGINE = MeiliSearch('http://meili1:7700', 'types_table', '')"
    )

    assert node.query("SELECT id FROM types_table") == "1\n"
    assert node.query("SELECT UInt8_test FROM types_table") == "128\n"
    assert node.query("SELECT UInt16_test FROM types_table") == "32768\n"
    assert node.query("SELECT UInt32_test FROM types_table") == "2147483648\n"
    assert node.query("SELECT UInt64_test FROM types_table") == "9223372036854775808\n"
    assert node.query("SELECT Int8_test FROM types_table") == "-128\n"
    assert node.query("SELECT Int16_test FROM types_table") == "-32768\n"
    assert node.query("SELECT Int32_test FROM types_table") == "-2147483648\n"
    assert node.query("SELECT Int64_test FROM types_table") == "-9223372036854775808\n"
    assert node.query("SELECT String_test FROM types_table") == "abacaba\n"
    assert node.query("SELECT Float32_test FROM types_table") == "42.42\n"
    # Bug fix: previously duplicated the Float32 check and skipped Float64.
    assert node.query("SELECT Float64_test FROM types_table") == "42.42\n"
    assert (
        node.query("SELECT Array_test FROM types_table")
        == "[['aba','caba'],['2d','array']]\n"
    )
    assert node.query("SELECT Null_test1 FROM types_table") == "value\n"
    assert node.query("SELECT Null_test2 FROM types_table") == "NULL\n"
    assert node.query("SELECT Bool_test1 FROM types_table") == "true\n"
    assert node.query("SELECT Bool_test2 FROM types_table") == "false\n"
    assert (
        node.query("SELECT Json_test FROM types_table")
        == '{"a":1,"b":{"in_json":"qwerty"}}\n'
    )

    node.query("DROP TABLE types_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_named_collection(started_cluster):
    """Engine configured via a server-side named collection instead of
    positional url/index/key arguments."""
    client = get_meili_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        data.append({"id": i, "data": hex(i * i)})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]
    # Collection defined in configs/named_collection.xml.
    node.query(
        "CREATE TABLE simple_meili_table(id UInt64, data String) ENGINE = MeiliSearch( named_collection_for_meili )"
    )

    assert node.query("SELECT COUNT() FROM simple_meili_table") == "100\n"
    assert (
        node.query("SELECT sum(id) FROM simple_meili_table")
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query("SELECT data FROM simple_meili_table WHERE id = 42")
        == hex(42 * 42) + "\n"
    )

    node.query("DROP TABLE simple_meili_table")
    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_named_collection_secure(started_cluster):
    """Named collections against the secured instance: with password, without
    password (must fail), and a collection with an inline password override."""
    client_secure = get_meili_secure_client(started_cluster)
    client_free = get_meili_client(started_cluster)
    table_secure = client_secure.index("new_table")
    table_free = client_free.index("new_table")
    data = []
    for i in range(0, 100):
        data.append({"id": i, "data": hex(i * i)})
    push_data(client_secure, table_secure, data)
    push_data(client_free, table_free, data)

    node = started_cluster.instances["meili"]
    node.query(
        "CREATE TABLE simple_meili_table(id UInt64, data String) ENGINE = MeiliSearch( named_collection_for_meili_secure )"
    )
    # Collection lacks the password: queries against it must fail.
    node.query(
        "CREATE TABLE wrong_meili_table(id UInt64, data String) ENGINE = MeiliSearch( named_collection_for_meili_secure_no_password )"
    )
    # Password supplied inline on top of the password-less collection.
    node.query(
        'CREATE TABLE combine_meili_table(id UInt64, data String) ENGINE = MeiliSearch( named_collection_for_meili_secure_no_password, password="password" )'
    )

    assert node.query("SELECT COUNT() FROM simple_meili_table") == "100\n"
    assert (
        node.query("SELECT sum(id) FROM simple_meili_table")
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query("SELECT data FROM simple_meili_table WHERE id = 42")
        == hex(42 * 42) + "\n"
    )

    assert node.query("SELECT COUNT() FROM combine_meili_table") == "100\n"
    assert (
        node.query("SELECT sum(id) FROM combine_meili_table")
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query("SELECT data FROM combine_meili_table WHERE id = 42")
        == hex(42 * 42) + "\n"
    )

    error = node.query_and_get_error("SELECT COUNT() FROM wrong_meili_table")
    assert "MEILISEARCH_EXCEPTION" in error
    error = node.query_and_get_error("SELECT sum(id) FROM wrong_meili_table")
    assert "MEILISEARCH_EXCEPTION" in error
    error = node.query_and_get_error("SELECT data FROM wrong_meili_table WHERE id = 42")
    assert "MEILISEARCH_EXCEPTION" in error

    node.query("DROP TABLE simple_meili_table")
    node.query("DROP TABLE wrong_meili_table")
    node.query("DROP TABLE combine_meili_table")
    table_secure.delete()
    table_free.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_table_function(started_cluster):
    """MeiliSearch(...) table function (no CREATE TABLE) against the insecure
    instance; schema is inferred from the remote documents."""
    client = get_meili_client(started_cluster)
    table = client.index("new_table")
    data = []
    for i in range(0, 100):
        data.append({"id": i, "data": hex(i * i)})
    push_data(client, table, data)

    node = started_cluster.instances["meili"]

    assert (
        node.query(
            "SELECT COUNT() FROM MeiliSearch('http://meili1:7700', 'new_table', '')"
        )
        == "100\n"
    )
    assert (
        node.query(
            "SELECT sum(id) FROM MeiliSearch('http://meili1:7700', 'new_table', '')"
        )
        == str(sum(range(0, 100))) + "\n"
    )
    assert (
        node.query(
            "SELECT data FROM MeiliSearch('http://meili1:7700', 'new_table', '') WHERE id = 42"
        )
        == hex(42 * 42) + "\n"
    )

    table.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_table_function_secure(started_cluster):
    """Query a password-protected MeiliSearch index through the table function:
    correct password succeeds, wrong password raises MEILISEARCH_EXCEPTION."""
    client = get_meili_secure_client(started_cluster)
    index = client.index("new_table")
    docs = [{"id": i, "data": hex(i * i)} for i in range(0, 100)]
    push_data(client, index, docs)

    node = started_cluster.instances["meili"]
    good = "MeiliSearch('http://meili_secure:7700', 'new_table', 'password')"
    assert node.query("SELECT COUNT() FROM " + good) == "100\n"
    assert node.query("SELECT sum(id) FROM " + good) == str(sum(range(0, 100))) + "\n"
    assert node.query("SELECT data FROM " + good + " WHERE id = 42") == hex(42 * 42) + "\n"

    # Every query shape must fail identically with a bad password.
    bad = "MeiliSearch('http://meili_secure:7700', 'new_table', 'wrong_password')"
    for query in (
        "SELECT COUNT() FROM " + bad,
        "SELECT sum(id) FROM " + bad,
        "SELECT data FROM " + bad + " WHERE id = 42",
    ):
        assert "MEILISEARCH_EXCEPTION" in node.query_and_get_error(query)

    index.delete()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_types_in_table_function(started_cluster):
    """Round-trip one document containing every supported field kind (ints of
    all widths, strings, floats, nested arrays, null, booleans, JSON objects)
    through the MeiliSearch table function and check the textual output of
    each column.

    Fixes over the previous revision:
    - ``"Null_test2": NULL`` used the undefined name ``NULL`` (a NameError at
      runtime); Python's null literal is ``None``.
    - ``Float64_test`` was never asserted — the check duplicated
      ``Float32_test`` instead.
    """
    client = get_meili_client(started_cluster)
    table = client.index("types_table")
    data = {
        "id": 1,
        "UInt8_test": 128,
        "UInt16_test": 32768,
        "UInt32_test": 2147483648,
        "Int8_test": -128,
        "Int16_test": -32768,
        "Int32_test": -2147483648,
        "Int64_test": -9223372036854775808,
        "String_test": "abacaba",
        "Float32_test": 42.42,
        "Float64_test": 42.42,
        "Array_test": [["aba", "caba"], ["2d", "array"]],
        "Null_test1": "value",
        "Null_test2": None,  # was the undefined name NULL
        "Bool_test1": True,
        "Bool_test2": False,
        "Json_test": {"a": 1, "b": {"in_json": "qwerty"}},
    }
    push_data(client, table, data)
    node = started_cluster.instances["meili"]

    # (column, expected TSV output) pairs — one SELECT per column so a failure
    # pinpoints the broken type conversion.
    expectations = [
        ("id", "1\n"),
        ("UInt8_test", "128\n"),
        ("UInt16_test", "32768\n"),
        ("UInt32_test", "2147483648\n"),
        ("Int8_test", "-128\n"),
        ("Int16_test", "-32768\n"),
        ("Int32_test", "-2147483648\n"),
        ("Int64_test", "-9223372036854775808\n"),
        ("String_test", "abacaba\n"),
        ("Float32_test", "42.42\n"),
        ("Float64_test", "42.42\n"),  # was duplicating Float32_test
        ("Array_test", "[['aba','caba'],['2d','array']]\n"),
        ("Null_test1", "value\n"),
        ("Null_test2", "NULL\n"),
        ("Bool_test1", "1\n"),
        ("Bool_test2", "0\n"),
        ("Json_test", '{"a":1,"b":{"in_json":"qwerty"}}\n'),
    ]
    for column, expected in expectations:
        assert (
            node.query(
                "SELECT "
                + column
                + " FROM MeiliSearch('http://meili1:7700', 'types_table', '')"
            )
            == expected
        )

    table.delete()

View File

@ -128,6 +128,7 @@ FILE [] GLOBAL SOURCES
URL [] GLOBAL SOURCES
REMOTE [] GLOBAL SOURCES
MONGO [] GLOBAL SOURCES
MEILISEARCH [] GLOBAL SOURCES
MYSQL [] GLOBAL SOURCES
POSTGRES [] GLOBAL SOURCES
SQLITE [] GLOBAL SOURCES

View File

@ -10,3 +10,6 @@ SELECT h3GetBaseCell(0xFFFFFFFFFFFFFF) FORMAT Null;
SELECT h3GetResolution(0xFFFFFFFFFFFFFF) FORMAT Null;
SELECT h3kRing(0xFFFFFFFFFFFFFF, toUInt16(10)) FORMAT Null;
SELECT h3ToGeo(0xFFFFFFFFFFFFFF) FORMAT Null;
SELECT h3HexRing(0xFFFFFFFFFFFFFF, toUInt16(10)) FORMAT Null; -- { serverError 117 }
SELECT h3HexRing(0xFFFFFFFFFFFFFF, toUInt16(10000)) FORMAT Null; -- { serverError 117 }
SELECT length(h3HexRing(581276613233082367, toUInt16(1))) FORMAT Null;

View File

@ -276,7 +276,7 @@ CREATE TABLE system.grants
(
`user_name` Nullable(String),
`role_name` Nullable(String),
`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW 
QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MYSQL' = 130, 'POSTGRES' = 131, 'SQLITE' = 132, 'ODBC' = 133, 'JDBC' = 134, 'HDFS' = 135, 'S3' = 136, 'HIVE' = 137, 'SOURCES' = 138, 'ALL' = 139, 'NONE' = 140),
`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW 
QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141),
`database` Nullable(String),
`table` Nullable(String),
`column` Nullable(String),
@ -549,10 +549,10 @@ ENGINE = SystemPartsColumns()
COMMENT 'SYSTEM TABLE is built on the fly.'
CREATE TABLE system.privileges
(
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW 
QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MYSQL' = 130, 'POSTGRES' = 131, 'SQLITE' = 132, 'ODBC' = 133, 'JDBC' = 134, 'HDFS' = 135, 'S3' = 136, 'HIVE' = 137, 'SOURCES' = 138, 'ALL' = 139, 'NONE' = 140),
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW 
QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141),
`aliases` Array(String),
`level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)),
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 
'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MYSQL' = 130, 'POSTGRES' = 131, 'SQLITE' = 132, 'ODBC' = 133, 'JDBC' = 134, 'HDFS' = 135, 'S3' = 136, 'HIVE' = 137, 'SOURCES' = 138, 'ALL' = 139, 'NONE' = 140))
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 
'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141))
)
ENGINE = SystemPrivileges()
COMMENT 'SYSTEM TABLE is built on the fly.'

View File

@ -78,3 +78,7 @@
842
5882
41162
[599686042433355775,599686043507097599,599686023106002943]
[581276613233082367]
[581518505791193087,581500913605148671,581764796395814911,581259021047037951,581250224954015743,581267817140060159]
[581514107744681983,581496515558637567,581509709698170879,581760398349303807,581742806163259391,581747204209770495,581549292116770815,581263419093549055,581254623000526847,581272215186571263,581122681605193727,581118283558682623]

View File

@ -29,3 +29,6 @@ SELECT h3ExactEdgeLengthM(arrayJoin([1298057039473278975,1370114633511206911,144
SELECT h3ExactEdgeLengthKm(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783]));
SELECT h3ExactEdgeLengthRads(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783]));
SELECT h3NumHexagons(arrayJoin([1,2,3]));
SELECT h3Line(arrayJoin([stringToH3('85283473fffffff')]), arrayJoin([stringToH3('8528342bfffffff')]));
SELECT h3HexRing(arrayJoin([579205133326352383]), arrayJoin([toUInt16(1),toUInt16(2),toUInt16(3)])); -- { serverError 117 }
SELECT h3HexRing(arrayJoin([581276613233082367]), arrayJoin([toUInt16(0),toUInt16(1),toUInt16(2)]));

View File

@ -0,0 +1,28 @@
7
7
7
7
7
7
7
7
7
7
8
9
9
9
9
9
9
9
9
9
13
13
13
13
13
13
13
13

View File

@ -0,0 +1,43 @@
-- Tags: no-fasttest

DROP TABLE IF EXISTS h3_indexes;

CREATE TABLE h3_indexes (id int, start String, end String) ENGINE = Memory;

-- Test values taken from the h3 library test suite.
-- A single multi-row INSERT keeps the fixture in one statement.
INSERT INTO h3_indexes VALUES
    (1,  '830631fffffffff', '830780fffffffff'),
    (2,  '830631fffffffff', '830783fffffffff'),
    (3,  '830631fffffffff', '83079dfffffffff'),
    (4,  '830631fffffffff', '830799fffffffff'),
    (5,  '830631fffffffff', '8306f5fffffffff'),
    (6,  '830631fffffffff', '8306e6fffffffff'),
    (7,  '830631fffffffff', '8306e4fffffffff'),
    (8,  '830631fffffffff', '830701fffffffff'),
    (9,  '830631fffffffff', '830700fffffffff'),
    (10, '830631fffffffff', '830706fffffffff'),
    (11, '830631fffffffff', '830733fffffffff'),
    (12, '8301a6fffffffff', '830014fffffffff'),
    (13, '8301a6fffffffff', '830033fffffffff'),
    (14, '8301a6fffffffff', '830031fffffffff'),
    (15, '8301a6fffffffff', '830022fffffffff'),
    (16, '8301a6fffffffff', '830020fffffffff'),
    (17, '8301a6fffffffff', '830024fffffffff'),
    (18, '8301a6fffffffff', '830120fffffffff'),
    (19, '8301a6fffffffff', '830124fffffffff'),
    (20, '8301a6fffffffff', '8308cdfffffffff'),
    (21, '8301a5fffffffff', '831059fffffffff'),
    (22, '8301a5fffffffff', '830b2dfffffffff'),
    (23, '8301a5fffffffff', '830b29fffffffff'),
    (24, '8301a5fffffffff', '830b76fffffffff'),
    (25, '8301a5fffffffff', '830b43fffffffff'),
    (26, '8301a5fffffffff', '830b4efffffffff'),
    (27, '8301a5fffffffff', '830b48fffffffff'),
    (28, '8301a5fffffffff', '830b49fffffffff');

-- ORDER BY id makes the reference output deterministic.
SELECT h3Distance(stringToH3(start), stringToH3(end)) FROM h3_indexes ORDER BY id;

DROP TABLE h3_indexes;

View File

@ -0,0 +1,17 @@
[581276613233082367]
[580995138256371711,581144671837749247,581166662070304767,581171060116815871,581267817140060159,581272215186571263,581276613233082367,581531699930726399,581536097977237503,581558088209793023,581747204209770495,581764796395814911]
[581250224954015743,581259021047037951,581267817140060159,581500913605148671,581518505791193087,581764796395814911]
[589624655266971647,589625205022785535,589626854290227199,589642797208829951,589644996232085503,589708767906496511,589709317662310399,589709867418124287,589714815220449279,589715914732077055,589725810336727039,589727459604168703,589728009359982591,589735156185563135,589736255697190911,589742303011143679,589744502034399231,589745051790213119]
[594053281945223167,594053419384176639,594054106578943999,594054244017897471,594054450176327679,594055343529525247,594055618407432191,594064277061500927,594064345780977663,594071698764988415,594071767484465151,594072111081848831,594072317240279039,594072523398709247,594072592118185983,594079532785336319,594079807663243263,594081044613824511,594081113333301247,594081319491731455,594081594369638399,594081731808591871,594081937967022079,594082762600742911]
[598346591383846911,598346814722146303,598346840491950079,598346849081884671,598346960751034367,598346977930903551,598348884896382975,598348910666186751,598348919256121343,598349168364224511,598349176954159103,598356710326796287,598356718916730879,598356761866403839,598356779046273023,598356796226142207,598356821995945983,598371905921089535,598371923100958719,598372687605137407,598372704785006591,598372807864221695,598372842223960063,598373580958334975,598373589548269567,598373830066438143,598373864426176511,598374783549177855,598374792139112447,598374800729047039]
[599542260886929407,599542265181896703,599542294172925951,599542297394151423,599542298467893247,599542329606406143,599542330680147967,599542331753889791,599542337122598911,599542338196340735,599542343565049855,599542350007500799,599542351081242623,599542599115603967,599542601263087615,599542612000505855,599542614147989503,599542617369214975,599542630254116863,599542632401600511,599542642065276927,599542643139018751,599547512558190591,599547514705674239,599547525443092479,599547527590576127,599547530811801599,599549536561528831,599549540856496127,599549546225205247,599549550520172543,599549569847525375,599549573068750847,599549576289976319,599549838282981375,599549839356723199]
[604296028669083647,604296029205954559,604296036587929599,604296036722147327,604296037124800511,604296037259018239,604296037930106879,604296273348001791,604296273750654975,604296273884872703,604296277777186815,604296333477543935,604296333880197119,604296334014414847,604296337235640319,604296337369858047,604296337772511231,604296337906728959,604296338577817599,604296347704623103,604296348241494015,604296351999590399,604296352133808127,604296356294557695,604296356831428607,604296358173605887,604296358442041343,604296358844694527,604296360857960447,604296361126395903,604296363676532735,604296363944968191,604296364213403647,604296369179459583,604296369313677311,604296383943409663,604296384211845119,604296384614498303,604296386627764223,604296386896199679,604296389446336511,604296389714771967]
[608784291018571775,608784291035348991,608784291119235071,608784291136012287,608784291219898367,608784292931174399,608784292964728831,608784293132500991,608784293166055423,608784293216387071,608784293417713663,608784293451268095,608785191854407679,608785191904739327,608785191921516543,608785192424833023,608785192491941887,608785193750233087,608785193783787519,608785194203217919,608785194236772351,608785194404544511,608785194438098943,608785194488430591,608785196182929407,608785196199706623,608785198632402943,608785198649180159,608785198716289023,608785198783397887,608785209319489535,608785209386598399,608785209436930047,608785209453707263,608785209537593343,608785210410008575,608785210426785791,608785210510671871,608785210594557951,608785210611335167,608785210711998463,608785210728775679,608785213195026431,608785213245358079,608785213262135295,608785213614456831,608785213765451775,608785213832560639]
[615732152056676351,615732152062967807,615732152065064959,615732152115396607,615732152117493759,615732152125882367,615732152134270975,615732152593547263,615732152599838719,615732152601935871,615732152648073215,615732152656461823,615732152677433343,615732152681627647,615732152687919103,615732189809606655,615732191669780479,615732191673974783,615732191678169087,615732191701237759,615732191705432063,615732191711723519,615732191923535871,615732191925633023,615732191938215935,615732191942410239,615732191978061823,615732191980158975,615732192020004863,615732192022102015,615732192032587775,615732192129056767,615732192133251071,615732192206651391,615732192215039999,615732192263274495,615732192265371647,615732192271663103,615732192273760255,615732192284246015,615732192359743487,615732192368132095,615732192422658047,615732192428949503,615732192431046655,615732196117839871,615732196119937023,615732196356915199,615732196361109503,615732196432412671,615732196434509823,615732196436606975,615732196451287039,615732196455481343]
[617056792082120703,617056792083169279,617056793998917631,617056793999179775,617056794000490495,617056794000752639,617056794002063359,617056794003636223,617056794003898367,617056794015170559,617056794015694847,617056794016481279,617056794020413439,617056794020675583,617056794478903295,617056794479951871,617056794487029759,617056794487816191,617056794488078335,617056794497515519,617056794498301951,617056794498564095,617056794506428415,617056794507476991,617056794527924223,617056794528186367,617056794528448511,617056794531069951,617056794531594239,617056794532380671,617056794537885695,617056794538147839,617056794544701439,617056794544963583,617056794546012159,617056794547060735,617056794554662911,617056794554925055,617056794556235775,617056794556497919,617056794557808639,617056794561478655,617056794562002943,617056794564624383,617056794565410815,617056794565935103,617056794569080831,617056794569605119,617056794666860543,617056794699890687,617056794700414975,617056794703036415,617056794703560703,617056794704347135,617056794707492863,617056794708017151,617056794755727359,617056794756775935,617056794822836223,617056794823884799]
[624586471612907519,624586471613005823,624586471613038591,624586471613759487,624586471613890559,624586471613988863,624586471614119935,624586471619330047,624586471619395583,624586471620280319,624586471620411391,624586471620804607,624586471620870143,624586471621951487,624586471621984255,624586471622148095,624586471622279167,624586471622934527,624586471622967295,624586471647903743,624586471647936511,624586471655374847,624586471655407615,624586471655571455,624586471655702527,624586471655735295,624586471657078783,624586471657144319,624586471658258431,624586471658323967,624586471658422271,624586471658553343,624586471658618879,624586471660486655,624586471660519423,624586475478614015,624586475478646783,624586475478745087,624586475478777855,624586475478941695,624586475479597055,624586475479629823,624586477870874623,624586477871005695,624586477871366143,624586477871497215,624586477871890431,624586477871955967,624586477872250879,624586477872316415,624586477877166079,624586477877297151,624586477877657599,624586477877788671,624586477877952511,624586477877985279,624586477878149119,624586477878247423,624586477878280191,624586477882376191,624586477882441727,624586477882736639,624586477882802175,624586477882867711,624586477882933247,624586477883031551]
[627882919482134527,627882919482138623,627882919482159103,627882919482200063,627882919482208255,627882919482220543,627882919482675199,627882919482683391,627882919482724351,627882919482732543,627882919482744831,627882919482793983,627882919482802175,627882919484207103,627882919484223487,627882919484297215,627882919484309503,627882919484313599,627882919484399615,627882919484415999,627882919484993535,627882919484997631,627882919485018111,627882919485022207,627882919485042687,627882919485067263,627882919485071359,627882919485255679,627882919485329407,627882919485333503,627882919485345791,627882919485358079,627882919485362175,627882919521722367,627882919521738751,627882919521849343,627882919521861631,627882919521865727,627882919522021375,627882919522037759,627882919522054143,627882919522058239,627882919522652159,627882919522656255,627882919522770943,627882919522787327,627882919522902015,627882919522910207,627882919523069951,627882919523086335,627882919538020351,627882919538028543,627882919538139135,627882919538147327,627882919538155519,627882919540690943,627882919540711423,627882919540715519,627882919541714943,627882919541719039,627882919541952511,627882919541960703,627882919542001663,627882919542009855,627882919542022143,627882919542071295,627882919542079487,627882919542263807,627882919542267903,627882919542288383,627882919542312959,627882919542317055]
[634600058495592959,634600058495593983,634600058495648255,634600058495649279,634600058495650815,634600058495658495,634600058495659519,634600058503304191,634600058503306239,634600058503341567,634600058503343615,634600058503356927,634600058503358463,634600058503358975,634600058503402495,634600058503404543,634600058503410687,634600058503411711,634600058503431679,634600058503432191,634600058503444991,634600058503445503,634600058503455231,634600058503455743,634600058503458303,634600058503472639,634600058503474687,634600058503487999,634600058503489535,634600058503491071,634600058504447487,634600058504447999,634600058504460799,634600058504461311,634600058504471551,634600058504472575,634600058507233791,634600058507234303,634600058507235839,634600058507236351,634600058507238911,634600058507249151,634600058507249663,634600058507280895,634600058507281919,634600058507283455,634600058507408383,634600058507409407,634600058507430399,634600058507430911,634600058507432447,634600058507432959,634600058507435519,634600058507444735,634600058507446271,634600058507446783,634600058508281343,634600058508282367,634600058508283391,634600058508289023,634600058508291071,634600058508300799,634600058508301823,634600058508303359,634600058508379647,634600058508381695,634600058508387327,634600058508389375,634600058508395007,634600058508396543,634600058508397055,634600058508446207,634600058508447231,634600058508464639,634600058508465663,634600058508467199,634600058508469247,634600058508470271]
[635544851676508863,635544851676508927,635544851676509247,635544851676509311,635544851676509375,635544851676510015,635544851676510079,635544851676961407,635544851676961663,635544851676977791,635544851676978047,635544851677184319,635544851677184447,635544851677185087,635544851677185215,635544851677185407,635544851677186175,635544851677186303,635544851677192511,635544851677192639,635544851677193279,635544851677193599,635544851677193663,635544851677194367,635544851677194495,635544851677203071,635544851677203327,635544851677205567,635544851677205695,635544851677205823,635544851677207743,635544851677207999,635544851677266623,635544851677266687,635544851677267007,635544851677267071,635544851677267263,635544851677267775,635544851677267839,635544851677269183,635544851677269439,635544851677270719,635544851677270783,635544851677271103,635544851677271167,635544851677271487,635544851677271871,635544851677271935,635544851677316159,635544851677316351,635544851677316543,635544851677325951,635544851677326207,635544851677328447,635544851677328639,635544851677328703,635544851677330623,635544851677330879,635544851677340351,635544851677340415,635544851677340735,635544851677340799,635544851677341119,635544851677341503,635544851677341567,635544851677389887,635544851677390079,635544851677390143,635544851677392063,635544851677392319,635544851677397311,635544851677397439,635544851677398079,635544851677398207,635544851677398399,635544851677399167,635544851677399295,635544851677405503,635544851677405631,635544851677406271,635544851677406463,635544851677406591,635544851677407359,635544851677407487]
[639763125756235855,639763125756235887,639763125756236439,639763125756236471,639763125756236679,639763125756236703,639763125756236711,639763125756237391,639763125756237423,639763125756238487,639763125756238519,639763125756238599,639763125756238615,639763125756238639,639763125756238727,639763125756238751,639763125756238759,639763125756266535,639763125756266551,639763125756266823,639763125756266839,639763125756266863,639763125756266895,639763125756266911,639763125756267559,639763125756267575,639763125756267847,639763125756267863,639763125756267887,639763125756267911,639763125756267919,639763125756267959,639763125756286007,639763125756286215,639763125756286239,639763125756286247,639763125756286351,639763125756286367,639763125756288023,639763125756288055,639763125756288143,639763125756288175,639763125756288263,639763125756288287,639763125756288295,639763125756291095,639763125756291103,639763125756291303,639763125756291311,639763125756291463,639763125756291471,639763125756291495,639763125756291607,639763125756291615,639763125756291815,639763125756291823,639763125756291975,639763125756291983,639763125756292023,639763125756403919,639763125756403935,639763125756404263,639763125756404271,639763125756404295,639763125756404303,639763125756404343,639763125756404503,639763125756404511,639763125756404647,639763125756404663,639763125756404943,639763125756404959,639763125756405511,639763125756405527,639763125756405551,639763125756405671,639763125756405687,639763125756408871,639763125756408879,639763125756408903,639763125756408911,639763125756408951,639763125756409111,639763125756409119,639763125756409367,639763125756409399,639763125756409487,639763125756409519,639763125756409623,639763125756409631]
[644178757620498449,644178757620498453,644178757620498458,644178757620498462,644178757620498480,644178757620498483,644178757620498484,644178757620498628,644178757620498629,644178757620498666,644178757620498667,644178757620498672,644178757620498673,644178757620498676,644178757620498705,644178757620498709,644178757620498714,644178757620498718,644178757620498736,644178757620498739,644178757620498740,644178757620498948,644178757620498977,644178757620498979,644178757620498992,644178757620498993,644178757620498998,644178757620499268,644178757620499270,644178757620499280,644178757620499282,644178757620499285,644178757620499297,644178757620499299,644178757620499332,644178757620499333,644178757620499370,644178757620499371,644178757620499376,644178757620499377,644178757620499382,644178757620500492,644178757620500494,644178757620500504,644178757620500506,644178757620500509,644178757620500521,644178757620500523,644178757620500620,644178757620500622,644178757620500632,644178757620500634,644178757620500637,644178757620500649,644178757620500651,644178757620500810,644178757620500811,644178757620500824,644178757620500826,644178757620500829,644178757620501064,644178757620501067,644178757620501068,644178757620501089,644178757620501093,644178757620501098,644178757620501102,644178757620501320,644178757620501323,644178757620501324,644178757620501345,644178757620501349,644178757620501354,644178757620501358,644178757620505988,644178757620505990,644178757620506000,644178757620506002,644178757620506005,644178757620506017,644178757620506019,644178757620506021,644178757620520074,644178757620520075,644178757620520080,644178757620520081,644178757620520086,644178757620520092,644178757620520093,644178757620520138,644178757620520139,644178757620520144,644178757620520145,644178757620520150,644178757620520156,644178757620520157]

View File

@ -0,0 +1,35 @@
-- Tags: no-fasttest

-- Scalar calls: a ring of distance 0, then three failure modes.
-- (The trailing "-- { serverError N }" comments are test-harness directives
-- asserting the expected server error code; they must stay on the query line.)
SELECT h3HexRing(581276613233082367, toUInt16(0));
SELECT h3HexRing(579205132326352334, toUInt16(1)) as hexRing; -- { serverError 117 }
SELECT h3HexRing(581276613233082367, -1); -- { serverError 43 }
SELECT h3HexRing(581276613233082367, toUInt16(-1)); -- { serverError 12 }

DROP TABLE IF EXISTS h3_indexes;

-- Test h3 indices and k selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps
CREATE TABLE h3_indexes (h3_index UInt64, k UInt16) ENGINE = Memory;

INSERT INTO h3_indexes VALUES
    (581276613233082367, 1),
    (581263419093549055, 2),
    (589753847883235327, 3),
    (594082350283882495, 4),
    (598372386957426687, 5),
    (599542359671177215, 6),
    (604296355086598143, 7),
    (608785214872748031, 8),
    (615732192485572607, 9),
    (617056794467368959, 10),
    (624586477873168383, 11),
    (627882919484481535, 12),
    (634600058503392255, 13),
    (635544851677385791, 14),
    (639763125756281263, 15),
    (644178757620501158, 16);

-- arraySort makes the ring ordering deterministic across h3 versions;
-- ORDER BY h3_index keeps rows aligned with the .reference file.
SELECT arraySort(h3HexRing(h3_index, k)) FROM h3_indexes ORDER BY h3_index;

DROP TABLE h3_indexes;

View File

@ -0,0 +1,28 @@
[590080540275638271,590080471556161535,590080883873021951,590106516237844479,590104385934065663,590103630019821567,590103561300344831]
[590080540275638271,590080471556161535,590080608995115007,590104454653542399,590104385934065663,590104523373019135,590103767458775039]
[590080540275638271,590080471556161535,590080608995115007,590104454653542399,590104111056158719,590104523373019135,590105554165170175]
[590080540275638271,590080677714591743,590080608995115007,590104179775635455,590104317214588927,590104248495112191,590105279287263231]
[590080540275638271,590077585338138623,590077310460231679,590079097166626815,590078822288719871,590079028447150079,590094009293078527]
[590080540275638271,590077585338138623,590077310460231679,590079097166626815,590079165886103551,590078891008196607,590092978500927487]
[590080540275638271,590077585338138623,590077173021278207,590077379179708415,590079165886103551,590077860216045567,590092841061974015]
[590080540275638271,590080815153545215,590079784361394175,590096483194241023,590096758072147967,590095727279996927,590094833926799359]
[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096758072147967,590094971365752831,590094765207322623]
[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096689352671231,590094902646276095,590095177524183039]
[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096826791624703,590095933438427135,590096208316334079,590098269900636159]
[590000619524194303,590000344646287359,590000413365764095,589998351781462015,590000894402101247,589998832817799167,589998901537275903,589998626659368959,589972994294546431]
[590000619524194303,590000207207333887,590000413365764095,590001169280008191,590000894402101247,590000963121577983,589975330756755455,589975055878848511,589975124598325247]
[590000619524194303,590000207207333887,590000413365764095,590001169280008191,590000756963147775,590000825682624511,589975330756755455,589974918439895039,589974987159371775]
[590000619524194303,590000207207333887,590000275926810623,590001031841054719,590000756963147775,590000825682624511,589975193317801983,589975262037278719,589973956367220735]
[590000619524194303,590000207207333887,590000275926810623,590001031841054719,590001100560531455,589990998797451263,589991067516927999,589974231245127679,589973818928267263]
[590000619524194303,590000207207333887,590000275926810623,590001031841054719,589990517761114111,589991273675358207,589990861358497791,589990930077974527,589974093806174207]
[590000619524194303,590000482085240831,590277902612824063,590278177490731007,589992648064892927,589992716784369663,589992579345416191,589991548553265151,589991411114311679]
[590000619524194303,590000482085240831,590277902612824063,590278177490731007,589992648064892927,589992510625939455,589992854223323135,589991823431172095,589991685992218623]
[590000619524194303,590000482085240831,590277902612824063,590278177490731007,590278108771254271,589992922942799871,589992785503846399,590126170008190975,590126444886097919]
[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001581596868607,590001719035822079,590259760670965759,590260654024163327,590260379146256383,590257561647710207,590258455000907775,590259485793058815,590259210915151871]
[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001306718961663,590001719035822079,590260516585209855,590260241707302911,590260379146256383,590258317561954303,590258042684047359,590258180123000831,590168226327953407]
[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001306718961663,590001444157915135,590260516585209855,590260241707302911,590260447865733119,590258386281431039,590258042684047359,590258248842477567,590167951450046463]
[590000550804717567,590000207207333887,590000344646287359,590001650316345343,590001306718961663,590001444157915135,590260585304686591,590260310426779647,590172005899173887,590258386281431039,590258111403524095,590173105410801663,590173242849755135]
[590000550804717567,590000207207333887,590000344646287359,589998283061985279,589998420500938751,589999451293089791,589999107695706111,589999313854136319,590172555654987775,590172693093941247,590169875595395071,590169600717488127,590169738156441599]
[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998420500938751,589998145623031807,589999176415182847,589999313854136319,590172624374464511,590172280777080831,590172418216034303,590170356631732223,590170494070685695]
[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998008184078335,589998145623031807,589999176415182847,590000069768380415,590172624374464511,590172349496557567,590172486935511039,590170425351208959,590170081753825279]
[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998008184078335,589998145623031807,589999932329426943,590000069768380415,589999794890473471,590172349496557567,589984126849777663,590170425351208959,590170150473302015]

View File

@ -0,0 +1,44 @@
-- Tags: no-fasttest

DROP TABLE IF EXISTS h3_indexes;

CREATE TABLE h3_indexes (id int, start String, end String) ENGINE = Memory;

-- Start/end cell pairs (hex-encoded resolution-3 H3 indexes) taken from the
-- uber/h3 library test suite.
INSERT INTO h3_indexes VALUES
    (1, '830631fffffffff', '830780fffffffff'),
    (2, '830631fffffffff', '830783fffffffff'),
    (3, '830631fffffffff', '83079dfffffffff'),
    (4, '830631fffffffff', '830799fffffffff'),
    (5, '830631fffffffff', '8306f5fffffffff'),
    (6, '830631fffffffff', '8306e6fffffffff'),
    (7, '830631fffffffff', '8306e4fffffffff'),
    (8, '830631fffffffff', '830701fffffffff'),
    (9, '830631fffffffff', '830700fffffffff'),
    (10, '830631fffffffff', '830706fffffffff'),
    (11, '830631fffffffff', '830733fffffffff'),
    (12, '8301a6fffffffff', '830014fffffffff'),
    (13, '8301a6fffffffff', '830033fffffffff'),
    (14, '8301a6fffffffff', '830031fffffffff'),
    (15, '8301a6fffffffff', '830022fffffffff'),
    (16, '8301a6fffffffff', '830020fffffffff'),
    (17, '8301a6fffffffff', '830024fffffffff'),
    (18, '8301a6fffffffff', '830120fffffffff'),
    (19, '8301a6fffffffff', '830124fffffffff'),
    (20, '8301a6fffffffff', '8308cdfffffffff'),
    (21, '8301a5fffffffff', '831059fffffffff'),
    (22, '8301a5fffffffff', '830b2dfffffffff'),
    (23, '8301a5fffffffff', '830b29fffffffff'),
    (24, '8301a5fffffffff', '830b76fffffffff'),
    (25, '8301a5fffffffff', '830b43fffffffff'),
    (26, '8301a5fffffffff', '830b4efffffffff'),
    (27, '8301a5fffffffff', '830b48fffffffff'),
    (28, '8301a5fffffffff', '830b49fffffffff');

-- Line of cell indexes between each start/end pair; ORDER BY id keeps the
-- output aligned with the .reference file.
SELECT h3Line(stringToH3(start), stringToH3(end)) FROM h3_indexes ORDER BY id;

-- An invalid index must raise an error (harness directive on the query line).
SELECT h3Line(0xffffffffffffff, 0xffffffffffffff); -- { serverError 117 }

DROP TABLE h3_indexes;