Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit eae901f58d: Merge branch 'master' into atomic_metadata5
@@ -17,7 +17,7 @@ function find_reference_sha
# If not master, try to fetch pull/.../{head,merge}
if [ "$PR_TO_TEST" != "0" ]
then
-    git -C ch fetch origin "refs/pull/$PR_TO_TEST/*:refs/heads/pr/*"
+    git -C ch fetch origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
fi

# Go back from the revision to be tested, trying to find the closest published
@@ -28,9 +28,9 @@ function find_reference_sha
# and SHA_TO_TEST, but a revision that is merged with recent master, given
# by pull/.../merge ref.
# Master is the first parent of the pull/.../merge.
-if git -C ch rev-parse pr/merge
+if git -C ch rev-parse "pull/$PR_TO_TEST/merge"
then
-    start_ref=pr/merge~
+    start_ref="pull/$PR_TO_TEST/merge~"
fi

while :
@@ -73,11 +73,11 @@ if [ "$REF_PR" == "" ]; then echo Reference PR is not specified ; exit 1 ; fi

(
    git -C ch log -1 --decorate "$SHA_TO_TEST" ||:
-   if git -C ch rev-parse pr/merge &> /dev/null
+   if git -C ch rev-parse "pull/$PR_TO_TEST/merge" &> /dev/null
    then
        echo
        echo Real tested commit is:
-       git -C ch log -1 --decorate pr/merge
+       git -C ch log -1 --decorate "pull/$PR_TO_TEST/merge"
    fi
) | tee right-commit.txt
@@ -1,4 +1,4 @@
-## function-name {#function-name-in-lower-case}
+## functionName {#functionname-in-lower-case}

Short description.
@@ -1,4 +1,4 @@
-## setting-name {#setting-name-in-lower-case}
+## setting_name {#setting_name}

Description.
@@ -12,6 +12,7 @@ toc_title: Integrations

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -35,7 +35,7 @@ toc_title: Adopters
| [Exness](https://www.exness.com){.favicon} | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| [Geniee](https://geniee.co.jp){.favicon} | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| [HUYA](https://www.huya.com/){.favicon} | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| [Idealista](https://www.idealista.com){.favicon} | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| [Idealista](https://www.idealista.com){.favicon} | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| [Infovista](https://www.infovista.com/){.favicon} | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| [InnoGames](https://www.innogames.com){.favicon} | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| [Integros](https://integros.com){.favicon} | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
@@ -31,7 +31,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` domain supports custom input format as IPv4-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
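Because the column is declared with the `IPv4` domain, the inserted strings are stored in the compact binary form of the underlying `UInt32`; a quick check along those lines, assuming the `hits` table from the hunk above:

``` sql
-- toTypeName(from) reports IPv4; hex(from) shows the 4-byte stored value
SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
```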
@@ -31,7 +31,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` domain supports custom input as IPv6-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -12,6 +12,8 @@ The following aggregate functions are supported:

- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference.md#groupbitand)
- [`groupBitOr`](../../sql-reference/aggregate-functions/reference.md#groupbitor)
- [`groupBitXor`](../../sql-reference/aggregate-functions/reference.md#groupbitxor)
+- [`groupArrayArray`](../../sql-reference/aggregate-functions/reference.md#agg_function-grouparray)
+- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference.md#groupuniqarrayx-groupuniqarraymax-sizex)

Values of the `SimpleAggregateFunction(func, Type)` look and are stored the same way as `Type`, so you do not need to apply functions with `-Merge`/`-State` suffixes. `SimpleAggregateFunction` has better performance than `AggregateFunction` with the same aggregation function.
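As a minimal sketch of how such a column is declared (the table and column names here are illustrative, not from the commit):

``` sql
CREATE TABLE simple_agg
(
    id UInt64,
    -- keeps only the current aggregated value, not a full AggregateFunction state
    max_value SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY id;
```

On background merges, `AggregatingMergeTree` collapses rows with the same key and applies `max` directly to the stored values, which is what makes the simple variant cheaper than `AggregateFunction(max, UInt64)`.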
@@ -701,13 +701,13 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**

Returns an array of differences between adjacent elements.

-Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
+Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
@@ -753,7 +753,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**
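Both functions can be tried directly on literals; a small illustration of the documented behavior:

``` sql
SELECT arrayDifference([1, 2, 3, 4]);   -- [0, 1, 1, 1]; the first element is always 0
SELECT arrayDistinct([1, 2, 2, 3, 1]);  -- [1, 2, 3]; only unique elements remain
```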
@@ -1200,4 +1200,52 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers
└────────┴────────────────────────────────┴──────────────────────────────────┘
```

## randomString {#randomstring}

Generates a binary string of the specified length filled with random bytes (including zero bytes).

**Syntax**

``` sql
randomString(length)
```

**Parameters**

- `length` — String length. Positive integer.

**Returned value**

- String filled with random bytes.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

Query:

``` sql
SELECT randomString(30) AS str, length(str) AS len FROM numbers(2) FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
str: 3 G : pT ?w тi k aV f6
len: 30

Row 2:
──────
str: 9 ,] ^ ) ]?? 8
len: 30
```

**See Also**

- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)

[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) <!--hide-->
@@ -51,7 +51,11 @@ Modifies how matching by "join keys" is performed

`ASOF JOIN` is useful when you need to join records that have no exact match.

-Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be alone in a table, and should be one of the data types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`.
+The algorithm requires a special column in the tables. This column:
+
+- Must contain an ordered sequence.
+- Can be one of the following types: [Int*, UInt*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Date](../../data-types/date.md), [DateTime](../../data-types/datetime.md), [Decimal*](../../data-types/decimal.md).
+- Can't be the only column in the `JOIN` clause.
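For example, a minimal sketch of an `ASOF JOIN` over such a column (the table and column names are illustrative): for each left-side row it picks the right-side row with the closest timestamp that satisfies the inequality.

``` sql
SELECT e.user_id, e.ts, q.value
FROM events AS e
ASOF JOIN quotes AS q
    ON e.user_id = q.user_id AND e.ts >= q.ts;
```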
Syntax `ASOF JOIN ... ON`:
@@ -24,7 +24,7 @@ This release contains bug fixes for the previous release 1.1.54310:

#### New Features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
@@ -12,6 +12,7 @@ toc_title: Integrations

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -37,7 +37,7 @@ toc_title: Adoptante
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` domain supports custom input format as IPv4-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` domain supports custom input as IPv6-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -702,13 +702,13 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**

Returns an array of differences between adjacent elements.

-Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
+Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
@@ -754,7 +754,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**
@@ -26,7 +26,7 @@ This release contains bug fixes for the previous release 1.1.54310:

#### New Features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
@@ -14,6 +14,7 @@ toc_title: "\u06CC\u06A9\u067E\u0627\u0631\u0686\u06AF\u06CC"

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -37,7 +37,7 @@ toc_title: "\u067E\u0630\u06CC\u0631\u0627"
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` domain supports custom input format as IPv4-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` domain supports custom input as IPv6-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -702,13 +702,13 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**

Returns an array of differences between adjacent elements.

-Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
+Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
@@ -754,7 +754,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**
@@ -26,7 +26,7 @@ toc_title: '2017'

#### New Features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
@@ -14,6 +14,7 @@ toc_title: "Int\xE9gration"

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -37,7 +37,7 @@ toc_title: Adoptant
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` domain supports custom input format as IPv4-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` domain supports custom input as IPv6-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -702,13 +702,13 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**

Returns an array of differences between adjacent elements.

-Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
+Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
@@ -754,7 +754,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**
@@ -26,7 +26,7 @@ This release contains bug fixes for the previous release 1.1.54310:

#### New Features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
@@ -14,6 +14,7 @@ toc_title: "\u7D71\u5408"

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -37,7 +37,7 @@ toc_title: "\u30A2\u30C0\u30D7\u30BF\u30FC"
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
-| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
@@ -69,4 +69,4 @@ ClickHouse provides ways to trade accuracy for performance

2. There is no ability to modify or delete already inserted data at a high rate and with low latency. Batch deletes and updates are available to clean up or modify data, for example, to comply with [GDPR](https://gdpr-info.eu).
3. Because the index is sparse, ClickHouse is not well suited for queries that retrieve single rows by key.

-[Original article](https://clickhouse.yandex/docs/en/introduction/distinctive_features/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/introduction/distinctive_features/) <!--hide-->
@@ -48,4 +48,4 @@ Yandex.Metrica had a system called Metrage for aggregating data

To remove the limitations of OLAPServer and solve the problem of working with non-aggregated data for reports, we developed the ClickHouse DBMS.

-[Original article](https://clickhouse.yandex/docs/en/introduction/history/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/introduction/history/) <!--hide-->
@@ -5,9 +5,9 @@ toc_title: "\u30D1\u30D5\u30A9\u30FC\u30DE\u30F3\u30B9"

# Performance {#pahuomansu}

-According to internal testing results at Yandex, ClickHouse shows the best performance (both the highest throughput on long queries and the lowest latency on short queries) among systems of its class that were available for testing in comparable operating scenarios. You can view the test results on a [separate page](https://clickhouse.yandex/benchmark/dbms/).
+According to internal testing results at Yandex, ClickHouse shows the best performance (both the highest throughput on long queries and the lowest latency on short queries) among systems of its class that were available for testing in comparable operating scenarios. You can view the test results on a [separate page](https://clickhouse.tech/benchmark/dbms/).

-This has also been confirmed by numerous independent benchmarks. They are not hard to find with an internet search, or you can see [our collection of related links](https://clickhouse.yandex/#independent-benchmarks).
+This has also been confirmed by numerous independent benchmarks. They are not hard to find with an internet search, or you can see [our collection of related links](https://clickhouse.tech/#independent-benchmarks).

## Throughput for a single large query {#dan-yi-noju-da-nakuerinosurupututo}
@@ -27,4 +27,4 @@ According to internal testing results at Yandex, ClickHouse shows

We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting tab-separated dump data into a MergeTree table, the insertion speed is 50-200 MB/s. If the inserted rows are around 1 KB in size, this is 50,000-200,000 rows per second. If the rows are small, the performance in rows per second is higher (on Banner System data `>` 500,000 rows/s, on Graphite data `>` 1,000,000 rows/s). To improve performance, you can issue multiple INSERT queries in parallel, and performance scales linearly.

-[Original article](https://clickhouse.yandex/docs/ja/introduction/performance/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/ja/introduction/performance/) <!--hide-->
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` domain supports custom input format as IPv4-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
@@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` domain supports custom input as IPv6-strings:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -702,13 +702,13 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned value**

Returns an array of differences between adjacent elements.

-Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
+Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
@@ -754,7 +754,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned value**
@@ -26,7 +26,7 @@ toc_title: '2017'

#### New Features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
@@ -7,6 +7,7 @@

- Relational database management systems
    - [MySQL](https://www.mysql.com)
+        - [mysql2ch](https://github.com/long2ice/mysql2ch)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
@@ -78,7 +78,7 @@ ClickHouse checks the `min_part_size` and `min_part_size_ratio` conditions
<default_profile>default</default_profile>
```

-## dictionaries\_config {#dictionaries-config}
+## dictionaries\_config {#server_configuration_parameters-dictionaries_config}

The path to the configuration file for external dictionaries.
@@ -95,7 +95,7 @@ ClickHouse checks the `min_part_size` and `min_part_size_ratio` conditions
<dictionaries_config>*_dictionary.xml</dictionaries_config>
```

-## dictionaries\_lazy\_load {#dictionaries-lazy-load}
+## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load}

Lazy loading of dictionaries.
@@ -26,7 +26,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;

`IPv4` supports inserting values as strings with the text representation of an IPv4 address:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```
@@ -26,7 +26,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;

`IPv6` supports inserting values as strings with the text representation of an IPv6 address:

``` sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```
@@ -692,7 +692,7 @@ arrayDifference(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/ru/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/ru/data_types/array/).

**Returned value**
@@ -742,7 +742,7 @@ arrayDistinct(array)

**Parameters**

-- `array` – [Array](https://clickhouse.yandex/docs/ru/data_types/array/).
+- `array` – [Array](https://clickhouse.tech/docs/ru/data_types/array/).

**Returned value**
@@ -1153,4 +1153,52 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers
└────────┴────────────────────────────────┴──────────────────────────────────┘
```

## randomString {#randomstring}

Generates a binary string of the specified length filled with random bytes (including zero bytes).

**Syntax**

``` sql
randomString(length)
```

**Parameters**

- `length` — String length. Positive integer.

**Returned value**

- String filled with random bytes.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

Query:

``` sql
SELECT randomString(30) AS str, length(str) AS len FROM numbers(2) FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
str: 3 G : pT ?w тi k aV f6
len: 30

Row 2:
──────
str: 9 ,] ^ ) ]?? 8
len: 30
```

**See Also**

- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)

[Original article](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) <!--hide-->
@@ -45,7 +45,11 @@ FROM <left_table>

`ASOF JOIN` is useful when you need to join records that have no exact match.

-Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be the only one in the table and must be one of the types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`.
+The algorithm requires a special column in the tables. This column:
+
+- Must contain an ordered sequence.
+- Can be one of the following types: [Int*, UInt*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Date](../../data-types/date.md), [DateTime](../../data-types/datetime.md), [Decimal*](../../data-types/decimal.md).
+- Can't be the only column in the `JOIN` clause.

Syntax `ASOF JOIN ... ON`:
@@ -38,7 +38,7 @@

## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}

Reloads all dictionaries that have previously been loaded successfully.
-Lazy loading of dictionaries is enabled by default ([dictionaries\_lazy\_load](../../sql-reference/statements/system.md#dictionaries-lazy-load)), so dictionaries are not loaded automatically at startup but only on first access through dictGet or a SELECT from a table with ENGINE = Dictionary. After that, such dictionaries (LOADED) are reloaded by the `system reload dictionaries` command.
+Lazy loading of dictionaries is enabled by default ([dictionaries\_lazy\_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so dictionaries are not loaded automatically at startup but only on first access through dictGet or a SELECT from a table with ENGINE = Dictionary. After that, such dictionaries (LOADED) are reloaded by the `system reload dictionaries` command.
Always returns `Ok.`, regardless of the result of the dictionary update.
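As a usage illustration, the statement takes no arguments:

``` sql
-- reloads every dictionary that was already loaded successfully; returns Ok.
SYSTEM RELOAD DICTIONARIES;
```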
## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary}
@@ -1,4 +1,4 @@
-# generateRandom {#generateRandom}
+# generateRandom {#generaterandom}

Generates random data with the given schema.
Allows populating test tables with data.
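A short sketch of a call, assuming the documented signature `generateRandom('schema', [random_seed], [max_string_length], [max_array_length])`:

``` sql
-- column names and types in the schema string are illustrative
SELECT * FROM generateRandom('id UInt64, name String, score Float64', 1, 10) LIMIT 3;
```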
docs/tools/blog.py (new file, 107 lines)
@@ -0,0 +1,107 @@
#!/usr/bin/env python3
import datetime
import logging
import os
import time

import nav  # monkey patches mkdocs

import mkdocs.commands
from mkdocs import config
from mkdocs import exceptions

import mdx_clickhouse
import redirects

import util


def build_for_lang(lang, args):
    logging.info(f'Building {lang} blog')

    try:
        theme_cfg = {
            'name': None,
            'custom_dir': os.path.join(os.path.dirname(__file__), '..', args.theme_dir),
            'language': lang,
            'direction': 'ltr',
            'static_templates': ['404.html'],
            'extra': {
                'now': int(time.mktime(datetime.datetime.now().timetuple()))  # TODO better way to avoid caching
            }
        }

        # the following list of languages is sorted according to
        # https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers
        languages = {
            'en': 'English',
            'ru': 'Русский'
        }

        site_names = {
            'en': 'ClickHouse Blog',
            'ru': 'Блог ClickHouse '
        }

        assert len(site_names) == len(languages)

        site_dir = os.path.join(args.blog_output_dir, lang)

        plugins = ['macros']
        if args.htmlproofer:
            plugins.append('htmlproofer')

        website_url = 'https://clickhouse.tech'
        site_name = site_names.get(lang, site_names['en'])
        blog_nav, post_meta = nav.build_blog_nav(lang, args)
        raw_config = dict(
            site_name=site_name,
            site_url=f'{website_url}/blog/{lang}/',
            docs_dir=os.path.join(args.blog_dir, lang),
            site_dir=site_dir,
            strict=True,
            theme=theme_cfg,
            nav=blog_nav,
            copyright='©2016–2020 Yandex LLC',
            use_directory_urls=True,
            repo_name='ClickHouse/ClickHouse',
            repo_url='https://github.com/ClickHouse/ClickHouse/',
            edit_uri=f'edit/master/website/blog/{lang}',
            markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS,
            plugins=plugins,
            extra=dict(
                now=datetime.datetime.now().isoformat(),
                rev=args.rev,
                rev_short=args.rev_short,
                rev_url=args.rev_url,
                website_url=website_url,
                events=args.events,
                languages=languages,
                includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'),
                is_amp=False,
                is_blog=True,
                post_meta=post_meta
            )
        )

        cfg = config.load_config(**raw_config)
        mkdocs.commands.build.build(cfg)

        redirects.build_blog_redirects(args)

        # TODO: AMP for blog
        # if not args.skip_amp:
        #     amp.build_amp(lang, args, cfg)

        logging.info(f'Finished building {lang} blog')

    except exceptions.ConfigurationError as e:
        raise SystemExit('\n' + str(e))


def build_blog(args):
    tasks = []
    for lang in args.blog_lang.split(','):
        if lang:
            tasks.append((lang, args,))
    util.run_function_in_parallel(build_for_lang, tasks, threads=False)
@@ -20,8 +20,8 @@ from mkdocs import exceptions
import mkdocs.commands.build

import amp
+import blog
import mdx_clickhouse

import redirects
import single_page
import test
@@ -95,25 +95,6 @@ def build_for_lang(lang, args):
    else:
        site_dir = os.path.join(args.docs_output_dir, lang)

-   markdown_extensions = [
-       'mdx_clickhouse',
-       'admonition',
-       'attr_list',
-       'codehilite',
-       'nl2br',
-       'sane_lists',
-       'pymdownx.details',
-       'pymdownx.magiclink',
-       'pymdownx.superfences',
-       'extra',
-       {
-           'toc': {
-               'permalink': True,
-               'slugify': mdx_clickhouse.slugify
-           }
-       }
-   ]

    plugins = ['macros']
    if args.htmlproofer:
        plugins.append('htmlproofer')
@@ -133,7 +114,7 @@ def build_for_lang(lang, args):
        repo_name='ClickHouse/ClickHouse',
        repo_url='https://github.com/ClickHouse/ClickHouse/',
        edit_uri=f'edit/master/docs/{lang}',
-       markdown_extensions=markdown_extensions,
+       markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS,
        plugins=plugins,
        extra=dict(
            now=datetime.datetime.now().isoformat(),
@@ -147,14 +128,15 @@ def build_for_lang(lang, args):
            events=args.events,
            languages=languages,
            includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'),
-           is_amp=False
+           is_amp=False,
+           is_blog=False
        )
    )

    if os.path.exists(config_path):
        raw_config['config_file'] = config_path
    else:
-       raw_config['nav'] = nav.build_nav(lang, args)
+       raw_config['nav'] = nav.build_docs_nav(lang, args)

    cfg = config.load_config(**raw_config)
@@ -187,7 +169,7 @@ def build_docs(args):
        if lang:
            tasks.append((lang, args,))
    util.run_function_in_parallel(build_for_lang, tasks, threads=False)
-   redirects.build_redirects(args)
+   redirects.build_docs_redirects(args)


def build(args):
@@ -204,6 +186,9 @@ def build(args):
        from github import build_releases
        build_releases(args, build_docs)

+   if not args.skip_blog:
+       blog.build_blog(args)

    if not args.skip_website:
        website.process_benchmark_results(args)
        website.minify_website(args)
@@ -215,9 +200,11 @@ if __name__ == '__main__':
    website_dir = os.path.join('..', 'website')
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--lang', default='en,es,fr,ru,zh,ja,tr,fa')
+   arg_parser.add_argument('--blog-lang', default='en,ru')
    arg_parser.add_argument('--docs-dir', default='.')
    arg_parser.add_argument('--theme-dir', default=website_dir)
    arg_parser.add_argument('--website-dir', default=website_dir)
+   arg_parser.add_argument('--blog-dir', default=os.path.join(website_dir, 'blog'))
    arg_parser.add_argument('--output-dir', default='build')
    arg_parser.add_argument('--enable-stable-releases', action='store_true')
    arg_parser.add_argument('--stable-releases-limit', type=int, default='3')
@@ -230,6 +217,7 @@ if __name__ == '__main__':
    arg_parser.add_argument('--skip-amp', action='store_true')
    arg_parser.add_argument('--skip-pdf', action='store_true')
    arg_parser.add_argument('--skip-website', action='store_true')
+   arg_parser.add_argument('--skip-blog', action='store_true')
    arg_parser.add_argument('--skip-git-log', action='store_true')
    arg_parser.add_argument('--test-only', action='store_true')
    arg_parser.add_argument('--minify', action='store_true')
@@ -249,6 +237,7 @@ if __name__ == '__main__':
    logging.getLogger('MARKDOWN').setLevel(logging.INFO)

    args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), 'docs')
+   args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), 'blog')

    from github import choose_latest_releases, get_events
    args.stable_releases = choose_latest_releases(args) if args.enable_stable_releases else []
@@ -259,6 +248,7 @@ if __name__ == '__main__':

    if args.test_only:
        args.skip_multi_page = True
+       args.skip_blog = True
        args.skip_website = True
        args.skip_pdf = True
        args.skip_amp = True
@@ -18,6 +18,30 @@ import amp
import website


+def slugify(value, separator):
+    return slugify_impl.slugify(value, separator=separator, word_boundary=True, save_order=True)
+
+
+MARKDOWN_EXTENSIONS = [
+    'mdx_clickhouse',
+    'admonition',
+    'attr_list',
+    'codehilite',
+    'nl2br',
+    'sane_lists',
+    'pymdownx.details',
+    'pymdownx.magiclink',
+    'pymdownx.superfences',
+    'extra',
+    {
+        'toc': {
+            'permalink': True,
+            'slugify': slugify
+        }
+    }
+]
+
+
class ClickHouseLinkMixin(object):

    def handleMatch(self, m, data):
@@ -72,10 +96,6 @@ def makeExtension(**kwargs):
    return ClickHouseMarkdown(**kwargs)


-def slugify(value, separator):
-    return slugify_impl.slugify(value, separator=separator, word_boundary=True, save_order=True)
-
-
def get_translations(dirname, lang):
    import babel.support
    return babel.support.Translations.load(
@@ -1,4 +1,5 @@
+import collections
import datetime
import logging
import os
@@ -19,7 +20,8 @@ def build_nav_entry(root, args):
        return None, None, None
    result_items = []
    index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md'))
-   current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', find_first_header(index_content)))
+   current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title'))
+   current_title = current_title or index_meta.get('title', find_first_header(index_content))
    for filename in os.listdir(root):
        path = os.path.join(root, filename)
        if os.path.isdir(path):
@@ -47,7 +49,7 @@ def build_nav_entry(root, args):
    return index_meta.get('toc_priority', 10000), current_title, result


-def build_nav(lang, args):
+def build_docs_nav(lang, args):
    docs_dir = os.path.join(args.docs_dir, lang)
    _, _, nav = build_nav_entry(docs_dir, args)
    result = []
@@ -64,10 +66,50 @@ def build_nav(lang, args):
    key = list(result[0].keys())[0]
    result[0][key][index_key] = 'index.md'
    result[0][key].move_to_end(index_key, last=False)
+   print('result', result)
    return result


+def build_blog_nav(lang, args):
+    blog_dir = os.path.join(args.blog_dir, lang)
+    years = sorted(os.listdir(blog_dir), reverse=True)
+    result_nav = [{'hidden': 'index.md'}]
+    post_meta = collections.OrderedDict()
+    for year in years:
+        year_dir = os.path.join(blog_dir, year)
+        if not os.path.isdir(year_dir):
+            continue
+        result_nav.append({year: collections.OrderedDict()})
+        posts = []
+        post_meta_items = []
+        for post in os.listdir(year_dir):
+            meta, _ = util.read_md_file(os.path.join(year_dir, post))
+            post_date = meta['date']
+            post_title = meta['title']
+            if datetime.date.fromisoformat(post_date) > datetime.date.today():
+                continue
+            posts.append(
+                (post_date, post_title, os.path.join(year, post),)
+            )
+            if post_title in post_meta:
+                raise RuntimeError(f'Duplicate post title: {post_title}')
+            if not post_date.startswith(f'{year}-'):
+                raise RuntimeError(f'Post date {post_date} doesn\'t match the folder year {year}: {post_title}')
+            post_url_part = post.replace('.md', '')
+            post_meta_items.append((post_date, {
+                'date': post_date,
+                'title': post_title,
+                'image': meta.get('image'),
+                'url': f'/blog/{lang}/{year}/{post_url_part}/'
+            },))
+        for _, title, path in sorted(posts, reverse=True):
+            result_nav[-1][year][title] = path
+        for _, post_meta_item in sorted(post_meta_items,
+                                        reverse=True,
+                                        key=lambda item: item[0]):
+            post_meta[post_meta_item['title']] = post_meta_item
+    return result_nav, post_meta
+
+
def _custom_get_navigation(files, config):
    nav_config = config['nav'] or mkdocs.structure.nav.nest_paths(f.src_path for f in files.documentation_pages())
    items = mkdocs.structure.nav._data_to_navigation(nav_config, files, config)
@@ -25,24 +25,34 @@ def write_redirect_html(out_path, to_url):
    </html>''')


-def build_redirect_html(args, from_path, to_path):
-    for lang in args.lang.split(','):
-        out_path = os.path.join(
-            args.docs_output_dir, lang,
-            from_path.replace('/index.md', '/index.html').replace('.md', '/index.html')
-        )
-        version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/'
-        target_path = to_path.replace('/index.md', '/').replace('.md', '/')
-        to_url = f'/docs{version_prefix}{lang}/{target_path}'
-        to_url = to_url.strip()
-        write_redirect_html(out_path, to_url)
+def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path):
+    out_path = os.path.join(
+        output_dir, lang,
+        from_path.replace('/index.md', '/index.html').replace('.md', '/index.html')
+    )
+    version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/'
+    target_path = to_path.replace('/index.md', '/').replace('.md', '/')
+    to_url = f'/{base_prefix}{version_prefix}{lang}/{target_path}'
+    to_url = to_url.strip()
+    write_redirect_html(out_path, to_url)


-def build_redirects(args):
+def build_docs_redirects(args):
    with open(os.path.join(args.docs_dir, 'redirects.txt'), 'r') as f:
        for line in f:
-           from_path, to_path = line.split(' ', 1)
-           build_redirect_html(args, from_path, to_path)
+           for lang in args.lang.split(','):
+               from_path, to_path = line.split(' ', 1)
+               build_redirect_html(args, 'docs', lang, args.docs_output_dir, from_path, to_path)
+
+
+def build_blog_redirects(args):
+    for lang in args.blog_lang.split(','):
+        redirects_path = os.path.join(args.blog_dir, lang, 'redirects.txt')
+        if os.path.exists(redirects_path):
+            with open(redirects_path, 'r') as f:
+                for line in f:
+                    from_path, to_path = line.split(' ', 1)
+                    build_redirect_html(args, 'blog', lang, args.blog_output_dir, from_path, to_path)


def build_static_redirects(args):
@@ -17,20 +17,56 @@ import jsmin
import mdx_clickhouse


+def handle_iframe(iframe, soup):
+    if not iframe.attrs['src'].startswith('https://www.youtube.com/'):
+        raise RuntimeError('iframes are allowed only for YouTube')
+    wrapper = soup.new_tag('div')
+    wrapper.attrs['class'] = ['embed-responsive', 'embed-responsive-16by9']
+    iframe.insert_before(wrapper)
+    iframe.extract()
+    wrapper.insert(0, iframe)
+    if 'width' in iframe.attrs:
+        del iframe.attrs['width']
+    if 'height' in iframe.attrs:
+        del iframe.attrs['height']
+    iframe.attrs['allow'] = 'accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture'
+    iframe.attrs['class'] = 'embed-responsive-item'
+    iframe.attrs['frameborder'] = '0'
+    iframe.attrs['allowfullscreen'] = '1'
+
+
def adjust_markdown_html(content):
    soup = bs4.BeautifulSoup(
        content,
        features='html.parser'
    )

    for a in soup.find_all('a'):
        a_class = a.attrs.get('class')
        if a_class and 'headerlink' in a_class:
            a.string = '\xa0'

+   for iframe in soup.find_all('iframe'):
+       handle_iframe(iframe, soup)

    for img in soup.find_all('img'):
+       if img.attrs.get('alt') == 'iframe':
+           img.name = 'iframe'
+           img.string = ''
+           handle_iframe(img, soup)
+           continue
        img_class = img.attrs.get('class')
        if img_class:
            img.attrs['class'] = img_class + ['img-fluid']
        else:
            img.attrs['class'] = 'img-fluid'

    for details in soup.find_all('details'):
        for summary in details.find_all('summary'):
            if summary.parent != details:
                summary.extract()
                details.insert(0, summary)

    for div in soup.find_all('div'):
        div_class = div.attrs.get('class')
        is_admonition = div_class and 'admonition' in div.attrs.get('class')
@ -41,10 +77,12 @@ def adjust_markdown_html(content):
                a.attrs['class'] = a_class + ['alert-link']
            else:
                a.attrs['class'] = 'alert-link'

        for p in div.find_all('p'):
            p_class = p.attrs.get('class')
            if is_admonition and p_class and ('admonition-title' in p_class):
                p.attrs['class'] = p_class + ['alert-heading', 'display-6', 'mb-2']

        if is_admonition:
            div.attrs['role'] = 'alert'
            if ('info' in div_class) or ('note' in div_class):
@ -136,6 +174,7 @@ def get_css_in(args):
        f"'{args.website_dir}/css/bootstrap.css'",
        f"'{args.website_dir}/css/docsearch.css'",
        f"'{args.website_dir}/css/base.css'",
        f"'{args.website_dir}/css/blog.css'",
        f"'{args.website_dir}/css/docs.css'",
        f"'{args.website_dir}/css/highlight.css'"
    ]

@ -14,6 +14,7 @@ toc_title: Entegrasyonlar

- Relational database management systems
    - [MySQL](https://www.mysql.com)
    - [mysql2ch](https://github.com/long2ice/mysql2ch)
    - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
    - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
    - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)

@ -37,7 +37,7 @@ toc_title: Benimseyenler
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">Innogames</a> | Gaming | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |

@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;
The `IPv4` domain supports custom input as IPv4 strings:

``` sql
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```

@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;
The `IPv6` domain supports custom input as IPv6 strings:

``` sql
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```

@ -702,13 +702,13 @@ arrayDifference(array)

**Parameters**

- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**

Returns an array of differences between adjacent elements.

Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/).
Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/).

**Example**
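
The example itself is elided by the diff context; a minimal sketch of the behavior:

``` sql
SELECT arrayDifference([1, 2, 3, 4]);
-- Returns [0, 1, 1, 1]: the first element is 0, each following
-- element is the difference from the previous element.
```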
@ -754,7 +754,7 @@ arrayDistinct(array)

**Parameters**

- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/).
- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/).

**Returned values**
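
The description of the returned values is elided by the diff context; a minimal sketch of the behavior (`arrayDistinct` keeps only the unique elements):

``` sql
SELECT arrayDistinct([1, 2, 2, 3, 1]);
-- Returns [1, 2, 3]
```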
@ -26,7 +26,7 @@ Bu sürüm önceki sürüm 1.1.54310 için hata düzeltmeleri içerir:
#### New features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines.
- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.

@ -1,7 +1,5 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "\u6885\u6811\u5BB6\u65CF"
toc_folder_title: "合并树家族"
toc_priority: 28
---

@ -1,6 +1,4 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 37
toc_title: "\u7248\u672C\u96C6\u5408\u5728\u65B0\u6811"
---
@ -33,23 +31,23 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

For a description of query parameters, see the [query description](../../../sql-reference/statements/create.md).

**Motor parameters**
**Engine parameters**

``` sql
VersionedCollapsingMergeTree(sign, version)
```

- `sign` — Name of the column with the type of row: `1` is a "state" row, `-1` is a "cancel" row.
- `sign` — Name of the column that specifies the row type: `1` is a "state" row, `-1` is a "cancel" row.

    The column data type should be `Int8`.

- `version` — Name of the column with the version of the object state.
- `version` — Name of the column that specifies the version of the object state.

    The column data type should be `UInt*`.

**Query clauses**
**Query Clauses**

When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
When you create a `VersionedCollapsingMergeTree` table, you need the same [clauses](mergetree.md) as when you create a `MergeTree` table.

<details markdown="1">

@ -69,17 +67,17 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`.

- `sign` — Name of the column with the type of row: `1` is a "state" row, `-1` is a "cancel" row.
- `sign` — Name of the column that specifies the row type: `1` is a "state" row, `-1` is a "cancel" row.

    Column Data Type — `Int8`.

- `version` — Name of the column with the version of the object state.
- `version` — Name of the column that specifies the version of the object state.

    The column data type should be `UInt*`.

</details>

## Crash {#table_engines_versionedcollapsingmergetree}
## Collapsing {#table_engines_versionedcollapsingmergetree}

### Data {#data}

@ -125,23 +123,23 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

1. The program that writes the data should remember the state of an object in order to cancel it. The "cancel" string should be the "state" string with the opposite `Sign`. This increases the initial size of storage but allows the data to be written quickly.
2. Long growing arrays in columns reduce the efficiency of the engine because of the write load. The simpler the data, the higher the efficiency.
3. `SELECT` results depend heavily on the consistency of the history of object changes. Be accurate when preparing data for insertion. You can get unpredictable results from inconsistent data, such as negative values for non-negative metrics like session depth.
3. `SELECT` results depend heavily on the consistency of the history of object changes. Be accurate when preparing data for insertion. Inconsistent data will lead to unpredictable results, such as negative values for non-negative metrics like session depth.

### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}

When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and a different `Sign`. The order of rows does not matter.
When ClickHouse merges data parts, it deletes pairs of rows that have the same primary key and version but different `Sign` values. The order of rows does not matter.

When ClickHouse inserts data, it sorts rows by primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for sorting.

## Selecting data {#selecting-data}

ClickHouse does not guarantee that all rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for the subsequent merging of data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads and cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely "collapsed" data from a `VersionedCollapsingMergeTree` table.
ClickHouse does not guarantee that all rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for the subsequent merging of data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads and cannot predict the order of rows in the result. This means that, if it is necessary to get completely "collapsed" data from a `VersionedCollapsingMergeTree` table, aggregation is required.

To finish collapsing, write a query with a `GROUP BY` clause that accounts for the sign, and aggregate functions. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`.

The aggregates `count`, `sum`, and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` cannot be calculated, because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states.
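
A minimal sketch of such a collapsing query, assuming the `UAct` table from the usage example below (the column names `UserID`, `PageViews`, `Duration`, `Sign`, `Version` are not shown in this diff context and are assumed here):

``` sql
SELECT
    UserID,
    sum(PageViews * Sign) AS PageViews,  -- multiply by Sign so that "cancel" rows subtract
    sum(Duration * Sign) AS Duration,
    Version
FROM UAct
GROUP BY UserID, Version
HAVING sum(Sign) > 0;  -- keep only objects that still have a live state
```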

If you need to extract data with "collapsing" but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables.
If you need to extract data with "collapsing" but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier in the `FROM` clause. This approach is inefficient and should not be used with large tables.

## Example of use {#example-of-use}

@ -233,6 +231,6 @@ SELECT * FROM UAct FINAL
└─────────────────────┴───────────┴──────────┴──────┴─────────┘
```

This is a very inefficient way to select data. Do not use it for big tables.
This is a very inefficient way to select data. Do not use it for tables with a large amount of data.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) <!--hide-->

@ -7,6 +7,7 @@

- Relational database management systems
    - [MySQL](https://www.mysql.com)
    - [mysql2ch](https://github.com/long2ice/mysql2ch)
    - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
    - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
    - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)

@ -35,7 +35,7 @@ toc_title: "\u91C7\u7528\u8005"
| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| [InnoGames](https://www.innogames.com) | Gaming | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |

@ -24,7 +24,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;
For both writes and queries, the `IPv4` type recognizes a more human-readable input/output format:

``` sql
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');

SELECT * FROM hits;
```

@ -24,7 +24,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;
For both writes and queries, the `IPv6` type recognizes a more human-readable input/output format:

``` sql
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');

SELECT * FROM hits;
```

@ -1,15 +1,13 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "SQL\u53C2\u8003"
toc_folder_title: SQL参考
toc_hidden: true
toc_priority: 28
toc_title: "\u9690\u85CF"
toc_title: hidden
---

# SQL Reference {#sql-reference}

ClickHouse supports the following types of queries:
ClickHouse supports the following forms of queries:

- [SELECT](statements/select/index.md)
- [INSERT INTO](statements/insert-into.md)

@ -17,4 +15,4 @@ ClickHouse支持以下类型的查询:
- [ALTER](statements/alter.md#query_language_queries_alter)
- [Other types of queries](statements/misc.md)

[Original article](https://clickhouse.tech/docs/en/sql-reference/) <!--hide-->
[Original document](https://clickhouse.tech/docs/zh/sql-reference/) <!--hide-->

@ -26,7 +26,7 @@ toc_title: '2017'
#### New features: {#new-features}

- Custom partitioning key for the MergeTree family of table engines (a minimal sketch follows this list).
- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
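
A minimal sketch of the first and last items above (the table name `partitioned` and its columns are illustrative, not from the changelog):

``` sql
-- Custom partitioning key for the MergeTree family:
CREATE TABLE partitioned
(
    d Date,
    x UInt32
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)  -- one partition per month instead of the fixed monthly scheme
ORDER BY x;

-- Arithmetic operations with time intervals:
SELECT now() + INTERVAL 1 DAY - INTERVAL 3 HOUR;
```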
@ -869,7 +869,7 @@ int Server::main(const std::vector<std::string> & /*args*/)

        if (listen_try)
        {
            LOG_ERROR(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
            LOG_WARNING(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
                "specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
                "file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
                " Example for disabled IPv4: <listen_host>::</listen_host>",

@ -1013,7 +1013,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
    }

    if (servers.empty())
        throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
        throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
            ErrorCodes::NO_ELEMENTS_IN_CONFIG);

    global_context->enableNamedSessions();

@ -17,7 +17,7 @@ String RowPolicy::NameParts::getName() const
    name.reserve(database.length() + table_name.length() + short_name.length() + 6);
    name += backQuoteIfNeed(short_name);
    name += " ON ";
    if (!name.empty())
    if (!database.empty())
    {
        name += backQuoteIfNeed(database);
        name += '.';
@ -353,16 +353,17 @@ namespace
        for (const String & name : names)
        {
            SettingsProfileElement profile_element;
            profile_element.setting_index = Settings::findIndexStrict(name);
            size_t setting_index = Settings::findIndexStrict(name);
            profile_element.setting_index = setting_index;
            Poco::Util::AbstractConfiguration::Keys constraint_types;
            String path_to_name = path_to_constraints + "." + name;
            config.keys(path_to_name, constraint_types);
            for (const String & constraint_type : constraint_types)
            {
                if (constraint_type == "min")
                    profile_element.min_value = config.getString(path_to_name + "." + constraint_type);
                    profile_element.min_value = Settings::valueToCorrespondingType(setting_index, config.getString(path_to_name + "." + constraint_type));
                else if (constraint_type == "max")
                    profile_element.max_value = config.getString(path_to_name + "." + constraint_type);
                    profile_element.max_value = Settings::valueToCorrespondingType(setting_index, config.getString(path_to_name + "." + constraint_type));
                else if (constraint_type == "readonly")
                    profile_element.readonly = true;
                else

@ -402,8 +403,9 @@ namespace
            }

            SettingsProfileElement profile_element;
            profile_element.setting_index = Settings::findIndexStrict(key);
            profile_element.value = config.getString(profile_config + "." + key);
            size_t setting_index = Settings::findIndexStrict(key);
            profile_element.setting_index = setting_index;
            profile_element.value = Settings::valueToCorrespondingType(setting_index, config.getString(profile_config + "." + key));
            profile->elements.emplace_back(std::move(profile_element));
        }

@ -272,12 +272,12 @@ struct ODBCBridgeMixin
        return AccessType::ODBC;
    }

    static std::unique_ptr<ShellCommand> startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout)
    static std::unique_ptr<ShellCommand> startBridge(
        const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout)
    {
        /// Path to executable folder
        Poco::Path path{config.getString("application.dir", "/usr/bin")};


        std::vector<std::string> cmd_args;
        path.setFileName("clickhouse-odbc-bridge");

@ -409,6 +409,7 @@ struct Settings : public SettingsCollection<Settings>
    \
    M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.", 0) \
    \
    M(SettingBool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
    M(SettingBool, input_format_values_interpret_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \
    M(SettingBool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \
    M(SettingBool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
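
A hedged sketch of what the new `optimize_group_by_function_keys` setting eliminates (the table and column names are hypothetical):

``` sql
-- With optimize_group_by_function_keys = 1, a GROUP BY key that is a
-- function of another GROUP BY key is redundant and can be dropped:
SELECT max(value)
FROM metrics
GROUP BY id, modulo(id, 10);

-- ...is rewritten to the equivalent of:
SELECT max(value)
FROM metrics
GROUP BY id;
```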
@ -30,7 +30,7 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

static const std::vector<String> supported_functions{"any", "anyLast", "min", "max", "sum", "groupBitAnd", "groupBitOr", "groupBitXor", "sumMap"};
static const std::vector<String> supported_functions{"any", "anyLast", "min", "max", "sum", "groupBitAnd", "groupBitOr", "groupBitXor", "sumMap", "groupArrayArray", "groupUniqArrayArray"};


String DataTypeCustomSimpleAggregateFunction::getName() const
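
A hedged sketch of what the newly whitelisted functions enable for `SimpleAggregateFunction` columns (the table name is hypothetical):

``` sql
CREATE TABLE simple_agg
(
    id UInt64,
    -- groupArrayArray concatenates arrays on merge; after this change it
    -- can be used as a SimpleAggregateFunction without the full
    -- AggregateFunction state machinery:
    tags SimpleAggregateFunction(groupArrayArray, Array(String))
)
ENGINE = AggregatingMergeTree
ORDER BY id;
```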
@ -9,6 +9,7 @@
#include <Parsers/ASTNameTypePair.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Common/quoteString.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>

@ -196,18 +196,17 @@ struct BloomFilterHash
        const ColumnString::Chars & data = index_column->getChars();
        const ColumnString::Offsets & offsets = index_column->getOffsets();

        ColumnString::Offset current_offset = pos;
        for (size_t index = 0, size = vec.size(); index < size; ++index)
        {
            ColumnString::Offset current_offset = offsets[index + pos - 1];
            size_t length = offsets[index + pos] - current_offset - 1 /* terminating zero */;
            UInt64 city_hash = CityHash_v1_0_2::CityHash64(
                reinterpret_cast<const char *>(&data[current_offset]), offsets[index + pos] - current_offset - 1);
                reinterpret_cast<const char *>(&data[current_offset]), length);

            if constexpr (is_first)
                vec[index] = city_hash;
            else
                vec[index] = CityHash_v1_0_2::Hash128to64(CityHash_v1_0_2::uint128(vec[index], city_hash));

            current_offset = offsets[index + pos];
        }
    }
    else if (const auto * fixed_string_index_column = typeid_cast<const ColumnFixedString *>(column))
src/Interpreters/GroupByFunctionKeysVisitor.h (new file, 115 lines)
@ -0,0 +1,115 @@
#pragma once

#include <Functions/FunctionFactory.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/InDepthNodeVisitor.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/IAST.h>
#include <Common/typeid_cast.h>


namespace DB
{

/// Recursive traversal and check for optimizeGroupByFunctionKeys.
struct KeepFunctionMatcher
{
    struct Data
    {
        std::unordered_set<String> & key_names_to_keep;
        bool & keep_key;
    };

    using Visitor = InDepthNodeVisitor<KeepFunctionMatcher, true>;

    static bool needChildVisit(const ASTPtr & node, const ASTPtr &)
    {
        return !(node->as<ASTFunction>());
    }

    static void visit(ASTFunction * function_node, Data & data)
    {
        if ((function_node->arguments->children).empty())
        {
            data.keep_key = true;
            return;
        }

        if (!data.key_names_to_keep.count(function_node->getColumnName()))
        {
            Visitor(data).visit(function_node->arguments);
        }
    }

    static void visit(ASTIdentifier * ident, Data & data)
    {
        if (!data.key_names_to_keep.count(ident->shortName()))
        {
            /// If an argument of a function is not among the GROUP BY keys, the function must not be deleted.
            data.keep_key = true;
            return;
        }
    }

    static void visit(const ASTPtr & ast, Data & data)
    {
        if (data.keep_key)
            return;

        if (auto * function_node = ast->as<ASTFunction>())
        {
            visit(function_node, data);
        }
        else if (auto * ident = ast->as<ASTIdentifier>())
        {
            visit(ident, data);
        }
        else if (!ast->as<ASTExpressionList>())
        {
            data.keep_key = true;
        }
    }
};

using KeepFunctionVisitor = InDepthNodeVisitor<KeepFunctionMatcher, true>;

class GroupByFunctionKeysMatcher
{
public:
    struct Data
    {
        std::unordered_set<String> & key_names_to_keep;
    };

    static bool needChildVisit(const ASTPtr & node, const ASTPtr &)
    {
        return !(node->as<ASTFunction>());
    }

    static void visit(ASTFunction * function_node, Data & data)
    {
        bool keep_key = false;
        KeepFunctionVisitor::Data keep_data{data.key_names_to_keep, keep_key};
        KeepFunctionVisitor(keep_data).visit(function_node->arguments);

        if (!keep_key)
            (data.key_names_to_keep).erase(function_node->getColumnName());
    }

    static void visit(const ASTPtr & ast, Data & data)
    {
        if (auto * function_node = ast->as<ASTFunction>())
        {
            if (!(function_node->arguments->children.empty()))
                visit(function_node, data);
        }
    }
};

using GroupByFunctionKeysVisitor = InDepthNodeVisitor<GroupByFunctionKeysMatcher, true>;

}
@ -974,6 +974,14 @@ void InterpreterSelectQuery::executeImpl(QueryPipeline & pipeline, const BlockIn

        executeWithFill(pipeline);

        /// If we have 'WITH TIES', we need to execute the limit before the projection,
        /// because in that case columns from 'ORDER BY' are used.
        if (query.limit_with_ties)
        {
            executeLimit(pipeline);
            has_prelimit = true;
        }

        /** We must do projection after DISTINCT because projection may remove some columns.
          */
        executeProjection(pipeline, expressions.final_projection);
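
A hedged sketch of the `WITH TIES` behavior this code path handles (the table and column names are hypothetical):

``` sql
-- Returns the first 3 rows plus any further rows whose ORDER BY value
-- ties with the last of those 3, which is why the ORDER BY columns
-- must still be available when the limit runs:
SELECT score
FROM results
ORDER BY score DESC
LIMIT 3 WITH TIES;
```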
@ -25,6 +25,7 @@
#include <Interpreters/ArithmeticOperationsInAgrFuncOptimize.h>
#include <Interpreters/DuplicateDistinctVisitor.h>
#include <Interpreters/DuplicateOrderByVisitor.h>
#include <Interpreters/GroupByFunctionKeysVisitor.h>

#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTFunction.h>
@ -346,6 +347,89 @@ void optimizeGroupBy(ASTSelectQuery * select_query, const NameSet & source_colum
    appendUnusedGroupByColumn(select_query, source_columns);
}

/// Eliminate functions of other GROUP BY keys.
void optimizeGroupByFunctionKeys(ASTSelectQuery * select_query, bool optimize_group_by_function_keys)
{
    if (!optimize_group_by_function_keys)
        return;

    if (!select_query->groupBy())
        return;

    auto grp_by = select_query->groupBy();
    auto & group_keys = grp_by->children;

    ASTs modified; /// result
    std::unordered_set<String> key_names_to_keep; /// set of the keys' short names

    /// Check whether the optimization is needed while building the set.
    bool need_optimization = false;
    /// Fill the set with the short names of the keys.
    for (auto & group_key : group_keys)
    {
        if (!need_optimization && group_key->as<ASTFunction>())
            need_optimization = true;

        if (auto * group_key_ident = group_key->as<ASTIdentifier>())
        {
            if (key_names_to_keep.count(group_key_ident->shortName()))
            {
                /// There may be a collision between different tables having similar variables.
                /// Since we cannot track these conflicts yet,
                /// it is better to disable the optimization than to eliminate necessary keys.
                need_optimization = false;
                break;
            }

            key_names_to_keep.insert(group_key_ident->shortName());
            continue;
        }
        if (auto * group_key_func = group_key->as<ASTFunction>())
        {
            key_names_to_keep.insert(group_key_func->getColumnName());
            continue;
        }
        else
        {
            key_names_to_keep.insert(group_key->getColumnName());
        }
    }
    if (!need_optimization)
        return;

    GroupByFunctionKeysVisitor::Data visitor_data{key_names_to_keep};
    GroupByFunctionKeysVisitor(visitor_data).visit(grp_by);

    modified.reserve(group_keys.size());

    /// Fill the result.
    for (auto & group_key : group_keys)
    {
        if (auto * group_key_func = group_key->as<ASTFunction>())
        {
            if (key_names_to_keep.count(group_key_func->getColumnName()))
                modified.push_back(group_key);

            continue;
        }
        if (auto * group_key_ident = group_key->as<ASTIdentifier>())
        {
            if (key_names_to_keep.count(group_key_ident->shortName()))
                modified.push_back(group_key);

            continue;
        }
        else
        {
            if (key_names_to_keep.count(group_key->getColumnName()))
                modified.push_back(group_key);
        }
    }

    /// Modify the input.
    grp_by->children = modified;
}

/// Remove duplicate items from ORDER BY.
void optimizeOrderBy(const ASTSelectQuery * select_query)
{

@ -843,6 +927,9 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect(
    /// GROUP BY injective function elimination.
    optimizeGroupBy(select_query, source_columns_set, context);

    /// GROUP BY functions of other keys elimination.
    optimizeGroupByFunctionKeys(select_query, settings.optimize_group_by_function_keys);

    /// Remove duplicate items from ORDER BY.
    optimizeOrderBy(select_query);

@ -146,7 +146,7 @@ void ASTAlterCommand::formatImpl(
    }
    else if (type == ASTAlterCommand::ADD_CONSTRAINT)
    {
        settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "ADD CONSTRAINT" << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "");
        settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "ADD CONSTRAINT " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "");
        constraint_decl->formatImpl(settings, state, frame);
    }
    else if (type == ASTAlterCommand::DROP_CONSTRAINT)
@ -49,9 +49,6 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatSta
{
    frame.need_parens = false;

    if (!settings.one_line)
        settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' ');

    /// We have to always backquote column names to avoid ambiguity with INDEX and other declarations in CREATE query.
    settings.ostr << backQuote(name);

@ -19,10 +19,6 @@ ASTPtr ASTConstraintDeclaration::clone() const

void ASTConstraintDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
{
    frame.need_parens = false;
    std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' ');

    s.ostr << s.nl_or_ws << indent_str;
    s.ostr << backQuoteIfNeed(name);
    s.ostr << (s.hilite ? hilite_keyword : "") << " CHECK " << (s.hilite ? hilite_none : "");
    expr->formatImpl(s, state, frame);
@ -108,17 +108,9 @@ void ASTColumnsElement::formatImpl(const FormatSettings & s, FormatState & state
        return;
    }

    frame.need_parens = false;
    std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' ');

    s.ostr << s.nl_or_ws << indent_str;
    s.ostr << (s.hilite ? hilite_keyword : "") << prefix << (s.hilite ? hilite_none : "");

    FormatSettings nested_settings = s;
    nested_settings.one_line = true;
    nested_settings.nl_or_ws = ' ';

    elem->formatImpl(nested_settings, state, frame);
    s.ostr << ' ';
    elem->formatImpl(s, state, frame);
}

@ -172,7 +164,12 @@ void ASTColumns::formatImpl(const FormatSettings & s, FormatState & state, Forma
    }

    if (!list.children.empty())
        list.formatImpl(s, state, frame);
    {
        if (s.one_line)
            list.formatImpl(s, state, frame);
        else
            list.formatImplMultiline(s, state, frame);
    }
}

@ -273,11 +270,12 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
            << (!as_database.empty() ? backQuoteIfNeed(as_database) + "." : "") << backQuoteIfNeed(as_table);
    }

    frame.expression_list_always_start_on_new_line = true;

    if (columns_list)
    {
        settings.ostr << (settings.one_line ? " (" : "\n(");
        FormatStateStacked frame_nested = frame;
        ++frame_nested.indent;
        columns_list->formatImpl(settings, state, frame_nested);
        settings.ostr << (settings.one_line ? ")" : "\n)");
    }
@ -286,11 +284,15 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
    {
        settings.ostr << (settings.one_line ? " (" : "\n(");
        FormatStateStacked frame_nested = frame;
        ++frame_nested.indent;
        dictionary_attributes_list->formatImpl(settings, state, frame_nested);
        if (settings.one_line)
            dictionary_attributes_list->formatImpl(settings, state, frame_nested);
        else
            dictionary_attributes_list->formatImplMultiline(settings, state, frame_nested);
        settings.ostr << (settings.one_line ? ")" : "\n)");
    }

    frame.expression_list_always_start_on_new_line = false;

    if (storage)
        storage->formatImpl(settings, state, frame);

@ -34,9 +34,6 @@ void ASTDictionaryAttributeDeclaration::formatImpl(const FormatSettings & settin
{
    frame.need_parens = false;

    if (!settings.one_line)
        settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' ');

    settings.ostr << backQuote(name);

    if (type)
@ -37,14 +37,14 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For
        {
            if (separator)
                settings.ostr << separator;
            settings.ostr << ' ';
        }

        if (children.size() > 1)
        if (children.size() > 1 || frame.expression_list_always_start_on_new_line)
            settings.ostr << indent_str;

        (*it)->formatImpl(settings, state, frame);
        FormatStateStacked frame_nested = frame;
        frame_nested.expression_list_always_start_on_new_line = false;
        (*it)->formatImpl(settings, state, frame_nested);
    }
}

src/Parsers/ASTIndexDeclaration.cpp (new file, 35 lines)
@ -0,0 +1,35 @@
#include <Parsers/ASTIndexDeclaration.h>
#include <Common/quoteString.h>


namespace DB
{

ASTPtr ASTIndexDeclaration::clone() const
{
    auto res = std::make_shared<ASTIndexDeclaration>();

    res->name = name;
    res->granularity = granularity;

    if (expr)
        res->set(res->expr, expr->clone());
    if (type)
        res->set(res->type, type->clone());
    return res;
}


void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
{
    s.ostr << backQuoteIfNeed(name);
    s.ostr << " ";
    expr->formatImpl(s, state, frame);
    s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
    type->formatImpl(s, state, frame);
    s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
    s.ostr << granularity;
}

}
@ -1,15 +1,8 @@
#pragma once

#include <Core/Field.h>
#include <Core/Types.h>
#include <Common/FieldVisitors.h>
#include <Common/quoteString.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/IAST.h>

#include <vector>


namespace DB
{
@ -27,34 +20,8 @@ public:
    /** Get the text that identifies this element. */
    String getID(char) const override { return "Index"; }

    ASTPtr clone() const override
    {
        auto res = std::make_shared<ASTIndexDeclaration>();

        res->name = name;
        res->granularity = granularity;

        if (expr)
            res->set(res->expr, expr->clone());
        if (type)
            res->set(res->type, type->clone());
        return res;
    }

    void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override
    {
        frame.need_parens = false;
        std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' ');

        s.ostr << s.nl_or_ws << indent_str;
        s.ostr << backQuoteIfNeed(name);
        s.ostr << " ";
        expr->formatImpl(s, state, frame);
        s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
        type->formatImpl(s, state, frame);
        s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
        s.ostr << granularity;
    }
    ASTPtr clone() const override;
    void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;
};

}
src/Parsers/ASTNameTypePair.cpp (new file, 33 lines)
@ -0,0 +1,33 @@
#include <Parsers/ASTNameTypePair.h>
#include <Common/quoteString.h>


namespace DB
{

ASTPtr ASTNameTypePair::clone() const
{
    auto res = std::make_shared<ASTNameTypePair>(*this);
    res->children.clear();

    if (type)
    {
        res->type = type;
        res->children.push_back(res->type);
    }

    return res;
}


void ASTNameTypePair::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
    std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' ');

    settings.ostr << indent_str << backQuoteIfNeed(name) << ' ';
    type->formatImpl(settings, state, frame);
}

}
@ -1,7 +1,6 @@
#pragma once

#include <Parsers/IAST.h>
#include <Common/quoteString.h>


namespace DB
@ -19,29 +18,10 @@ public:

    /** Get the text that identifies this element. */
    String getID(char delim) const override { return "NameTypePair" + (delim + name); }

    ASTPtr clone() const override
    {
        auto res = std::make_shared<ASTNameTypePair>(*this);
        res->children.clear();

        if (type)
        {
            res->type = type;
            res->children.push_back(res->type);
        }

        return res;
    }
    ASTPtr clone() const override;

protected:
    void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
    {
        std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' ');

        settings.ostr << settings.nl_or_ws << indent_str << backQuoteIfNeed(name) << " ";
        type->formatImpl(settings, state, frame);
    }
    void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
};

@ -202,6 +202,7 @@ public:
    {
        UInt8 indent = 0;
        bool need_parens = false;
        bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element.
        const IAST * current_select = nullptr;
    };

@ -796,7 +796,6 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E
    ParserDictionaryAttributeDeclarationList attributes_p;
    ParserDictionary dictionary_p;


    bool if_not_exists = false;

    ASTPtr database;
@ -23,14 +23,14 @@ namespace ErrorCodes

namespace
{
    bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, std::optional<String> & new_host_pattern)
    bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name)
    {
        return IParserBase::wrapParseImpl(pos, [&]
        {
            if (!ParserKeyword{"RENAME TO"}.ignore(pos, expected))
                return false;

            return parseUserName(pos, expected, new_name, new_host_pattern);
            return parseUserName(pos, expected, new_name);
        });
    }

@ -274,7 +274,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
        return false;

    String new_name;
    std::optional<String> new_host_pattern;
    std::optional<Authentication> authentication;
    std::optional<AllowedClientHosts> hosts;
    std::optional<AllowedClientHosts> add_hosts;

@ -302,7 +301,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec

        if (alter)
        {
            if (new_name.empty() && parseRenameTo(pos, expected, new_name, new_host_pattern))
            if (new_name.empty() && parseRenameTo(pos, expected, new_name))
                continue;

            if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "DROP", remove_hosts))

@ -312,13 +311,8 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
            break;
    }

    if (!hosts)
    {
        if (!alter && host_pattern)
            hosts.emplace().addLikePattern(*host_pattern);
        else if (alter && new_host_pattern)
            hosts.emplace().addLikePattern(*new_host_pattern);
    }
    if (!alter && !hosts && host_pattern)
        hosts.emplace().addLikePattern(*host_pattern);

    auto query = std::make_shared<ASTCreateUserQuery>();
    node = query;

@ -87,7 +87,7 @@ namespace
            readonly = true;
            return true;
        }
        else if (ParserKeyword{"READONLY"}.ignore(pos, expected))
        else if (ParserKeyword{"WRITABLE"}.ignore(pos, expected))
        {
            readonly = false;
            return true;
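
A hedged sketch of the SQL this parser change affects (the profile name and values are illustrative, not from the patch):

``` sql
-- READONLY forbids changing the constrained setting; WRITABLE, parsed by
-- the branch above instead of a second READONLY, explicitly allows it:
CREATE SETTINGS PROFILE max_mem
SETTINGS max_memory_usage = 10000000000 MIN 10000000 MAX 20000000000 WRITABLE;
```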
@ -26,9 +26,11 @@ SRCS(
    ASTFunctionWithKeyValueArguments.cpp
    ASTGrantQuery.cpp
    ASTIdentifier.cpp
    ASTIndexDeclaration.cpp
    ASTInsertQuery.cpp
    ASTKillQueryQuery.cpp
    ASTLiteral.cpp
    ASTNameTypePair.cpp
    ASTOptimizeQuery.cpp
    ASTOrderByElement.cpp
    ASTPartition.cpp

@ -1,6 +1,5 @@
|
||||
#include "HTTPHandlerFactory.h"
|
||||
|
||||
#include <re2/stringpiece.h>
|
||||
#include <Poco/Util/LayeredConfiguration.h>
|
||||
|
||||
#include "HTTPHandler.h"
|
||||
@ -21,6 +20,9 @@ namespace ErrorCodes
|
||||
extern const int INVALID_CONFIG_PARAMETER;
|
||||
}
|
||||
|
||||
static void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server);
|
||||
static void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics);
|
||||
|
||||
HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & name_)
|
||||
: log(&Poco::Logger::get(name_)), name(name_)
|
||||
{
|
||||
@ -65,7 +67,8 @@ HTTPRequestHandlerFactoryMain::TThis * HTTPRequestHandlerFactoryMain::addHandler
    return this;
}

static inline auto createHandlersFactoryFromConfig(IServer & server, const std::string & name, const String & prefix)
static inline auto createHandlersFactoryFromConfig(
    IServer & server, const std::string & name, const String & prefix, AsynchronousMetrics & async_metrics)
{
    auto main_handler_factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);

@ -74,66 +77,46 @@ static inline auto createHandlersFactoryFromConfig(IServer & server, const std::

    for (const auto & key : keys)
    {
        if (!startsWith(key, "rule"))
            throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule'", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
        if (key == "defaults")
            addDefaultHandlersFactory(*main_handler_factory, server, async_metrics);
        else if (startsWith(key, "rule"))
        {
            const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", "");

        const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", "");
            if (handler_type.empty())
                throw Exception("Handler type in config is not specified here: " + prefix + "." + key + ".handler.type",
                    ErrorCodes::INVALID_CONFIG_PARAMETER);

            if (handler_type == "static")
                main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key));
            else if (handler_type == "dynamic_query_handler")
                main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key));
            else if (handler_type == "predefined_query_handler")
                main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key));
        else if (handler_type.empty())
            throw Exception("Handler type in config is not specified here: " +
                prefix + "." + key + ".handler.type", ErrorCodes::INVALID_CONFIG_PARAMETER);
        if (handler_type == "static")
            main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key));
        else if (handler_type == "dynamic_query_handler")
            main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key));
        else if (handler_type == "predefined_query_handler")
            main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key));
            else if (handler_type == "prometheus")
                main_handler_factory->addHandler(createPrometheusHandlerFactory(server, async_metrics, prefix + "." + key));
            else if (handler_type == "replicas_status")
                main_handler_factory->addHandler(createReplicasStatusHandlerFactory(server, prefix + "." + key));
            else
                throw Exception("Unknown handler type '" + handler_type + "' in config here: " + prefix + "." + key + ".handler.type",
                    ErrorCodes::INVALID_CONFIG_PARAMETER);
        }
        else
            throw Exception("Unknown handler type '" + handler_type +"' in config here: " +
                prefix + "." + key + ".handler.type",ErrorCodes::INVALID_CONFIG_PARAMETER);
            throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule' or 'defaults'",
                ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
    }

    return main_handler_factory.release();
}

static const auto ping_response_expression = "Ok.\n";
static const auto root_response_expression = "config://http_server_default_response";

static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(
    IServer & server, const std::string & name, AsynchronousMetrics & async_metrics)
static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IServer & server, const std::string & name, AsynchronousMetrics & async_metrics)
{
    if (server.config().has("http_handlers"))
        return createHandlersFactoryFromConfig(server, name, "http_handlers");
        return createHandlersFactoryFromConfig(server, name, "http_handlers", async_metrics);
    else
    {
        auto factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);

        auto root_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, root_response_expression);
        root_handler->attachStrictPath("/")->allowGetAndHeadRequest();
        factory->addHandler(root_handler.release());

        auto ping_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, ping_response_expression);
        ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest();
        factory->addHandler(ping_handler.release());

        auto replicas_status_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>>(server);
        replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest();
        factory->addHandler(replicas_status_handler.release());

        auto query_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<DynamicQueryHandler>>(server, "query");
        query_handler->allowPostAndGetParamsRequest();
        factory->addHandler(query_handler.release());

        /// We check that prometheus handler will be served on current (default) port.
        /// Otherwise it will be created separately, see below.
        if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0)
        {
            auto prometheus_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>>(
                server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics));
            prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest();
            factory->addHandler(prometheus_handler.release());
        }

        addDefaultHandlersFactory(*factory, server, async_metrics);
        return factory.release();
    }
}

@ -141,18 +124,7 @@ static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(
static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandlerFactory(IServer & server, const std::string & name)
{
    auto factory = std::make_unique<HTTPRequestHandlerFactoryMain>(name);

    auto root_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, root_response_expression);
    root_handler->attachStrictPath("/")->allowGetAndHeadRequest();
    factory->addHandler(root_handler.release());

    auto ping_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, ping_response_expression);
    ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest();
    factory->addHandler(ping_handler.release());

    auto replicas_status_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>>(server);
    replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest();
    factory->addHandler(replicas_status_handler.release());
    addCommonDefaultHandlersFactory(*factory, server);

    auto main_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<InterserverIOHTTPHandler>>(server);
    main_handler->allowPostAndGetParamsRequest();
@ -180,4 +152,41 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As
    throw Exception("LOGICAL ERROR: Unknown HTTP handler factory name.", ErrorCodes::LOGICAL_ERROR);
}

static const auto ping_response_expression = "Ok.\n";
static const auto root_response_expression = "config://http_server_default_response";

void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server)
{
    auto root_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, root_response_expression);
    root_handler->attachStrictPath("/")->allowGetAndHeadRequest();
    factory.addHandler(root_handler.release());

    auto ping_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(server, ping_response_expression);
    ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest();
    factory.addHandler(ping_handler.release());

    auto replicas_status_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>>(server);
    replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest();
    factory.addHandler(replicas_status_handler.release());
}

void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics)
{
    addCommonDefaultHandlersFactory(factory, server);

    auto query_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<DynamicQueryHandler>>(server, "query");
    query_handler->allowPostAndGetParamsRequest();
    factory.addHandler(query_handler.release());

    /// We check that prometheus handler will be served on current (default) port.
    /// Otherwise it will be created separately, see createHandlerFactory(...).
    if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0)
    {
        auto prometheus_handler = std::make_unique<HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>>(
            server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics));
        prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest();
        factory.addHandler(prometheus_handler.release());
    }
}

}
@ -109,6 +109,10 @@ Poco::Net::HTTPRequestHandlerFactory * createDynamicHandlerFactory(IServer & ser

Poco::Net::HTTPRequestHandlerFactory * createPredefinedHandlerFactory(IServer & server, const std::string & config_prefix);

Poco::Net::HTTPRequestHandlerFactory * createReplicasStatusHandlerFactory(IServer & server, const std::string & config_prefix);

Poco::Net::HTTPRequestHandlerFactory * createPrometheusHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & config_prefix);

Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & name);

}
@ -12,6 +12,7 @@
#include <Common/CurrentMetrics.h>

#include <IO/WriteBufferFromHTTPServerResponse.h>
#include <Server/HTTPHandlerRequestFilter.h>


namespace DB

@ -40,4 +41,10 @@ void PrometheusRequestHandler::handleRequest(
    }
}

Poco::Net::HTTPRequestHandlerFactory * createPrometheusHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & config_prefix)
{
    return addFiltersFromConfig(new HandlingRuleHTTPHandlerFactory<PrometheusRequestHandler>(
        server, PrometheusMetricsWriter(server.config(), config_prefix + ".handler", async_metrics)), server.config(), config_prefix);
}

}
@ -7,8 +7,11 @@
#include <Databases/IDatabase.h>
#include <IO/HTTPCommon.h>

#include <Poco/Net/HTTPRequestHandlerFactory.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Server/HTTPHandlerFactory.h>
#include <Server/HTTPHandlerRequestFilter.h>


namespace DB

@ -104,5 +107,9 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request
    }
}

Poco::Net::HTTPRequestHandlerFactory * createReplicasStatusHandlerFactory(IServer & server, const std::string & config_prefix)
{
    return addFiltersFromConfig(new HandlingRuleHTTPHandlerFactory<ReplicasStatusHandler>(server), server.config(), config_prefix);
}

}
@ -63,8 +63,10 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo

    static std::atomic_uint total_sends {0};

    if ((data_settings->replicated_max_parallel_sends && total_sends >= data_settings->replicated_max_parallel_sends)
        || (data_settings->replicated_max_parallel_sends_for_table && data.current_table_sends >= data_settings->replicated_max_parallel_sends_for_table))
    if ((data_settings->replicated_max_parallel_sends
            && total_sends >= data_settings->replicated_max_parallel_sends)
        || (data_settings->replicated_max_parallel_sends_for_table
            && data.current_table_sends >= data_settings->replicated_max_parallel_sends_for_table))
    {
        response.setStatus(std::to_string(HTTP_TOO_MANY_REQUESTS));
        response.setReason("Too many concurrent fetches, try again later");

@ -182,6 +184,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
    bool to_detached,
    const String & tmp_prefix_)
{
    if (blocker.isCancelled())
        throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED);

    /// Validation of the input that may come from malicious replica.
    MergeTreePartInfo::fromPartName(part_name, data.format_version);
    const auto data_settings = data.getSettings();

@ -294,7 +299,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPart(

    if (blocker.isCancelled())
    {
        /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, performing a poll with a not very large timeout.
        /// NOTE The is_cancelled flag also makes sense to check every time you read over the network,
        /// performing a poll with a not very large timeout.
        /// And now we check it only between read chunks (in the `copyData` function).
        disk->removeRecursive(part_download_path);
        throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED);
@ -21,6 +21,7 @@
#include <Interpreters/PartLog.h>
#include <Disks/StoragePolicy.h>
#include <Interpreters/Aggregator.h>
#include <Storages/extractKeyExpressionList.h>

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>

@ -511,6 +512,13 @@ public:
        broken_part_callback(name);
    }

    /// TODO (alesap) Duplicate method required for compatibility.
    /// Must be removed.
    static ASTPtr extractKeyExpressionList(const ASTPtr & node)
    {
        return DB::extractKeyExpressionList(node);
    }

    /// Check that the part is not broken and calculate the checksums for it if they are not present.
    MutableDataPartPtr loadPartAndFixMetadata(const VolumePtr & volume, const String & relative_path) const;

Some files were not shown because too many files have changed in this diff.