Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into master

George 2020-12-30 09:33:23 +03:00
commit 5111aa3e93
492 changed files with 1983 additions and 651 deletions

View File

@@ -102,11 +102,11 @@ else
     echo "No failed tests"
 fi
-mkdir -p $COVERAGE_DIR
-mv /*.profraw $COVERAGE_DIR
-mkdir -p $SOURCE_DIR/obj-x86_64-linux-gnu
-cd $SOURCE_DIR/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. && cd /
-llvm-profdata-11 merge -sparse ${COVERAGE_DIR}/* -o clickhouse.profdata
-llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex $IGNORE > output.lcov
-genhtml output.lcov --ignore-errors source --output-directory ${OUTPUT_DIR}
+mkdir -p "$COVERAGE_DIR"
+mv /*.profraw "$COVERAGE_DIR"
+mkdir -p "$SOURCE_DIR"/obj-x86_64-linux-gnu
+cd "$SOURCE_DIR"/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. && cd /
+llvm-profdata-11 merge -sparse "${COVERAGE_DIR}"/* -o clickhouse.profdata
+llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex "$IGNORE" > output.lcov
+genhtml output.lcov --ignore-errors source --output-directory "${OUTPUT_DIR}"

View File

@@ -65,7 +65,7 @@ function start_server
 {
     set -m # Spawn server in its own process groups
     local opts=(
-        --config-file="$FASTTEST_DATA/config.xml"
+        --config-file "$FASTTEST_DATA/config.xml"
         --
         --path "$FASTTEST_DATA"
        --user_files_path "$FASTTEST_DATA/user_files"

View File

@@ -55,9 +55,9 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
     fi
-    for i in $(seq 1 $NUM_TRIES); do
+    for _ in $(seq 1 "$NUM_TRIES"); do
         clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
-        if [ ${PIPESTATUS[0]} -ne "0" ]; then
+        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
             break;
         fi
     done

@@ -65,4 +65,4 @@ function run_tests()
 export -f run_tests
-timeout $MAX_RUN_TIME bash -c run_tests ||:
+timeout "$MAX_RUN_TIME" bash -c run_tests ||:

View File

@@ -8,4 +8,5 @@ CMD cd /ClickHouse/utils/check-style && \
     ./check-style -n | tee /test_output/style_output.txt && \
     ./check-typos | tee /test_output/typos_output.txt && \
     ./check-whitespaces -n | tee /test_output/whitespaces_output.txt && \
-    ./check-duplicate-includes.sh | tee /test_output/duplicate_output.txt
+    ./check-duplicate-includes.sh | tee /test_output/duplicate_output.txt && \
+    ./shellcheck-run.sh | tee /test_output/shellcheck_output.txt

View File

@@ -2470,6 +2470,17 @@ Possible values:
 Default value: `0`.
 
+## data_type_default_nullable {#data_type_default_nullable}
+
+Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) in column definition to be [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable).
+
+Possible values:
+
+- 1 — The data types in column definitions are set to `Nullable` by default.
+- 0 — The data types in column definitions are set to not `Nullable` by default.
+
+Default value: `0`.
+
 ## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}
 
 Enables special logic to perform merges on replicas.
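A minimal sketch of the new setting in action (the table name is hypothetical; the semantics follow the documentation added in this hunk):

``` sql
SET data_type_default_nullable = 1;

-- With the setting enabled, a plain type in a column definition is
-- treated as Nullable by default, so the column accepts NULL values.
CREATE TABLE t_nullable_default (x Int32) ENGINE = Memory;
INSERT INTO t_nullable_default VALUES (NULL);
```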

View File

@@ -558,4 +558,46 @@ Result:
 └─────┘
 ```
 
+## encodeXMLComponent {#encode-xml-component}
+
+Escapes characters to place a string into an XML text node or attribute.
+
+The following five XML predefined entities will be replaced: `<`, `&`, `>`, `"`, `'`.
+
+**Syntax**
+
+``` sql
+encodeXMLComponent(x)
+```
+
+**Parameters**
+
+- `x` — The sequence of characters. [String](../../sql-reference/data-types/string.md).
+
+**Returned value(s)**
+
+- The escaped sequence of characters.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT encodeXMLComponent('Hello, "world"!');
+SELECT encodeXMLComponent('<123>');
+SELECT encodeXMLComponent('&clickhouse');
+SELECT encodeXMLComponent('\'foo\'');
+```
+
+Result:
+
+``` text
+Hello, &quot;world&quot;!
+&lt;123&gt;
+&amp;clickhouse
+&apos;foo&apos;
+```
+
 [Original article](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) <!--hide-->

View File

@@ -400,7 +400,8 @@ Result:
 └──────────────────────────────────────────────────────────────────────────────────────────┘
 ```
 
-**See also**
+**See Also**
+
 - [extractAllGroupsVertical](#extractallgroups-vertical)
 
 ## extractAllGroupsVertical {#extractallgroups-vertical}

@@ -440,7 +441,8 @@ Result:
 └────────────────────────────────────────────────────────────────────────────────────────┘
 ```
 
-**See also**
+**See Also**
+
 - [extractAllGroupsHorizontal](#extractallgroups-horizontal)
 
 ## like(haystack, pattern), haystack LIKE pattern operator {#function-like}

@@ -590,8 +592,55 @@ Result:
 └───────────────────────────────┘
 ```
 
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) <!--hide-->
-
 ## countMatches(haystack, pattern) {#countmatcheshaystack-pattern}
 
 Returns the number of regular expression matches for a `pattern` in a `haystack`.
 
+**Syntax**
+
+``` sql
+countMatches(haystack, pattern)
+```
+
+**Parameters**
+
+- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal).
+- `pattern` — The regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- The number of matches.
+
+Type: [UInt64](../../sql-reference/data-types/int-uint.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT countMatches('foobar.com', 'o+');
+```
+
+Result:
+
+``` text
+┌─countMatches('foobar.com', 'o+')─┐
+│                                2 │
+└──────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT countMatches('aaaa', 'aa');
+```
+
+Result:
+
+``` text
+┌─countMatches('aaaa', 'aa')────┐
+│                             2 │
+└───────────────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) <!--hide-->

View File

@@ -16,8 +16,8 @@ By default, tables are created only on the current server. Distributed DDL queri
 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 (
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
+    name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
+    name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
     ...
 ) ENGINE = engine
 ```

@@ -57,6 +57,14 @@ In all cases, if `IF NOT EXISTS` is specified, the query won't return an error
 There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).
 
+## NULL Or NOT NULL Modifiers {#null-modifiers}
+
+The `NULL` and `NOT NULL` modifiers after a data type in a column definition allow or disallow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
+
+If the type is not `Nullable` and `NULL` is specified, it is treated as `Nullable`; if `NOT NULL` is specified, it is not. For example, `INT NULL` is the same as `Nullable(INT)`. If the type is already `Nullable` and the `NULL` or `NOT NULL` modifier is specified, an exception is thrown.
+
+See also the [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) setting.
+
 ## Default Values {#create-default-values}
 
 The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
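A minimal sketch of the modifier semantics described in this hunk (table names are hypothetical):

``` sql
CREATE TABLE t_null     (x INT NULL)     ENGINE = Memory; -- x becomes Nullable(Int32)
CREATE TABLE t_not_null (y INT NOT NULL) ENGINE = Memory; -- y stays Int32

-- Applying a modifier to a type that is already Nullable throws an exception:
CREATE TABLE t_bad (z Nullable(INT) NOT NULL) ENGINE = Memory;
```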

View File

@@ -116,12 +116,14 @@ FROM dt
 ## See Also {#see-also}
 
-- [Функции преобразования типов](../../sql-reference/data-types/datetime.md)
-- [Функции для работы с датой и временем](../../sql-reference/data-types/datetime.md)
-- [Функции для работы с массивами](../../sql-reference/data-types/datetime.md)
-- [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format)
-- [Конфигурационный параметр сервера `timezone`](../../sql-reference/data-types/datetime.md#server_configuration_parameters-timezone)
-- [Операторы для работы с датой и временем](../../sql-reference/data-types/datetime.md#operators-datetime)
+- [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md)
+- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md)
+- [Функции для работы с массивами](../../sql-reference/functions/array-functions.md)
+- [Настройка `date_time_input_format`](../../operations/settings/settings/#settings-date_time_input_format)
+- [Настройка `date_time_output_format`](../../operations/settings/settings/)
+- [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
+- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)
 - [Тип данных `Date`](date.md)
+- [Тип данных `DateTime64`](datetime64.md)
 
 [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/datetime/) <!--hide-->

View File

@@ -92,11 +92,12 @@ FROM dt
 ## See Also {#see-also}
 
-- [Функции преобразования типов](../../sql-reference/data-types/datetime64.md)
-- [Функции для работы с датой и временем](../../sql-reference/data-types/datetime64.md)
-- [Функции для работы с массивами](../../sql-reference/data-types/datetime64.md)
+- [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md)
+- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md)
+- [Функции для работы с массивами](../../sql-reference/functions/array-functions.md)
 - [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format)
-- [Конфигурационный параметр сервера `timezone`](../../sql-reference/data-types/datetime64.md#server_configuration_parameters-timezone)
-- [Операторы для работы с датой и временем](../../sql-reference/data-types/datetime64.md#operators-datetime)
+- [Настройка `date_time_output_format`](../../operations/settings/settings.md)
+- [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
+- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)
 - [Тип данных `Date`](date.md)
 - [Тип данных `DateTime`](datetime.md)

View File

@@ -521,5 +521,56 @@ SELECT * FROM Months WHERE ilike(name, '%j%')
 !!! note "Примечание"
     Для случая UTF-8 мы используем триграммное расстояние. Вычисление n-граммного расстояния не совсем честное. Мы используем 2-х байтные хэши для хэширования n-грамм, а затем вычисляем (не)симметрическую разность между хэш-таблицами — могут возникнуть коллизии. В формате UTF-8 без учета регистра мы не используем честную функцию `tolower` — мы обнуляем 5-й бит (нумерация с нуля) каждого байта кодовой точки, а также первый бит нулевого байта, если байтов больше 1 — это работает для латиницы и почти для всех кириллических букв.
 
+## countMatches(haystack, pattern) {#countmatcheshaystack-pattern}
+
+Возвращает количество совпадений, найденных в строке `haystack`, для регулярного выражения `pattern`.
+
+**Синтаксис**
+
+``` sql
+countMatches(haystack, pattern)
+```
+
+**Параметры**
+
+- `haystack` — строка, по которой выполняется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal).
+- `pattern` — регулярное выражение, построенное по синтаксическим правилам [re2](https://github.com/google/re2/wiki/Syntax). [String](../../sql-reference/data-types/string.md).
+
+**Возвращаемое значение**
+
+- Количество совпадений.
+
+Тип: [UInt64](../../sql-reference/data-types/int-uint.md).
+
+**Примеры**
+
+Запрос:
+
+``` sql
+SELECT countMatches('foobar.com', 'o+');
+```
+
+Результат:
+
+``` text
+┌─countMatches('foobar.com', 'o+')─┐
+│                                2 │
+└──────────────────────────────────┘
+```
+
+Запрос:
+
+``` sql
+SELECT countMatches('aaaa', 'aa');
+```
+
+Результат:
+
+``` text
+┌─countMatches('aaaa', 'aa')────┐
+│                             2 │
+└───────────────────────────────┘
+```
+
 [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/string_search_functions/) <!--hide-->

File diff suppressed because it is too large

View File

@@ -1,51 +1,66 @@
-# 第三方开发的库 {#di-san-fang-kai-fa-de-ku}
+---
+toc_priority: 26
+toc_title: 客户端开发库
+---
+
+# 第三方开发库 {#client-libraries-from-third-party-developers}
 
-!!! warning "放弃"
-    Yandex不维护下面列出的库,也没有进行任何广泛的测试以确保其质量。
+!!! warning "声明"
+    Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试来确保它们的质量。
 
 - Python
     - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
-    - [ツ环板driverョツ嘉ッツ偲](https://github.com/mymarilyn/clickhouse-driver)
-    - [ツ环板clientョツ嘉ッツ偲](https://github.com/yurial/clickhouse-client)
+    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
+    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
+    - [aiochclient](https://github.com/maximdanilchenko/aiochclient)
 - PHP
     - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse)
-    - [8bitov/clickhouse-php客户端](https://packagist.org/packages/8bitov/clickhouse-php-client)
-    - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://packagist.org/packages/bozerkins/clickhouse-client)
-    - [ツ环板clientョツ嘉ッツ偲](https://packagist.org/packages/simpod/clickhouse-client)
+    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
+    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
+    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
     - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
-    - [ツ环板clientョツ嘉ッツ偲](https://github.com/SeasX/SeasClick)
-- 走吧
+    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
+    - [one-ck](https://github.com/lizhichao/one-ck)
+    - [glushkovds/phpclickhouse-laravel](https://packagist.org/packages/glushkovds/phpclickhouse-laravel)
+- Go
     - [clickhouse](https://github.com/kshvakov/clickhouse/)
-    - [ツ环板-ョツ嘉ッツ偲](https://github.com/roistat/go-clickhouse)
-    - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/mailru/go-clickhouse)
+    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
+    - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse)
     - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
+- Swift
+    - [ClickHouseNIO](https://github.com/patrick-zippenfenig/ClickHouseNIO)
+    - [ClickHouseVapor ORM](https://github.com/patrick-zippenfenig/ClickHouseVapor)
 - NodeJs
-    - [ツ暗ェツ氾环催ツ団ツ法ツ人)](https://github.com/TimonKK/clickhouse)
-    - [ツ环板-ョツ嘉ッツ偲](https://github.com/apla/node-clickhouse)
+    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
+    - [node-clickhouse](https://github.com/apla/node-clickhouse)
 - Perl
     - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
     - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
-    - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://metacpan.org/release/AnyEvent-ClickHouse)
+    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
-    - [ツ暗ェツ氾环催ツ団)](https://github.com/shlima/click_house)
-    - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/PNixx/clickhouse-activerecord)
+    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
     - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
-    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
+    - [RClickHouse](https://github.com/IMSMWU/RClickHouse)
 - Java
     - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
-- 斯卡拉
-    - [掳胫client-禄脢鹿脷露胫鲁隆鹿-client酶](https://github.com/crobox/clickhouse-scala-client)
+    - [clickhouse-client](https://github.com/Ecwid/clickhouse-client)
+- Scala
+    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - Kotlin
     - [AORM](https://github.com/TanVD/AORM)
 - C#
     - [Octonica.ClickHouseClient](https://github.com/Octonica/ClickHouseClient)
-    - [克莱克豪斯Ado](https://github.com/killwort/ClickHouse-Net)
+    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
     - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
-    - [克莱克豪斯客户](https://github.com/DarkWanderer/ClickHouse.Client)
-- 仙丹
+- Elixir
     - [clickhousex](https://github.com/appodeal/clickhousex/)
-- 尼姆
+    - [pillar](https://github.com/sofakingworld/pillar)
+- Nim
     - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
+- Haskell
+    - [hdbc-clickhouse](https://github.com/zaneli/hdbc-clickhouse)
 
-[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/client_libraries/) <!--hide-->
+[来源文章](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/) <!--hide-->

View File

@@ -1,8 +1,16 @@
 ---
-machine_translated: true
-machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_folder_title: "\u7B2C\u4E09\u65B9"
+toc_folder_title: 第三方工具
 toc_priority: 24
 ---
+
+# 第三方工具 {#third-party-interfaces}
+
+这是第三方工具的链接集合,它们提供了一些ClickHouse的接口。它可以是可视化界面、命令行界面或API:
+
+- [Client libraries](../../interfaces/third-party/client-libraries.md)
+- [Integrations](../../interfaces/third-party/integrations.md)
+- [GUI](../../interfaces/third-party/gui.md)
+- [Proxies](../../interfaces/third-party/proxy.md)
+
+!!! note "注意"
+    支持通用API的通用工具[ODBC](../../interfaces/odbc.md)或[JDBC](../../interfaces/jdbc.md),通常也适用于ClickHouse,但这里没有列出,因为它们实在太多了。

View File

@@ -1,100 +1,108 @@
-# 第三方集成库 {#di-san-fang-ji-cheng-ku}
+---
+toc_priority: 27
+toc_title: 第三方集成库
+---
+
+# 第三方集成库 {#integration-libraries-from-third-party-developers}
 
 !!! warning "声明"
-    Yandex不维护下面列出的库,也没有进行任何广泛的测试以确保其质量。
+    Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试来确保它们的质量。
 
-## 基建产品 {#ji-jian-chan-pin}
+## 基础设施 {#infrastructure-products}
 
-- 关系数据库管理系统
+- 关系数据库
     - [MySQL](https://www.mysql.com)
         - [mysql2ch](https://github.com/long2ice/mysql2ch)
         - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
         - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
-        - [horgh-复制器](https://github.com/larsnovikov/horgh-replicator)
+        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
     - [PostgreSQL](https://www.postgresql.org)
         - [clickhousedb_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
-        - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+        - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
         - [pg2ch](https://github.com/mkabilov/pg2ch)
+        - [clickhouse_fdw](https://github.com/adjust/clickhouse_fdw)
     - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
-        - [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
+        - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator)
 - 消息队列
-    - [卡夫卡](https://kafka.apache.org)
-        - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [去客户](https://github.com/ClickHouse/clickhouse-go/))
+    - [Kafka](https://kafka.apache.org)
+        - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/))
         - [stream-loader-clickhouse](https://github.com/adform/stream-loader)
 - 流处理
     - [Flink](https://flink.apache.org)
         - [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
 - 对象存储
     - [S3](https://en.wikipedia.org/wiki/Amazon_S3)
-        - [ツ环板backupョツ嘉ッツ偲](https://github.com/AlexAkulov/clickhouse-backup)
+        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
 - 容器编排
     - [Kubernetes](https://kubernetes.io)
-        - [clickhouse-操](https://github.com/Altinity/clickhouse-operator)
+        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
 - 配置管理
-    - [木偶](https://puppet.com)
-        - [ツ环板/ョツ嘉ッツ偲](https://forge.puppet.com/innogames/clickhouse)
+    - [puppet](https://puppet.com)
+        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
         - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
-- 监控
-    - [石墨](https://graphiteapp.org)
+- Monitoring
+    - [Graphite](https://graphiteapp.org)
         - [graphouse](https://github.com/yandex/graphouse)
-        - [ツ暗ェツ氾环催ツ団](https://github.com/lomik/carbon-clickhouse) +
-        - [ツ环板-ョツ嘉ッツ偲](https://github.com/lomik/graphite-clickhouse)
-        - [石墨-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) -优化静态分区 [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) 如果从规则 [汇总配置](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) 可以应用
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
+        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
+        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
     - [Grafana](https://grafana.com/)
         - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
-    - [普罗米修斯号](https://prometheus.io/)
+    - [Prometheus](https://prometheus.io/)
         - [clickhouse_exporter](https://github.com/f1yegor/clickhouse_exporter)
         - [PromHouse](https://github.com/Percona-Lab/PromHouse)
-        - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (用途 [去客户](https://github.com/kshvakov/clickhouse/))
+        - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/))
     - [Nagios](https://www.nagios.org/)
         - [check_clickhouse](https://github.com/exogroup/check_clickhouse/)
         - [check_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
     - [Zabbix](https://www.zabbix.com)
-        - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/Altinity/clickhouse-zabbix-template)
+        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
     - [Sematext](https://sematext.com/)
-        - [clickhouse积分](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
+        - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
-- 记录
+- Logging
     - [rsyslog](https://www.rsyslog.com/)
-        - [鹿茫house omhousee酶](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
+        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
     - [fluentd](https://www.fluentd.org)
-        - [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
-    - [Sematext](https://www.sematext.com/logagent)
-        - [logagent输出-插件-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
+        - [loghouse](https://github.com/flant/loghouse) (for [Kubernetes](https://kubernetes.io))
+    - [logagent](https://www.sematext.com/logagent)
+        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
-- 地理
+- Geo
     - [MaxMind](https://dev.maxmind.com/geoip/)
-        - [ツ环板-ョツ嘉ッツ偲青clickシツ氾カツ鉄ツ工ツ渉](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
+        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
 
-## 编程语言生态系统 {#bian-cheng-yu-yan-sheng-tai-xi-tong}
+## 编程语言 {#programming-language-ecosystems}
 
 - Python
     - [SQLAlchemy](https://www.sqlalchemy.org)
-        - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
-    - [熊猫](https://pandas.pydata.org)
+    - [pandas](https://pandas.pydata.org)
         - [pandahouse](https://github.com/kszucs/pandahouse)
 - PHP
     - [Doctrine](https://www.doctrine-project.org/)
         - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
 - R
     - [dplyr](https://db.rstudio.com/dplyr/)
-        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [ツ暗ェツ氾环催ツ団](https://github.com/artpaul/clickhouse-cpp))
+        - [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
 - Java
     - [Hadoop](http://hadoop.apache.org)
-        - [clickhouse-hdfs-装载机](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../sql-reference/table-functions/jdbc.md))
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql-reference/table-functions/jdbc.md))
-- 斯卡拉
+- Scala
     - [Akka](https://akka.io)
-        - [掳胫client-禄脢鹿脷露胫鲁隆鹿-client酶](https://github.com/crobox/clickhouse-scala-client)
+        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - C#
     - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
-        - [克莱克豪斯Ado](https://github.com/killwort/ClickHouse-Net)
-        - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
-        - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
+        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+        - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+        - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
+        - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
-- 仙丹
+- Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
         - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
 - Ruby
     - [Ruby on Rails](https://rubyonrails.org/)
         - [activecube](https://github.com/bitquery/activecube)
+        - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord)
     - [GraphQL](https://github.com/graphql)
         - [activecube-graphql](https://github.com/bitquery/activecube-graphql)
 
-[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/integrations/) <!--hide-->
+[源文章](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) <!--hide-->

View File

@@ -1,37 +1,44 @@
-# 来自第三方开发人员的代理服务器 {#lai-zi-di-san-fang-kai-fa-ren-yuan-de-dai-li-fu-wu-qi}
+---
+toc_priority: 29
+toc_title: 第三方代理
+---
 
-[chproxy](https://github.com/Vertamedia/chproxy) 是ClickHouse数据库的http代理和负载均衡器。
+# 第三方代理 {#proxy-servers-from-third-party-developers}
 
-特征
+## chproxy {#chproxy}
 
-*每用户路由和响应缓存。
-*灵活的限制。
-\*自动SSL证书续订。
+[chproxy](https://github.com/Vertamedia/chproxy), 是一个用于ClickHouse数据库的HTTP代理和负载均衡器。
 
-在Go中实现。
+特性:
+
+- 用户路由和响应缓存。
+- 灵活的限制。
+- 自动SSL证书续订。
+
+使用go语言实现。
 
 ## KittenHouse {#kittenhouse}
 
-[KittenHouse](https://github.com/VKCOM/kittenhouse) 设计为ClickHouse和应用程序服务器之间的本地代理,以防在应用程序端缓冲INSERT数据是不可能或不方便的。
+[KittenHouse](https://github.com/VKCOM/kittenhouse)被设计为ClickHouse和应用服务器之间的本地代理,以防不可能或不方便在应用程序端缓冲插入数据。
 
-特征:
+特性:
 
-*内存和磁盘数据缓冲。
-*每表路由。
-\*负载平衡和健康检查。
+- 内存和磁盘上的数据缓冲。
+- 表路由。
+- 负载平衡和运行状况检查。
 
-在Go中实现。
+使用go语言实现。
 
-## ツ环板-ョツ嘉ッツ偲 {#clickhouse-bulk}
+## ClickHouse-Bulk {#clickhouse-bulk}
 
-[ツ环板-ョツ嘉ッツ偲](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。
+[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk)是一个简单的ClickHouse收集器。
 
-特征:
+特性:
 
-*分组请求并按阈值或间隔发送。
-*多个远程服务器。
-\*基本身份验证。
+- 按阈值或间隔对请求进行分组并发送。
+- 多个远程服务器。
+- 基本身份验证。
 
-在Go中实现。
+使用go语言实现。
 
-[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/proxy/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/) <!--hide-->

View File

@ -1,8 +1,8 @@
--- ---
machine_translated: true toc_folder_title: ClickHouse事迹
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_priority: 82
toc_folder_title: "\u65B0\u589E\u5185\u5BB9"
toc_priority: 72
--- ---
# ClickHouse变更及改动? {#whats-new-in-clickhouse}
对于已经发布的版本,有一个[roadmap](../whats-new/roadmap.md)和一个详细的[changelog](../whats-new/changelog/index.md)。

View File

@ -1,17 +1,10 @@
--- ---
toc_priority: 74 toc_priority: 74
toc_title: 路线图 toc_title: Roadmap
--- ---
# 路线图 {#roadmap} # Roadmap {#roadmap}
## Q2 2020 {#q2-2020} `2021年Roadmap`已公布供公开讨论查看[这里](https://github.com/ClickHouse/ClickHouse/issues/17623).
- 和外部认证服务集成
## Q3 2020 {#q3-2020}
- 资源池,为用户提供更精准的集群资源分配
{## [原始文档](https://clickhouse.tech/docs/en/roadmap/) ##}
{## [源文章](https://clickhouse.tech/docs/en/roadmap/) ##}

View File

@@ -1,41 +1,74 @@
-## 修复于 ClickHouse Release 18.12.13, 2018-09-10 {#xiu-fu-yu-clickhouse-release-18-12-13-2018-09-10}
+---
+toc_priority: 76
+toc_title: 安全更新日志
+---
+
+## 修复于ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
+
+### CVE-2019-15024 {#cve-2019-15024}
+
+对ZooKeeper具有写访问权限并且可以运行ClickHouse所在网络上可用的自定义服务器的攻击者,可以创建一个自定义的恶意服务器,该服务器将充当ClickHouse副本并在ZooKeeper中注册。当另一个副本将从恶意副本获取数据部分时,它可以强制clickhouse服务器写入文件系统上的任意路径。
+
+作者:Yandex信息安全团队Eldar Zaitov
+
+### CVE-2019-16535 {#cve-2019-16535}
+
+解压算法中的OOB-read、OOB-write和整数下溢,可以通过本机协议实现RCE或DoS。
+
+作者: Yandex信息安全团队Eldar Zaitov
+
+### CVE-2019-16536 {#cve-2019-16536}
+
+恶意的经过身份验证的客户端可能会触发导致DoS的堆栈溢出。
+
+作者: Yandex信息安全团队Eldar Zaitov
+
+## 修复于ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20}
+
+### CVE-2019-18657 {#cve-2019-18657}
+
+表函数`url`存在允许攻击者在请求中插入任意HTTP标头的漏洞。
+
+作者: [Nikita Tikhomirov](https://github.com/NSTikhomirov)
+
+## 修复于ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10}
 
 ### CVE-2018-14672 {#cve-2018-14672}
 
-加载CatBoost模型的功能允许遍历路径并通过错误消息读取任意文件。
+加载CatBoost模型的函数允许路径遍历和通过错误消息读取任意文件。
 
-来源: Yandex信息安全团队的Andrey Krasichkov
+作者:Yandex信息安全团队Andrey Krasichkov
 
-## 修复于 ClickHouse Release 18.10.3, 2018-08-13 {#xiu-fu-yu-clickhouse-release-18-10-3-2018-08-13}
+## 修复于Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13}
 
 ### CVE-2018-14671 {#cve-2018-14671}
 
-unixODBC允许从文件系统加载任意共享对象,从而导致«远程执行代码»漏洞。
+unixODBC允许从文件系统加载任意共享对象,从而导致远程代码执行漏洞。
 
-来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov
+作者:Yandex信息安全团队Andrey Krasichkov和Evgeny Sidorov
 
-## 修复于 ClickHouse Release 1.1.54388, 2018-06-28 {#xiu-fu-yu-clickhouse-release-1-1-54388-2018-06-28}
+## 修复于ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28}
 
 ### CVE-2018-14668 {#cve-2018-14668}
 
-远程表函数功能允许在 «user», «password» 及 «default_database» 字段中使用任意符号,从而导致跨协议请求伪造攻击。
+`remote`表函数允许在`user`、`password`和`default_database`字段中使用任意符号,从而导致跨协议请求伪造攻击。
 
-来源:Yandex信息安全团队的Andrey Krasichkov
+作者:Yandex信息安全团队Andrey Krasichkov
 
-## 修复于 ClickHouse Release 1.1.54390, 2018-07-06 {#xiu-fu-yu-clickhouse-release-1-1-54390-2018-07-06}
+## 修复于ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06}
 
 ### CVE-2018-14669 {#cve-2018-14669}
 
-ClickHouse MySQL客户端启用了 «LOAD DATA LOCAL INFILE» 功能,该功能允许恶意MySQL数据库从连接的ClickHouse服务器读取任意文件。
+ClickHouse MySQL客户端启用了`LOAD DATA LOCAL INFILE`功能,允许恶意MySQL数据库从连接的ClickHouse服务器读取任意文件。
 
-来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov
+作者:Yandex信息安全团队Andrey Krasichkov和Evgeny Sidorov
 
-## 修复于 ClickHouse Release 1.1.54131, 2017-01-10 {#xiu-fu-yu-clickhouse-release-1-1-54131-2017-01-10}
+## 修复于ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10}
 
 ### CVE-2018-14670 {#cve-2018-14670}
 
-deb软件包中的错误配置可能导致使用未经授权的数据库。
+deb包中的错误配置可能导致未经授权使用数据库。
 
-来源:英国国家网络安全中心(NCSC)
+作者:英国国家网络安全中心(NCSC)
 
-[来源文章](https://clickhouse.tech/docs/en/security_changelog/) <!--hide-->
+{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##}

View File

@@ -405,8 +405,8 @@ void QueryFuzzer::fuzz(ASTPtr & ast)
         if (fn->is_window_function)
         {
-            fuzzColumnLikeExpressionList(fn->window_partition_by);
-            fuzzOrderByList(fn->window_order_by);
+            fuzzColumnLikeExpressionList(fn->window_partition_by.get());
+            fuzzOrderByList(fn->window_order_by.get());
         }
 
         fuzz(fn->children);

View File

@@ -4,6 +4,7 @@
 #include <DataTypes/DataTypeDateTime64.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <DataTypes/DataTypeString.h>
+#include <DataTypes/DataTypeFixedString.h>
 #include <DataTypes/DataTypeUUID.h>
 #include <DataTypes/DataTypesDecimal.h>
 #include <DataTypes/DataTypesNumber.h>

@@ -76,6 +77,8 @@ void ExternalResultDescription::init(const Block & sample_block_)
             types.emplace_back(ValueType::vtDecimal128, is_nullable);
         else if (typeid_cast<const DataTypeDecimal<Decimal256> *>(type))
             types.emplace_back(ValueType::vtDecimal256, is_nullable);
+        else if (typeid_cast<const DataTypeFixedString *>(type))
+            types.emplace_back(ValueType::vtFixedString, is_nullable);
         else
             throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE};
     }

View File

@@ -30,7 +30,8 @@ struct ExternalResultDescription
         vtDecimal32,
         vtDecimal64,
         vtDecimal128,
-        vtDecimal256
+        vtDecimal256,
+        vtFixedString
     };
 
     Block sample_block;

View File

@@ -518,7 +518,7 @@ void IPAddressDictionary::loadData()
     {
         /// We format key attribute values here instead of filling with data from key_column
         /// because string representation can be normalized if bits beyond mask are set.
-        /// Also all IPv4 will be displayed as mapped IPv6 if threre are any IPv6.
+        /// Also all IPv4 will be displayed as mapped IPv6 if there are any IPv6.
         /// It's consistent with representation in table created with `ENGINE = Dictionary` from this dictionary.
         char str_buffer[48];
         if (has_ipv6)

View File

@@ -8,6 +8,7 @@
 #    include <Columns/ColumnString.h>
 #    include <Columns/ColumnsNumber.h>
 #    include <Columns/ColumnDecimal.h>
+#    include <Columns/ColumnFixedString.h>
 #    include <DataTypes/IDataType.h>
 #    include <DataTypes/DataTypeNullable.h>
 #    include <IO/ReadHelpers.h>

@@ -110,6 +111,9 @@ namespace
                 data_type.deserializeAsWholeText(column, buffer, FormatSettings{});
                 break;
             }
+            case ValueType::vtFixedString:
+                assert_cast<ColumnFixedString &>(column).insertData(value.data(), value.size());
+                break;
         }
     }
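The two hunks above let results from an external database land in a `FixedString` column instead of failing with "Unsupported type". A hedged sketch of the kind of definition this enables (the dictionary name, table, and connection parameters are hypothetical; the exact integration file these hunks belong to is not shown on this page):

``` sql
-- A dictionary attribute declared as FixedString(2) can now be filled
-- from an external MySQL source.
CREATE DICTIONARY country_dict
(
    id UInt64,
    code FixedString(2)
)
PRIMARY KEY id
SOURCE(MYSQL(HOST 'localhost' PORT 3306 USER 'user' PASSWORD 'password' DB 'db' TABLE 'countries'))
LIFETIME(MIN 300 MAX 600)
LAYOUT(FLAT());
```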

View File

@@ -738,15 +738,26 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
     if (node.is_window_function)
     {
         // Also add columns from PARTITION BY and ORDER BY of window functions.
+        // Requiring a constant reference to a shared pointer to non-const AST
+        // doesn't really look sane, but the visitor does indeed require it.
         if (node.window_partition_by)
         {
-            visit(node.window_partition_by->clone(), data);
+            visit(node.window_partition_by, data);
         }
         if (node.window_order_by)
         {
-            visit(node.window_order_by->clone(), data);
+            visit(node.window_order_by, data);
+        }
+
+        // Also manually add columns for arguments of the window function itself.
+        // ActionVisitor is written in such a way that this method must itself
+        // descend into all needed function children. Window functions can't have
+        // any special functions as argument, so the code below that handles
+        // special arguments is not needed. This is analogous to the
+        // appendWindowFunctionsArguments() in SelectQueryExpressionAnalyzer and
+        // partially duplicates its code. Probably we can remove most of the
+        // logic from that function, but I don't yet have it all figured out...
+        for (const auto & arg : node.arguments->children)
+        {
+            visit(arg, data);
         }
 
         // Don't need to do anything more for window functions here -- the

View File

@@ -970,7 +970,9 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments(
     ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns);
 
     // 1) Add actions for window functions and their arguments;
-    // 2) Mark the columns that are really required.
+    // 2) Mark the columns that are really required. We have to mark them as
+    //    required because we finish the expression chain before processing the
+    //    window functions.
     for (const auto & [_, w] : window_descriptions)
     {
         for (const auto & f : w.window_functions)

@@ -981,41 +983,14 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments(
             getRootActionsNoMakeSet(f.function_node->clone(),
                 true /* no_subqueries */, step.actions());
 
-            // 1.2) result of window function: an empty INPUT.
-            // It is an aggregate function, so it won't be added by getRootActions.
-            // This is something of a hack. Other options:
-            // a] do it like aggregate function -- break the chain of actions
-            //    and manually add window functions to the starting list of
-            //    input columns. Logically this is similar to what we're doing
-            //    now, but would require to split the window function processing
-            //    into a full-fledged step after plain functions. This would be
-            //    somewhat cumbersome. With INPUT hack we can avoid a separate
-            //    step and pretend that window functions are almost "normal"
-            //    select functions. The limitation of both these ways is that
-            //    we can't reference window functions in other SELECT
-            //    expressions.
-            // b] add a WINDOW action type, then sort, then split the chain on
-            //    each WINDOW action and insert the Window pipeline between the
-            //    Expression pipelines. This is a "proper" way that would allow
-            //    us to depend on window functions in other functions. But it's
-            //    complicated so I avoid doing it for now.
-            ColumnWithTypeAndName col;
-            col.type = f.aggregate_function->getReturnType();
-            col.column = col.type->createColumn();
-            col.name = f.column_name;
-            step.actions()->addInput(col);
-
+            // 2.1) function arguments;
             for (const auto & a : f.function_node->arguments->children)
             {
-                // 2.1) function arguments;
                 step.required_output.push_back(a->getColumnName());
             }
-            // 2.2) function result;
-            step.required_output.push_back(f.column_name);
         }
 
-        // 2.3) PARTITION BY and ORDER BY columns.
+        // 2.1) PARTITION BY and ORDER BY columns.
         for (const auto & c : w.full_sort_description)
         {
             step.required_output.push_back(c.column_name);

@@ -1048,6 +1023,15 @@ void SelectQueryExpressionAnalyzer::appendSelect(ExpressionActionsChain & chain,
     for (const auto & child : select_query->select()->children)
     {
+        if (const auto * function = typeid_cast<const ASTFunction *>(child.get());
+            function
+            && function->is_window_function)
+        {
+            // Skip window function columns here -- they are calculated after
+            // other SELECT expressions by a special step.
+            continue;
+        }
+
         step.required_output.push_back(child->getColumnName());
     }
 }

@@ -1421,11 +1405,54 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
         /// If there is aggregation, we execute expressions in SELECT and ORDER BY on the initiating server, otherwise on the source servers.
         query_analyzer.appendSelect(chain, only_types || (need_aggregate ? !second_stage : !first_stage));
 
-        query_analyzer.appendWindowFunctionsArguments(chain, only_types || !first_stage);
+        // Window functions are processed in a separate expression chain after
+        // the main SELECT, similar to what we do for aggregate functions.
+        if (has_window)
+        {
+            query_analyzer.appendWindowFunctionsArguments(chain, only_types || !first_stage);
+
+            // Build a list of output columns of the window step.
+            // 1) We need the columns that are the output of ExpressionActions.
+            for (const auto & x : chain.getLastActions()->getNamesAndTypesList())
+            {
+                query_analyzer.columns_after_window.push_back(x);
+            }
+
+            // 2) We also have to manually add the output of the window function
+            // to the list of the output columns of the window step, because the
+            // window functions are not in the ExpressionActions.
+            for (const auto & [_, w] : query_analyzer.window_descriptions)
+            {
+                for (const auto & f : w.window_functions)
+                {
+                    query_analyzer.columns_after_window.push_back(
+                        {f.column_name, f.aggregate_function->getReturnType()});
+                }
+            }
+
+            before_window = chain.getLastActions();
+            finalize_chain(chain);
+
+            auto & step = chain.lastStep(query_analyzer.columns_after_window);
+
+            // The output of this expression chain is the result of
+            // SELECT (before "final projection" i.e. renaming the columns), so
+            // we have to mark the expressions that are required in the output,
+            // again. We did it for the previous expression chain ("select w/o
+            // window functions") earlier, in appendSelect(). But that chain also
+            // produced the expressions required to calculate window functions.
+            // They are not needed in the final SELECT result. Knowing the correct
+            // list of columns is important when we apply SELECT DISTINCT later.
+            const auto * select_query = query_analyzer.getSelectQuery();
+            for (const auto & child : select_query->select()->children)
+            {
+                step.required_output.push_back(child->getColumnName());
+            }
+        }
 
         selected_columns = chain.getLastStep().required_output;
 
         has_order_by = query.orderBy() != nullptr;
-        before_order_and_select = query_analyzer.appendOrderBy(
+        before_order_by = query_analyzer.appendOrderBy(
             chain,
             only_types || (need_aggregate ? !second_stage : !first_stage),
             optimize_read_in_order,

@@ -1572,9 +1599,9 @@ std::string ExpressionAnalysisResult::dump() const
         ss << "before_window " << before_window->dumpDAG() << "\n";
     }
 
-    if (before_order_and_select)
+    if (before_order_by)
     {
-        ss << "before_order_and_select " << before_order_and_select->dumpDAG() << "\n";
+        ss << "before_order_by " << before_order_by->dumpDAG() << "\n";
     }
 
     if (before_limit_by)

@@ -1587,6 +1614,20 @@ std::string ExpressionAnalysisResult::dump() const
         ss << "final_projection " << final_projection->dumpDAG() << "\n";
     }
 
+    if (!selected_columns.empty())
+    {
+        ss << "selected_columns ";
+        for (size_t i = 0; i < selected_columns.size(); i++)
+        {
+            if (i > 0)
+            {
+                ss << ", ";
+            }
+            ss << backQuote(selected_columns[i]);
+        }
+        ss << "\n";
+    }
+
     return ss.str();
 }
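The `selected_columns` bookkeeping added above matters for queries where DISTINCT must apply only to the final SELECT columns, not to the helper expressions produced while computing window functions. A minimal sketch (window functions were experimental at the time of this commit, so the corresponding setting may need to be enabled):

``` sql
SET allow_experimental_window_functions = 1;

-- DISTINCT is applied to the single output column c, not to the
-- intermediate columns used to calculate the window function.
SELECT DISTINCT count() OVER (PARTITION BY number % 2) AS c
FROM numbers(10);
```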

View File

@@ -55,6 +55,8 @@ struct ExpressionAnalyzerData
     NamesAndTypesList columns_after_join;
     /// Columns after ARRAY JOIN, JOIN, and/or aggregation.
     NamesAndTypesList aggregated_columns;
+    /// Columns after window functions.
+    NamesAndTypesList columns_after_window;
 
     bool has_aggregation = false;
     NamesAndTypesList aggregation_keys;

@@ -202,11 +204,12 @@ struct ExpressionAnalysisResult
     ActionsDAGPtr before_aggregation;
     ActionsDAGPtr before_having;
     ActionsDAGPtr before_window;
-    ActionsDAGPtr before_order_and_select;
+    ActionsDAGPtr before_order_by;
     ActionsDAGPtr before_limit_by;
     ActionsDAGPtr final_projection;
 
-    /// Columns from the SELECT list, before renaming them to aliases.
+    /// Columns from the SELECT list, before renaming them to aliases. Used to
+    /// perform SELECT DISTINCT.
     Names selected_columns;
 
     /// Columns will be removed after prewhere actions execution.

View File

@@ -22,15 +22,22 @@ void ExpressionInfoMatcher::visit(const ASTFunction & ast_function, const ASTPtr
     {
         data.is_array_join = true;
     }
-    // "is_aggregate_function" doesn't mean much by itself. Apparently here it is
-    // used to move filters from HAVING to WHERE, and probably for this purpose
-    // an aggregate function calculated as a window function is not relevant.
+    // "is_aggregate_function" is used to determine whether we can move a filter
+    // (1) from HAVING to WHERE or (2) from WHERE of a parent query to HAVING of
+    // a subquery.
+    // For aggregate functions we can't do (1) but can do (2).
+    // For window functions both don't make sense -- they are not allowed in
+    // WHERE or HAVING.
     else if (!ast_function.is_window_function
         && AggregateFunctionFactory::instance().isAggregateFunctionName(
             ast_function.name))
     {
         data.is_aggregate_function = true;
     }
+    else if (ast_function.is_window_function)
+    {
+        data.is_window_function = true;
+    }
     else
     {
         const auto & function = FunctionFactory::instance().tryGet(ast_function.name, data.context);

@@ -75,15 +82,26 @@ bool ExpressionInfoMatcher::needChildVisit(const ASTPtr & node, const ASTPtr &)
     return !node->as<ASTSubquery>();
 }
 
-bool hasStatefulFunction(const ASTPtr & node, const Context & context)
+bool hasNonRewritableFunction(const ASTPtr & node, const Context & context)
 {
     for (const auto & select_expression : node->children)
     {
         ExpressionInfoVisitor::Data expression_info{.context = context, .tables = {}};
         ExpressionInfoVisitor(expression_info).visit(select_expression);
 
-        if (expression_info.is_stateful_function)
+        if (expression_info.is_stateful_function
+            || expression_info.is_window_function)
+        {
+            // If an outer query has a WHERE on window function, we can't move
+            // it into the subquery, because window functions are not allowed in
+            // WHERE and HAVING. Example:
+            // select * from (
+            //     select number,
+            //         count(*) over (partition by intDiv(number, 3)) c
+            //     from numbers(3)
+            // ) where c > 1;
             return true;
+        }
     }
 
     return false;

View File

@@ -21,6 +21,7 @@ struct ExpressionInfoMatcher
         bool is_array_join = false;
         bool is_stateful_function = false;
         bool is_aggregate_function = false;
+        bool is_window_function = false;
         bool is_deterministic_function = true;
         std::unordered_set<size_t> unique_reference_tables_pos = {};
     };

@@ -36,6 +37,6 @@ struct ExpressionInfoMatcher
 using ExpressionInfoVisitor = ConstInDepthNodeVisitor<ExpressionInfoMatcher, true>;
 
-bool hasStatefulFunction(const ASTPtr & node, const Context & context);
+bool hasNonRewritableFunction(const ASTPtr & node, const Context & context);
 
 }

View File

@@ -33,11 +33,14 @@ public:
             return false;
 
         if (auto * func = node->as<ASTFunction>())
         {
-            if (isAggregateFunction(*func)
-                || func->is_window_function)
+            if (isAggregateFunction(*func))
             {
                 return false;
             }
+            // Window functions can contain aggregation results as arguments
+            // to the window functions, or columns of PARTITION BY or ORDER BY
+            // of the window.
         }
         return true;
     }

View File

@ -538,7 +538,10 @@ Block InterpreterSelectQuery::getSampleBlockImpl()
if (options.to_stage == QueryProcessingStage::Enum::WithMergeableState) if (options.to_stage == QueryProcessingStage::Enum::WithMergeableState)
{ {
if (!analysis_result.need_aggregate) if (!analysis_result.need_aggregate)
return analysis_result.before_order_and_select->getResultColumns(); {
// What's the difference with selected_columns?
return analysis_result.before_order_by->getResultColumns();
}
Block header = analysis_result.before_aggregation->getResultColumns(); Block header = analysis_result.before_aggregation->getResultColumns();
@ -564,7 +567,8 @@ Block InterpreterSelectQuery::getSampleBlockImpl()
if (options.to_stage == QueryProcessingStage::Enum::WithMergeableStateAfterAggregation) if (options.to_stage == QueryProcessingStage::Enum::WithMergeableStateAfterAggregation)
{ {
return analysis_result.before_order_and_select->getResultColumns(); // What's the difference with selected_columns?
return analysis_result.before_order_by->getResultColumns();
} }
return analysis_result.final_projection->getResultColumns(); return analysis_result.final_projection->getResultColumns();
@@ -958,8 +962,9 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu
         }
         else
         {
-            executeExpression(query_plan, expressions.before_order_and_select, "Before ORDER BY and SELECT");
+            executeExpression(query_plan, expressions.before_window, "Before window functions");
             executeWindow(query_plan);
+            executeExpression(query_plan, expressions.before_order_by, "Before ORDER BY");
             executeDistinct(query_plan, true, expressions.selected_columns, true);
         }
@@ -1005,8 +1010,10 @@
             else if (expressions.hasHaving())
                 executeHaving(query_plan, expressions.before_having);

-            executeExpression(query_plan, expressions.before_order_and_select, "Before ORDER BY and SELECT");
+            executeExpression(query_plan, expressions.before_window,
+                "Before window functions");
             executeWindow(query_plan);
+            executeExpression(query_plan, expressions.before_order_by, "Before ORDER BY");
             executeDistinct(query_plan, true, expressions.selected_columns, true);
         }
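
With the expression steps split around executeWindow, ORDER BY and DISTINCT can consume window results. An illustrative query that relies on this step order (not part of the diff):

-- The window value c exists before the final ORDER BY runs:
select number, count(*) over (partition by number % 3) c
from numbers(9)
order by c, number;
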
@@ -1029,10 +1036,23 @@
    /** Optimization - if there are several sources and there is LIMIT, then first apply the preliminary LIMIT,
      * limiting the number of rows in each up to `offset + limit`.
      */
+    bool has_withfill = false;
+    if (query.orderBy())
+    {
+        SortDescription order_descr = getSortDescription(query, *context);
+        for (auto & desc : order_descr)
+            if (desc.with_fill)
+            {
+                has_withfill = true;
+                break;
+            }
+    }
+
     bool has_prelimit = false;
     if (!to_aggregation_stage &&
         query.limitLength() && !query.limit_with_ties && !hasWithTotalsInAnySubqueryInFromClause(query) &&
-        !query.arrayJoinExpressionList() && !query.distinct && !expressions.hasLimitBy() && !settings.extremes)
+        !query.arrayJoinExpressionList() && !query.distinct && !expressions.hasLimitBy() && !settings.extremes &&
+        !has_withfill)
     {
         executePreLimit(query_plan, false);
         has_prelimit = true;
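
A sketch of why WITH FILL disables the preliminary LIMIT (hypothetical data, illustrative only): fill rows are generated while sorting, after any per-stream limit would already have cut the input.

-- WITH FILL generates rows 1..9 between the two source rows; applying
-- LIMIT to each source stream first would drop the second row and
-- change the filled result:
select number from (select 0 as number union all select 10 as number)
order by number with fill
limit 5;
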
@@ -1745,6 +1765,11 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific
 void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description)
 {
+    if (!expression)
+    {
+        return;
+    }
+
     auto expression_step = std::make_unique<ExpressionStep>(query_plan.getCurrentDataStream(), expression);

     expression_step->setStepDescription(description);

View File

@@ -90,8 +90,12 @@ std::vector<ASTs> PredicateExpressionsOptimizer::extractTablesPredicates(const A
     ExpressionInfoVisitor::Data expression_info{.context = context, .tables = tables_with_columns};
     ExpressionInfoVisitor(expression_info).visit(predicate_expression);

-    if (expression_info.is_stateful_function || !expression_info.is_deterministic_function)
-        return {}; /// Not optimized when the predicate contains a stateful or non-deterministic function
+    if (expression_info.is_stateful_function
+        || !expression_info.is_deterministic_function
+        || expression_info.is_window_function)
+    {
+        return {}; /// Not optimized when the predicate contains a stateful, non-deterministic or window function
+    }

     if (!expression_info.is_array_join)
     {
@@ -190,6 +194,12 @@ bool PredicateExpressionsOptimizer::tryMovePredicatesFromHavingToWhere(ASTSelect
     if (expression_info.is_stateful_function)
         return false;

+    if (expression_info.is_window_function)
+    {
+        // Window functions are not allowed in either HAVING or WHERE.
+        return false;
+    }
+
     if (expression_info.is_aggregate_function)
         having_predicates.emplace_back(moving_predicate);
     else
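
For contrast, an illustrative pair showing what tryMovePredicatesFromHavingToWhere may and may not move (queries are sketches, not from the diff):

-- Movable: the condition only touches a grouping key, so it can be
-- checked before aggregation:
select number % 3 as g, count() as c from numbers(100) group by g having g > 0;

-- Not movable: a window function is allowed in neither HAVING nor
-- WHERE, so the optimizer leaves such a predicate untouched:
--   ... group by g having count() over () > 1;  -- rejected by the analyzer anyway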

View File

@@ -88,7 +88,7 @@ bool PredicateRewriteVisitorData::rewriteSubquery(ASTSelectQuery & subquery, con
         || (!optimize_with && subquery.with())
         || subquery.withFill()
         || subquery.limitBy() || subquery.limitLength()
-        || hasStatefulFunction(subquery.select(), context))
+        || hasNonRewritableFunction(subquery.select(), context))
         return false;

     for (const auto & predicate : predicates)

View File

@@ -148,9 +148,9 @@ void QueryNormalizer::visit(ASTSelectQuery & select, const ASTPtr &, Data & data
 /// Don't go into select query. It processes children itself.
 /// Do not go to the left argument of lambda expressions, so as not to replace the formal parameters
 /// on aliases in expressions of the form 123 AS x, arrayMap(x -> 1, [2]).
-void QueryNormalizer::visitChildren(const ASTPtr & node, Data & data)
+void QueryNormalizer::visitChildren(IAST * node, Data & data)
 {
-    if (const auto * func_node = node->as<ASTFunction>())
+    if (auto * func_node = node->as<ASTFunction>())
     {
         if (func_node->tryGetQueryArgument())
         {
@@ -176,6 +176,16 @@ void QueryNormalizer::visitChildren(const ASTPtr & node, Data & data)
                 visit(child, data);
             }
         }
+
+        if (func_node->window_partition_by)
+        {
+            visitChildren(func_node->window_partition_by.get(), data);
+        }
+
+        if (func_node->window_order_by)
+        {
+            visitChildren(func_node->window_order_by.get(), data);
+        }
     }
     else if (!node->as<ASTSelectQuery>())
     {
@@ -221,7 +231,7 @@ void QueryNormalizer::visit(ASTPtr & ast, Data & data)
     if (ast.get() != initial_ast.get())
         visit(ast, data);
     else
-        visitChildren(ast, data);
+        visitChildren(ast.get(), data);

     current_asts.erase(initial_ast.get());
     current_asts.erase(ast.get());
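
With the OVER children visited, aliases from the select list normalize inside the window definition as well. An illustrative query (assuming alias resolution behaves in OVER as it does elsewhere after this change):

-- `bucket` is an alias defined in the same select list, now also
-- resolved inside PARTITION BY of the window:
select intDiv(number, 3) as bucket,
    count(*) over (partition by bucket order by number) c
from numbers(9);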

View File

@@ -69,7 +69,7 @@ private:
     static void visit(ASTTablesInSelectQueryElement &, const ASTPtr &, Data &);
     static void visit(ASTSelectQuery &, const ASTPtr &, Data &);

-    static void visitChildren(const ASTPtr &, Data & data);
+    static void visitChildren(IAST * node, Data & data);
 };

 }

View File

@@ -29,6 +29,7 @@
 #include <DataTypes/DataTypeNullable.h>
 #include <IO/WriteHelpers.h>
+#include <IO/WriteBufferFromOStream.h>
 #include <Storages/IStorage.h>
 #include <AggregateFunctions/AggregateFunctionFactory.h>
@@ -445,6 +446,8 @@ std::vector<const ASTFunction *> getAggregates(ASTPtr & query, const ASTSelectQu
             for (auto & arg : node->arguments->children)
             {
                 assertNoAggregates(arg, "inside another aggregate function");
+                // We also can't have window functions inside aggregate functions,
+                // because the window functions are calculated later.
                 assertNoWindows(arg, "inside an aggregate function");
             }
         }
@@ -454,7 +457,9 @@
 std::vector<const ASTFunction *> getWindowFunctions(ASTPtr & query, const ASTSelectQuery & select_query)
 {
-    /// There can not be window functions inside the WHERE and PREWHERE.
+    /// There can not be window functions inside the WHERE, PREWHERE and HAVING
+    if (select_query.having())
+        assertNoWindows(select_query.having(), "in HAVING");
     if (select_query.where())
         assertNoWindows(select_query.where(), "in WHERE");
     if (select_query.prewhere())
@@ -463,17 +468,34 @@
     GetAggregatesVisitor::Data data;
     GetAggregatesVisitor(data).visit(query);

-    /// There can not be other window functions within the aggregate functions.
+    /// Window functions cannot be inside aggregates or other window functions.
+    /// Aggregate functions can be inside window functions because they are
+    /// calculated earlier.
     for (const ASTFunction * node : data.window_functions)
     {
         if (node->arguments)
         {
             for (auto & arg : node->arguments->children)
             {
-                assertNoAggregates(arg, "inside a window function");
                 assertNoWindows(arg, "inside another window function");
             }
         }
+
+        if (node->window_partition_by)
+        {
+            for (auto & arg : node->window_partition_by->children)
+            {
+                assertNoWindows(arg, "inside PARTITION BY of a window");
+            }
+        }
+
+        if (node->window_order_by)
+        {
+            for (auto & arg : node->window_order_by->children)
+            {
+                assertNoWindows(arg, "inside ORDER BY of a window");
+            }
+        }
     }

     return data.window_functions;
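
Taken together, the assertions allow aggregates under windows but nothing the other way around. Illustrative sketches of each rule (not part of the diff):

-- OK: an aggregate inside a window function; aggregation runs first:
select max(number) over (partition by number % 2) from numbers(10) group by number;

-- Rejected by the assertions above:
--   select sum(count() over ()) from numbers(10);                    -- window inside an aggregate
--   select count() from numbers(10) having count() over () > 0;      -- window in HAVING
--   select count() over (order by count() over ()) from numbers(10); -- window inside a window's ORDER BY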

View File

@@ -39,6 +39,16 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const
         (*it)->appendColumnName(ostr);
     }
     writeChar(')', ostr);
+
+    if (is_window_function)
+    {
+        writeCString(" OVER (", ostr);
+        FormatSettings settings{ostr, true /* one_line */};
+        FormatState state;
+        FormatStateStacked frame;
+        appendWindowDescription(settings, state, frame);
+        writeCString(")", ostr);
+    }
 }

 /** Get the text that identifies this element. */
@@ -57,17 +67,20 @@ ASTPtr ASTFunction::clone() const
     if (window_name)
     {
-        res->set(res->window_name, window_name->clone());
+        res->window_name = window_name->clone();
+        res->children.push_back(res->window_name);
     }

     if (window_partition_by)
     {
-        res->set(res->window_partition_by, window_partition_by->clone());
+        res->window_partition_by = window_partition_by->clone();
+        res->children.push_back(res->window_partition_by);
     }

     if (window_order_by)
     {
-        res->set(res->window_order_by, window_order_by->clone());
+        res->window_order_by = window_order_by->clone();
+        res->children.push_back(res->window_order_by);
     }

     return res;
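
Including the window description in the generated column name keeps otherwise identical calls distinct. An illustrative query (assuming names render as "count() OVER (...)"):

-- Without the OVER text in the name these two result columns would
-- collide; now each carries its own window in its name:
select count() over (partition by number % 2),
       count() over (partition by number % 3)
from numbers(6);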

View File

@@ -21,9 +21,25 @@ public:
     ASTPtr parameters;

     bool is_window_function = false;
-    ASTIdentifier * window_name;
-    ASTExpressionList * window_partition_by;
-    ASTExpressionList * window_order_by;
+
+    // We have to make these fields ASTPtr because this is what the visitors
+    // expect. Some of them take const ASTPtr & (makes no sense), and some
+    // take ASTPtr & and modify it. I don't understand how the latter is
+    // compatible with also having an owning `children` array -- apparently it
+    // leads to some dangling children that are not referenced by the fields of
+    // the AST class itself. Some older code hints at the idea of having
+    // ownership in `children` only, and making the class fields to be raw
+    // pointers of proper type (see e.g. IAST::set), but this is not compatible
+    // with the visitor interface.
+
+    // ASTIdentifier
+    ASTPtr window_name;
+
+    // ASTExpressionList
+    ASTPtr window_partition_by;
+
+    // ASTExpressionList of ASTOrderByElement
+    ASTPtr window_order_by;

     /// do not print empty parentheses if there are no args - compatibility with new AST for data types and engine names.
     bool no_empty_args = false;

View File

@@ -419,7 +419,8 @@ bool ParserWindowDefinition::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     ParserIdentifier window_name_parser;
     if (window_name_parser.parse(pos, window_name_ast, expected))
     {
-        function->set(function->window_name, window_name_ast);
+        function->children.push_back(window_name_ast);
+        function->window_name = window_name_ast;
         return true;
     }
     else
@@ -442,7 +443,8 @@
     ASTPtr partition_by_ast;
     if (columns_partition_by.parse(pos, partition_by_ast, expected))
     {
-        function->set(function->window_partition_by, partition_by_ast);
+        function->children.push_back(partition_by_ast);
+        function->window_partition_by = partition_by_ast;
     }
     else
     {
@@ -455,7 +457,8 @@
     ASTPtr order_by_ast;
     if (columns_order_by.parse(pos, order_by_ast, expected))
     {
-        function->set(function->window_order_by, order_by_ast);
+        function->children.push_back(order_by_ast);
+        function->window_order_by = order_by_ast;
     }
     else
     {

View File

@@ -46,6 +46,8 @@ static void doDescribeHeader(const Block & header, size_t count, IQueryPlanStep:
         first = false;
         elem.dumpNameAndType(settings.out);
+        settings.out << ": ";
+        elem.dumpStructure(settings.out);
         settings.out << '\n';
     }
 }

View File

@@ -247,6 +247,15 @@ static void explainStep(
         step.describeActions(settings);
 }

+std::string debugExplainStep(const IQueryPlanStep & step)
+{
+    WriteBufferFromOwnString out;
+    IQueryPlanStep::FormatSettings settings{.out = out};
+    QueryPlan::ExplainPlanOptions options{.actions = true};
+    explainStep(step, settings, options);
+    return out.str();
+}
+
 void QueryPlan::explainPlan(WriteBuffer & buffer, const ExplainPlanOptions & options)
 {
     checkInitialized();
@@ -488,6 +497,7 @@ static bool tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Node *
 {
     auto & parent = parent_node->step;
     auto & child = child_node->step;
+
     /// TODO: FilterStep
     auto * parent_expr = typeid_cast<ExpressionStep *>(parent.get());
     auto * child_expr = typeid_cast<ExpressionStep *>(child.get());

View File

@@ -97,4 +97,6 @@ private:
     std::vector<std::shared_ptr<Context>> interpreter_context;
 };

+std::string debugExplainStep(const IQueryPlanStep & step);
+
 }

View File

@@ -77,6 +77,11 @@ void WindowTransform::transform(Chunk & chunk)
         ws.argument_columns.clear();
         for (const auto column_index : ws.argument_column_indices)
         {
+            // Aggregate functions can't work with constant columns, so we have to
+            // materialize them like the Aggregator does.
+            columns[column_index]
+                = std::move(columns[column_index])->convertToFullColumnIfConst();
+
             ws.argument_columns.push_back(columns[column_index].get());
         }
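
A sketch of the case this materialization covers: a constant argument column reaching the window aggregate (illustrative query, not from the diff):

-- The literal 1 arrives as a constant column and is expanded to a
-- full column before the aggregate function sees it:
select sum(1) over (partition by number % 2) from numbers(4);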

View File

@@ -46,7 +46,7 @@ struct MergeTreeWriterSettings
     bool rewrite_primary_key;
     bool blocks_are_granules_size;

-    /// Used for AIO threshold comparsion
+    /// Used for AIO threshold comparison
     /// FIXME currently doesn't work because WriteBufferAIO contain obscure bug(s)
     size_t estimated_size = 0;
 };

View File

@@ -26,7 +26,7 @@ void ReplicatedMergeTreeAltersSequence::addMetadataAlter(
     int alter_version, std::lock_guard<std::mutex> & /*state_lock*/)
 {
     /// Data alter (mutation) always added before. See ReplicatedMergeTreeQueue::pullLogsToQueue.
-    /// So mutation alredy added to this sequence or doesn't exist.
+    /// So mutation already added to this sequence or doesn't exist.
     if (!queue_state.count(alter_version))
         queue_state.emplace(alter_version, AlterState{.metadata_finished=false, .data_finished=true});
     else

View File

@@ -48,16 +48,15 @@ def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_nam
         "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
         "unsigned_float FLOAT UNSIGNED, _float FLOAT, "
         "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
-        "_varchar VARCHAR(10), _char CHAR(10), "
+        "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
         "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
         "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")

     # it already has some data
     mysql_node.query("""
-        INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char',
+        INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
         '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
         """)

     clickhouse_node.query(
         "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
             service_name))
@@ -65,51 +64,51 @@ def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_nam
     assert "test_database" in clickhouse_node.query("SHOW DATABASES")
     check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
-                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t"
+                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                 "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")

     mysql_node.query("""
-        INSERT INTO test_database.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char',
+        INSERT INTO test_database.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
         '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
         """)

     check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
-                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t"
+                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                 "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
-                "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
+                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")

     mysql_node.query("UPDATE test_database.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
     check_query(clickhouse_node, """
                 SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
                 small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
-                unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char,
+                unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
                 _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
                 _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV
                 """,
-                "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t"
+                "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                 "2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
-                "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t0\n")
+                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")

     # update primary key
     mysql_node.query("UPDATE test_database.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
     check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                 " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
-                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, "
+                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                 " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                 " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                 "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
-                "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
-                "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t1\n")
+                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
+                "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")

     mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `key` = 2')
     check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                 " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
-                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, "
+                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                 " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                 " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
-                "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t"
+                "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                 "2020-01-01 00:00:00\t1\n")

     mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `unsigned_tiny_int` = 2')

View File

@@ -148,6 +148,13 @@ def test_table_function(started_cluster):
     assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '60000'
     conn.close()

+def test_binary_type(started_cluster):
+    conn = get_mysql_conn()
+    with conn.cursor() as cursor:
+        cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)")
+    table_function = "mysql('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type')
+    node1.query("INSERT INTO {} VALUES (42, 'clickhouse')".format('TABLE FUNCTION ' + table_function))
+    assert node1.query("SELECT * FROM {}".format(table_function)) == '42\tclickhouse\\0\\0\\0\\0\\0\\0\n'
+
 def test_enum_type(started_cluster):
     table_name = 'test_enum_type'
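
The trailing \0 escapes in the expected output come from MySQL zero-padding BINARY(16) to its full width; 'clickhouse' is 10 bytes, so six pad bytes follow. A read-back sketch mirroring the test (same hypothetical connection parameters as above):

SELECT * FROM mysql('mysql1:3306', 'clickhouse', 'binary_type', 'root', 'clickhouse');
-- 42	clickhouse\0\0\0\0\0\0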

View File

@@ -0,0 +1,38 @@
<test>
<preconditions>
<table_exists>hits_100m_single</table_exists>
</preconditions>
<settings>
<allow_experimental_window_functions>1</allow_experimental_window_functions>
</settings>
<!--
For some counters, find top 10 users by the number of records.
First with LIMIT BY, next with window functions.
-->
<query><![CDATA[
select CounterID, UserID, count(*) user_hits
from hits_100m_single
where CounterID < 10000
group by CounterID, UserID
order by user_hits desc
limit 10 by CounterID
format Null
]]></query>
<query><![CDATA[
select *
from (
select CounterID, UserID, count(*) user_hits,
count() over (partition by CounterID order by user_hits desc)
user_rank
from hits_100m_single
where CounterID < 10000
group by CounterID, UserID
)
where user_rank <= 10
format Null
]]></query>
</test>

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo 'DROP TABLE IF EXISTS long_insert' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @-

View File

@@ -2,6 +2,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo 'DROP TABLE IF EXISTS insert_fewer_columns' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @-

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 [ "$NO_SHELL_CONFIG" ] || . "$CURDIR"/../shell_config.sh

 seq 1 1000 | sed -r 's/.+/CREATE TABLE IF NOT EXISTS buf_00097 (a UInt8) ENGINE = Buffer('$CLICKHOUSE_DATABASE', b, 1, 1, 1, 1, 1, 1, 1); DROP TABLE buf_00097;/' | $CLICKHOUSE_CLIENT -n

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 export NO_SHELL_CONFIG=1

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="SELECT sum(dummy) FROM remote('localhost', system, one) WHERE 1 GLOBAL IN (SELECT 1)"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 set -o errexit

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT -n --query="

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 function create {

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=DROP+TABLE" -d 'IF EXISTS insert'

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 (echo 'SELECT number FROM system.numbers WHERE transform(number, ['; seq 1 100000 | tr '\n' ','; echo '0],['; seq 1 100000 | tr '\n' ','; echo '0]) = 10000000 LIMIT 1';) | $CLICKHOUSE_CLIENT --max_query_size=100000000

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1" -d @- <<< "DROP TABLE IF EXISTS test_00210"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 CLICKHOUSE_TIMEZONE_ESCAPED=$($CLICKHOUSE_CLIENT --query="SELECT timezone()" | sed 's/[]\/$*.^+:()[]/\\&/g')

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10';

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo -ne '1,Hello\n2,World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # POST permits everything.

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS tskv";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo -ne '\x50\x74\x32\xf2\x59\xe9\x8a\xdb\x37\xc6\x4a\xa7\xfb\x22\xc4\x39''\x82\x13\x00\x00\x00\x09\x00\x00\x00''\x90SELECT 1\n' | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1" --data-binary @-

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo 'DROP TABLE IF EXISTS bom' | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @-

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT a' | wc -l

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'DROP TABLE IF EXISTS bad_arrays'

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 clickhouse_client_removed_host_parameter --host="${CLICKHOUSE_HOST}" --query="SELECT 1";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS numbers";

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="SELECT 1"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 clickhouse_client_removed_host_parameter --host="${CLICKHOUSE_HOST}" --query="SELECT * FROM ext" --format=Vertical --external --file=- --structure="s String" --name=ext --format=JSONEachRow <<< '{"s":"Hello"}'

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&add_http_cors_header=1" -H "Origin:smi2.ru" --data-binary @- <<< "SELECT 1" 2>&1 | grep -F "< Access-Control-Allow-Origin: *" | wc -l

View File

@@ -3,6 +3,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS json_noisy"

View File

@@ -2,6 +2,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -sS --local-port 1390 "${CLICKHOUSE_URL}&query_id=my_id&query=SELECT+port+FROM+system.processes+WHERE+query_id%3D%27my_id%27+ORDER+BY+elapsed+LIMIT+1"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --multiquery --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true;

View File

@@ -2,6 +2,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 TABLE_HASH="cityHash64(groupArray(cityHash64(*)))"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 env TZ=UTC ${CLICKHOUSE_CLIENT} --use_client_time_zone=1 --query="SELECT toDateTime(1000000000)"

View File

@@ -3,6 +3,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo -ne "1\n2\n3\n" | $CLICKHOUSE_CLIENT --query="SELECT * FROM _data" --external --file=- --types=Int8;

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo 'one block'

View File

@@ -3,6 +3,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 echo -ne '\\tHello\t123\t\\N\n\\N\t\t2000-01-01 00:00:00\n' | ${CLICKHOUSE_LOCAL} --input-format=TabSeparated --output-format=TabSeparated --structure='s Nullable(String), x Nullable(UInt64), t Nullable(DateTime)' --query="SELECT * FROM table"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 URL="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/"

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 # We should have correct env vars from shell_config.sh to run this test

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 function perform()

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]'

View File

@@ -3,6 +3,7 @@
 set -e

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 QUERY_FIELND_NUM=4

View File

@@ -1,6 +1,7 @@
 #!/usr/bin/env bash

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="SELECT * FROM system.build_options" | perl -lnE 'print $1 if /(BUILD_DATE|BUILD_TYPE|CXX_COMPILER)\s+\S+/ || /(CXX_FLAGS|LINK_FLAGS|TZDATA_VERSION)/';

Some files were not shown because too many files have changed in this diff.