Merge remote-tracking branch 'upstream/master' into HEAD
Commit 18fad6d322
@@ -2,7 +2,7 @@
#include <errmsg.h>
#include <mysql.h>
#else
#include <mysql/errmsg.h>
#include <mysql/errmsg.h> //Y_IGNORE
#include <mysql/mysql.h>
#endif
base/mysqlxx/ya.make (new file, 39 lines)
@@ -0,0 +1,39 @@
# This file is generated automatically, do not edit. See 'ya.make.in' and use 'utils/generate-ya-make' to regenerate it.
LIBRARY()

OWNER(g:clickhouse)

CFLAGS(-g0)

PEERDIR(
    contrib/restricted/boost/libs
    contrib/libs/libmysql_r
    contrib/libs/poco/Foundation
    contrib/libs/poco/Util
)

ADDINCL(
    GLOBAL clickhouse/base
    clickhouse/base
    contrib/libs/libmysql_r
)

NO_COMPILER_WARNINGS()

NO_UTIL()

SRCS(
    Connection.cpp
    Exception.cpp
    Pool.cpp
    PoolFactory.cpp
    PoolWithFailover.cpp
    Query.cpp
    ResultBase.cpp
    Row.cpp
    UseQueryResult.cpp
    Value.cpp
)

END()
base/mysqlxx/ya.make.in (new file, 28 lines)
@@ -0,0 +1,28 @@
LIBRARY()

OWNER(g:clickhouse)

CFLAGS(-g0)

PEERDIR(
    contrib/restricted/boost/libs
    contrib/libs/libmysql_r
    contrib/libs/poco/Foundation
    contrib/libs/poco/Util
)

ADDINCL(
    GLOBAL clickhouse/base
    clickhouse/base
    contrib/libs/libmysql_r
)

NO_COMPILER_WARNINGS()

NO_UTIL()

SRCS(
    <? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)

END()
@@ -4,6 +4,7 @@ RECURSE(
    common
    daemon
    loggers
    mysqlxx
    pcg-random
    widechar_width
    readpassphrase
contrib/NuRaft (vendored submodule)
@@ -1 +1 @@
Subproject commit 2a1bf7d87b4a03561fc66fbb49cee8a288983c5d
Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26
@@ -148,5 +148,10 @@ toc_title: Adopters
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |

[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->
@@ -3079,4 +3079,69 @@ SELECT
FROM fuse_tbl
```

## flatten_nested {#flatten-nested}

Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.

Possible values:

- 1 — Nested column is flattened to separate arrays.
- 0 — Nested column stays a single array of tuples.

Default value: `1`.

**Usage**

If the setting is set to `0`, it is possible to use an arbitrary level of nesting.

**Examples**

Query:

``` sql
SET flatten_nested = 1;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();

SHOW CREATE TABLE t_nest;
```

Result:

``` text
┌─statement───────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
    `n.a` Array(UInt32),
    `n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────┘
```

Query:

``` sql
SET flatten_nested = 0;

CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();

SHOW CREATE TABLE t_nest;
```

Result:

``` text
┌─statement───────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
    `n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
@@ -74,4 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
```

[Original article](https://clickhouse.tech/docs/en/data_types/array/) <!--hide-->
## Array Size {#array-size}

It is possible to find the size of an array by using the `size0` subcolumn without reading the whole column. For multi-dimensional arrays you can use `sizeN-1`, where `N` is the desired dimension.

**Example**

Query:

```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);

SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```

Result:

``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│         1 │ [2]       │ [[4,1]]   │
└───────────┴───────────┴───────────┘
```
@@ -34,7 +34,7 @@ CREATE TABLE test.visits

This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the ‘visits’ table can correspond to zero or any number of conversions.

Only a single nesting level is supported. Columns of nested structures containing arrays are equivalent to multidimensional arrays, so they have limited support (there is no support for storing these columns in tables with the MergeTree engine).
When [flatten_nested](../../../operations/settings/settings.md#flatten-nested) is set to `0` (which is not the default), arbitrary levels of nesting are supported.

In most cases, when working with a nested data structure, its columns are specified with column names separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length.
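For instance, a minimal sketch of such a query against the `Goals` structure declared above (the column names `ID` and `EventTime` are assumed from the example schema; each selected column comes back as an array, and all arrays in one row have the same length):

``` sql
SELECT
    Goals.ID,
    Goals.EventTime
FROM test.visits
LIMIT 5
```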
@@ -20,6 +20,33 @@ To store `Nullable` type values in a table column, ClickHouse uses a separate fi
!!! info "Note"
    Using `Nullable` almost always negatively affects performance; keep this in mind when designing your databases.

## Finding NULL {#finding-null}

It is possible to find `NULL` values in a column by using the `null` subcolumn without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise.

**Example**

Query:

``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);

SELECT n.null FROM nullable;
```

Result:

``` text
┌─n.null─┐
│      0 │
│      1 │
│      0 │
│      1 │
└────────┘
```

## Usage Example {#usage-example}

``` sql
@@ -47,4 +47,32 @@ SELECT tuple(1, NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘
```

## Addressing Tuple Elements {#addressing-tuple-elements}

It is possible to read elements of named tuples using indexes and names:

``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;

INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));

SELECT a.s FROM named_tuples;

SELECT a.2 FROM named_tuples;
```

Result:

``` text
┌─a.s─┐
│ y   │
│ x   │
└─────┘

┌─tupleElement(a, 2)─┐
│                 10 │
│                -10 │
└────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide-->
@@ -39,13 +39,44 @@ Accepts zero arguments and returns an empty array of the appropriate type.

Accepts an empty array and returns a one-element array that is equal to the default value.

## range(end), range(start, end \[, step\]) {#rangeend-rangestart-end-step}

Returns an array of numbers from start to end-1 by step.
If the argument `start` is not specified, defaults to 0.
If the argument `step` is not specified, defaults to 1.
It behaviors almost like pythonic `range`. But the difference is that all the arguments type must be `UInt` numbers.
Just in case, an exception is thrown if arrays with a total length of more than 100,000,000 elements are created in a data block.
## range(end), range(\[start, \] end \[, step\]) {#range}

Returns an array of `UInt` numbers from `start` to `end - 1` by `step`.

**Syntax**

``` sql
range([start, ] end [, step])
```

**Arguments**

- `start` — The first element of the array. Optional, required if `step` is used. Default value: 0. [UInt](../data-types/int-uint.md)
- `end` — The number before which the array is constructed. Required. [UInt](../data-types/int-uint.md)
- `step` — Determines the incremental step between each element in the array. Optional. Default value: 1. [UInt](../data-types/int-uint.md)

**Returned value**

- Array of `UInt` numbers from `start` to `end - 1` by `step`.

**Implementation details**

- All arguments must be positive values: `start`, `end`, and `step` are `UInt` data types, as well as elements of the returned array.
- An exception is thrown if the query results in arrays with a total length of more than 100,000,000 elements.

**Examples**

Query:

``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```

Result:

```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4]   │ [1,3]          │
└─────────────┴─────────────┴────────────────┘
```

## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}
@@ -2957,4 +2957,70 @@ SELECT
FROM fuse_tbl
```

## flatten_nested {#flatten-nested}

Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.

Possible values:

- 1 — the nested column is flattened to separate arrays.
- 0 — the nested column stays a single array of tuples.

Default value: `1`.

**Usage**

If the setting is set to `0`, an arbitrary level of nesting can be used.

**Examples**

Query:

``` sql
SET flatten_nested = 1;

CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();

SHOW CREATE TABLE t_nest;
```

Result:

``` text
┌─statement───────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
    `n.a` Array(UInt32),
    `n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────┘
```

Query:

``` sql
SET flatten_nested = 0;

CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();

SHOW CREATE TABLE t_nest;
```

Result:

``` text
┌─statement───────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
    `n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->
@@ -5,11 +5,9 @@ toc_title: Array(T)

# Array(T) {#data-type-array}

An array of `T`-type elements.
An array of `T`-type elements. `T` can be any type, including an array; thus, multi-dimensional arrays are supported.

`T` can be any type, including an array. Thus, multi-dimensional arrays are supported.

## Creating an Array {#sozdanie-massiva}
## Creating an Array {#creating-an-array}

An array can be created with a function:

@@ -45,7 +43,7 @@ SELECT [1, 2] AS x, toTypeName(x)
└───────┴────────────────────┘
```

## Working with Data Types {#osobennosti-raboty-s-tipami-dannykh}
## Working with Data Types {#working-with-data-types}

The maximum array size is limited to one million elements.

@@ -76,3 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
```

## Array Size {#array-size}

The size of an array can be found with the `size0` subcolumn without reading the whole column. For multi-dimensional arrays, use the `sizeN-1` subcolumn, where `N` is the desired dimension.

**Example**

Query:

```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);

SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```

Result:

``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│         1 │ [2]       │ [[4,1]]   │
└───────────┴───────────┴───────────┘
```
@@ -29,7 +29,7 @@ CREATE TABLE test.visits

This example declares the `Goals` nested data structure, which contains data about goal conversions. Each row of the visits table can correspond to zero or any number of goal conversions.

Only a single nesting level is supported. Columns of nested structures that contain arrays are equivalent to multi-dimensional arrays, so their support is limited (storing such columns in tables with engines of the MergeTree family is not supported).
If the [flatten_nested](../../../operations/settings/settings.md#flatten-nested) setting is set to `0` (which is not the default), arbitrary levels of nesting are supported.

In most cases, when working with a nested data structure, its individual columns are specified, with the column names separated by a dot. These columns are arrays of matching types. All the column arrays of a single nested data structure have the same length.
@@ -13,7 +13,7 @@ toc_title: Nullable

`NULL` is the default value for the `Nullable` type, unless the ClickHouse server configuration specifies otherwise.

## Storage Features {#osobennosti-khraneniia}
## Storage Features {#storage-features}

To store a value of the `Nullable` type, ClickHouse uses:

@@ -27,7 +27,34 @@ toc_title: Nullable
!!! info "Info"
    Using `Nullable` almost always reduces performance; keep this in mind when designing your databases.

## Usage Example {#primer-ispolzovaniia}
## Finding NULL {#finding-null}

`NULL` values in a column can be found with the `null` subcolumn, without reading the whole column. The subcolumn contains `1` if the corresponding value is `NULL` and `0` otherwise.

**Example**

Query:

``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);

SELECT n.null FROM nullable;
```

Result:

``` text
┌─n.null─┐
│      0 │
│      1 │
│      0 │
│      1 │
└────────┘
```

## Usage Example {#usage-example}

``` sql
CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog

@@ -47,4 +74,3 @@ SELECT x + y from t_null
│          5 │
└────────────┘
```
@@ -47,3 +47,30 @@ SELECT tuple(1,NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘
```

## Addressing Tuple Elements {#addressing-tuple-elements}

Tuple elements can be addressed by index and by name:

``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;

INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));

SELECT a.s FROM named_tuples;

SELECT a.2 FROM named_tuples;
```

Result:

``` text
┌─a.s─┐
│ y   │
│ x   │
└─────┘

┌─tupleElement(a, 2)─┐
│                 10 │
│                -10 │
└────────────────────┘
```
@@ -39,10 +39,49 @@ toc_title: "Массивы"

Accepts an empty array and returns a one-element array equal to the default value.

## range(N) {#rangen}

Returns an array of numbers from 0 to N-1.
As a precaution, an exception is thrown if arrays with a total length of more than 100,000,000 elements are created within a data block.
## range(end), range(\[start, \] end \[, step\]) {#range}

Returns an array of numbers from `start` to `end - 1` in steps of `step`.

**Syntax**

``` sql
range([start, ] end [, step])
```

**Arguments**

- `start` — the start of the range. Required when `step` is specified. Default value: `0`. Type: [UInt](../data-types/int-uint.md)
- `end` — the end of the range. Required. Must be greater than `start`. Type: [UInt](../data-types/int-uint.md)
- `step` — the increment step. Optional. Default value: `1`. Type: [UInt](../data-types/int-uint.md)

**Returned values**

- An array of `UInt` numbers from `start` to `end - 1` in steps of `step`.

**Implementation details**

- Negative argument values are not supported: `start`, `end`, and `step` have the `UInt` type.
- An exception is thrown if the query results in arrays with a total length of more than 100,000,000 elements.

**Examples**

Query:

``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```

Result:

```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4]   │ [1,3]          │
└─────────────┴─────────────┴────────────────┘
```

## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}

@@ -1576,4 +1615,4 @@ SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as r
┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐
│ 6   │ Float64                                                                                   │
└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘
```
```
@@ -132,7 +132,7 @@ void ODBCBlockInputStream::insertValue(
auto value = row.get<std::string>(idx);
ReadBufferFromString in(value);
time_t time = 0;
readDateTimeText(time, in);
readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(data_type.get())->getTimeZone());
if (time < 0)
    time = 0;
assert_cast<ColumnUInt32 &>(column).insertValue(time);

@@ -39,7 +39,7 @@ public:
void setFileProgressCallback(ContextMutablePtr context, bool write_progress_on_update = false);

/// How much seconds passed since query execution start.
UInt64 elapsedSeconds() const { return watch.elapsedSeconds(); }
double elapsedSeconds() const { return watch.elapsedSeconds(); }

private:
/// This flag controls whether to show the progress bar. We start showing it after

@@ -59,7 +59,7 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
"or bad RAM on host (look at dmesg or kern.log for enormous amount of EDAC errors, "
"ECC-related reports, Machine Check Exceptions, mcelog; note that ECC memory can fail "
"if the number of errors is huge) or bad CPU on host. If you read data from disk, "
"this can be caused by disk bit rott. This exception protects ClickHouse "
"this can be caused by disk bit rot. This exception protects ClickHouse "
"from data corruption due to hardware failures.";

auto flip_bit = [](char * buf, size_t pos)

@@ -23,9 +23,10 @@ using IndexToLogEntry = std::unordered_map<uint64_t, LogEntryPtr>;
enum class ChangelogVersion : uint8_t
{
    V0 = 0,
    V1 = 1, /// with 64 bit buffer header
};

static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V0;
static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V1;

struct ChangelogRecordHeader
{

@@ -204,7 +204,7 @@ SnapshotMetadataPtr KeeperStorageSnapshot::deserialize(KeeperStorage & storage,
uint8_t version;
readBinary(version, in);
SnapshotVersion current_version = static_cast<SnapshotVersion>(version);
if (current_version > SnapshotVersion::V1)
if (current_version > CURRENT_SNAPSHOT_VERSION)
    throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported snapshot version {}", version);

SnapshotMetadataPtr result = deserializeSnapshotMetadata(in);

@@ -14,8 +14,11 @@ enum SnapshotVersion : uint8_t
{
    V0 = 0,
    V1 = 1, /// with ACL map
    V2 = 2, /// with 64 bit buffer header
};

static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V2;

struct KeeperStorageSnapshot
{
public:

@@ -30,7 +33,7 @@ public:

KeeperStorage * storage;

SnapshotVersion version = SnapshotVersion::V1;
SnapshotVersion version = CURRENT_SNAPSHOT_VERSION;
SnapshotMetadataPtr snapshot_meta;
int64_t session_id;
size_t snapshot_container_size;

@@ -170,7 +170,7 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view
{
    ReadBufferFromString in(value);
    time_t time = 0;
    readDateTimeText(time, in);
    readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(data_type.get())->getTimeZone());
    if (time < 0)
        time = 0;
    assert_cast<ColumnUInt32 &>(column).insertValue(time);

@@ -272,11 +272,11 @@ void PostgreSQLBlockInputStream::prepareArrayInfo(size_t column_idx, const DataT
else if (which.isDate())
    parser = [](std::string & field) -> Field { return UInt16{LocalDate{field}.getDayNum()}; };
else if (which.isDateTime())
    parser = [](std::string & field) -> Field
    parser = [nested](std::string & field) -> Field
    {
        ReadBufferFromString in(field);
        time_t time = 0;
        readDateTimeText(time, in);
        readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(nested.get())->getTimeZone());
        return time;
    };
else if (which.isDecimal32())

@@ -169,7 +169,7 @@ namespace
{
    ReadBufferFromString in(value);
    time_t time = 0;
    readDateTimeText(time, in);
    readDateTimeText(time, in, assert_cast<const DataTypeDateTime &>(data_type).getTimeZone());
    if (time < 0)
        time = 0;
    assert_cast<ColumnUInt32 &>(column).insertValue(time);

@@ -22,9 +22,10 @@ const char * ParserMultiplicativeExpression::operators[] =
    nullptr
};

const char * ParserUnaryMinusExpression::operators[] =
const char * ParserUnaryExpression::operators[] =
{
    "-", "negate",
    "NOT", "not",
    nullptr
};

@@ -539,7 +540,7 @@ bool ParserPrefixUnaryOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Ex
}

bool ParserUnaryMinusExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
bool ParserUnaryExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    /// As an exception, negative numbers should be parsed as literals, and not as an application of the operator.

@@ -245,14 +245,14 @@ protected:
};

class ParserUnaryMinusExpression : public IParserBase
class ParserUnaryExpression : public IParserBase
{
private:
    static const char * operators[];
    ParserPrefixUnaryOperatorExpression operator_parser {operators, std::make_unique<ParserTupleElementExpression>()};

protected:
    const char * getName() const override { return "unary minus expression"; }
    const char * getName() const override { return "unary expression"; }

    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};

@@ -262,7 +262,7 @@ class ParserMultiplicativeExpression : public IParserBase
{
private:
    static const char * operators[];
    ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserUnaryMinusExpression>()};
    ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserUnaryExpression>()};

protected:
    const char * getName() const override { return "multiplicative expression"; }

@@ -423,6 +423,9 @@ KeyCondition::KeyCondition(
*/
Block block_with_constants = getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context);

for (const auto & [name, _] : query_info.syntax_analyzer_result->array_join_result_to_source)
    array_joined_columns.insert(name);

const ASTSelectQuery & select = query_info.query->as<ASTSelectQuery &>();
if (select.where() || select.prewhere())
{

@@ -610,6 +613,10 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions(
    DataTypePtr & out_type)
{
    String expr_name = node->getColumnNameWithoutAlias();

    if (array_joined_columns.count(expr_name))
        return false;

    if (key_subexpr_names.count(expr_name) == 0)
        return false;

@@ -714,6 +721,9 @@ bool KeyCondition::canConstantBeWrappedByFunctions(
{
    String expr_name = ast->getColumnNameWithoutAlias();

    if (array_joined_columns.count(expr_name))
        return false;

    if (key_subexpr_names.count(expr_name) == 0)
    {
        /// Let's check another one case.

@@ -1075,6 +1085,9 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl(
    // Key columns should use canonical names for index analysis
    String name = node->getColumnNameWithoutAlias();

    if (array_joined_columns.count(name))
        return false;

    auto it = key_columns.find(name);
    if (key_columns.end() != it)
    {

@@ -459,6 +459,8 @@ private:
    const ExpressionActionsPtr key_expr;
    /// All intermediate columns are used to calculate key_expr.
    const NameSet key_subexpr_names;

    NameSet array_joined_columns;
    PreparedSets prepared_sets;

    // If true, always allow key_expr to be wrapped by function
@@ -139,7 +139,8 @@ def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file):
testcase_args = copy.deepcopy(args)

testcase_args.testcase_start_time = datetime.now()
testcase_args.testcase_client = f"{testcase_args.client} --log_comment='{case_file}'"
testcase_basename = os.path.basename(case_file)
testcase_args.testcase_client = f"{testcase_args.client} --log_comment='{testcase_basename}'"

if testcase_args.database:
database = testcase_args.database

@@ -377,8 +377,8 @@ class ClickhouseIntegrationTestsRunner:

test_cmd = ' '.join([test for test in sorted(test_names)])
parallel_cmd = " --parallel {} ".format(num_workers) if num_workers > 0 else ""
cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --color=no --durations=0 {}' | tee {}".format(
repo_path, image_cmd, test_cmd, parallel_cmd, _get_deselect_option(self.should_skip_tests()), output_path)
cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format(
repo_path, image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), output_path)

with open(log_path, 'w') as log:
logging.info("Executing cmd: %s", cmd)

@@ -28,4 +28,10 @@ def cleanup_environment():
logging.exception(f"cleanup_environment:{str(e)}")
pass

yield
yield

def pytest_addoption(parser):
parser.addoption("--run-id", default="", help="run-id is used as postfix in _instances_{} directory")

def pytest_configure(config):
os.environ['INTEGRATION_TESTS_RUN_ID'] = config.option.run_id

@@ -1,6 +1,7 @@
import os
import subprocess as sp
import tempfile
import logging
from threading import Timer

@@ -105,6 +106,7 @@ class CommandRequest:
stderr = self.stderr_file.read().decode('utf-8', errors='replace')

if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException('Client timed out!')

if (self.process.returncode != 0 or stderr) and not self.ignore_error:
@@ -29,7 +29,6 @@ from dict2xml import dict2xml
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
from minio import Minio
from minio.deleteobjects import DeleteObject
from helpers.test_tools import assert_eq_with_retry

import docker

@@ -172,6 +171,13 @@ def enable_consistent_hash_plugin(rabbitmq_id):
p.communicate()
return p.returncode == 0

def get_instances_dir():
if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']:
return '_instances_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID'])
else:
return '_instances'

class ClickHouseCluster:
"""ClickHouse cluster with several instances and (possibly) ZooKeeper.

@@ -203,7 +209,14 @@ class ClickHouseCluster:
project_name = pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name
# docker-compose removes everything non-alphanumeric from project names so we do it too.
self.project_name = re.sub(r'[^a-z0-9]', '', project_name.lower())
self.instances_dir = p.join(self.base_dir, '_instances' + ('' if not self.name else '_' + self.name))
instances_dir_name = '_instances'
if self.name:
instances_dir_name += '_' + self.name

if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']:
instances_dir_name += '_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID'])

self.instances_dir = p.join(self.base_dir, instances_dir_name)
self.docker_logs_path = p.join(self.instances_dir, 'docker.log')
self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
self.env_variables = {}

@@ -421,7 +434,15 @@ class ClickHouseCluster:
pass

def get_docker_handle(self, docker_id):
return self.docker_client.containers.get(docker_id)
exception = None
for i in range(5):
try:
return self.docker_client.containers.get(docker_id)
except Exception as ex:
print("Got exception getting docker handle", str(ex))
time.sleep(i * 2)
exception = ex
raise exception

def get_client_cmd(self):
cmd = self.client_bin_path

@@ -577,7 +598,7 @@ class ClickHouseCluster:
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')])
self.base_hdfs_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name,
'--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')]
print("HDFS BASE CMD:{}".format(self.base_hdfs_cmd))
logging.debug("HDFS BASE CMD:{self.base_hdfs_cmd)}")
return self.base_hdfs_cmd

def setup_kerberized_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir):

@@ -1217,8 +1238,8 @@ class ClickHouseCluster:
for bucket in buckets:
if minio_client.bucket_exists(bucket):
delete_object_list = map(
lambda x: DeleteObject(x.object_name),
minio_client.list_objects(bucket, recursive=True),
lambda x: x.object_name,
minio_client.list_objects_v2(bucket, recursive=True),
)
errors = minio_client.remove_objects(bucket, delete_object_list)
for error in errors:

@@ -1468,9 +1489,9 @@ class ClickHouseCluster:
instance.docker_client = self.docker_client
instance.ip_address = self.get_instance_ip(instance.name)

logging.debug("Waiting for ClickHouse start...")
logging.debug("Waiting for ClickHouse start in {instance}, ip: {instance.ip_address}...")
instance.wait_for_start(start_timeout)
logging.debug("ClickHouse started")
logging.debug("ClickHouse {instance} started")

instance.client = Client(instance.ip_address, command=self.client_bin_path)

@@ -1864,8 +1885,7 @@ class ClickHouseInstance:
self.start_clickhouse(stop_start_wait_sec)

def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs):
container_id = self.get_docker_handle().id
return self.cluster.exec_in_container(container_id, cmd, detach, nothrow, **kwargs)
return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs)

def contains_in_log(self, substring):
result = self.exec_in_container(

@@ -1905,8 +1925,7 @@ class ClickHouseInstance:
["bash", "-c", "echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format(path)]) == 'yes\n'

def copy_file_to_container(self, local_path, dest_path):
container_id = self.get_docker_handle().id
return self.cluster.copy_file_to_container(container_id, local_path, dest_path)
return self.cluster.copy_file_to_container(self.docker_id, local_path, dest_path)

def get_process_pid(self, process_name):
output = self.exec_in_container(["bash", "-c",

@@ -1961,6 +1980,7 @@ class ClickHouseInstance:
self.get_docker_handle().start()

def wait_for_start(self, start_timeout=None, connection_timeout=None):
handle = self.get_docker_handle()

if start_timeout is None or start_timeout <= 0:
raise Exception("Invalid timeout: {}".format(start_timeout))

@@ -1983,11 +2003,10 @@ class ClickHouseInstance:
return False

while True:
handle = self.get_docker_handle()
handle.reload()
status = handle.status
if status == 'exited':
raise Exception("Instance `{}' failed to start. Container status: {}, logs: {}"
.format(self.name, status, handle.logs().decode('utf-8')))
raise Exception(f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}")

deadline = start_time + start_timeout
# It is possible that server starts slowly.

@@ -1997,9 +2016,8 @@ class ClickHouseInstance:

current_time = time.time()
if current_time >= deadline:
raise Exception("Timed out while waiting for instance `{}' with ip address {} to start. "
"Container status: {}, logs: {}".format(self.name, self.ip_address, status,
handle.logs().decode('utf-8')))
raise Exception(f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. " \
f"Container status: {status}, logs: {handle.logs().decode('utf-8')}")

socket_timeout = min(start_timeout, deadline - current_time)
@@ -1,5 +1,6 @@
import difflib
import time
import logging
from io import IOBase

@@ -56,7 +57,7 @@ def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_tim
break
time.sleep(sleep_time)
except Exception as ex:
print(("assert_eq_with_retry retry {} exception {}".format(i + 1, ex)))
logging.exception(f"assert_eq_with_retry retry {i+1} exception {ex}")
time.sleep(sleep_time)
else:
val = TSV(get_result(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,

@@ -76,7 +77,7 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti
break
time.sleep(sleep_time)
except Exception as ex:
print("contains_in_log_with_retry retry {} exception {}".format(i + 1, ex))
logging.exception(f"contains_in_log_with_retry retry {i+1} exception {ex}")
time.sleep(sleep_time)
else:
raise AssertionError("'{}' not found in logs".format(substring))

@@ -89,7 +90,7 @@ def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, setti
break
except Exception as ex:
exception = ex
print("Failed to execute query '", query, "' on instance", instance.name, "will retry")
logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry")
time.sleep(sleep_time)
else:
raise exception

@@ -1,6 +1,6 @@
[pytest]
python_files = test*.py
norecursedirs = _instances
norecursedirs = _instances*
timeout = 1800
junit_duration_report = call
junit_suite_name = integration
@@ -43,8 +43,8 @@ def test_backup_from_old_version(started_cluster):

assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"

node1.exec_in_container(['bash', '-c',
'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
node1.exec_in_container(['find', '/var/lib/clickhouse/shadow/1/data/default/source_table'])
node1.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"

@@ -81,8 +81,7 @@ def test_backup_from_old_version_setting(started_cluster):

assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"

node2.exec_in_container(['bash', '-c',
'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
node2.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"

@@ -123,8 +122,7 @@ def test_backup_from_old_version_config(started_cluster):

assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"

node3.exec_in_container(['bash', '-c',
'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
node3.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"

@@ -156,8 +154,7 @@ def test_backup_and_alter(started_cluster):

node4.query("ALTER TABLE test.backup_table DROP PARTITION tuple()")

node4.exec_in_container(['bash', '-c',
'cp -r /var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/ /var/lib/clickhouse/data/test/backup_table/detached'])
node4.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/', '/var/lib/clickhouse/data/test/backup_table/detached'])

node4.query("ALTER TABLE test.backup_table ATTACH PARTITION tuple()")
@@ -39,7 +39,7 @@ class Task:
for instance_name, _ in cluster.instances.items():
instance = cluster.instances[instance_name]
instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_taxi_data.xml'), self.container_task_file)
print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file))
logging.debug(f"Copied task file to container of '{instance_name}' instance. Path {self.container_task_file}")

def start(self):

@@ -48,11 +48,11 @@ class Task:
node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;")
node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;")

instance = cluster.instances['first']
first = cluster.instances['first']

# daily partition database
instance.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
instance.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
first.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
first.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
(
id UUID DEFAULT generateUUIDv4(),
vendor_id String,

@@ -84,12 +84,12 @@ class Task:
ORDER BY (tpep_pickup_datetime, id)
PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""")

instance.query("""CREATE TABLE dailyhistory.yellow_tripdata
first.query("""CREATE TABLE dailyhistory.yellow_tripdata
ON CLUSTER events
AS dailyhistory.yellow_tripdata_staging
ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")

instance.query("""INSERT INTO dailyhistory.yellow_tripdata
first.query("""INSERT INTO dailyhistory.yellow_tripdata
SELECT * FROM generateRandom(
'id UUID DEFAULT generateUUIDv4(),
vendor_id String,

@@ -119,8 +119,8 @@ class Task:
1, 10, 2) LIMIT 50;""")

# monthly partition database
instance.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
first.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
first.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
(
id UUID DEFAULT generateUUIDv4(),
vendor_id String,

@@ -153,16 +153,16 @@ class Task:
ORDER BY (tpep_pickup_datetime, id)
PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""")

instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata
first.query("""CREATE TABLE monthlyhistory.yellow_tripdata
ON CLUSTER events
AS monthlyhistory.yellow_tripdata_staging
ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")

def check(self):
instance = cluster.instances["first"]
a = TSV(instance.query("SELECT count() from dailyhistory.yellow_tripdata"))
b = TSV(instance.query("SELECT count() from monthlyhistory.yellow_tripdata"))
first = cluster.instances["first"]
a = TSV(first.query("SELECT count() from dailyhistory.yellow_tripdata"))
b = TSV(first.query("SELECT count() from monthlyhistory.yellow_tripdata"))
assert a == b, "Distributed tables"

for instance_name, instance in cluster.instances.items():

@@ -187,10 +187,10 @@ def execute_task(started_cluster, task, cmd_options):
task.start()

zk = started_cluster.get_kazoo_client('zoo1')
print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
logging.debug("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))

# Run cluster-copier processes on each node
docker_api = docker.from_env().api
docker_api = started_cluster.docker_client.api
copiers_exec_ids = []

cmd = ['/usr/bin/clickhouse', 'copier',

@@ -201,9 +201,9 @@ def execute_task(started_cluster, task, cmd_options):
'--base-dir', '/var/log/clickhouse-server/copier']
cmd += cmd_options

print(cmd)
logging.debug(f"execute_task cmd: {cmd}")

for instance_name, instance in started_cluster.instances.items():
for instance_name in started_cluster.instances.keys():
instance = started_cluster.instances[instance_name]
container = instance.get_docker_handle()
instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_three_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml")

@@ -430,7 +430,7 @@ def execute_task(started_cluster, task, cmd_options):
print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))

# Run cluster-copier processes on each node
docker_api = docker.from_env().api
docker_api = started_cluster.docker_client.api
copiers_exec_ids = []

cmd = ['/usr/bin/clickhouse', 'copier',

@@ -443,7 +443,7 @@ def execute_task(started_cluster, task, cmd_options):

print(cmd)

for instance_name, instance in started_cluster.instances.items():
for instance_name in started_cluster.instances.keys():
instance = started_cluster.instances[instance_name]
container = instance.get_docker_handle()
instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_two_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml")
@@ -150,7 +150,7 @@ def test_reload_after_loading(started_cluster):
time.sleep(1) # see the comment above
replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '82', '83')
replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '102', '103')
time.sleep(7)
time.sleep(10)
assert query("SELECT dictGetInt32('file', 'a', toUInt64(9))") == "103\n"
assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "83\n"

@@ -1,6 +1,6 @@
<yandex>
    <zookeeper>
        <!-- Required for correct timing in current test case -->
        <session_timeout_ms replace="1">10000</session_timeout_ms>
        <session_timeout_ms replace="1">15000</session_timeout_ms>
    </zookeeper>
</yandex>

@@ -1,6 +1,6 @@
<yandex>
    <zookeeper>
        <!-- Required for correct timing in current test case -->
        <session_timeout_ms replace="1">10000</session_timeout_ms>
        <session_timeout_ms replace="1">15000</session_timeout_ms>
    </zookeeper>
</yandex>
@ -53,6 +53,7 @@ def test_default_database(test_cluster):
|
||||
|
||||
def test_create_view(test_cluster):
|
||||
instance = test_cluster.instances['ch3']
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.super_simple_view ON CLUSTER 'cluster'")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"CREATE VIEW test.super_simple_view ON CLUSTER 'cluster' AS SELECT * FROM system.numbers FORMAT TSV")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
@ -76,7 +77,7 @@ def test_on_server_fail(test_cluster):
|
||||
|
||||
kill_instance.get_docker_handle().stop()
|
||||
request = instance.get_query_request("CREATE TABLE test.test_server_fail ON CLUSTER 'cluster' (i Int8) ENGINE=Null",
|
||||
timeout=30)
|
||||
timeout=180)
|
||||
kill_instance.get_docker_handle().start()
|
||||
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'")
|
||||
@ -92,27 +93,6 @@ def test_on_server_fail(test_cluster):
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE test.test_server_fail ON CLUSTER 'cluster'")
|
||||
|
||||
|
||||
def _test_on_connection_losses(test_cluster, zk_timeout):
|
||||
instance = test_cluster.instances['ch1']
|
||||
kill_instance = test_cluster.instances['ch2']
|
||||
|
||||
with PartitionManager() as pm:
|
||||
pm.drop_instance_zk_connections(kill_instance)
|
||||
request = instance.get_query_request("DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'", timeout=20)
|
||||
time.sleep(zk_timeout)
|
||||
pm.restore_instance_zk_connections(kill_instance)
|
||||
|
||||
test_cluster.check_all_hosts_successfully_executed(request.get_answer())
|
||||
|
||||
|
||||
def test_on_connection_loss(test_cluster):
|
||||
_test_on_connection_losses(test_cluster, 5) # connection loss will occur only (3 sec ZK timeout in config)
|
||||
|
||||
|
||||
def test_on_session_expired(test_cluster):
|
||||
_test_on_connection_losses(test_cluster, 15) # session should be expired (3 sec ZK timeout in config)
|
||||
|
||||
|
||||
def test_simple_alters(test_cluster):
|
||||
instance = test_cluster.instances['ch2']
|
||||
|
||||
@ -190,7 +170,7 @@ def test_implicit_macros(test_cluster):
|
||||
|
||||
instance = test_cluster.instances['ch2']
|
||||
|
||||
test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}'")
|
||||
test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}' SYNC")
|
||||
test_cluster.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test_db ON CLUSTER '{cluster}'")
|
||||
|
||||
test_cluster.ddl_check_query(instance, """
|
||||
@ -270,6 +250,15 @@ def test_create_reserved(test_cluster):
|
||||
def test_rename(test_cluster):
|
||||
instance = test_cluster.instances['ch1']
|
||||
rules = test_cluster.pm_random_drops.pop_rules()
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"DROP TABLE IF EXISTS rename_shard ON CLUSTER cluster SYNC")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"DROP TABLE IF EXISTS rename_new ON CLUSTER cluster SYNC")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"DROP TABLE IF EXISTS rename_old ON CLUSTER cluster SYNC")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"DROP TABLE IF EXISTS rename ON CLUSTER cluster SYNC")
|
||||
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"CREATE TABLE rename_shard ON CLUSTER cluster (id Int64, sid String DEFAULT concat('old', toString(id))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/staging/test_shard', '{replica}') ORDER BY (id)")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
@ -326,12 +315,15 @@ def test_socket_timeout(test_cluster):
|
||||
def test_replicated_without_arguments(test_cluster):
|
||||
rules = test_cluster.pm_random_drops.pop_rules()
|
||||
instance = test_cluster.instances['ch1']
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_atomic.rmt ON CLUSTER cluster SYNC")
|
||||
test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_atomic ON CLUSTER cluster SYNC")
|
||||
|
||||
test_cluster.ddl_check_query(instance, "CREATE DATABASE test_atomic ON CLUSTER cluster ENGINE=Atomic")
|
||||
assert "are supported only for ON CLUSTER queries with Atomic database engine" in \
|
||||
instance.query_and_get_error("CREATE TABLE test_atomic.rmt (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree() ORDER BY n")
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster")
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster SYNC")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"CREATE TABLE test_atomic.rmt UUID '12345678-0000-4000-8000-000000000001' ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
|
||||
assert instance.query("SHOW CREATE test_atomic.rmt FORMAT TSVRaw") == \
|
||||
@@ -349,7 +341,7 @@ def test_replicated_without_arguments(test_cluster):
        "CREATE TABLE test_atomic.rsmt ON CLUSTER cluster (n UInt64, m UInt64, k UInt64) ENGINE=ReplicatedSummingMergeTree((m, k)) ORDER BY n")
    test_cluster.ddl_check_query(instance,
        "CREATE TABLE test_atomic.rvcmt ON CLUSTER cluster (n UInt64, m Int8, k UInt64) ENGINE=ReplicatedVersionedCollapsingMergeTree(m, k) ORDER BY n")
    test_cluster.ddl_check_query(instance, "DROP DATABASE test_atomic ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP DATABASE test_atomic ON CLUSTER cluster SYNC")

    test_cluster.ddl_check_query(instance, "CREATE DATABASE test_ordinary ON CLUSTER cluster ENGINE=Ordinary")
    assert "are supported only for ON CLUSTER queries with Atomic database engine" in \
@@ -359,7 +351,7 @@ def test_replicated_without_arguments(test_cluster):
    test_cluster.ddl_check_query(instance, "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{table}/', '{replica}') ORDER BY n")
    assert instance.query("SHOW CREATE test_ordinary.rmt FORMAT TSVRaw") == \
        "CREATE TABLE test_ordinary.rmt\n(\n `n` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree('/{shard}/rmt/', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n"
    test_cluster.ddl_check_query(instance, "DROP DATABASE test_ordinary ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP DATABASE test_ordinary ON CLUSTER cluster SYNC")
    test_cluster.pm_random_drops.push_rules(rules)

@@ -38,9 +38,9 @@ def test_cluster(request):
def test_replicated_alters(test_cluster):
    instance = test_cluster.instances['ch2']

    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster SYNC")
    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster SYNC")
    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster SYNC")

    # Temporarily disable random ZK packet drops; they might break creation of ReplicatedMergeTree replicas
    firewall_drops_rules = test_cluster.pm_random_drops.pop_rules()
@@ -90,10 +90,10 @@ ENGINE = Distributed(cluster, default, merge_for_alter, i)
    assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
        ''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))

    test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster SYNC")

    # Enable random ZK packet drops
    test_cluster.pm_random_drops.push_rules(firewall_drops_rules)

    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER cluster")
    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER cluster SYNC")
    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER cluster SYNC")
@@ -6,7 +6,7 @@ import threading
import os

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, get_instances_dir


# By default, exceptions thrown in threads will be ignored
@@ -30,7 +30,7 @@ class SafeThread(threading.Thread):


SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/storage_conf.xml')
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/storage_conf.xml'.format(get_instances_dir()))


def replace_config(old, new):
@@ -5,10 +5,10 @@ import string
import time

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, get_instances_dir

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
NOT_RESTORABLE_CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml')
NOT_RESTORABLE_CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml'.format(get_instances_dir()))
COMMON_CONFIGS = ["configs/config.d/bg_processing_pool_conf.xml", "configs/config.d/log_conf.xml", "configs/config.d/clusters.xml"]

@@ -2,13 +2,14 @@ import os
import time

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, get_instances_dir

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/max_table_size_to_drop.xml"])

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/max_table_size_to_drop.xml')

CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/max_table_size_to_drop.xml'.format(get_instances_dir()))


@pytest.fixture(scope="module")
@@ -21,16 +21,27 @@ create_table_sql_template = """
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
"""

def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute(
            "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
drop_table_sql_template = """
    DROP TABLE IF EXISTS `clickhouse`.`{}`;
"""

def get_mysql_conn(started_cluster, host):
    conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port)
    return conn

def create_mysql_table(conn, tableName):
    with conn.cursor() as cursor:
        cursor.execute(create_table_sql_template.format(tableName))

def drop_mysql_table(conn, tableName):
    with conn.cursor() as cursor:
        cursor.execute(drop_table_sql_template.format(tableName))

def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
        cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))


@pytest.fixture(scope="module")
def started_cluster():
@@ -51,7 +62,10 @@ def started_cluster():

def test_many_connections(started_cluster):
    table_name = 'test_many_connections'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')

    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)

    node1.query('''
@@ -66,14 +80,18 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
    query += "SELECT id FROM {t})"

    assert node1.query(query.format(t=table_name)) == '250\n'
    drop_mysql_table(conn, table_name)
    conn.close()


def test_insert_select(started_cluster):
    table_name = 'test_insert_select'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')
    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)

    node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
@@ -87,7 +105,9 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

def test_replace_select(started_cluster):
    table_name = 'test_replace_select'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')
    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)

    node1.query('''
@@ -106,7 +126,9 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

def test_insert_on_duplicate_select(started_cluster):
    table_name = 'test_insert_on_duplicate_select'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')
    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)

    node1.query('''
@@ -125,7 +147,10 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

def test_where(started_cluster):
    table_name = 'test_where'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')

    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)
    node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
@@ -146,6 +171,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

def test_table_function(started_cluster):
    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, 'table_function')
    create_mysql_table(conn, 'table_function')
    table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('table_function')
    assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '0'
@@ -168,6 +194,8 @@ def test_table_function(started_cluster):

def test_binary_type(started_cluster):
    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, 'binary_type')

    with conn.cursor() as cursor:
        cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)")
    table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type')
@@ -177,7 +205,10 @@ def test_binary_type(started_cluster):

def test_enum_type(started_cluster):
    table_name = 'test_enum_type'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')

    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)
    node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1);
@@ -186,20 +217,8 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('
    assert node1.query("SELECT source FROM {} LIMIT 1".format(table_name)).rstrip() == 'URL'
    conn.close()

def get_mysql_conn(started_cluster, host):
    conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port)
    return conn


def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
        cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))

def create_mysql_table(conn, tableName):
    with conn.cursor() as cursor:
        cursor.execute(create_table_sql_template.format(tableName))

def test_mysql_distributed(started_cluster):
    table_name = 'test_replicas'
@@ -218,6 +237,8 @@ def test_mysql_distributed(started_cluster):
    create_mysql_table(conn3, table_name)
    create_mysql_table(conn4, table_name)

    node2.query('DROP TABLE IF EXISTS test_replicas')

    # Storage with 3 replicas
    node2.query('''
        CREATE TABLE test_replicas
@@ -227,6 +248,7 @@ def test_mysql_distributed(started_cluster):
    # Fill remote tables with different data to be able to check
    nodes = [node1, node2, node2, node2]
    for i in range(1, 5):
        nodes[i-1].query('DROP TABLE IF EXISTS test_replica{}'.format(i))
        nodes[i-1].query('''
        CREATE TABLE test_replica{}
        (id UInt32, name String, age UInt32, money UInt32)
@@ -249,6 +271,8 @@ def test_mysql_distributed(started_cluster):
    assert(result == 'host2\nhost3\nhost4\n')

    # Storage with two shards, each with 2 replicas
    node2.query('DROP TABLE IF EXISTS test_shards')

    node2.query('''
        CREATE TABLE test_shards
        (id UInt32, name String, age UInt32, money UInt32)
@@ -275,9 +299,12 @@

def test_external_settings(started_cluster):
    table_name = 'test_external_settings'
    node1.query(f'DROP TABLE IF EXISTS {table_name}')
    conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
    drop_mysql_table(conn, table_name)
    create_mysql_table(conn, table_name)

    node3.query(f'DROP TABLE IF EXISTS {table_name}')
    node3.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
@@ -308,6 +308,21 @@ def test_postgres_distributed(started_cluster):
    assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n')


def test_datetime_with_timezone(started_cluster):
    conn = get_postgres_conn(started_cluster, started_cluster.postgres_ip, True)
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE test_timezone (ts timestamp without time zone, ts_z timestamp with time zone)")
    cursor.execute("insert into test_timezone select '2014-04-04 20:00:00', '2014-04-04 20:00:00'::timestamptz at time zone 'America/New_York';")
    cursor.execute("select * from test_timezone")
    result = cursor.fetchall()[0]
    print(result[0], str(result[1])[:-6])
    node1.query("create table test_timezone ( ts DateTime, ts_z DateTime('America/New_York')) ENGINE PostgreSQL('postgres1:5432', 'clickhouse', 'test_timezone', 'postgres', 'mysecretpassword');")
    assert(node1.query("select ts from test_timezone").strip() == str(result[0]))
    # [:-6] because 2014-04-04 16:00:00+00:00 -> 2014-04-04 16:00:00
    assert(node1.query("select ts_z from test_timezone").strip() == str(result[1])[:-6])
    assert(node1.query("select * from test_timezone") == "2014-04-04 20:00:00\t2014-04-04 16:00:00\n")


if __name__ == '__main__':
    cluster.start()
    input("Cluster created, press any key to destroy...")
@@ -9,12 +9,13 @@ import time

import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir

MINIO_INTERNAL_PORT = 9001

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/dummy/configs/config.d/defaultS3.xml')

CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))


# Creates S3 bucket for tests and allows anonymous read-write access to it.
@@ -20,10 +20,6 @@ system_logs = [
    ('system.metric_log', 1),
]

# Default timeout for flush is 60 seconds;
# decrease the timeout for the test to expose possible issues.
timeout = pytest.mark.timeout(30)


@pytest.fixture(scope='module', autouse=True)
def start_cluster():
@@ -39,7 +35,6 @@ def flush_logs():
        node.query('SYSTEM FLUSH LOGS')


@timeout
@pytest.mark.parametrize('table,exists', system_logs)
def test_system_logs(flush_logs, table, exists):
    q = 'SELECT * FROM {}'.format(table)
@@ -51,7 +46,6 @@ def test_system_logs(flush_logs, table, exists):

# The logic is tricky; let's check that there is no hang in case the message queue
# is not empty (this is another code path in the code).
@timeout
def test_system_logs_non_empty_queue():
    node.query('SELECT 1', settings={
        # right now defaults are the same,
@@ -30,6 +30,7 @@ def started_cluster():

def test_chroot_with_same_root(started_cluster):
    for i, node in enumerate([node1, node2]):
        node.query('DROP TABLE IF EXISTS simple SYNC')
        node.query('''
        CREATE TABLE simple (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
@@ -44,6 +45,7 @@ def test_chroot_with_same_root(started_cluster):

def test_chroot_with_different_root(started_cluster):
    for i, node in [(1, node1), (3, node3)]:
        node.query('DROP TABLE IF EXISTS simple_different SYNC')
        node.query('''
        CREATE TABLE simple_different (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple_different', '{replica}', date, id, 8192);
@@ -22,6 +22,8 @@ def started_cluster():
        cluster.shutdown()

def test_identity(started_cluster):
    node1.query('DROP TABLE IF EXISTS simple SYNC')

    node1.query('''
    CREATE TABLE simple (date Date, id UInt32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
9
tests/queries/0_stateless/01921_not_chain.reference
Normal file
@@ -0,0 +1,9 @@
-- { echo }
SELECT 1 != (NOT 1);
1
SELECT 1 != NOT 1;
1
EXPLAIN SYNTAX SELECT 1 != (NOT 1);
SELECT 1 != NOT 1
EXPLAIN SYNTAX SELECT 1 != NOT 1;
SELECT 1 != NOT 1
5
tests/queries/0_stateless/01921_not_chain.sql
Normal file
@@ -0,0 +1,5 @@
-- { echo }
SELECT 1 != (NOT 1);
SELECT 1 != NOT 1;
EXPLAIN SYNTAX SELECT 1 != (NOT 1);
EXPLAIN SYNTAX SELECT 1 != NOT 1;
@@ -0,0 +1 @@
a c
10
tests/queries/0_stateless/01922_array_join_with_index.sql
Normal file
@@ -0,0 +1,10 @@
DROP TABLE IF EXISTS t_array_index;

CREATE TABLE t_array_index (n Nested(key String, value String))
ENGINE = MergeTree ORDER BY n.key;

INSERT INTO t_array_index VALUES (['a', 'b'], ['c', 'd']);

SELECT * FROM t_array_index ARRAY JOIN n WHERE n.key = 'a';

DROP TABLE IF EXISTS t_array_index;
@@ -246,3 +246,4 @@
01901_test_attach_partition_from
01910_view_dictionary
01824_prefer_global_in_and_join
01576_alias_column_rewrite