Merge branch 'master' into named-collections-in-clickhouse-local

Alexey Milovidov 2024-07-12 23:18:27 +02:00
commit c458a504bf
73 changed files with 4355 additions and 2120 deletions


@ -86,6 +86,7 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
+ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"
RUN npm install -g azurite@3.30.0 \
&& npm install -g tslib && npm install -g node


@ -8,6 +8,7 @@ cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
docutils==0.17.1
+grpcio==1.47.0
gyp==0.1
httplib2==0.20.2
idna==3.3
@ -28,6 +29,7 @@ packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
+protobuf==4.25.3
pyarrow==15.0.0
pyasn1==0.4.8
PyJWT==2.3.0


@ -6,8 +6,8 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a
-MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
-MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))
+MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
@ -320,7 +320,7 @@ export -f run_tests
# This should be enough to setup job and collect artifacts
-TIMEOUT=$((MAX_RUN_TIME - 600))
+TIMEOUT=$((MAX_RUN_TIME - 700))
if [ "$NUM_TRIES" -gt "1" ]; then
# We don't run tests with Ordinary database in PRs, only in master.
# So run new/changed tests with Ordinary at least once in flaky check.


@ -11,6 +11,7 @@ TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"
+SERVER_DIED_SIGN = "Server died, terminating all processes"
DATABASE_SIGN = "Database: "
SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
@ -25,6 +26,7 @@ def process_test_log(log_path, broken_tests):
failed = 0
success = 0
hung = False
+server_died = False
retries = False
success_finish = False
test_results = []
@ -41,6 +43,8 @@ def process_test_log(log_path, broken_tests):
if HUNG_SIGN in line:
hung = True
break
+if SERVER_DIED_SIGN in line:
+server_died = True
if RETRIES_SIGN in line:
retries = True
if any(
@ -123,6 +127,7 @@ def process_test_log(log_path, broken_tests):
failed,
success,
hung,
+server_died,
success_finish,
retries,
test_results,
@ -150,6 +155,7 @@ def process_result(result_path, broken_tests):
failed,
success,
hung,
+server_died,
success_finish,
retries,
test_results,
@ -165,6 +171,10 @@ def process_result(result_path, broken_tests):
description = "Some queries hung, "
state = "failure"
test_results.append(("Some queries hung", "FAIL", "0", ""))
+elif server_died:
+description = "Server died, "
+state = "failure"
+test_results.append(("Server died", "FAIL", "0", ""))
elif not success_finish:
description = "Tests are not finished, "
state = "failure"
@ -218,5 +228,20 @@ if __name__ == "__main__":
state, description, test_results = process_result(args.in_results_dir, broken_tests)
logging.info("Result parsed")
status = (state, description)
+def test_result_comparator(item):
+    # sort by status then by check name
+    order = {
+        "FAIL": 0,
+        "Timeout": 1,
+        "NOT_FAILED": 2,
+        "BROKEN": 3,
+        "OK": 4,
+        "SKIPPED": 5,
+    }
+    return order.get(item[1], 10), str(item[0]), item[1]
+test_results.sort(key=test_result_comparator)
write_results(args.out_results_file, args.out_status_file, test_results, status)
logging.info("Result written")


@ -16,7 +16,7 @@ sidebar_label: clickhouse-local
While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high-performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.
-Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
+Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local file](#query_data_in_file) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
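For quick orientation, a minimal hedged sketch of the "querying a local file" use case referenced above (the file name `reviews.tsv` is hypothetical; `clickhouse-local` infers the format from the extension):

```sql
-- run via: clickhouse-local --query "..."
SELECT count(*)
FROM file('reviews.tsv');
```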
## Download clickhouse-local


@ -18,7 +18,7 @@ ClickHouse also supports:
During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments it will ignore any row in which one or more of them are NULL.
-There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`.
+There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases (`any` and `anyLast` respectively) when followed by the modifier `RESPECT NULLS`. For example, `FIRST_VALUE(b) RESPECT NULLS`.
**Examples:**


@ -5,12 +5,12 @@ sidebar_position: 102
# any
-Selects the first encountered value of a column.
+Selects the first encountered value of a column, ignoring any `NULL` values.
**Syntax**
```sql
-any(column)
+any(column) [RESPECT NULLS]
```
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
@ -20,7 +20,9 @@ Aliases: `any_value`, [`first_value`](../reference/first_value.md).
**Returned value**
-By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently on whether it's NULL or not.
+:::note
+Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
+:::
:::note
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
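A hedged sketch of the difference, adapted from the example removed from the `any_respect_nulls` page (illustrative only; the exact result depends on row order):

```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- By default the leading NULL is skipped, so the first result is 'Amsterdam';
-- with RESPECT NULLS the very first value, NULL, is returned instead.
SELECT any(city), any(city) RESPECT NULLS FROM any_nulls;
```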


@ -1,44 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
sidebar_position: 103
---
# any_respect_nulls
Selects the first encountered value of a column, irregardless of whether it is a `NULL` value or not.
Alias: `any_value_respect_nulls`, `first_value_repect_nulls`.
**Syntax**
```sql
any_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered, irregardless of whether it is a `NULL` value or not.
**Example**
Query:
```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
SELECT any(city), any_respect_nulls(city) FROM any_nulls;
```
```response
┌─any(city)─┬─any_respect_nulls(city)─┐
│ Amsterdam │ ᴺᵁᴸᴸ │
└───────────┴─────────────────────────┘
```
**See Also**
- [any](../reference/any.md)


@ -5,17 +5,21 @@ sidebar_position: 105
# anyLast
-Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
+Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
**Syntax**
```sql
-anyLast(column)
+anyLast(column) [RESPECT NULLS]
```
**Parameters**
- `column`: The column name.
+:::note
+Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
+:::
**Returned value**
- The last value encountered.
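A hedged sketch of the difference, mirroring the example that previously lived on the `anyLast_respect_nulls` page (illustrative only; the exact result depends on row order):

```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'), (NULL), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- By default the trailing NULL is skipped, so anyLast returns 'Valencia';
-- with RESPECT NULLS the final value, NULL, is returned instead.
SELECT anyLast(city), anyLast(city) RESPECT NULLS FROM any_last_nulls;
```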


@ -1,39 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
sidebar_position: 106
---
# anyLast_respect_nulls
Selects the last value encountered, irregardless of whether it is `NULL` or not.
**Syntax**
```sql
anyLast_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered, irregardless of whether it is `NULL` or not.
**Example**
Query:
```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
```
```response
┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
│ Valencia │ ᴺᵁᴸᴸ │
└───────────────┴─────────────────────────────┘
```


@ -45,10 +45,9 @@ ClickHouse-specific aggregate functions:
- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
-- [any](../reference/any_respect_nulls.md)
+- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
-- [anyLast](../reference/anylast_respect_nulls.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)


@ -1,6 +1,6 @@
---
slug: /en/sql-reference/data-types/dynamic
-sidebar_position: 56
+sidebar_position: 62
sidebar_label: Dynamic
---
@ -494,13 +494,43 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
+## JSONExtract functions with Dynamic
+All `JSONExtract*` functions support `Dynamic` type:
+```sql
+SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(dynamic) AS dynamic_type;
+```
+```text
+┌─dynamic─┬─dynamic_type───────────┐
+│ [1,2,3] │ Array(Nullable(Int64)) │
+└─────────┴────────────────────────┘
+```
+```sql
+SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types
+```
+```text
+┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
+│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
+└──────────────────────────────────┴─────────────────────────────────────────────────┘
+```
+```sql
+SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types
+```
+```text
+┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
+│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
+└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
+```
### Binary output format
-In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
+In RowBinary format values of `Dynamic` type are serialized in the following format:
```text
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
```
+See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md)


@ -314,10 +314,71 @@ SELECT groupBitXor(cityHash64(*)) FROM table
Calculates a 32-bit hash code from any type of integer.
This is a relatively fast non-cryptographic hash function of average quality for numbers.
+**Syntax**
+```sql
+intHash32(int)
+```
+**Arguments**
+- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
+**Returned value**
+- 32-bit hash code. [UInt32](../data-types/int-uint.md).
+**Example**
+Query:
+```sql
+SELECT intHash32(42);
+```
+Result:
+```response
+┌─intHash32(42)─┐
+│ 1228623923 │
+└───────────────┘
+```
## intHash64
Calculates a 64-bit hash code from any type of integer.
-It works faster than intHash32. Average quality.
+This is a relatively fast non-cryptographic hash function of average quality for numbers.
+It works faster than [intHash32](#inthash32).
+**Syntax**
+```sql
+intHash64(int)
+```
+**Arguments**
+- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
+**Returned value**
+- 64-bit hash code. [UInt64](../data-types/int-uint.md).
+**Example**
+Query:
+```sql
+SELECT intHash64(42);
+```
+Result:
+```response
+┌────────intHash64(42)─┐
+│ 11490350930367293593 │
+└──────────────────────┘
+```
## SHA1, SHA224, SHA256, SHA512, SHA512_256


@ -0,0 +1,73 @@
---
slug: /en/sql-reference/window-functions/dense_rank
sidebar_label: dense_rank
sidebar_position: 7
---
# dense_rank
Ranks the current row within its partition without gaps. In other words, if the value of any new row encountered is equal to the value of one of the previous rows then it will receive the next successive rank without any gaps in ranking.
The [rank](./rank.md) function provides the same behaviour, but with gaps in ranking.
**Syntax**
```sql
dense_rank (column_name)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- A number for the current row within its partition, without gaps in ranking. [UInt64](../data-types/int-uint.md).
**Example**
The following example is based on the example provided in the video instructional [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
Query:
```sql
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
dense_rank() OVER (ORDER BY salary DESC) AS dense_rank
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─dense_rank─┐
1. │ Gary Chen │ 195000 │ 1 │
2. │ Robert George │ 195000 │ 1 │
3. │ Charles Juarez │ 190000 │ 2 │
4. │ Michael Stanley │ 150000 │ 3 │
5. │ Douglas Benson │ 150000 │ 3 │
6. │ Scott Harrison │ 150000 │ 3 │
7. │ James Henderson │ 140000 │ 4 │
└─────────────────┴────────┴────────────┘
```


@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/first_value
sidebar_label: first_value
sidebar_position: 3
---
# first_value
Returns the first value evaluated within its ordered frame. By default, NULL arguments are skipped, however the `RESPECT NULLS` modifier can be used to override this behaviour.
**Syntax**
```sql
first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
Alias: `any`.
:::note
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- The first value evaluated within its ordered frame.
**Example**
In this example the `first_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
first_value(player) OVER (ORDER BY salary DESC) AS highest_paid_player
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─highest_paid_player─┐
1. │ Gary Chen │ 196000 │ Gary Chen │
2. │ Robert George │ 195000 │ Gary Chen │
3. │ Charles Juarez │ 190000 │ Gary Chen │
4. │ Scott Harrison │ 180000 │ Gary Chen │
5. │ Douglas Benson │ 150000 │ Gary Chen │
6. │ James Henderson │ 140000 │ Gary Chen │
7. │ Michael Stanley │ 100000 │ Gary Chen │
└─────────────────┴────────┴─────────────────────┘
```


@ -1,10 +1,11 @@
---
slug: /en/sql-reference/window-functions/
-sidebar_position: 62
sidebar_label: Window Functions
-title: Window Functions
+sidebar_position: 1
---
+# Window Functions
Windows functions let you perform calculations across a set of rows that are related to the current row.
Some of the calculations that you can do are similar to those that can be done with an aggregate function, but a window function doesn't cause rows to be grouped into a single output - the individual rows are still returned.
@ -12,8 +13,8 @@ Some of the calculations that you can do are similar to those that can be done w
ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.
| Feature | Supported? |
-|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | ✅ |
| expressions involving window functions, e.g. `(count(*) over ()) / 2)` | ✅ |
| `WINDOW` clause (`select ... from table window w as (partition by id)`) | ✅ |
@ -75,14 +76,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
These functions can be used only as a window function.
-- `row_number()` - Number the current row within its partition starting from 1.
+- [`row_number()`](./row_number.md) - Number the current row within its partition starting from 1.
-- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
+- [`first_value(x)`](./first_value.md) - Return the first value evaluated within its ordered frame.
-- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
+- [`last_value(x)`](./last_value.md) - Return the last value evaluated within its ordered frame.
-- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
+- [`nth_value(x, offset)`](./nth_value.md) - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
-- `rank()` - Rank the current row within its partition with gaps.
+- [`rank()`](./rank.md) - Rank the current row within its partition with gaps.
-- `dense_rank()` - Rank the current row within its partition without gaps.
+- [`dense_rank()`](./dense_rank.md) - Rank the current row within its partition without gaps.
-- `lagInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. The offset parameter, if not specified, defaults to 1, meaning it will fetch the value from the next row. If the calculated row exceeds the boundaries of the window frame, the specified default value is returned.
+- [`lagInFrame(x)`](./lagInFrame.md) - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
-- `leadInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. If offset is not provided, it defaults to 1. If the offset leads to a position outside the window frame, the specified default value is used.
+- [`leadInFrame(x)`](./leadInFrame.md) - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
## Examples


@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/lagInFrame
sidebar_label: lagInFrame
sidebar_position: 8
---
# lagInFrame
Returns a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
**Syntax**
```sql
lagInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
**Returned value**
- Value evaluated at the row that is at a specified physical offset before the current row within the ordered frame.
**Example**
This example looks at historical data for a specific stock and uses the `lagInFrame` function to calculate a day-to-day delta and percentage change in the closing price of the stock.
Query:
```sql
CREATE TABLE stock_prices
(
`date` Date,
`open` Float32, -- opening price
`high` Float32, -- daily high
`low` Float32, -- daily low
`close` Float32, -- closing price
`volume` UInt32 -- trade volume
)
Engine = Memory;
INSERT INTO stock_prices FORMAT Values
('2024-06-03', 113.62, 115.00, 112.00, 115.00, 438392000),
('2024-06-04', 115.72, 116.60, 114.04, 116.44, 403324000),
('2024-06-05', 118.37, 122.45, 117.47, 122.44, 528402000),
('2024-06-06', 124.05, 125.59, 118.32, 121.00, 664696000),
('2024-06-07', 119.77, 121.69, 118.02, 120.89, 412386000);
```
```sql
SELECT
date,
close,
lagInFrame(close, 1, close) OVER (ORDER BY date ASC) AS previous_day_close,
COALESCE(ROUND(close - previous_day_close, 2)) AS delta,
COALESCE(ROUND((delta / previous_day_close) * 100, 2)) AS percent_change
FROM stock_prices
ORDER BY date DESC;
```
Result:
```response
┌───────date─┬──close─┬─previous_day_close─┬─delta─┬─percent_change─┐
1. │ 2024-06-07 │ 120.89 │ 121 │ -0.11 │ -0.09 │
2. │ 2024-06-06 │ 121 │ 122.44 │ -1.44 │ -1.18 │
3. │ 2024-06-05 │ 122.44 │ 116.44 │ 6 │ 5.15 │
4. │ 2024-06-04 │ 116.44 │ 115 │ 1.44 │ 1.25 │
5. │ 2024-06-03 │ 115 │ 115 │ 0 │ 0 │
└────────────┴────────┴────────────────────┴───────┴────────────────┘
```


@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/last_value
sidebar_label: last_value
sidebar_position: 4
---
# last_value
Returns the last value evaluated within its ordered frame. By default, NULL arguments are skipped, however the `RESPECT NULLS` modifier can be used to override this behaviour.
**Syntax**
```sql
last_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
Alias: `anyLast`.
:::note
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- The last value evaluated within its ordered frame.
**Example**
In this example the `last_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
last_value(player) OVER (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lowest_paid_player
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─lowest_paid_player─┐
1. │ Gary Chen │ 196000 │ Michael Stanley │
2. │ Robert George │ 195000 │ Michael Stanley │
3. │ Charles Juarez │ 190000 │ Michael Stanley │
4. │ Scott Harrison │ 180000 │ Michael Stanley │
5. │ Douglas Benson │ 150000 │ Michael Stanley │
6. │ James Henderson │ 140000 │ Michael Stanley │
7. │ Michael Stanley │ 100000 │ Michael Stanley │
└─────────────────┴────────┴────────────────────┘
```


@ -0,0 +1,60 @@
---
slug: /en/sql-reference/window-functions/leadInFrame
sidebar_label: leadInFrame
sidebar_position: 9
---
# leadInFrame
Returns a value evaluated at the row that is offset rows after the current row within the ordered frame.
**Syntax**
```sql
leadInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
**Returned value**
- value evaluated at the row that is offset rows after the current row within the ordered frame.
**Example**
This example looks at [historical data](https://www.kaggle.com/datasets/sazidthe1/nobel-prize-data) for Nobel Prize winners and uses the `leadInFrame` function to return a list of successive winners in the physics category.
Query:
```sql
CREATE OR REPLACE VIEW nobel_prize_laureates AS FROM file('nobel_laureates_data.csv') SELECT *;
```
```sql
FROM nobel_prize_laureates SELECT fullName, leadInFrame(year, 1, year) OVER (PARTITION BY category ORDER BY year) AS year, category, motivation WHERE category == 'physics' ORDER BY year DESC LIMIT 9;
```
Result:
```response
┌─fullName─────────┬─year─┬─category─┬─motivation─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
1. │ Pierre Agostini │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
2. │ Ferenc Krausz │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
3. │ Anne L Huillier │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
4. │ Alain Aspect │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
5. │ Anton Zeilinger │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
6. │ John Clauser │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
7. │ Syukuro Manabe │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
8. │ Klaus Hasselmann │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
9. │ Giorgio Parisi │ 2021 │ physics │ for the discovery of the interplay of disorder and fluctuations in physical systems from atomic to planetary scales │
└──────────────────┴──────┴──────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```


@ -0,0 +1,75 @@
---
slug: /en/sql-reference/window-functions/nth_value
sidebar_label: nth_value
sidebar_position: 5
---
# nth_value
Returns the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
**Syntax**
```sql
nth_value (x, offset)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — nth row to evaluate current row against.
**Returned value**
- The first non-NULL value evaluated against the nth row (offset) in its ordered frame.
**Example**
In this example the `nth-value` function is used to find the third-highest salary from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary, nth_value(player,3) OVER(ORDER BY salary DESC) AS third_highest_salary FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─third_highest_salary─┐
1. │ Gary Chen │ 195000 │ │
2. │ Robert George │ 195000 │ │
3. │ Charles Juarez │ 190000 │ Charles Juarez │
4. │ Scott Harrison │ 180000 │ Charles Juarez │
5. │ Douglas Benson │ 150000 │ Charles Juarez │
6. │ James Henderson │ 140000 │ Charles Juarez │
7. │ Michael Stanley │ 100000 │ Charles Juarez │
└─────────────────┴────────┴──────────────────────┘
```


@ -0,0 +1,74 @@
---
slug: /en/sql-reference/window-functions/rank
sidebar_label: rank
sidebar_position: 6
---
# rank
Ranks the current row within its partition with gaps. In other words, if the value of any row it encounters is equal to the value of a previous row then it will receive the same rank as that previous row.
The rank of the next row is then equal to the rank of the previous row plus a gap equal to the number of times the previous rank was given.
The [dense_rank](./dense_rank.md) function provides the same behaviour but without gaps in ranking.
**Syntax**
```sql
rank (column_name)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- A number for the current row within its partition, including gaps. [UInt64](../data-types/int-uint.md).
**Example**
The following example is based on the example provided in the video instructional [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
Query:
```sql
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
rank() OVER (ORDER BY salary DESC) AS rank
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─rank─┐
1. │ Gary Chen │ 195000 │ 1 │
2. │ Robert George │ 195000 │ 1 │
3. │ Charles Juarez │ 190000 │ 3 │
4. │ Douglas Benson │ 150000 │ 4 │
5. │ Michael Stanley │ 150000 │ 4 │
6. │ Scott Harrison │ 150000 │ 4 │
7. │ James Henderson │ 140000 │ 7 │
└─────────────────┴────────┴──────┘
```


@ -0,0 +1,67 @@
---
slug: /en/sql-reference/window-functions/row_number
sidebar_label: row_number
sidebar_position: 2
---
# row_number
Numbers the current row within its partition starting from 1.
**Syntax**
```sql
row_number (column_name)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_withing_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- A number for the current row within its partition. [UInt64](../data-types/int-uint.md).
**Example**
The following example is based on the example provided in the video instructional [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
Query:
```sql
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M');
```
```sql
SELECT player, salary,
row_number() OVER (ORDER BY salary DESC) AS row_number
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─row_number─┐
1. │ Gary Chen │ 195000 │ 1 │
2. │ Robert George │ 195000 │ 2 │
3. │ Charles Juarez │ 190000 │ 3 │
4. │ Scott Harrison │ 150000 │ 4 │
5. │ Michael Stanley │ 150000 │ 5 │
└─────────────────┴────────┴────────────┘
```


@ -35,10 +35,9 @@ disable = '''
broad-except,
bare-except,
no-else-return,
-global-statement
+global-statement,
'''
[tool.pylint.SIMILARITIES]
# due to SQL
min-similarity-lines=1000


@ -38,10 +38,19 @@ namespace ErrorCodes
extern const int CANNOT_MREMAP;
}
+void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size)
+{
+auto & logger = Poco::Logger::root();
+LOG_FATAL(&logger, "Logical error: '{}'.", description);
+if (trace)
+LOG_FATAL(&logger, "Stack trace (when copying this message, always include the lines below):\n\n{}", StackTrace::toString(trace, trace_offset, trace_size));
+abort();
+}
void abortOnFailedAssertion(const String & description)
{
-LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", description);
-abort();
+StackTrace st;
+abortOnFailedAssertion(description, st.getFramePointers().data(), st.getOffset(), st.getSize());
}
bool terminate_on_any_exception = false;
@ -58,7 +67,7 @@ void handle_error_code(const std::string & msg, int code, bool remote, const Exc
#ifdef ABORT_ON_LOGICAL_ERROR
if (code == ErrorCodes::LOGICAL_ERROR)
{
-abortOnFailedAssertion(msg);
+abortOnFailedAssertion(msg, trace.data(), 0, trace.size());
}
#endif


@ -25,8 +25,6 @@ namespace DB
class AtomicLogger;
-[[noreturn]] void abortOnFailedAssertion(const String & description);
/// This flag can be set for testing purposes - to check that no exceptions are thrown.
extern bool terminate_on_any_exception;
@ -167,6 +165,8 @@ protected:
mutable std::vector<StackTrace::FramePointers> capture_thread_frame_pointers;
};
+[[noreturn]] void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size);
+[[noreturn]] void abortOnFailedAssertion(const String & description);
std::string getExceptionStackTraceString(const std::exception & e);
std::string getExceptionStackTraceString(std::exception_ptr e);


@ -14,6 +14,7 @@
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_ALLOCATE_MEMORY;


@ -545,7 +545,7 @@ std::string StackTrace::toString() const
return toStringCached(frame_pointers, offset, size);
}
-std::string StackTrace::toString(void ** frame_pointers_raw, size_t offset, size_t size)
+std::string StackTrace::toString(void * const * frame_pointers_raw, size_t offset, size_t size)
{
__msan_unpoison(frame_pointers_raw, size * sizeof(*frame_pointers_raw));


@ -59,7 +59,7 @@ public:
const FramePointers & getFramePointers() const { return frame_pointers; }
std::string toString() const;
-static std::string toString(void ** frame_pointers, size_t offset, size_t size);
+static std::string toString(void * const * frame_pointers, size_t offset, size_t size);
static void dropCache();
/// @param fatal - if true, will process inline frames (slower)


@ -996,6 +996,10 @@ void ZooKeeper::receiveEvent()
if (request_info.callback)
request_info.callback(*response);
+/// Finalize current session if we receive a hardware error from ZooKeeper
+if (err != Error::ZOK && isHardwareError(err))
+finalize(/*error_send*/ false, /*error_receive*/ true, fmt::format("Hardware error: {}", err));
}


@ -25,7 +25,7 @@ namespace DB
template <typename To, typename From>
inline To assert_cast(From && from)
{
-#ifndef NDEBUG
+#ifdef ABORT_ON_LOGICAL_ERROR
try
{
if constexpr (std::is_pointer_v<To>)


@ -228,7 +228,6 @@ Pool::Entry Pool::tryGet()
for (auto connection_it = connections.cbegin(); connection_it != connections.cend();)
{
Connection * connection_ptr = *connection_it;
-/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
if (connection_ptr->ref_count == 0)
{
{


@ -64,17 +64,6 @@ public:
decrementRefCount();
}
-Entry & operator= (const Entry & src) /// NOLINT
-{
-pool = src.pool;
-if (data)
-decrementRefCount();
-data = src.data;
-if (data)
-incrementRefCount();
-return * this;
-}
bool isNull() const
{
return data == nullptr;


@ -13,13 +13,11 @@ mysqlxx::Pool::Entry getWithFailover(mysqlxx::Pool & connections_pool)
constexpr size_t max_tries = 3;
-mysqlxx::Pool::Entry worker_connection;
for (size_t try_no = 1; try_no <= max_tries; ++try_no)
{
try
{
-worker_connection = connections_pool.tryGet();
+mysqlxx::Pool::Entry worker_connection = connections_pool.tryGet();
if (!worker_connection.isNull())
{


@ -11,6 +11,7 @@
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
+#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTTTLElement.h>
#include <Poco/String.h>
@ -211,6 +212,13 @@ void DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction
qualified_name.database = table_identifier->getDatabaseName();
qualified_name.table = table_identifier->shortName();
}
+else if (arg->as<ASTSubquery>())
+{
+/// Allow IN subquery.
+/// Do not add tables from the subquery into dependencies,
+/// because CREATE will succeed anyway.
+return;
+}
else
{
assert(false);


@ -107,12 +107,24 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name,
StoragePtr DatabaseAtomic::detachTable(ContextPtr /* context */, const String & name)
{
+// it is important to call the destructors of not_in_use without
+// locked mutex to avoid potential deadlock.
DetachedTables not_in_use;
-std::lock_guard lock(mutex);
-auto table = DatabaseOrdinary::detachTableUnlocked(name);
-table_name_to_path.erase(name);
-detached_tables.emplace(table->getStorageID().uuid, table);
-not_in_use = cleanupDetachedTables();
+StoragePtr table;
+{
+std::lock_guard lock(mutex);
+table = DatabaseOrdinary::detachTableUnlocked(name);
+table_name_to_path.erase(name);
+detached_tables.emplace(table->getStorageID().uuid, table);
+not_in_use = cleanupDetachedTables();
+}
+if (!not_in_use.empty())
+{
+not_in_use.clear();
+LOG_DEBUG(log, "Finished removing not used detached tables");
+}
return table;
}


@ -29,6 +29,7 @@
#include <Common/randomNumber.h>
#include <Common/setThreadName.h>
#include <base/sleep.h>
+#include <base/scope_guard.h>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <Parsers/CommonParsers.h>
@ -532,13 +533,17 @@ static inline void dumpDataForTables(
bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & metadata)
{
bool opened_transaction = false;
-mysqlxx::PoolWithFailover::Entry connection;
while (!isCancelled())
{
try
{
-connection = pool.tryGet();
+mysqlxx::PoolWithFailover::Entry connection = pool.tryGet();
+SCOPE_EXIT({
+if (opened_transaction)
+connection->query("ROLLBACK").execute();
+});
if (connection.isNull())
{
if (settings->max_wait_time_when_mysql_unavailable < 0)
@ -602,9 +607,6 @@ bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & meta
{
tryLogCurrentException(log);
-if (opened_transaction)
-connection->query("ROLLBACK").execute();
if (settings->max_wait_time_when_mysql_unavailable < 0)
throw;

File diff suppressed because it is too large.


@ -0,0 +1,41 @@
#pragma once
#include <DataTypes/IDataType.h>
#include <Columns/IColumn.h>
#include <Formats/FormatSettings.h>
namespace DB
{
struct JSONExtractInsertSettings
{
/// If false, JSON boolean values won't be inserted into columns with integer types
/// It's used in JSONExtractInt64/JSONExtractUInt64/... functions.
bool convert_bool_to_integer = true;
/// If true, when complex type like Array/Map has both valid and invalid elements,
/// the default value will be inserted on invalid elements.
/// For example, if we have [1, "hello", 2] and type Array(UInt32),
/// we will insert [1, 0, 2] in the column. Used in all JSONExtract functions.
bool insert_default_on_invalid_elements_in_complex_types = false;
};
template <typename JSONParser>
class JSONExtractTreeNode
{
public:
JSONExtractTreeNode() = default;
virtual ~JSONExtractTreeNode() = default;
virtual bool insertResultToColumn(IColumn &, const typename JSONParser::Element &, const JSONExtractInsertSettings & insert_setting, const FormatSettings & format_settings, String & error) const = 0;
};
/// Build a tree for insertion JSON element into a column with provided data type.
template <typename JSONParser>
std::unique_ptr<JSONExtractTreeNode<JSONParser>> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message);
template <typename JSONParser>
void jsonElementToString(const typename JSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings);
template <typename JSONParser, typename NumberType>
bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error);
}
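For orientation, a hedged SQL-level illustration of the behaviour that the `insert_default_on_invalid_elements_in_complex_types` comment above describes (the exact output may vary by version and settings):

```sql
-- 'hello' cannot be parsed as UInt32, so the element's default value (0)
-- is inserted in its place, giving [1,0,2].
SELECT JSONExtract('{"a" : [1, "hello", 2]}', 'a', 'Array(UInt32)') AS value;
```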


@ -225,19 +225,6 @@ namespace
Paths paths;
};
-bool checkIfTypesAreEqual(const DataTypes & types)
-{
-if (types.empty())
-return true;
-for (size_t i = 1; i < types.size(); ++i)
-{
-if (!types[0]->equals(*types[i]))
-return false;
-}
-return true;
-}
void updateTypeIndexes(DataTypes & data_types, TypeIndexesSet & type_indexes)
{
type_indexes.clear();
@ -272,24 +259,31 @@ namespace
type_indexes.erase(TypeIndex::Nothing);
}
-/// If we have both Int64 and UInt64, convert all Int64 to UInt64,
+/// If we have both Int64 and UInt64, convert all not-negative Int64 to UInt64,
/// because UInt64 is inferred only in case of Int64 overflow.
-void transformIntegers(DataTypes & data_types, TypeIndexesSet & type_indexes)
+void transformIntegers(DataTypes & data_types, TypeIndexesSet & type_indexes, JSONInferenceInfo * json_info)
{
if (!type_indexes.contains(TypeIndex::Int64) || !type_indexes.contains(TypeIndex::UInt64))
return;
+bool have_negative_integers = false;
for (auto & type : data_types)
{
if (WhichDataType(type).isInt64())
-type = std::make_shared<DataTypeUInt64>();
+{
+bool is_negative = json_info && json_info->negative_integers.contains(type.get());
+have_negative_integers |= is_negative;
+if (!is_negative)
+type = std::make_shared<DataTypeUInt64>();
+}
}
-type_indexes.erase(TypeIndex::Int64);
+if (!have_negative_integers)
+type_indexes.erase(TypeIndex::Int64);
}
/// If we have both Int64 and Float64 types, convert all Int64 to Float64.
-void transformIntegersAndFloatsToFloats(DataTypes & data_types, TypeIndexesSet & type_indexes)
+void transformIntegersAndFloatsToFloats(DataTypes & data_types, TypeIndexesSet & type_indexes, JSONInferenceInfo * json_info)
{
bool have_floats = type_indexes.contains(TypeIndex::Float64);
bool have_integers = type_indexes.contains(TypeIndex::Int64) || type_indexes.contains(TypeIndex::UInt64);
@ -300,7 +294,12 @@ namespace
{
WhichDataType which(type);
if (which.isInt64() || which.isUInt64())
-type = std::make_shared<DataTypeFloat64>();
+{
+auto new_type = std::make_shared<DataTypeFloat64>();
+if (json_info && json_info->numbers_parsed_from_json_strings.erase(type.get()))
+json_info->numbers_parsed_from_json_strings.insert(new_type.get());
+type = new_type;
+}
}
type_indexes.erase(TypeIndex::Int64);
@ -635,9 +634,9 @@ namespace
if (settings.try_infer_integers)
{
/// Transform Int64 to UInt64 if needed.
-transformIntegers(data_types, type_indexes);
+transformIntegers(data_types, type_indexes, json_info);
/// Transform integers to floats if needed.
-transformIntegersAndFloatsToFloats(data_types, type_indexes);
+transformIntegersAndFloatsToFloats(data_types, type_indexes, json_info);
}
/// Transform Date to DateTime or both to String if needed.
@ -887,7 +886,7 @@ namespace
}
template <bool is_json>
-DataTypePtr tryInferNumber(ReadBuffer & buf, const FormatSettings & settings)
+DataTypePtr tryInferNumber(ReadBuffer & buf, const FormatSettings & settings, JSONInferenceInfo * json_info)
{
if (buf.eof())
return nullptr;
@ -911,7 +910,12 @@ namespace
Int64 tmp_int;
buf.position() = number_start;
if (tryReadIntText(tmp_int, buf))
-return std::make_shared<DataTypeInt64>();
+{
+auto type = std::make_shared<DataTypeInt64>();
+if (json_info && tmp_int < 0)
+json_info->negative_integers.insert(type.get());
+return type;
+}
/// In case of Int64 overflow we can try to infer UInt64.
UInt64 tmp_uint;
@ -934,7 +938,12 @@ namespace
Int64 tmp_int;
if (tryReadIntText(tmp_int, peekable_buf))
-return std::make_shared<DataTypeInt64>();
+{
+auto type = std::make_shared<DataTypeInt64>();
+if (json_info && tmp_int < 0)
+json_info->negative_integers.insert(type.get());
+return type;
+}
peekable_buf.rollbackToCheckpoint(/* drop= */ true);
/// In case of Int64 overflow we can try to infer UInt64.
@ -952,7 +961,7 @@ namespace
     }

     template <bool is_json>
-    DataTypePtr tryInferNumberFromStringImpl(std::string_view field, const FormatSettings & settings)
+    DataTypePtr tryInferNumberFromStringImpl(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_inference_info = nullptr)
     {
         ReadBufferFromString buf(field);
@ -960,7 +969,12 @@ namespace
         {
             Int64 tmp_int;
             if (tryReadIntText(tmp_int, buf) && buf.eof())
-                return std::make_shared<DataTypeInt64>();
+            {
+                auto type = std::make_shared<DataTypeInt64>();
+                if (json_inference_info && tmp_int < 0)
+                    json_inference_info->negative_integers.insert(type.get());
+                return type;
+            }

             /// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof.
             buf.position() = buf.buffer().begin();
@ -1011,7 +1025,7 @@ namespace
         {
             if (settings.json.try_infer_numbers_from_strings)
             {
-                if (auto number_type = tryInferNumberFromStringImpl<true>(field, settings))
+                if (auto number_type = tryInferNumberFromStringImpl<true>(field, settings, json_info))
                 {
                     json_info->numbers_parsed_from_json_strings.insert(number_type.get());
                     return number_type;
@ -1254,10 +1268,23 @@ namespace
         }

         /// Number
-        return tryInferNumber<is_json>(buf, settings);
+        return tryInferNumber<is_json>(buf, settings, json_info);
     }
 }
bool checkIfTypesAreEqual(const DataTypes & types)
{
if (types.empty())
return true;
for (size_t i = 1; i < types.size(); ++i)
{
if (!types[0]->equals(*types[i]))
return false;
}
return true;
}
 void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings)
 {
     DataTypes types = {first, second};
@ -1275,6 +1302,11 @@ void transformInferredJSONTypesIfNeeded(
     second = std::move(types[1]);
 }

+void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, JSONInferenceInfo * json_info)
+{
+    transformInferredTypesIfNeededImpl<true>(types, settings, json_info);
+}
+
 void transformInferredJSONTypesFromDifferentFilesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings)
 {
     JSONInferenceInfo json_info;
@ -1396,6 +1428,12 @@ DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSettings & settings)
     return tryInferNumberFromStringImpl<false>(field, settings);
 }

+DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_info)
+{
+    return tryInferNumberFromStringImpl<false>(field, settings, json_info);
+}
+
 DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const FormatSettings & settings)
 {
     if (settings.try_infer_dates && tryInferDate(field))
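For readers skimming the patch, here is a minimal standalone Python sketch of the merging rule it implements (not ClickHouse code; the type names and data layout are invented for illustration): Int64 candidates are only widened to UInt64 when no negative Int64 value was observed, otherwise both candidates are kept for the later transforms to resolve.

# Standalone illustration of the Int64/UInt64 merging rule from the patch above.
# Type names are plain strings here; in ClickHouse these are DataTypePtr objects.

def merge_integer_types(observed):
    """observed: list of (type_name, value) pairs inferred for one column."""
    types = {t for t, _ in observed}
    if "Int64" in types and "UInt64" in types:
        has_negative_int64 = any(t == "Int64" and v < 0 for t, v in observed)
        if not has_negative_int64:
            # Safe to widen: UInt64 is only inferred on Int64 overflow.
            return {"UInt64"}
        # A negative value was seen: keep both candidates and let the
        # later transforms decide the common type instead of widening.
        return {"Int64", "UInt64"}
    return types

print(merge_integer_types([("Int64", 42), ("UInt64", 18446744073709551615)]))
# {'UInt64'}
print(merge_integer_types([("Int64", -42), ("UInt64", 18446744073709551615)]))
# {'Int64', 'UInt64'} (set order may vary)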

View File

@ -2,6 +2,7 @@
 #include <DataTypes/IDataType.h>
 #include <IO/ReadBuffer.h>
+#include <Formats/FormatSettings.h>

 #include <vector>
@ -18,6 +19,11 @@ struct JSONInferenceInfo
     /// We store numbers that were parsed from strings.
     /// It's used in types transformation to change such numbers back to string if needed.
     std::unordered_set<const IDataType *> numbers_parsed_from_json_strings;
+    /// Store integer types that were inferred from negative numbers.
+    /// It's used to determine common type for Int64 and UInt64
+    /// TODO: check it not only in JSON formats.
+    std::unordered_set<const IDataType *> negative_integers;
+
     /// Indicates if currently we are inferring type for Map/Object key.
     bool is_object_key = false;
     /// When we transform types for the same column from different files
@ -48,6 +54,7 @@ DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const FormatSettings & settings);
 /// Try to parse a number value from a string. By default, it tries to parse Float64,
 /// but if setting try_infer_integers is enabled, it also tries to parse Int64.
 DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSettings & settings);
+DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSettings & settings, JSONInferenceInfo * json_info);

 /// It takes two types inferred for the same column and tries to transform them to a common type if possible.
 /// It's also used when we try to infer some not ordinary types from another types.
@ -77,6 +84,7 @@ void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings);
 /// Example 2:
 /// We merge DataTypeJSONPaths types to a single DataTypeJSONPaths type with union of all JSON paths.
 void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings, JSONInferenceInfo * json_info);
+void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, JSONInferenceInfo * json_info);

 /// Make final transform for types inferred in JSON format. It does 3 types of transformation:
 /// 1) Checks if type is unnamed Tuple(...), tries to transform nested types to find a common type for them and if all nested types
@ -107,4 +115,6 @@ NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header);
 /// Check if type contains Nothing, like Array(Tuple(Nullable(Nothing), String))
 bool checkIfTypeIsComplete(const DataTypePtr & type);

+bool checkIfTypesAreEqual(const DataTypes & types);
+
 }

File diff suppressed because it is too large
File diff suppressed because it is too large

View File

@ -49,7 +49,7 @@ namespace
             const String & dest_blob_,
             std::shared_ptr<const AzureBlobStorage::RequestSettings> settings_,
             ThreadPoolCallbackRunnerUnsafe<void> schedule_,
-            const Poco::Logger * log_)
+            LoggerPtr log_)
             : create_read_buffer(create_read_buffer_)
             , client(client_)
             , offset (offset_)
@ -74,7 +74,7 @@ namespace
         const String & dest_blob;
         std::shared_ptr<const AzureBlobStorage::RequestSettings> settings;
         ThreadPoolCallbackRunnerUnsafe<void> schedule;
-        const Poco::Logger * log;
+        const LoggerPtr log;
size_t max_single_part_upload_size; size_t max_single_part_upload_size;
struct UploadPartTask struct UploadPartTask
@ -83,7 +83,6 @@ namespace
             size_t part_size;
             std::vector<std::string> block_ids;
             bool is_finished = false;
-            std::exception_ptr exception;
         };

         size_t normal_part_size;
@ -92,6 +91,7 @@ namespace
         std::list<UploadPartTask> TSA_GUARDED_BY(bg_tasks_mutex) bg_tasks;
         int num_added_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
         int num_finished_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
+        std::exception_ptr bg_exception TSA_GUARDED_BY(bg_tasks_mutex);

         std::mutex bg_tasks_mutex;
         std::condition_variable bg_tasks_condvar;
@ -186,7 +186,7 @@ namespace
             }
             catch (...)
             {
-                tryLogCurrentException(__PRETTY_FUNCTION__);
+                tryLogCurrentException(log, fmt::format("While performing multipart upload of blob {} in container {}", dest_blob, dest_container_for_logging));
                 waitForAllBackgroundTasks();
                 throw;
             }
@ -242,7 +242,12 @@ namespace
                     }
                     catch (...)
                     {
-                        task->exception = std::current_exception();
+                        std::lock_guard lock(bg_tasks_mutex);
+                        if (!bg_exception)
+                        {
+                            tryLogCurrentException(log, "While writing part");
+                            bg_exception = std::current_exception(); /// The exception will be rethrown after all background tasks stop working.
+                        }
                     }
                     task_finish_notify();
                 }, Priority{});
@ -299,13 +304,13 @@ namespace
             /// Suppress warnings because bg_tasks_mutex is actually hold, but tsa annotations do not understand std::unique_lock
             bg_tasks_condvar.wait(lock, [this]() {return TSA_SUPPRESS_WARNING_FOR_READ(num_added_bg_tasks) == TSA_SUPPRESS_WARNING_FOR_READ(num_finished_bg_tasks); });

-            auto & tasks = TSA_SUPPRESS_WARNING_FOR_WRITE(bg_tasks);
-            for (auto & task : tasks)
-            {
-                if (task.exception)
-                    std::rethrow_exception(task.exception);
-                block_ids.insert(block_ids.end(),task.block_ids.begin(), task.block_ids.end());
-            }
+            auto exception = TSA_SUPPRESS_WARNING_FOR_READ(bg_exception);
+            if (exception)
+                std::rethrow_exception(exception);
+
+            const auto & tasks = TSA_SUPPRESS_WARNING_FOR_READ(bg_tasks);
+            for (const auto & task : tasks)
+                block_ids.insert(block_ids.end(),task.block_ids.begin(), task.block_ids.end());
         }
}; };
} }
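The same "record only the first background failure under a lock, rethrow it after every worker has stopped" pattern, sketched in Python with the standard library; the names (UploadPool, upload_part) are illustrative and not the Azure/S3 helpers above.

# Illustrative sketch, not ClickHouse code: keep the first background exception
# and rethrow it only after all workers finish, so late tasks cannot race with
# the caller's cleanup (e.g. aborting a multipart upload).
import threading
from concurrent.futures import ThreadPoolExecutor

class UploadPool:
    def __init__(self, workers=4):
        self._executor = ThreadPoolExecutor(max_workers=workers)
        self._lock = threading.Lock()
        self._bg_exception = None
        self._futures = []

    def submit(self, fn, *args):
        def task():
            try:
                fn(*args)
            except Exception as e:  # record only the first failure
                with self._lock:
                    if self._bg_exception is None:
                        self._bg_exception = e
        self._futures.append(self._executor.submit(task))

    def wait_all(self):
        for f in self._futures:
            f.result()              # every task has stopped before we rethrow
        self._executor.shutdown()
        if self._bg_exception is not None:
            raise self._bg_exception

def upload_part(n):
    if n == 2:
        raise RuntimeError(f"part {n} failed")

pool = UploadPool()
for part in range(5):
    pool.submit(upload_part, part)
try:
    pool.wait_all()
except RuntimeError as e:
    print("upload failed:", e)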
@ -321,7 +326,8 @@ void copyDataToAzureBlobStorageFile(
std::shared_ptr<const AzureBlobStorage::RequestSettings> settings, std::shared_ptr<const AzureBlobStorage::RequestSettings> settings,
ThreadPoolCallbackRunnerUnsafe<void> schedule) ThreadPoolCallbackRunnerUnsafe<void> schedule)
{ {
UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container_for_logging, dest_blob, settings, schedule, &Poco::Logger::get("copyDataToAzureBlobStorageFile")}; auto log = getLogger("copyDataToAzureBlobStorageFile");
UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container_for_logging, dest_blob, settings, schedule, log};
helper.performCopy(); helper.performCopy();
} }
@ -339,9 +345,11 @@ void copyAzureBlobStorageFile(
const ReadSettings & read_settings, const ReadSettings & read_settings,
ThreadPoolCallbackRunnerUnsafe<void> schedule) ThreadPoolCallbackRunnerUnsafe<void> schedule)
{ {
auto log = getLogger("copyAzureBlobStorageFile");
if (settings->use_native_copy) if (settings->use_native_copy)
{ {
LOG_TRACE(getLogger("copyAzureBlobStorageFile"), "Copying Blob: {} from Container: {} using native copy", src_container_for_logging, src_blob); LOG_TRACE(log, "Copying Blob: {} from Container: {} using native copy", src_container_for_logging, src_blob);
ProfileEvents::increment(ProfileEvents::AzureCopyObject); ProfileEvents::increment(ProfileEvents::AzureCopyObject);
if (dest_client->GetClickhouseOptions().IsClientForDisk) if (dest_client->GetClickhouseOptions().IsClientForDisk)
ProfileEvents::increment(ProfileEvents::DiskAzureCopyObject); ProfileEvents::increment(ProfileEvents::DiskAzureCopyObject);
@ -352,7 +360,7 @@ void copyAzureBlobStorageFile(
if (size < settings->max_single_part_copy_size) if (size < settings->max_single_part_copy_size)
{ {
LOG_TRACE(getLogger("copyAzureBlobStorageFile"), "Copy blob sync {} -> {}", src_blob, dest_blob); LOG_TRACE(log, "Copy blob sync {} -> {}", src_blob, dest_blob);
block_blob_client_dest.CopyFromUri(source_uri); block_blob_client_dest.CopyFromUri(source_uri);
} }
else else
@ -368,7 +376,7 @@ void copyAzureBlobStorageFile(
if (copy_status.HasValue() && copy_status.Value() == Azure::Storage::Blobs::Models::CopyStatus::Success) if (copy_status.HasValue() && copy_status.Value() == Azure::Storage::Blobs::Models::CopyStatus::Success)
{ {
LOG_TRACE(getLogger("copyAzureBlobStorageFile"), "Copy of {} to {} finished", properties_model.CopySource.Value(), dest_blob); LOG_TRACE(log, "Copy of {} to {} finished", properties_model.CopySource.Value(), dest_blob);
} }
else else
{ {
@ -382,14 +390,14 @@ void copyAzureBlobStorageFile(
} }
else else
{ {
LOG_TRACE(&Poco::Logger::get("copyAzureBlobStorageFile"), "Reading from Container: {}, Blob: {}", src_container_for_logging, src_blob); LOG_TRACE(log, "Reading from Container: {}, Blob: {}", src_container_for_logging, src_blob);
auto create_read_buffer = [&] auto create_read_buffer = [&]
{ {
return std::make_unique<ReadBufferFromAzureBlobStorage>( return std::make_unique<ReadBufferFromAzureBlobStorage>(
src_client, src_blob, read_settings, settings->max_single_read_retries, settings->max_single_download_retries); src_client, src_blob, read_settings, settings->max_single_read_retries, settings->max_single_download_retries);
}; };
UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container_for_logging, dest_blob, settings, schedule, &Poco::Logger::get("copyAzureBlobStorageFile")}; UploadHelper helper{create_read_buffer, dest_client, offset, size, dest_container_for_logging, dest_blob, settings, schedule, log};
helper.performCopy(); helper.performCopy();
} }
} }

View File

@ -98,7 +98,6 @@ namespace
             size_t part_size;
             String tag;
             bool is_finished = false;
-            std::exception_ptr exception;
         };

         size_t num_parts;
@ -111,6 +110,7 @@ namespace
         size_t num_added_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
         size_t num_finished_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
         size_t num_finished_parts TSA_GUARDED_BY(bg_tasks_mutex) = 0;
+        std::exception_ptr bg_exception TSA_GUARDED_BY(bg_tasks_mutex);

         std::mutex bg_tasks_mutex;
         std::condition_variable bg_tasks_condvar;
@ -273,7 +273,7 @@ namespace
             }
             catch (...)
             {
-                tryLogCurrentException(__PRETTY_FUNCTION__);
+                tryLogCurrentException(log, fmt::format("While performing multipart upload of {}", dest_key));
                 // Multipart upload failed because it wasn't possible to schedule all the tasks.
                 // To avoid execution of already scheduled tasks we abort MultipartUpload.
                 abortMultipartUpload();
@ -385,7 +385,12 @@ namespace
                     }
                     catch (...)
                     {
-                        task->exception = std::current_exception();
+                        std::lock_guard lock(bg_tasks_mutex);
+                        if (!bg_exception)
+                        {
+                            tryLogCurrentException(log, fmt::format("While writing part #{}", task->part_number));
+                            bg_exception = std::current_exception(); /// The exception will be rethrown after all background tasks stop working.
+                        }
                     }
                     task_finish_notify();
                 }, Priority{});
@ -435,22 +440,21 @@ namespace
             /// Suppress warnings because bg_tasks_mutex is actually hold, but tsa annotations do not understand std::unique_lock
             bg_tasks_condvar.wait(lock, [this]() {return TSA_SUPPRESS_WARNING_FOR_READ(num_added_bg_tasks) == TSA_SUPPRESS_WARNING_FOR_READ(num_finished_bg_tasks); });

-            auto & tasks = TSA_SUPPRESS_WARNING_FOR_WRITE(bg_tasks);
-            for (auto & task : tasks)
-            {
-                if (task.exception)
-                {
-                    /// abortMultipartUpload() might be called already, see processUploadPartRequest().
-                    /// However if there were concurrent uploads at that time, those part uploads might or might not succeed.
-                    /// As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free
-                    /// all storage consumed by all parts.
-                    abortMultipartUpload();
-
-                    std::rethrow_exception(task.exception);
-                }
-
-                part_tags.push_back(task.tag);
-            }
+            auto exception = TSA_SUPPRESS_WARNING_FOR_READ(bg_exception);
+            if (exception)
+            {
+                /// abortMultipartUpload() might be called already, see processUploadPartRequest().
+                /// However if there were concurrent uploads at that time, those part uploads might or might not succeed.
+                /// As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free
+                /// all storage consumed by all parts.
+                abortMultipartUpload();
+
+                std::rethrow_exception(exception);
+            }
+
+            const auto & tasks = TSA_SUPPRESS_WARNING_FOR_READ(bg_tasks);
+            for (const auto & task : tasks)
+                part_tags.push_back(task.tag);
         }
}; };

View File

@ -73,66 +73,55 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & value)
     return false;
 }

-void OptimizeIfWithConstantConditionVisitor::visit(ASTPtr & current_ast)
+void OptimizeIfWithConstantConditionVisitorData::visit(ASTFunction & function_node, ASTPtr & ast)
 {
-    if (!current_ast)
-        return;
-
     checkStackSize();

-    for (ASTPtr & child : current_ast->children)
-    {
-        auto * function_node = child->as<ASTFunction>();
-        if (!function_node || function_node->name != "if")
-        {
-            visit(child);
-            continue;
-        }
-
-        if (!function_node->arguments)
-            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function 'if' (0 instead of 3)");
-
-        if (function_node->arguments->children.size() != 3)
-            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
-                "Wrong number of arguments for function 'if' ({} instead of 3)",
-                function_node->arguments->children.size());
-
-        visit(function_node->arguments);
-        const auto * args = function_node->arguments->as<ASTExpressionList>();
-
-        ASTPtr condition_expr = args->children[0];
-        ASTPtr then_expr = args->children[1];
-        ASTPtr else_expr = args->children[2];
-
-        bool condition;
-        if (tryExtractConstValueFromCondition(condition_expr, condition))
-        {
-            ASTPtr replace_ast = condition ? then_expr : else_expr;
-            ASTPtr child_copy = child;
-            String replace_alias = replace_ast->tryGetAlias();
-            String if_alias = child->tryGetAlias();
-
-            if (replace_alias.empty())
-            {
-                replace_ast->setAlias(if_alias);
-                child = replace_ast;
-            }
-            else
-            {
-                /// Only copy of one node is required here.
-                /// But IAST has only method for deep copy of subtree.
-                /// This can be a reason of performance degradation in case of deep queries.
-                ASTPtr replace_ast_deep_copy = replace_ast->clone();
-                replace_ast_deep_copy->setAlias(if_alias);
-                child = replace_ast_deep_copy;
-            }
-
-            if (!if_alias.empty())
-            {
-                auto alias_it = aliases.find(if_alias);
-                if (alias_it != aliases.end() && alias_it->second.get() == child_copy.get())
-                    alias_it->second = child;
-            }
-        }
-    }
+    if (function_node.name != "if")
+        return;
+
+    if (!function_node.arguments)
+        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function 'if' (0 instead of 3)");
+
+    if (function_node.arguments->children.size() != 3)
+        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+            "Wrong number of arguments for function 'if' ({} instead of 3)",
+            function_node.arguments->children.size());
+
+    const auto * args = function_node.arguments->as<ASTExpressionList>();
+
+    ASTPtr condition_expr = args->children[0];
+    ASTPtr then_expr = args->children[1];
+    ASTPtr else_expr = args->children[2];
+
+    bool condition;
+    if (tryExtractConstValueFromCondition(condition_expr, condition))
+    {
+        ASTPtr replace_ast = condition ? then_expr : else_expr;
+        ASTPtr child_copy = ast;
+        String replace_alias = replace_ast->tryGetAlias();
+        String if_alias = ast->tryGetAlias();
+
+        if (replace_alias.empty())
+        {
+            replace_ast->setAlias(if_alias);
+            ast = replace_ast;
+        }
+        else
+        {
+            /// Only copy of one node is required here.
+            /// But IAST has only method for deep copy of subtree.
+            /// This can be a reason of performance degradation in case of deep queries.
+            ASTPtr replace_ast_deep_copy = replace_ast->clone();
+            replace_ast_deep_copy->setAlias(if_alias);
+            ast = replace_ast_deep_copy;
+        }
+
+        if (!if_alias.empty())
+        {
+            auto alias_it = aliases.find(if_alias);
+            if (alias_it != aliases.end() && alias_it->second.get() == child_copy.get())
+                alias_it->second = ast;
+        }
+    }
 }

View File

@ -1,23 +1,24 @@
 #pragma once

 #include <Interpreters/Aliases.h>
+#include <Interpreters/InDepthNodeVisitor.h>

 namespace DB
 {

-/// It removes Function_if node from AST if condition is constant.
-/// TODO: rewrite with InDepthNodeVisitor
-class OptimizeIfWithConstantConditionVisitor
+struct OptimizeIfWithConstantConditionVisitorData
 {
-public:
-    explicit OptimizeIfWithConstantConditionVisitor(Aliases & aliases_)
+    using TypeToVisit = ASTFunction;
+
+    explicit OptimizeIfWithConstantConditionVisitorData(Aliases & aliases_)
         : aliases(aliases_)
     {}

-    void visit(ASTPtr & ast);
+    void visit(ASTFunction & function_node, ASTPtr & ast);

 private:
     Aliases & aliases;
 };

+/// It removes Function_if node from AST if condition is constant.
+using OptimizeIfWithConstantConditionVisitor = InDepthNodeVisitor<OneTypeMatcher<OptimizeIfWithConstantConditionVisitorData>, false>;
+
 }
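As a rough illustration of what the rewritten visitor does, here is a Python sketch of constant-if folding on a toy AST (the node layout is invented; ClickHouse uses IAST/ASTFunction and an InDepthNodeVisitor): an if() whose condition is a known constant is replaced by its then- or else-branch, and the alias of the original if() is carried over.

# Toy sketch of constant-if folding; not the ClickHouse AST API.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Node:
    kind: str                       # "literal", "identifier" or "function"
    value: object = None
    name: str = ""
    children: List["Node"] = field(default_factory=list)
    alias: Optional[str] = None

def fold_constant_if(node: Node) -> Node:
    # Fold children first, like an in-depth visitor would.
    for i, child in enumerate(node.children):
        node.children[i] = fold_constant_if(child)
    if node.kind == "function" and node.name == "if" and len(node.children) == 3:
        cond, then_branch, else_branch = node.children
        if cond.kind == "literal" and isinstance(cond.value, (bool, int)):
            replacement = then_branch if cond.value else else_branch
            if replacement.alias is None:       # keep the alias of the folded if()
                replacement.alias = node.alias
            return replacement
    return node

expr = Node("function", name="if", alias="flag", children=[
    Node("literal", value=1),
    Node("identifier", name="x"),
    Node("identifier", name="y"),
])
print(fold_constant_if(expr))   # identifier 'x' carrying alias 'flag'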

View File

@ -577,7 +577,8 @@ void TreeOptimizer::optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_to_multiif)
     optimizeMultiIfToIf(query);

     /// Optimize if with constant condition after constants was substituted instead of scalar subqueries.
-    OptimizeIfWithConstantConditionVisitor(aliases).visit(query);
+    OptimizeIfWithConstantConditionVisitorData visitor_data(aliases);
+    OptimizeIfWithConstantConditionVisitor(visitor_data).visit(query);

     if (if_chain_to_multiif)
         OptimizeIfChainsVisitor().visit(query);

View File

@ -5,20 +5,21 @@
#include <base/hex.h> #include <base/hex.h>
#include <base/interpolate.h> #include <base/interpolate.h>
#include <Common/FailPoint.h>
#include <Common/Macros.h> #include <Common/Macros.h>
#include <Common/MemoryTracker.h> #include <Common/MemoryTracker.h>
#include <Common/ProfileEventsScope.h> #include <Common/ProfileEventsScope.h>
#include <Common/StringUtils.h> #include <Common/StringUtils.h>
#include <Common/ThreadFuzzer.h>
#include <Common/ZooKeeper/KeeperException.h> #include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/Types.h> #include <Common/ZooKeeper/Types.h>
#include <Common/escapeForFileName.h> #include <Common/escapeForFileName.h>
#include <Common/formatReadable.h> #include <Common/formatReadable.h>
#include <Common/logger_useful.h>
#include <Common/noexcept_scope.h> #include <Common/noexcept_scope.h>
#include <Common/randomDelay.h>
#include <Common/thread_local_rng.h> #include <Common/thread_local_rng.h>
#include <Common/typeid_cast.h> #include <Common/typeid_cast.h>
#include <Common/ThreadFuzzer.h>
#include <Common/FailPoint.h>
#include <Common/randomDelay.h>
#include <Core/ServerUUID.h> #include <Core/ServerUUID.h>
@ -5272,6 +5273,8 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
if (shutdown_prepared_called.exchange(true)) if (shutdown_prepared_called.exchange(true))
return; return;
LOG_TRACE(log, "Start preparing for shutdown");
try try
{ {
auto settings_ptr = getSettings(); auto settings_ptr = getSettings();
@ -5282,7 +5285,11 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
stopBeingLeader(); stopBeingLeader();
if (attach_thread) if (attach_thread)
{
attach_thread->shutdown(); attach_thread->shutdown();
LOG_TRACE(log, "The attach thread is shutdown");
}
restarting_thread.shutdown(/* part_of_full_shutdown */true); restarting_thread.shutdown(/* part_of_full_shutdown */true);
/// Explicitly set the event, because the restarting thread will not set it again /// Explicitly set the event, because the restarting thread will not set it again
@ -5295,6 +5302,8 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown()
shutdown_deadline.emplace(std::chrono::system_clock::now()); shutdown_deadline.emplace(std::chrono::system_clock::now());
throw; throw;
} }
LOG_TRACE(log, "Finished preparing for shutdown");
} }
void StorageReplicatedMergeTree::partialShutdown() void StorageReplicatedMergeTree::partialShutdown()
@ -5332,6 +5341,8 @@ void StorageReplicatedMergeTree::shutdown(bool)
if (shutdown_called.exchange(true)) if (shutdown_called.exchange(true))
return; return;
LOG_TRACE(log, "Shutdown started");
flushAndPrepareForShutdown(); flushAndPrepareForShutdown();
if (!shutdown_deadline.has_value()) if (!shutdown_deadline.has_value())
@ -5374,6 +5385,7 @@ void StorageReplicatedMergeTree::shutdown(bool)
/// Wait for all of them /// Wait for all of them
std::lock_guard lock(data_parts_exchange_ptr->rwlock); std::lock_guard lock(data_parts_exchange_ptr->rwlock);
} }
LOG_TRACE(log, "Shutdown finished");
} }

View File

@ -15,3 +15,4 @@ warn_return_any = True
no_implicit_reexport = True no_implicit_reexport = True
strict_equality = True strict_equality = True
extra_checks = True extra_checks = True
ignore_missing_imports = True

View File

@ -15,7 +15,7 @@ import upload_result_helper
from build_check import get_release_or_pr from build_check import get_release_or_pr
from ci_config import CI from ci_config import CI
from ci_metadata import CiMetadata from ci_metadata import CiMetadata
from ci_utils import GHActions, normalize_string from ci_utils import GHActions, normalize_string, Shell
from clickhouse_helper import ( from clickhouse_helper import (
CiLogsCredentials, CiLogsCredentials,
ClickHouseHelper, ClickHouseHelper,
@ -53,6 +53,7 @@ from stopwatch import Stopwatch
from tee_popen import TeePopen from tee_popen import TeePopen
from ci_cache import CiCache from ci_cache import CiCache
from ci_settings import CiSettings from ci_settings import CiSettings
from ci_buddy import CIBuddy
from version_helper import get_version_from_repo from version_helper import get_version_from_repo
# pylint: disable=too-many-lines # pylint: disable=too-many-lines
@ -262,6 +263,8 @@ def check_missing_images_on_dockerhub(
def _pre_action(s3, indata, pr_info): def _pre_action(s3, indata, pr_info):
print("Clear dmesg")
Shell.run("sudo dmesg --clear ||:")
CommitStatusData.cleanup() CommitStatusData.cleanup()
JobReport.cleanup() JobReport.cleanup()
BuildResult.cleanup() BuildResult.cleanup()
@ -992,6 +995,10 @@ def main() -> int:
ci_settings, ci_settings,
args.skip_jobs, args.skip_jobs,
) )
if IS_CI and pr_info.is_pr:
ci_cache.filter_out_not_affected_jobs()
ci_cache.print_status() ci_cache.print_status()
if IS_CI and not pr_info.is_merge_queue: if IS_CI and not pr_info.is_merge_queue:
@ -1118,6 +1125,14 @@ def main() -> int:
### POST action: start ### POST action: start
elif args.post: elif args.post:
if Shell.check(
"sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'"
):
print("WARNING: OOM while job execution")
CIBuddy(dry_run=not pr_info.is_release).post_error(
"Out Of Memory", job_name=_get_ext_check_name(args.job_name)
)
job_report = JobReport.load() if JobReport.exist() else None job_report = JobReport.load() if JobReport.exist() else None
if job_report: if job_report:
ch_helper = ClickHouseHelper() ch_helper = ClickHouseHelper()

tests/ci/ci_buddy.py Normal file
View File

@ -0,0 +1,88 @@
import json
import os
import boto3
import requests
from botocore.exceptions import ClientError
from pr_info import PRInfo
from ci_utils import Shell
class CIBuddy:
_HEADERS = {"Content-Type": "application/json"}
def __init__(self, dry_run=False):
self.repo = os.getenv("GITHUB_REPOSITORY", "")
self.dry_run = dry_run
res = self._get_webhooks()
self.test_channel = ""
self.dev_ci_channel = ""
if res:
self.test_channel = json.loads(res)["test_channel"]
self.dev_ci_channel = json.loads(res)["ci_channel"]
self.job_name = os.getenv("CHECK_NAME", "unknown")
pr_info = PRInfo()
self.pr_number = pr_info.number
self.head_ref = pr_info.head_ref
self.commit_url = pr_info.commit_html_url
@staticmethod
def _get_webhooks():
name = "ci_buddy_web_hooks"
session = boto3.Session(region_name="us-east-1") # Replace with your region
ssm_client = session.client("ssm")
json_string = None
try:
response = ssm_client.get_parameter(
Name=name,
WithDecryption=True, # Set to True if the parameter is a SecureString
)
json_string = response["Parameter"]["Value"]
except ClientError as e:
print(f"An error occurred: {e}")
return json_string
def post(self, message, dry_run=None):
if dry_run is None:
dry_run = self.dry_run
print(f"Posting slack message, dry_run [{dry_run}]")
if dry_run:
url = self.test_channel
else:
url = self.dev_ci_channel
data = {"text": message}
try:
requests.post(url, headers=self._HEADERS, data=json.dumps(data), timeout=10)
except Exception as e:
print(f"ERROR: Failed to post message, ex {e}")
def post_error(self, error_description, job_name="", with_instance_info=True):
instance_id, instance_type = "unknown", "unknown"
if with_instance_info:
instance_id = Shell.run("ec2metadata --instance-id") or instance_id
instance_type = Shell.run("ec2metadata --instance-type") or instance_type
if not job_name:
job_name = os.getenv("CHECK_NAME", "unknown")
line_err = f":red_circle: *Error: {error_description}*\n\n"
line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n"
line_job = f" *Job:* `{job_name}`\n"
line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>\n"
line_br_ = f" *Branch:* `{self.head_ref}`, <{self.commit_url}|commit>\n"
message = line_err
message += line_job
if with_instance_info:
message += line_ghr
if self.pr_number > 0:
message += line_pr_
else:
message += line_br_
self.post(message)
if __name__ == "__main__":
# test
buddy = CIBuddy(dry_run=True)
buddy.post_error("Out of memory")
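For context, the OOM check added to ci.py above uses this class roughly as follows; the snippet only makes sense on a CI runner with AWS credentials and the CI environment variables set, and the job name is a made-up placeholder.

# Hypothetical usage mirroring the OOM check in ci.py above;
# "Stateless tests (asan)" is a placeholder job name.
from ci_buddy import CIBuddy

buddy = CIBuddy(dry_run=True)   # dry_run posts to the test channel
buddy.post_error("Out Of Memory", job_name="Stateless tests (asan)")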

View File

@ -674,6 +674,78 @@ class CiCache:
bucket=S3_BUILDS_BUCKET, file_path=result_json_path, s3_path=s3_path bucket=S3_BUILDS_BUCKET, file_path=result_json_path, s3_path=s3_path
) )
def filter_out_not_affected_jobs(self):
"""
Filter is to be applied in PRs to remove jobs that are not affected by the change
It removes jobs from @jobs_to_do if it is a:
1. test job and it is in @jobs_to_wait (no need to wait not affected jobs in PRs)
2. test job and it has finished on release branch (even if failed)
3. build job which is not required by any test job that is left in @jobs_to_do
:return:
"""
# 1.
remove_from_await_list = []
for job_name, job_config in self.jobs_to_wait.items():
if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK:
remove_from_await_list.append(job_name)
for job in remove_from_await_list:
print(f"Filter job [{job}] - test job and not affected by the change")
del self.jobs_to_wait[job]
del self.jobs_to_do[job]
# 2.
remove_from_to_do = []
for job_name, job_config in self.jobs_to_do.items():
if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK:
batches_to_remove = []
assert job_config.batches is not None
for batch in job_config.batches:
if self.is_failed(
job_name, batch, job_config.num_batches, release_branch=True
):
print(
f"Filter [{job_name}/{batch}] - not affected by the change (failed on release branch)"
)
batches_to_remove.append(batch)
for batch in batches_to_remove:
job_config.batches.remove(batch)
if not job_config.batches:
print(
f"Filter [{job_name}] - not affected by the change (failed on release branch)"
)
remove_from_to_do.append(job_name)
for job in remove_from_to_do:
del self.jobs_to_do[job]
# 3.
required_builds = [] # type: List[str]
for job_name, job_config in self.jobs_to_do.items():
if CI.is_test_job(job_name) and job_config.required_builds:
required_builds += job_config.required_builds
required_builds = list(set(required_builds))
remove_builds = [] # type: List[str]
has_builds_to_do = False
for job_name, job_config in self.jobs_to_do.items():
if CI.is_build_job(job_name):
if job_name not in required_builds:
remove_builds.append(job_name)
else:
has_builds_to_do = True
for build_job in remove_builds:
print(
f"Filter build job [{build_job}] - not affected and not required by test jobs"
)
del self.jobs_to_do[build_job]
if build_job in self.jobs_to_wait:
del self.jobs_to_wait[build_job]
if not has_builds_to_do and CI.JobNames.BUILD_CHECK in self.jobs_to_do:
print(f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds to do")
del self.jobs_to_do[CI.JobNames.BUILD_CHECK]
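A condensed sketch of the same three filtering rules on plain dictionaries (no CiCache/CI classes; the job names and required-build mapping are invented), to make the docstring above concrete:

# Plain-dict sketch of filter_out_not_affected_jobs; names are illustrative.
jobs_to_do = {
    "Stateless tests (asan)": {"requires": ["package_asan"], "kind": "test"},
    "Integration tests (tsan)": {"requires": ["package_tsan"], "kind": "test"},
    "package_asan": {"requires": [], "kind": "build"},
    "package_tsan": {"requires": [], "kind": "build"},
    "package_debug": {"requires": [], "kind": "build"},
}
jobs_to_wait = {"Integration tests (tsan)": {}}
failed_on_release = {"Integration tests (tsan)"}

# 1. pending test jobs are neither waited for nor run in PRs
for job in [j for j in jobs_to_wait if jobs_to_do[j]["kind"] == "test"]:
    jobs_to_wait.pop(job)
    jobs_to_do.pop(job, None)

# 2. test jobs that already finished (even failed) on the release branch are dropped
for job in [j for j, c in jobs_to_do.items()
            if c["kind"] == "test" and j in failed_on_release]:
    jobs_to_do.pop(job)

# 3. builds not required by any remaining test job are dropped
required = {b for c in jobs_to_do.values() if c["kind"] == "test"
            for b in c["requires"]}
for job in [j for j, c in jobs_to_do.items()
            if c["kind"] == "build" and j not in required]:
    jobs_to_do.pop(job)

print(sorted(jobs_to_do))   # ['Stateless tests (asan)', 'package_asan']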
def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None: def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
""" """
await pending jobs to be finished await pending jobs to be finished

View File

@ -378,7 +378,7 @@ class CommonJobConfigs:
         ),
         run_command='functional_test_check.py "$CHECK_NAME"',
         runner_type=Runners.FUNC_TESTER,
-        timeout=10800,
+        timeout=7200,
     )
     STATEFUL_TEST = JobConfig(
         job_name_keyword="stateful",

View File

@ -1,4 +1,5 @@
import os import os
import subprocess
from contextlib import contextmanager from contextlib import contextmanager
from pathlib import Path from pathlib import Path
from typing import Any, Iterator, List, Union from typing import Any, Iterator, List, Union
@ -42,3 +43,43 @@ class GHActions:
for line in lines: for line in lines:
print(line) print(line)
print("::endgroup::") print("::endgroup::")
class Shell:
@classmethod
def run_strict(cls, command):
subprocess.run(
command + " 2>&1",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=True,
)
@classmethod
def run(cls, command):
res = ""
result = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=False,
)
if result.returncode == 0:
res = result.stdout
return res.strip()
@classmethod
def check(cls, command):
result = subprocess.run(
command + " 2>&1",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=False,
)
return result.returncode == 0
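The new Shell helper is what ci.py uses for the dmesg-based OOM detection shown earlier; a minimal usage sketch (the grep pattern is copied from ci.py, the rest is illustrative and assumes the tests/ci environment):

# Minimal usage sketch of the Shell helper, mirroring the dmesg OOM check in ci.py.
from ci_utils import Shell

Shell.run("sudo dmesg --clear ||:")   # best effort; returns "" on failure

oom_happened = Shell.check(
    "sudo dmesg -T | grep -q -e 'Out of memory: Killed process'"
    " -e 'oom_reaper: reaped process'"
    " -e 'oom-kill:constraint=CONSTRAINT_NONE'"
)
if oom_happened:
    print("WARNING: OOM while job execution")

instance_id = Shell.run("ec2metadata --instance-id") or "unknown"
print(f"runner instance: {instance_id}")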

View File

@ -108,6 +108,7 @@ def get_run_command(
"--privileged " "--privileged "
f"{ci_logs_args}" f"{ci_logs_args}"
f"--volume={repo_path}/tests:/usr/share/clickhouse-test " f"--volume={repo_path}/tests:/usr/share/clickhouse-test "
f"--volume={repo_path}/utils/grpc-client:/usr/share/clickhouse-utils/grpc-client "
f"{volume_with_broken_test}" f"{volume_with_broken_test}"
f"--volume={result_path}:/test_output " f"--volume={result_path}:/test_output "
f"--volume={server_log_path}:/var/log/clickhouse-server " f"--volume={server_log_path}:/var/log/clickhouse-server "

View File

@ -1,4 +1,5 @@
"""Module to get the token for GitHub""" """Module to get the token for GitHub"""
from dataclasses import dataclass from dataclasses import dataclass
import json import json
import time import time

View File

@ -417,7 +417,7 @@ class TestCIConfig(unittest.TestCase):
         assert not ci_cache.jobs_to_skip
         assert not ci_cache.jobs_to_wait

-        # pretend there are pending jobs that we neet to wait
+        # pretend there are pending jobs that we need to wait
         ci_cache.jobs_to_wait = dict(ci_cache.jobs_to_do)
         for job, config in ci_cache.jobs_to_wait.items():
             assert not config.pending_batches
@ -489,3 +489,87 @@ class TestCIConfig(unittest.TestCase):
self.assertCountEqual( self.assertCountEqual(
list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, all_jobs_in_wf list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, all_jobs_in_wf
) )
def test_ci_py_filters_not_affected_jobs_in_prs(self):
"""
checks ci.py filters not affected jobs in PRs
"""
settings = CiSettings()
settings.no_ci_cache = True
pr_info = PRInfo(github_event=_TEST_EVENT_JSON)
pr_info.event_type = EventType.PUSH
pr_info.number = 0
assert pr_info.is_release and not pr_info.is_merge_queue
ci_cache = CIPY._configure_jobs(
S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True
)
self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list")
all_jobs_in_wf = list(ci_cache.jobs_to_do)
assert not ci_cache.jobs_to_wait
assert not ci_cache.jobs_to_skip
# pretend there are pending jobs that we need to wait
for job, job_config in ci_cache.jobs_to_do.items():
ci_cache.jobs_to_wait[job] = job_config
# remove a couple of tests from to_wait and
# expect they are preserved in @jobs_to_do along with required package_asan
del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_ASAN]
del ci_cache.jobs_to_wait[CI.JobNames.INTEGRATION_TEST_TSAN]
del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_MSAN]
# pretend we have some batches failed for one of the job from the to_do list
failed_job = CI.JobNames.INTEGRATION_TEST_TSAN
failed_job_config = ci_cache.jobs_to_do[failed_job]
FAILED_BATCHES = [0, 3]
for batch in FAILED_BATCHES:
assert batch < failed_job_config.num_batches
record = CiCache.Record(
record_type=CiCache.RecordType.FAILED,
job_name=failed_job,
job_digest=ci_cache.job_digests[failed_job],
batch=batch,
num_batches=failed_job_config.num_batches,
release_branch=True,
)
for record_t_, records_ in ci_cache.records.items():
if record_t_.value == CiCache.RecordType.FAILED.value:
records_[record.to_str_key()] = record
# pretend we have all batches failed for one of the job from the to_do list
failed_job = CI.JobNames.STATELESS_TEST_MSAN
failed_job_config = ci_cache.jobs_to_do[failed_job]
assert failed_job_config.num_batches > 1
for batch in range(failed_job_config.num_batches):
record = CiCache.Record(
record_type=CiCache.RecordType.FAILED,
job_name=failed_job,
job_digest=ci_cache.job_digests[failed_job],
batch=batch,
num_batches=failed_job_config.num_batches,
release_branch=True,
)
for record_t_, records_ in ci_cache.records.items():
if record_t_.value == CiCache.RecordType.FAILED.value:
records_[record.to_str_key()] = record
ci_cache.filter_out_not_affected_jobs()
expected_to_do = [
CI.JobNames.STATELESS_TEST_ASAN,
CI.BuildNames.PACKAGE_ASAN,
CI.JobNames.INTEGRATION_TEST_TSAN,
CI.BuildNames.PACKAGE_TSAN,
CI.JobNames.BUILD_CHECK,
]
self.assertCountEqual(
list(ci_cache.jobs_to_wait),
[
CI.BuildNames.PACKAGE_ASAN,
CI.BuildNames.PACKAGE_TSAN,
CI.JobNames.BUILD_CHECK,
],
)
self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)
self.assertTrue(ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches)
for batch in ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches:
self.assertTrue(batch not in FAILED_BATCHES)

View File

@ -1750,7 +1750,7 @@ class TestCase:
             return TestResult(
                 self.name,
                 TestStatus.FAIL,
-                FailureReason.INTERNAL_QUERY_FAIL,
+                FailureReason.TIMEOUT,
                 total_time,
                 self.add_info_about_settings(
                     self.get_description_from_exception_info(sys.exc_info())
@ -2189,11 +2189,26 @@ def run_tests_array(all_tests_with_params: Tuple[List[str], int, TestSuite, bool]):
         sys.stdout.flush()

         while True:
-            test_result = test_case.run(
-                args, test_suite, client_options, server_logs_level
-            )
-            test_result = test_case.process_result(test_result, MESSAGES)
-            if not test_result.need_retry:
+            # This is the upper level timeout
+            # It helps with completely frozen processes, like in case of gdb errors
+            def timeout_handler(signum, frame):
+                raise TimeoutError("Test execution timed out")
+
+            signal.signal(signal.SIGALRM, timeout_handler)
+            signal.alarm(int(args.timeout * 1.1))
+            test_result = None
+            try:
+                test_result = test_case.run(
+                    args, test_suite, client_options, server_logs_level
+                )
+                test_result = test_case.process_result(test_result, MESSAGES)
+                break
+            except TimeoutError:
+                break
+            finally:
+                signal.alarm(0)
+
+            if not test_result or not test_result.need_retry:
                 break
             restarted_tests.append(test_result)
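The per-test alarm above relies on the standard signal module; a self-contained sketch of the same pattern, assuming a Unix platform (SIGALRM is not available on Windows):

# Standalone sketch of the SIGALRM-based upper-level timeout used above.
import signal
import time

def run_with_timeout(fn, timeout_seconds):
    def timeout_handler(signum, frame):
        raise TimeoutError("Test execution timed out")

    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(int(timeout_seconds))
    try:
        return fn()
    except TimeoutError:
        return None
    finally:
        signal.alarm(0)          # always cancel the pending alarm

print(run_with_timeout(lambda: "done", 5))          # "done"
print(run_with_timeout(lambda: time.sleep(10), 2))  # None after ~2 seconds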
@ -2452,6 +2467,10 @@ def override_envs(*args_, **kwargs):
         run_tests_array(*args_, **kwargs)


+def run_tests_process(*args, **kwargs):
+    return run_tests_array(*args, **kwargs)
+
+
 def do_run_tests(jobs, test_suite: TestSuite):
     if jobs > 1 and len(test_suite.parallel_tests) > 0:
         print(
@ -2475,39 +2494,70 @@ def do_run_tests(jobs, test_suite: TestSuite):
         # of failures will be nearly the same for all tests from the group.
         random.shuffle(test_suite.parallel_tests)

-        batch_size = max(1, len(test_suite.parallel_tests) // jobs)
+        batch_size = max(1, (len(test_suite.parallel_tests) // jobs) + 1)
         parallel_tests_array = []
         for job in range(jobs):
             range_ = job * batch_size, job * batch_size + batch_size
             batch = test_suite.parallel_tests[range_[0] : range_[1]]
             parallel_tests_array.append((batch, batch_size, test_suite, True))

-        try:
-            with multiprocessing.Pool(processes=jobs + 1) as pool:
-                future = pool.map_async(run_tests_array, parallel_tests_array)
-
-                if args.run_sequential_tests_in_parallel:
-                    # Run parallel tests and sequential tests at the same time
-                    # Sequential tests will use different ClickHouse instance
-                    # In this process we can safely override values in `args` and `os.environ`
-                    future_seq = pool.map_async(
-                        override_envs,
-                        [
-                            (
-                                test_suite.sequential_tests,
-                                len(test_suite.sequential_tests),
-                                test_suite,
-                                False,
-                            )
-                        ],
-                    )
-                    future_seq.wait()
-
-                future.wait()
-        finally:
-            pool.terminate()
-            pool.close()
-            pool.join()
+        processes = []
+
+        for test_batch in parallel_tests_array:
+            process = multiprocessing.Process(
+                target=run_tests_process, args=(test_batch,)
+            )
+            processes.append(process)
+            process.start()
+
+        if args.run_sequential_tests_in_parallel:
+            # Run parallel tests and sequential tests at the same time
+            # Sequential tests will use different ClickHouse instance
+            # In this process we can safely override values in `args` and `os.environ`
+            process = multiprocessing.Process(
+                target=override_envs,
+                args=(
+                    (
+                        test_suite.sequential_tests,
+                        len(test_suite.sequential_tests),
+                        test_suite,
+                        False,
+                    ),
+                ),
+            )
+            processes.append(process)
+            process.start()
+
+        while processes:
+            sys.stdout.flush()
+            # Periodically check the server for hangs
+            # and stop all processes in this case
+            try:
+                clickhouse_execute(
+                    args,
+                    query="SELECT 1 /*hang up check*/",
+                    max_http_retries=5,
+                    timeout=20,
+                )
+            except Exception:
+                print("Hang up check failed")
+                server_died.set()
+
+            if server_died.is_set():
+                print("Server died, terminating all processes...")
+                kill_gdb_if_any()
+                # Wait for test results
+                sleep(args.timeout)
+                for p in processes:
+                    if p.is_alive():
+                        p.terminate()
+                break
+
+            for p in processes[:]:
+                if not p.is_alive():
+                    processes.remove(p)
+
+            sleep(5)

         if not args.run_sequential_tests_in_parallel:
             run_tests_array(
@ -3358,6 +3408,14 @@ def parse_args():
     return parser.parse_args()


+class Terminated(KeyboardInterrupt):
+    pass
+
+
+def signal_handler(sig, frame):
+    raise Terminated(f"Terminated with {sig} signal")
+
+
 if __name__ == "__main__":
     stop_time = None
     exit_code = multiprocessing.Value("i", 0)
@ -3369,6 +3427,9 @@ if __name__ == "__main__":
     # infinite tests processes left
     # (new process group is required to avoid killing some parent processes)
     os.setpgid(0, 0)
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGHUP, signal_handler)

     try:
         args = parse_args()
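The Terminated subclass above turns SIGTERM/SIGINT/SIGHUP into an exception that the existing KeyboardInterrupt handling already knows how to unwind; a tiny standalone sketch of that idea, assuming a Unix platform:

# Standalone sketch: converting termination signals into a KeyboardInterrupt
# subclass so ordinary try/except KeyboardInterrupt cleanup code handles them.
import os
import signal

class Terminated(KeyboardInterrupt):
    pass

def signal_handler(sig, frame):
    raise Terminated(f"Terminated with {sig} signal")

for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP):
    signal.signal(sig, signal_handler)

try:
    os.kill(os.getpid(), signal.SIGTERM)   # simulate an external kill
except KeyboardInterrupt as e:
    print("clean shutdown:", e)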

View File

@ -0,0 +1,3 @@
<clickhouse>
<grpc_port>9100</grpc_port>
</clickhouse>

View File

@ -27,6 +27,7 @@ ln -sf $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/graphite_alternative.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/graphite_alternative.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/grpc_protocol.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/max_concurrent_queries.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/max_concurrent_queries.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/merge_tree_settings.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/merge_tree_settings.xml $DEST_SERVER_PATH/config.d/

View File

@ -78,13 +78,13 @@ def wait_rabbitmq_to_start(rabbitmq_docker_id, cookie, timeout=180):

 def kill_rabbitmq(rabbitmq_id):
     p = subprocess.Popen(("docker", "stop", rabbitmq_id), stdout=subprocess.PIPE)
-    p.communicate()
+    p.wait(timeout=60)
     return p.returncode == 0


 def revive_rabbitmq(rabbitmq_id, cookie):
     p = subprocess.Popen(("docker", "start", rabbitmq_id), stdout=subprocess.PIPE)
-    p.communicate()
+    p.wait(timeout=60)
     wait_rabbitmq_to_start(rabbitmq_id, cookie)
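The switch from communicate() to wait(timeout=60) bounds how long the fixture can block on a stuck docker CLI; a minimal sketch of handling the timeout case ("abc123" is a placeholder container id, and docker must be installed for the call to succeed):

# Minimal sketch of a bounded wait on a subprocess, as used above.
import subprocess

p = subprocess.Popen(("docker", "stop", "abc123"), stdout=subprocess.PIPE)
try:
    p.wait(timeout=60)
except subprocess.TimeoutExpired:
    p.kill()          # don't hang the test fixture on a stuck docker CLI
    p.wait()
print("docker stop returned", p.returncode)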

View File

@ -17,3 +17,27 @@ ENGINE = MergeTree ORDER BY conversation;
INSERT INTO t2(conversation) VALUES (42); INSERT INTO t2(conversation) VALUES (42);
select * from t2; select * from t2;
drop table t1;
INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE }
drop table t2;
CREATE TABLE t2 (
`conversation` UInt64,
CONSTRAINT constraint_conversation CHECK conversation IN (SELECT id FROM t1)
)
ENGINE = MergeTree ORDER BY conversation;
INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE }
CREATE TABLE t1 (
`id` UInt64
)
ENGINE = MergeTree ORDER BY id;
INSERT INTO t1(id) VALUES (42);
INSERT INTO t2(conversation) VALUES (42);
select * from t2;

View File

@ -0,0 +1,21 @@
2020-01-01
2020-01-01
2020-01-01 00:00:00
2020-01-01 00:00:00.000000
127.0.0.1
2001:db8:85a3::8a2e:370:7334
42
42
42
42
42
42
42
42
42
42
Hello
Hello
\0\0\0
Hello\0\0\0\0\0
5801c962-1182-458a-89f8-d077da5074f9

View File

@ -0,0 +1,29 @@
set allow_suspicious_low_cardinality_types=1;
select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date');
select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date32');
select JSONExtract('{"a" : "2020-01-01 00:00:00"}', 'a', 'DateTime');
select JSONExtract('{"a" : "2020-01-01 00:00:00.000000"}', 'a', 'DateTime64(6)');
select JSONExtract('{"a" : "127.0.0.1"}', 'a', 'IPv4');
select JSONExtract('{"a" : "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}', 'a', 'IPv6');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt8)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int8)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt16)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int16)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt64)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int64)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)');
select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(String)');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(5))');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(3))');
select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(10))');
select JSONExtract('{"a" : "5801c962-1182-458a-89f8-d077da5074f9"}', 'a', 'LowCardinality(UUID)');

View File

@ -0,0 +1,30 @@
true Bool
42 Int64
-42 Int64
18446744073709551615 UInt64
42.42 Float64
42 Int64
-42 Int64
18446744073709551615 UInt64
Hello String
2020-01-01 Date
2020-01-01 00:00:00.000000000 DateTime64(9)
[1,2,3] Array(Nullable(Int64))
['str1','str2','str3'] Array(Nullable(String))
[[[1],[2,3,4]],[[5,6],[7]]] Array(Array(Array(Nullable(Int64))))
['2020-01-01 00:00:00.000000000','2020-01-01 00:00:00.000000000'] Array(Nullable(DateTime64(9)))
['2020-01-01','2020-01-01 date'] Array(Nullable(String))
['2020-01-01','2020-01-01 00:00:00','str'] Array(Nullable(String))
['2020-01-01','2020-01-01 00:00:00','42'] Array(Nullable(String))
['str','42'] Array(Nullable(String))
[42,42.42] Array(Nullable(Float64))
[42,18446744073709552000,42.42] Array(Nullable(Float64))
[42,42.42] Array(Nullable(Float64))
[NULL,NULL] Array(Nullable(String))
[NULL,42] Array(Nullable(Int64))
[[NULL],[],[42]] Array(Array(Nullable(Int64)))
[[],[NULL,NULL],[1,NULL,3],[NULL,2,NULL]] Array(Array(Nullable(Int64)))
[[],[NULL,NULL],['1',NULL,'3'],[NULL,'2',NULL],['2020-01-01']] Array(Array(Nullable(String)))
('str',42,[42]) Tuple(Nullable(String), Nullable(Int64), Array(Nullable(Int64)))
[42,18446744073709551615] Array(Nullable(UInt64))
(-42,18446744073709551615) Tuple(Nullable(Int64), Nullable(UInt64))

View File

@ -0,0 +1,37 @@
set input_format_json_try_infer_numbers_from_strings=1;
select JSONExtract(materialize('{"d" : true}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : -42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 18446744073709551615}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : 42.42}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "42"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "-42"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "18446744073709551615"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "Hello"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "2020-01-01"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : "2020-01-01 00:00:00.000"}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [1, 2, 3]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["str1", "str2", "str3"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [[[1], [2, 3, 4]], [[5, 6], [7]]]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 date"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "str"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["str", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 18446744073709551615, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [null, null]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [null, 42]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [[null], [], [42]]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null]]}'), 'a', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null], ["2020-01-01"]]}'), 'a', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : ["str", 42, [42]]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [42, 18446744073709551615]}'), 'd', 'Dynamic') as d, dynamicType(d);
select JSONExtract(materialize('{"d" : [-42, 18446744073709551615]}'), 'd', 'Dynamic') as d, dynamicType(d);

View File

@ -0,0 +1 @@
ok

View File

@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so the grpc library is not built
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
if [[ -z "$CLICKHOUSE_GRPC_CLIENT" ]]; then
CLICKHOUSE_GRPC_CLIENT="$CURDIR/../../../utils/grpc-client/clickhouse-grpc-client.py"
fi
# Simple test.
$CLICKHOUSE_GRPC_CLIENT --query "SELECT 'ok'"

View File

@ -1900,11 +1900,13 @@ kurtosis
kurtpop kurtpop
kurtsamp kurtsamp
laion laion
lagInFrame
lang lang
laravel laravel
largestTriangleThreeBuckets largestTriangleThreeBuckets
latencies latencies
ldap ldap
leadInFrame
leftPad leftPad
leftPadUTF leftPadUTF
leftUTF leftUTF

View File

@ -0,0 +1,52 @@
#!/usr/bin/env python3
# This is a helper utility.
# It generates files in the "pb2" folder using the protocol buffer compiler.
# This script must be called manually after any change of "clickhouse_grpc.proto"
import grpc_tools # pip3 install grpcio-tools
import os, shutil, subprocess
# Settings.
script_path = os.path.realpath(__file__)
script_name = os.path.basename(script_path)
script_dir = os.path.dirname(script_path)
root_dir = os.path.abspath(os.path.join(script_dir, "../.."))
grpc_proto_dir = os.path.abspath(os.path.join(root_dir, "src/Server/grpc_protos"))
grpc_proto_filename = "clickhouse_grpc.proto"
# Files in the "pb2" folder which will be generated by this script.
pb2_filenames = ["clickhouse_grpc_pb2.py", "clickhouse_grpc_pb2_grpc.py"]
pb2_dir = os.path.join(script_dir, "pb2")
# Processes the protobuf schema with the protocol buffer compiler and generates the "pb2" folder.
def generate_pb2():
print(f"Generating files:")
for pb2_filename in pb2_filenames:
print(os.path.join(pb2_dir, pb2_filename))
os.makedirs(pb2_dir, exist_ok=True)
cmd = [
"python3",
"-m",
"grpc_tools.protoc",
"-I" + grpc_proto_dir,
"--python_out=" + pb2_dir,
"--grpc_python_out=" + pb2_dir,
os.path.join(grpc_proto_dir, grpc_proto_filename),
]
subprocess.run(cmd)
for pb2_filename in pb2_filenames:
assert os.path.exists(os.path.join(pb2_dir, pb2_filename))
print("Done! (generate_pb2)")
# MAIN
if __name__ == "__main__":
generate_pb2()

View File

@ -1,29 +0,0 @@
#!/usr/bin/env python3
import grpc_tools # pip3 install grpcio-tools
import os
import subprocess
script_dir = os.path.dirname(os.path.realpath(__file__))
dest_dir = script_dir
src_dir = os.path.abspath(os.path.join(script_dir, "../../../src/Server/grpc_protos"))
src_filename = "clickhouse_grpc.proto"
def generate():
cmd = [
"python3",
"-m",
"grpc_tools.protoc",
"-I" + src_dir,
"--python_out=" + dest_dir,
"--grpc_python_out=" + dest_dir,
os.path.join(src_dir, src_filename),
]
subprocess.run(cmd)
if __name__ == "__main__":
generate()