Merge branch 'master' into Azure_fix_stateless_tests

Smita Kulkarni 2024-05-27 19:33:05 +02:00
commit b22f1891de
90 changed files with 1360 additions and 322 deletions

View File

@ -994,25 +994,681 @@ Result:
└─────────────────────────────────────────────┘
```
## reinterpretAsUInt8
Performs byte reinterpretation by treating the input value as a value of type UInt8. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt8(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt8. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt8. [UInt8](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toInt8(257) AS x,
toTypeName(x),
reinterpretAsUInt8(x) AS res,
toTypeName(res);
```
Result:
```response
┌─x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 1 │ Int8 │ 1 │ UInt8 │
└───┴───────────────┴─────┴─────────────────┘
```
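The byte-level (rather than value-level) nature of the conversion is easiest to see with a negative input; a small illustrative query, with the expected values noted in comments:
```sql
-- Int8 -1 is stored as the single byte 0xFF;
-- reading that byte back as UInt8 gives 255.
SELECT
    toInt8(-1) AS x,
    reinterpretAsUInt8(x) AS res; -- x = -1, res = 255
```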
## reinterpretAsUInt16
Performs byte reinterpretation by treating the input value as a value of type UInt16. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt16(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt16. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt16. [UInt16](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toUInt8(257) AS x,
toTypeName(x),
reinterpretAsUInt16(x) AS res,
toTypeName(res);
```
Result:
```response
┌─x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 1 │ UInt8 │ 1 │ UInt16 │
└───┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsUInt32
Performs byte reinterpretation by treating the input value as a value of type UInt32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt32(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt32. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt32. [UInt32](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toUInt16(257) AS x,
toTypeName(x),
reinterpretAsUInt32(x) AS res,
toTypeName(res)
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ UInt16 │ 257 │ UInt32 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsUInt64
Performs byte reinterpretation by treating the input value as a value of type UInt64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt64(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt64. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt64. [UInt64](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toUInt32(257) AS x,
toTypeName(x),
reinterpretAsUInt64(x) AS res,
toTypeName(res)
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ UInt32 │ 257 │ UInt64 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsUInt128
Performs byte reinterpretation by treating the input value as a value of type UInt128. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt128(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt128. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt128. [UInt128](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toUInt64(257) AS x,
toTypeName(x),
reinterpretAsUInt128(x) AS res,
toTypeName(res)
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ UInt64 │ 257 │ UInt128 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsUInt256
Performs byte reinterpretation by treating the input value as a value of type UInt256. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsUInt256(x)
```
**Parameters**
- `x`: value to byte reinterpret as UInt256. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as UInt256. [UInt256](../data-types/int-uint.md/#uint8-uint16-uint32-uint64-uint128-uint256-int8-int16-int32-int64-int128-int256).
**Example**
Query:
```sql
SELECT
toUInt128(257) AS x,
toTypeName(x),
reinterpretAsUInt256(x) AS res,
toTypeName(res)
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ UInt128 │ 257 │ UInt256 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt8
Performs byte reinterpretation by treating the input value as a value of type Int8. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt8(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int8. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int8. [Int8](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toUInt8(257) AS x,
toTypeName(x),
reinterpretAsInt8(x) AS res,
toTypeName(res);
```
Result:
```response
┌─x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 1 │ UInt8 │ 1 │ Int8 │
└───┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt16
Performs byte reinterpretation by treating the input value as a value of type Int16. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt16(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int16. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int16. [Int16](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toInt8(257) AS x,
toTypeName(x),
reinterpretAsInt16(x) AS res,
toTypeName(res);
```
Result:
```response
┌─x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 1 │ Int8 │ 1 │ Int16 │
└───┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt32
Performs byte reinterpretation by treating the input value as a value of type Int32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt32(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int32. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int32. [Int32](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toInt16(257) AS x,
toTypeName(x),
reinterpretAsInt32(x) AS res,
toTypeName(res);
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ Int16 │ 257 │ Int32 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt64
Performs byte reinterpretation by treating the input value as a value of type Int64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt64(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int64. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int64. [Int64](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toInt32(257) AS x,
toTypeName(x),
reinterpretAsInt64(x) AS res,
toTypeName(res);
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ Int32 │ 257 │ Int64 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt128
Performs byte reinterpretation by treating the input value as a value of type Int128. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt128(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int128. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int128. [Int128](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toInt64(257) AS x,
toTypeName(x),
reinterpretAsInt128(x) AS res,
toTypeName(res);
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ Int64 │ 257 │ Int128 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsInt256
Performs byte reinterpretation by treating the input value as a value of type Int256. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsInt256(x)
```
**Parameters**
- `x`: value to byte reinterpret as Int256. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Int256. [Int256](../data-types/int-uint.md/#int-ranges).
**Example**
Query:
```sql
SELECT
toInt128(257) AS x,
toTypeName(x),
reinterpretAsInt256(x) AS res,
toTypeName(res);
```
Result:
```response
┌───x─┬─toTypeName(x)─┬─res─┬─toTypeName(res)─┐
│ 257 │ Int128 │ 257 │ Int256 │
└─────┴───────────────┴─────┴─────────────────┘
```
## reinterpretAsFloat32
Performs byte reinterpretation by treating the input value as a value of type Float32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsFloat32(x)
```
**Parameters**
- `x`: value to reinterpret as Float32. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Float32. [Float32](../data-types/float.md).
**Example**
Query:
```sql
SELECT reinterpretAsUInt32(toFloat32(0.2)) as x, reinterpretAsFloat32(x);
```
Result:
```response
┌──────────x─┬─reinterpretAsFloat32(x)─┐
│ 1045220557 │ 0.2 │
└────────────┴─────────────────────────┘
```
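Reinterpreting an arbitrary integer as Float32 illustrates the "output is meaningless" caveat above: the bit pattern is reused verbatim as an IEEE 754 value. For example:
```sql
-- Bit pattern 0x00000001 is the smallest positive Float32 denormal,
-- approximately 1.4e-45.
SELECT reinterpretAsFloat32(toUInt32(1));
```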
## reinterpretAsFloat64
Performs byte reinterpretation by treating the input value as a value of type Float64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless.
**Syntax**
```sql
reinterpretAsFloat64(x)
```
**Parameters**
- `x`: value to reinterpret as Float64. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Reinterpreted value `x` as Float64. [Float64](../data-types/float.md).
**Example**
Query:
```sql
SELECT reinterpretAsUInt64(toFloat64(0.2)) as x, reinterpretAsFloat64(x);
```
Result:
```response
┌───────────────────x─┬─reinterpretAsFloat64(x)─┐
│ 4596373779694328218 │ 0.2 │
└─────────────────────┴─────────────────────────┘
```
## reinterpretAsDate
Accepts a string, fixed string or numeric value and interprets the bytes as a number in host order (little endian). It returns a date, treating the interpreted number as the number of days since the beginning of the Unix Epoch.
**Syntax**
```sql
reinterpretAsDate(x)
```
**Parameters**
- `x`: number of days since the beginning of the Unix Epoch. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Date. [Date](../data-types/date.md).
**Implementation details**
:::note
If the provided string isn't long enough, the function works as if the string is padded with the necessary number of null bytes. If the string is longer than needed, the extra bytes are ignored.
:::
**Example**
Query:
```sql
SELECT reinterpretAsDate(65), reinterpretAsDate('A');
```
Result:
```response
┌─reinterpretAsDate(65)─┬─reinterpretAsDate('A')─┐
│ 1970-03-07 │ 1970-03-07 │
└───────────────────────┴────────────────────────┘
```
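Because the bytes are read in little-endian order, the first character of a multi-byte string becomes the low byte of the day number. A sketch (the stated date assumes the arithmetic 0x4241 = 16961 days):
```sql
-- 'A' (0x41) is the low byte and 'B' (0x42) the high byte,
-- so the value is 0x4241 = 16961 days after the epoch, i.e. 2016-06-09.
SELECT reinterpretAsDate('AB');
```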
## reinterpretAsDateTime
Accepts a string and interprets the bytes placed at the beginning of the string as a number in host order (little endian). Returns a date with time, treating the interpreted number as the number of seconds since the beginning of the Unix Epoch.
**Syntax**
```sql
reinterpretAsDateTime(x)
```
**Parameters**
- `x`: number of seconds since the beginning of the Unix Epoch. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md), [UUID](../data-types/uuid.md), [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
**Returned value**
- Date and Time. [DateTime](../data-types/datetime.md).
**Implementation details**
:::note
If the provided string isn't long enough, the function works as if the string is padded with the necessary number of null bytes. If the string is longer than needed, the extra bytes are ignored.
:::
**Example**
Query:
```sql
SELECT reinterpretAsDateTime(65), reinterpretAsDateTime('A');
```
Result:
```response
┌─reinterpretAsDateTime(65)─┬─reinterpretAsDateTime('A')─┐
│ 1970-01-01 01:01:05 │ 1970-01-01 01:01:05 │
└───────────────────────────┴────────────────────────────┘
```
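The rendered value depends on the server time zone; the results above correspond to a UTC+1 server. A round number of seconds makes the offset easy to spot:
```sql
-- 3600 seconds after the epoch is 1970-01-01 01:00:00 in UTC;
-- the displayed value is shifted by the server time zone offset.
SELECT reinterpretAsDateTime(3600);
```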
## reinterpretAsString
This function accepts a number, date or date with time and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
**Syntax**
```sql
reinterpretAsString(x)
```
**Parameters**
- `x`: value to reinterpret as a string. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md).
**Returned value**
- String containing bytes representing `x`. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT
reinterpretAsString(toDateTime('1970-01-01 01:01:05')),
reinterpretAsString(toDate('1970-03-07'));
```
Result:
```response
┌─reinterpretAsString(toDateTime('1970-01-01 01:01:05'))─┬─reinterpretAsString(toDate('1970-03-07'))─┐
│ A │ A │
└────────────────────────────────────────────────────────┴───────────────────────────────────────────┘
```
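The dropping of trailing null bytes can be verified directly:
```sql
SELECT
    reinterpretAsString(toUInt32(255)) AS s, -- bytes FF 00 00 00; trailing nulls dropped
    length(s);                               -- 1
```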
## reinterpretAsFixedString
This function accepts a number, date or date with time and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
**Syntax**
```sql
reinterpretAsFixedString(x)
```
**Parameters**
- `x`: value to reinterpret as a fixed string. [(U)Int*](../data-types/int-uint.md), [Float](../data-types/float.md), [Date](../data-types/date.md), [DateTime](../data-types/datetime.md).
**Returned value**
- Fixed string containing bytes representing `x`. [FixedString](../data-types/fixedstring.md).
**Example**
Query:
```sql
SELECT
reinterpretAsFixedString(toDateTime('1970-01-01 01:01:05')),
reinterpretAsFixedString(toDate('1970-03-07'));
```
Result:
```response
┌─reinterpretAsFixedString(toDateTime('1970-01-01 01:01:05'))─┬─reinterpretAsFixedString(toDate('1970-03-07'))─┐
│ A │ A │
└─────────────────────────────────────────────────────────────┴────────────────────────────────────────────────┘
```
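Hex-dumping the result makes the little-endian byte order visible:
```sql
-- 257 = 0x0101, stored as bytes 01 01, so the hex dump is '0101'.
SELECT hex(reinterpretAsFixedString(toUInt16(257)));
```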
## reinterpretAsUUID
@ -1020,7 +1676,7 @@ This function accepts a number or date or date with time and returns a FixedStri
In addition to the UUID functions listed here, there is dedicated [UUID function documentation](../functions/uuid-functions.md).
:::
Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
**Syntax**

View File

@ -10,7 +10,6 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int KEEPER_EXCEPTION;
}
@ -442,7 +441,7 @@ void ReconfigCommand::execute(const DB::ASTKeeperQuery * query, DB::KeeperClient
new_members = query->args[1].safeGet<String>();
break;
default:
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected operation: {}", operation);
UNREACHABLE();
}
auto response = client->zookeeper->reconfig(joining, leaving, new_members);

View File

@ -155,8 +155,8 @@ auto instructionFailToString(InstructionFail fail)
ret("AVX2");
case InstructionFail::AVX512:
ret("AVX512");
#undef ret
}
UNREACHABLE();
}

View File

@ -144,7 +144,8 @@ AccessEntityPtr deserializeAccessEntity(const String & definition, const String
catch (Exception & e)
{
e.addMessage("Could not parse " + file_path);
throw;
e.rethrow();
UNREACHABLE();
}
}

View File

@ -258,7 +258,7 @@ namespace
case TABLE_LEVEL: return AccessFlags::allFlagsGrantableOnTableLevel();
case COLUMN_LEVEL: return AccessFlags::allFlagsGrantableOnColumnLevel();
}
chassert(false);
UNREACHABLE();
}
}

View File

@ -257,7 +257,8 @@ std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & mu
}
e.addMessage("After successfully inserting {}/{}: {}", successfully_inserted.size(), multiple_entities.size(), successfully_inserted_str);
}
throw;
e.rethrow();
UNREACHABLE();
}
}
@ -360,7 +361,8 @@ std::vector<UUID> IAccessStorage::remove(const std::vector<UUID> & ids, bool thr
}
e.addMessage("After successfully removing {}/{}: {}", removed_names.size(), ids.size(), removed_names_str);
}
throw;
e.rethrow();
UNREACHABLE();
}
}
@ -456,7 +458,8 @@ std::vector<UUID> IAccessStorage::update(const std::vector<UUID> & ids, const Up
}
e.addMessage("After successfully updating {}/{}: {}", names_of_updated.size(), ids.size(), names_of_updated_str);
}
throw;
e.rethrow();
UNREACHABLE();
}
}

View File

@ -60,13 +60,14 @@ struct GroupArrayTrait
template <typename Trait>
constexpr const char * getNameByTrait()
{
if constexpr (Trait::last)
if (Trait::last)
return "groupArrayLast";
switch (Trait::sampler)
{
case Sampler::NONE: return "groupArray";
case Sampler::RNG: return "groupArraySample";
}
if (Trait::sampler == Sampler::NONE)
return "groupArray";
else if (Trait::sampler == Sampler::RNG)
return "groupArraySample";
UNREACHABLE();
}
template <typename T>
@ -752,10 +753,11 @@ size_t getMaxArraySize()
return 0xFFFFFF;
}
bool hasLimitArraySize()
bool discardOnLimitReached()
{
if (auto context = Context::getGlobalContextInstance())
return context->getServerSettings().aggregate_function_group_array_has_limit_size;
return context->getServerSettings().aggregate_function_group_array_action_when_limit_is_reached
== GroupArrayActionWhenLimitReached::DISCARD;
return false;
}
@ -766,7 +768,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
{
assertUnary(name, argument_types);
bool limit_size = hasLimitArraySize();
bool has_limit = discardOnLimitReached();
UInt64 max_elems = getMaxArraySize();
if (parameters.empty())
@ -783,14 +785,14 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
(type == Field::Types::UInt64 && parameters[0].get<UInt64>() == 0))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
limit_size = true;
has_limit = true;
max_elems = parameters[0].get<UInt64>();
}
else
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Incorrect number of parameters for aggregate function {}, should be 0 or 1", name);
if (!limit_size)
if (!has_limit)
{
if (Tlast)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())");

View File

@ -414,6 +414,7 @@ public:
break;
return (i == events_size) ? base - i : unmatched_idx;
}
UNREACHABLE();
}
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

View File

@ -463,6 +463,7 @@ public:
return "sumWithOverflow";
else if constexpr (Type == AggregateFunctionTypeSumKahan)
return "sumKahan";
UNREACHABLE();
}
explicit AggregateFunctionSum(const DataTypes & argument_types_)

View File

@ -24,6 +24,9 @@ void ArrayJoinNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_stat
buffer << std::string(indent, ' ') << "ARRAY_JOIN id: " << format_state.getNodeId(this);
buffer << ", is_left: " << is_left;
if (hasAlias())
buffer << ", alias: " << getAlias();
buffer << '\n' << std::string(indent + 2, ' ') << "TABLE EXPRESSION\n";
getTableExpression()->dumpTreeImpl(buffer, format_state, indent + 4);

View File

@ -99,6 +99,23 @@ bool checkIfGroupAlwaysTrueGraph(const Analyzer::CNF::OrGroup & group, const Com
return false;
}
bool checkIfGroupAlwaysTrueAtoms(const Analyzer::CNF::OrGroup & group)
{
/// Filters out groups containing mutually exclusive atoms,
/// since these groups are always True
for (const auto & atom : group)
{
auto negated(atom);
negated.negative = !atom.negative;
if (group.contains(negated))
{
return true;
}
}
return false;
}
bool checkIfAtomAlwaysFalseFullMatch(const Analyzer::CNF::AtomicFormula & atom, const ConstraintsDescription::QueryTreeData & query_tree_constraints)
{
const auto constraint_atom_ids = query_tree_constraints.getAtomIds(atom.node_with_hash);
@ -644,7 +661,8 @@ void optimizeWithConstraints(Analyzer::CNF & cnf, const QueryTreeNodes & table_e
cnf.filterAlwaysTrueGroups([&](const auto & group)
{
/// remove always true groups from CNF
return !checkIfGroupAlwaysTrueFullMatch(group, query_tree_constraints) && !checkIfGroupAlwaysTrueGraph(group, compare_graph);
return !checkIfGroupAlwaysTrueFullMatch(group, query_tree_constraints)
&& !checkIfGroupAlwaysTrueGraph(group, compare_graph) && !checkIfGroupAlwaysTrueAtoms(group);
})
.filterAlwaysFalseAtoms([&](const Analyzer::CNF::AtomicFormula & atom)
{

View File

@ -607,6 +607,10 @@ struct ScopeAliases
std::unordered_set<QueryTreeNodePtr> nodes_with_duplicated_aliases;
std::vector<QueryTreeNodePtr> cloned_nodes_with_duplicated_aliases;
/// Names which are aliases from ARRAY JOIN.
/// This is needed to properly qualify columns from matchers and avoid name collision.
std::unordered_set<std::string> array_join_aliases;
std::unordered_map<std::string, QueryTreeNodePtr> & getAliasMap(IdentifierLookupContext lookup_context)
{
switch (lookup_context)
@ -1526,7 +1530,7 @@ private:
ProjectionNames resolveFunction(QueryTreeNodePtr & function_node, IdentifierResolveScope & scope);
ProjectionNames resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression);
ProjectionNames resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression, bool ignore_alias = false);
ProjectionNames resolveExpressionNodeList(QueryTreeNodePtr & node_list, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression);
@ -2858,7 +2862,7 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromExpressionArguments(cons
bool QueryAnalyzer::tryBindIdentifierToAliases(const IdentifierLookup & identifier_lookup, const IdentifierResolveScope & scope)
{
return scope.aliases.find(identifier_lookup, ScopeAliases::FindOption::FIRST_NAME) != nullptr;
return scope.aliases.find(identifier_lookup, ScopeAliases::FindOption::FIRST_NAME) != nullptr || scope.aliases.array_join_aliases.contains(identifier_lookup.identifier.front());
}
/** Resolve identifier from scope aliases.
@ -3889,12 +3893,39 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromArrayJoin(const Identifi
{
auto & array_join_column_expression_typed = array_join_column_expression->as<ColumnNode &>();
if (array_join_column_expression_typed.getAlias() == identifier_lookup.identifier.getFullName())
IdentifierView identifier_view(identifier_lookup.identifier);
if (identifier_view.isCompound() && from_array_join_node.hasAlias() && identifier_view.front() == from_array_join_node.getAlias())
identifier_view.popFirst();
const auto & alias_or_name = array_join_column_expression_typed.hasAlias()
? array_join_column_expression_typed.getAlias()
: array_join_column_expression_typed.getColumnName();
if (identifier_view.front() == alias_or_name)
identifier_view.popFirst();
else if (identifier_view.getFullName() == alias_or_name)
identifier_view.popFirst(identifier_view.getPartsSize()); /// Clear
else
continue;
if (identifier_view.empty())
{
auto array_join_column = std::make_shared<ColumnNode>(array_join_column_expression_typed.getColumn(),
array_join_column_expression_typed.getColumnSource());
return array_join_column;
}
auto compound_expr = tryResolveIdentifierFromCompoundExpression(
identifier_lookup.identifier,
identifier_lookup.identifier.getPartsSize() - identifier_view.getPartsSize() /*identifier_bind_size*/,
array_join_column_expression,
{} /* compound_expression_source */,
scope,
true /* can_be_not_found */);
if (compound_expr)
return compound_expr;
}
if (!resolved_identifier)
@ -6284,7 +6315,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
*
* 4. If node has alias, update its value in scope alias map. Deregister alias from expression_aliases_in_resolve_process.
*/
ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression)
ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression, bool ignore_alias)
{
checkStackSize();
@ -6334,7 +6365,7 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, Id
* To support both (SELECT 1) AS expression in projection and (SELECT 1) as subquery in IN, do not use
* alias table because in alias table subquery could be evaluated as scalar.
*/
bool use_alias_table = true;
bool use_alias_table = !ignore_alias;
if (is_duplicated_alias || (allow_table_expression && isSubqueryNodeType(node->getNodeType())))
use_alias_table = false;
@ -6634,7 +6665,8 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, Id
if (is_duplicated_alias)
scope.non_cached_identifier_lookups_during_expression_resolve.erase({Identifier{node_alias}, IdentifierLookupContext::EXPRESSION});
resolved_expressions.emplace(node, result_projection_names);
if (!ignore_alias)
resolved_expressions.emplace(node, result_projection_names);
scope.popExpressionNode();
bool expression_was_root = scope.expressions_in_resolve_process_stack.empty();
@ -7569,22 +7601,25 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
for (auto & array_join_expression : array_join_nodes)
{
auto array_join_expression_alias = array_join_expression->getAlias();
if (!array_join_expression_alias.empty() && scope.aliases.alias_name_to_expression_node->contains(array_join_expression_alias))
throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS,
"ARRAY JOIN expression {} with duplicate alias {}. In scope {}",
array_join_expression->formatASTForErrorMessage(),
array_join_expression_alias,
scope.scope_node->formatASTForErrorMessage());
/// Add array join expression into scope
expressions_visitor.visit(array_join_expression);
for (const auto & elem : array_join_nodes)
{
if (elem->hasAlias())
scope.aliases.array_join_aliases.insert(elem->getAlias());
for (auto & child : elem->getChildren())
{
if (child)
expressions_visitor.visit(child);
}
}
std::string identifier_full_name;
if (auto * identifier_node = array_join_expression->as<IdentifierNode>())
identifier_full_name = identifier_node->getIdentifier().getFullName();
resolveExpressionNode(array_join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
resolveExpressionNode(array_join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/, true /*ignore_alias*/);
auto process_array_join_expression = [&](QueryTreeNodePtr & expression)
{
@ -7651,27 +7686,7 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
}
}
/** Allow to resolve ARRAY JOIN columns from aliases with types after ARRAY JOIN only after ARRAY JOIN expression list is resolved, because
* during resolution of ARRAY JOIN expression list we must use column type before ARRAY JOIN.
*
* Example: SELECT id, value_element FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value
* It is expected that `value_element AS value` expression inside ARRAY JOIN expression list will be
* resolved as `value_element` expression with type before ARRAY JOIN.
* And it is expected that `value_element` inside projection expression list will be resolved as `value_element` expression
* with type after ARRAY JOIN.
*/
array_join_nodes = std::move(array_join_column_expressions);
for (auto & array_join_column_expression : array_join_nodes)
{
auto it = scope.aliases.alias_name_to_expression_node->find(array_join_column_expression->getAlias());
if (it != scope.aliases.alias_name_to_expression_node->end())
{
auto & array_join_column_expression_typed = array_join_column_expression->as<ColumnNode &>();
auto array_join_column = std::make_shared<ColumnNode>(array_join_column_expression_typed.getColumn(),
array_join_column_expression_typed.getColumnSource());
it->second = std::move(array_join_column);
}
}
}
void QueryAnalyzer::checkDuplicateTableNamesOrAlias(const QueryTreeNodePtr & join_node, QueryTreeNodePtr & left_table_expr, QueryTreeNodePtr & right_table_expr, IdentifierResolveScope & scope)

View File

@ -41,6 +41,7 @@ UInt8 getDayOfWeek(const cctz::civil_day & date)
case cctz::weekday::saturday: return 6;
case cctz::weekday::sunday: return 7;
}
UNREACHABLE();
}
inline cctz::time_point<cctz::seconds> lookupTz(const cctz::time_zone & cctz_time_zone, const cctz::civil_day & date)

View File

@ -34,6 +34,8 @@ Int64 IntervalKind::toAvgNanoseconds() const
default:
return toAvgSeconds() * NANOSECONDS_PER_SECOND;
}
UNREACHABLE();
}
Int32 IntervalKind::toAvgSeconds() const
@ -52,6 +54,7 @@ Int32 IntervalKind::toAvgSeconds() const
case IntervalKind::Kind::Quarter: return 7889238; /// Exactly 1/4 of a year.
case IntervalKind::Kind::Year: return 31556952; /// The average length of a Gregorian year is equal to 365.2425 days
}
UNREACHABLE();
}
Float64 IntervalKind::toSeconds() const
@ -77,6 +80,7 @@ Float64 IntervalKind::toSeconds() const
default:
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Not possible to get precise number of seconds in non-precise interval");
}
UNREACHABLE();
}
bool IntervalKind::isFixedLength() const
@ -95,6 +99,7 @@ bool IntervalKind::isFixedLength() const
case IntervalKind::Kind::Quarter:
case IntervalKind::Kind::Year: return false;
}
UNREACHABLE();
}
IntervalKind IntervalKind::fromAvgSeconds(Int64 num_seconds)
@ -136,6 +141,7 @@ const char * IntervalKind::toKeyword() const
case IntervalKind::Kind::Quarter: return "QUARTER";
case IntervalKind::Kind::Year: return "YEAR";
}
UNREACHABLE();
}
@ -155,6 +161,7 @@ const char * IntervalKind::toLowercasedKeyword() const
case IntervalKind::Kind::Quarter: return "quarter";
case IntervalKind::Kind::Year: return "year";
}
UNREACHABLE();
}
@ -185,6 +192,7 @@ const char * IntervalKind::toDateDiffUnit() const
case IntervalKind::Kind::Year:
return "year";
}
UNREACHABLE();
}
@ -215,6 +223,7 @@ const char * IntervalKind::toNameOfFunctionToIntervalDataType() const
case IntervalKind::Kind::Year:
return "toIntervalYear";
}
UNREACHABLE();
}
@ -248,6 +257,7 @@ const char * IntervalKind::toNameOfFunctionExtractTimePart() const
case IntervalKind::Kind::Year:
return "toYear";
}
UNREACHABLE();
}

View File

@ -54,6 +54,8 @@ String toString(TargetArch arch)
case TargetArch::AMXTILE: return "amxtile";
case TargetArch::AMXINT8: return "amxint8";
}
UNREACHABLE();
}
}

View File

@ -75,6 +75,7 @@ const char * TasksStatsCounters::metricsProviderString(MetricsProvider provider)
case MetricsProvider::Netlink:
return "netlink";
}
UNREACHABLE();
}
bool TasksStatsCounters::checkIfAvailable()

View File

@ -146,6 +146,8 @@ const char * errorMessage(Error code)
case Error::ZSESSIONMOVED: return "Session moved to another server, so operation is ignored";
case Error::ZNOTREADONLY: return "State-changing request is passed to read-only server";
}
UNREACHABLE();
}
bool isHardwareError(Error zk_return_code)

View File

@ -466,6 +466,7 @@ void CompressionCodecDeflateQpl::doDecompressData(const char * source, UInt32 so
sw_codec->doDecompressData(source, source_size, dest, uncompressed_size);
return;
}
UNREACHABLE();
}
void CompressionCodecDeflateQpl::flushAsynchronousDecompressRequests()

View File

@ -21,11 +21,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
/** NOTE DoubleDelta is surprisingly bad name. The only excuse is that it comes from an academic paper.
* Most people will think that "double delta" is just applying delta transform twice.
* But in fact it is something more than applying delta transform twice.
@ -147,9 +142,9 @@ namespace ErrorCodes
{
extern const int CANNOT_COMPRESS;
extern const int CANNOT_DECOMPRESS;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_SYNTAX_FOR_CODEC_TYPE;
extern const int ILLEGAL_CODEC_PARAMETER;
extern const int LOGICAL_ERROR;
}
namespace
@ -168,8 +163,9 @@ inline Int64 getMaxValueForByteSize(Int8 byte_size)
case sizeof(UInt64):
return std::numeric_limits<Int64>::max();
default:
throw Exception(ErrorCodes::LOGICAL_ERROR, "only 1, 2, 4 and 8 data sizes are supported");
assert(false && "only 1, 2, 4 and 8 data sizes are supported");
}
UNREACHABLE();
}
struct WriteSpec

View File

@ -5,12 +5,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
ClusterUpdateActions joiningToClusterUpdates(const ClusterConfigPtr & cfg, std::string_view joining)
{
ClusterUpdateActions out;
@ -85,7 +79,7 @@ String serializeClusterConfig(const ClusterConfigPtr & cfg, const ClusterUpdateA
new_config.emplace_back(RaftServerConfig{*cfg->get_server(priority->id)});
}
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected update");
UNREACHABLE();
}
for (const auto & item : cfg->get_servers())

View File

@ -990,7 +990,7 @@ KeeperServer::ConfigUpdateState KeeperServer::applyConfigUpdate(
raft_instance->set_priority(update->id, update->priority, /*broadcast on live leader*/true);
return Accepted;
}
std::unreachable();
UNREACHABLE();
}
ClusterUpdateActions KeeperServer::getRaftConfigurationDiff(const Poco::Util::AbstractConfiguration & config)

View File

@ -667,6 +667,8 @@ public:
case Types::AggregateFunctionState: return f(field.template get<AggregateFunctionStateData>());
case Types::CustomType: return f(field.template get<CustomType>());
}
UNREACHABLE();
}
String dump() const;

View File

@ -3,6 +3,7 @@
#include <Core/BaseSettings.h>
#include <Core/Defines.h>
#include <Core/SettingsEnums.h>
namespace Poco::Util
@ -51,7 +52,7 @@ namespace DB
M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting., ", 0) \
M(String, temporary_data_in_cache, "", "Cache disk name for temporary data.", 0) \
M(UInt64, aggregate_function_group_array_max_element_size, 0xFFFFFF, "Max array element size in bytes for groupArray function. This limit is checked at serialization and help to avoid large state size.", 0) \
M(Bool, aggregate_function_group_array_has_limit_size, false, "When the max array element size is exceeded, a `Too large array size` exception will be thrown by default. When set to true, no exception will be thrown, and the excess elements will be discarded.", 0) \
M(GroupArrayActionWhenLimitReached, aggregate_function_group_array_action_when_limit_is_reached, GroupArrayActionWhenLimitReached::THROW, "Action to execute when max array element size is exceeded in groupArray: `throw` exception, or `discard` extra values", 0) \
M(UInt64, max_server_memory_usage, 0, "Maximum total memory usage of the server in bytes. Zero means unlimited.", 0) \
M(Double, max_server_memory_usage_to_ram_ratio, 0.9, "Same as max_server_memory_usage but in to RAM ratio. Allows to lower max memory on low-memory systems.", 0) \
M(UInt64, merges_mutations_memory_usage_soft_limit, 0, "Maximum total memory usage for merges and mutations in bytes. Zero means unlimited.", 0) \

View File

@ -229,4 +229,9 @@ IMPLEMENT_SETTING_ENUM(SQLSecurityType, ErrorCodes::BAD_ARGUMENTS,
{{"DEFINER", SQLSecurityType::DEFINER},
{"INVOKER", SQLSecurityType::INVOKER},
{"NONE", SQLSecurityType::NONE}})
IMPLEMENT_SETTING_ENUM(
GroupArrayActionWhenLimitReached,
ErrorCodes::BAD_ARGUMENTS,
{{"throw", GroupArrayActionWhenLimitReached::THROW}, {"discard", GroupArrayActionWhenLimitReached::DISCARD}})
}

View File

@ -370,4 +370,12 @@ DECLARE_SETTING_ENUM(SchemaInferenceMode)
DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateTimeOverflowBehavior)
DECLARE_SETTING_ENUM(SQLSecurityType)
enum class GroupArrayActionWhenLimitReached : uint8_t
{
THROW,
DISCARD
};
DECLARE_SETTING_ENUM(GroupArrayActionWhenLimitReached)
}

View File

@ -36,6 +36,7 @@ String ISerialization::kindToString(Kind kind)
case Kind::SPARSE:
return "Sparse";
}
UNREACHABLE();
}
ISerialization::Kind ISerialization::stringToKind(const String & str)

View File

@ -140,6 +140,7 @@ private:
case ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE:
return "REMOTE_FS_READ_AND_PUT_IN_CACHE";
}
UNREACHABLE();
}
size_t first_offset = 0;

View File

@ -17,6 +17,7 @@ std::string toString(MetadataStorageTransactionState state)
case MetadataStorageTransactionState::PARTIALLY_ROLLED_BACK:
return "PARTIALLY_ROLLED_BACK";
}
UNREACHABLE();
}
}

View File

@ -575,24 +575,22 @@ void S3ObjectStorage::applyNewSettings(
ContextPtr context,
const ApplyNewSettingsOptions & options)
{
auto new_s3_settings = getSettings(config, config_prefix, context, context->getSettingsRef().s3_validate_request_settings);
if (!static_headers.empty())
{
new_s3_settings->auth_settings.headers.insert(
new_s3_settings->auth_settings.headers.end(),
static_headers.begin(), static_headers.end());
}
auto settings_from_config = getSettings(config, config_prefix, context, context->getSettingsRef().s3_validate_request_settings);
auto modified_settings = std::make_unique<S3ObjectStorageSettings>(*s3_settings.get());
modified_settings->auth_settings.updateFrom(settings_from_config->auth_settings);
modified_settings->request_settings = settings_from_config->request_settings;
if (auto endpoint_settings = context->getStorageS3Settings().getSettings(uri.uri.toString(), context->getUserName()))
new_s3_settings->auth_settings.updateFrom(endpoint_settings->auth_settings);
modified_settings->auth_settings.updateFrom(endpoint_settings->auth_settings);
auto current_s3_settings = s3_settings.get();
if (options.allow_client_change && (current_s3_settings->auth_settings.hasUpdates(new_s3_settings->auth_settings) || for_disk_s3))
auto current_settings = s3_settings.get();
if (options.allow_client_change
&& (current_settings->auth_settings.hasUpdates(modified_settings->auth_settings) || for_disk_s3))
{
auto new_client = getClient(config, config_prefix, context, *new_s3_settings, for_disk_s3, &uri);
auto new_client = getClient(config, config_prefix, context, *modified_settings, for_disk_s3, &uri);
client.set(std::move(new_client));
}
s3_settings.set(std::move(new_s3_settings));
s3_settings.set(std::move(modified_settings));
}
std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(

View File

@ -54,8 +54,7 @@ private:
const S3Capabilities & s3_capabilities_,
ObjectStorageKeysGeneratorPtr key_generator_,
const String & disk_name_,
bool for_disk_s3_ = true,
const HTTPHeaderEntries & static_headers_ = {})
bool for_disk_s3_ = true)
: uri(uri_)
, disk_name(disk_name_)
, client(std::move(client_))
@ -64,7 +63,6 @@ private:
, key_generator(std::move(key_generator_))
, log(getLogger(logger_name))
, for_disk_s3(for_disk_s3_)
, static_headers(static_headers_)
{
}
@ -189,7 +187,6 @@ private:
LoggerPtr log;
const bool for_disk_s3;
const HTTPHeaderEntries static_headers;
};
}

View File

@ -112,6 +112,7 @@ DiskPtr VolumeJBOD::getDisk(size_t /* index */) const
return disks_by_size.top().disk;
}
}
UNREACHABLE();
}
ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
@ -163,6 +164,7 @@ ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
return reservation;
}
}
UNREACHABLE();
}
bool VolumeJBOD::areMergesAvoided() const

View File

@ -62,6 +62,7 @@ String escapingRuleToString(FormatSettings::EscapingRule escaping_rule)
case FormatSettings::EscapingRule::Raw:
return "Raw";
}
UNREACHABLE();
}
void skipFieldByEscapingRule(ReadBuffer & buf, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings)

View File

@ -149,6 +149,8 @@ struct IntegerRoundingComputation
return x;
}
}
UNREACHABLE();
}
static ALWAYS_INLINE T compute(T x, T scale)
@ -161,6 +163,8 @@ struct IntegerRoundingComputation
case ScaleMode::Negative:
return computeImpl(x, scale);
}
UNREACHABLE();
}
static ALWAYS_INLINE void compute(const T * __restrict in, size_t scale, T * __restrict out) requires std::integral<T>
@ -243,6 +247,8 @@ inline float roundWithMode(float x, RoundingMode mode)
case RoundingMode::Ceil: return ceilf(x);
case RoundingMode::Trunc: return truncf(x);
}
UNREACHABLE();
}
inline double roundWithMode(double x, RoundingMode mode)
@ -254,6 +260,8 @@ inline double roundWithMode(double x, RoundingMode mode)
case RoundingMode::Ceil: return ceil(x);
case RoundingMode::Trunc: return trunc(x);
}
UNREACHABLE();
}
template <typename T>

View File

@ -232,6 +232,7 @@ struct TimeWindowImpl<TUMBLE>
default:
throw Exception(ErrorCodes::SYNTAX_ERROR, "Fraction seconds are unsupported by windows yet");
}
UNREACHABLE();
}
template <typename ToType, IntervalKind::Kind unit>
@ -421,6 +422,7 @@ struct TimeWindowImpl<HOP>
default:
throw Exception(ErrorCodes::SYNTAX_ERROR, "Fraction seconds are unsupported by windows yet");
}
UNREACHABLE();
}
template <typename ToType, IntervalKind::Kind kind>

View File

@ -381,6 +381,8 @@ bool PointInPolygonWithGrid<CoordinateType>::contains(CoordinateType x, Coordina
case CellType::complexPolygon:
return boost::geometry::within(Point(x, y), polygons[cell.index_of_inner_polygon]);
}
UNREACHABLE();
}

View File

@ -35,6 +35,7 @@ namespace
case UserDefinedSQLObjectType::Function:
return "function_";
}
UNREACHABLE();
}
constexpr std::string_view sql_extension = ".sql";

View File

@ -52,6 +52,7 @@ std::string toContentEncodingName(CompressionMethod method)
case CompressionMethod::None:
return "";
}
UNREACHABLE();
}
CompressionMethod chooseHTTPCompressionMethod(const std::string & list)

View File

@ -88,6 +88,7 @@ public:
case Status::TOO_LARGE_COMPRESSED_BLOCK:
return "TOO_LARGE_COMPRESSED_BLOCK";
}
UNREACHABLE();
}
explicit HadoopSnappyReadBuffer(

View File

@ -117,6 +117,8 @@ size_t AggregatedDataVariants::size() const
APPLY_FOR_AGGREGATED_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
size_t AggregatedDataVariants::sizeWithoutOverflowRow() const
@ -134,6 +136,8 @@ size_t AggregatedDataVariants::sizeWithoutOverflowRow() const
APPLY_FOR_AGGREGATED_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
const char * AggregatedDataVariants::getMethodName() const
@ -151,6 +155,8 @@ const char * AggregatedDataVariants::getMethodName() const
APPLY_FOR_AGGREGATED_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
bool AggregatedDataVariants::isTwoLevel() const
@ -168,6 +174,8 @@ bool AggregatedDataVariants::isTwoLevel() const
APPLY_FOR_AGGREGATED_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
bool AggregatedDataVariants::isConvertibleToTwoLevel() const

View File

@ -799,6 +799,7 @@ String FileSegment::stateToString(FileSegment::State state)
case FileSegment::State::DETACHED:
return "DETACHED";
}
UNREACHABLE();
}
bool FileSegment::assertCorrectness() const

View File

@ -309,6 +309,7 @@ ComparisonGraphCompareResult ComparisonGraph<Node>::pathToCompareResult(Path pat
case Path::GREATER: return inverse ? ComparisonGraphCompareResult::LESS : ComparisonGraphCompareResult::GREATER;
case Path::GREATER_OR_EQUAL: return inverse ? ComparisonGraphCompareResult::LESS_OR_EQUAL : ComparisonGraphCompareResult::GREATER_OR_EQUAL;
}
UNREACHABLE();
}
template <ComparisonGraphNodeType Node>

View File

@ -26,6 +26,7 @@ static String typeToString(FilesystemCacheLogElement::CacheType type)
case FilesystemCacheLogElement::CacheType::WRITE_THROUGH_CACHE:
return "WRITE_THROUGH_CACHE";
}
UNREACHABLE();
}
ColumnsDescription FilesystemCacheLogElement::getColumnsDescription()

View File

@ -705,6 +705,7 @@ namespace
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
}
@ -2640,6 +2641,8 @@ private:
default:
throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", parent.data->type);
}
UNREACHABLE();
}
template <typename Map>

View File

@ -322,6 +322,8 @@ public:
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
size_t getTotalByteCountImpl(Type which) const
@ -336,6 +338,8 @@ public:
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
size_t getBufferSizeInCells(Type which) const
@ -350,6 +354,8 @@ public:
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
/// NOLINTEND(bugprone-macro-parentheses)
};

View File

@ -33,6 +33,7 @@ BlockIO InterpreterTransactionControlQuery::execute()
case ASTTransactionControl::SET_SNAPSHOT:
return executeSetSnapshot(session_context, tcl.snapshot);
}
UNREACHABLE();
}
BlockIO InterpreterTransactionControlQuery::executeBegin(ContextMutablePtr session_context)

View File

@ -41,6 +41,8 @@ size_t SetVariantsTemplate<Variant>::getTotalRowCount() const
APPLY_FOR_SET_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
template <typename Variant>
@ -55,6 +57,8 @@ size_t SetVariantsTemplate<Variant>::getTotalByteCount() const
APPLY_FOR_SET_VARIANTS(M)
#undef M
}
UNREACHABLE();
}
template <typename Variant>

View File

@ -164,6 +164,12 @@ public:
void pushNotIn(CNFQuery::AtomicFormula & atom);
/// Reduces CNF groups by removing mutually exclusive atoms
/// found across groups, in case other atoms are identical.
/// Might require multiple passes to complete reduction.
///
/// Example:
/// (x OR y) AND (x OR !y) -> x
template <typename TAndGroup>
TAndGroup reduceOnceCNFStatements(const TAndGroup & groups)
{
@ -175,10 +181,19 @@ TAndGroup reduceOnceCNFStatements(const TAndGroup & groups)
bool inserted = false;
for (const auto & atom : group)
{
copy.erase(atom);
using AtomType = std::decay_t<decltype(atom)>;
AtomType negative_atom(atom);
negative_atom.negative = !atom.negative;
// Skipping the erase-insert for mutually exclusive atoms within a
// single group, since inserting the negated atom would break
// the logic of this rule
if (copy.contains(negative_atom))
{
continue;
}
copy.erase(atom);
copy.insert(negative_atom);
if (groups.contains(copy))
@ -209,6 +224,10 @@ bool isCNFGroupSubset(const TOrGroup & left, const TOrGroup & right)
return true;
}
/// Removes CNF groups if subset group is found in CNF.
///
/// Example:
/// (x OR y) AND (x) -> x
template <typename TAndGroup>
TAndGroup filterCNFSubsets(const TAndGroup & groups)
{

View File

@ -91,6 +91,22 @@ bool checkIfGroupAlwaysTrueGraph(const CNFQuery::OrGroup & group, const Comparis
return false;
}
bool checkIfGroupAlwaysTrueAtoms(const CNFQuery::OrGroup & group)
{
/// Filters out groups containing mutually exclusive atoms,
/// since these groups are always True
for (const auto & atom : group)
{
auto negated(atom);
negated.negative = !atom.negative;
if (group.contains(negated))
{
return true;
}
}
return false;
}
bool checkIfAtomAlwaysFalseFullMatch(const CNFQuery::AtomicFormula & atom, const ConstraintsDescription & constraints_description)
{
@ -158,7 +174,8 @@ void WhereConstraintsOptimizer::perform()
.filterAlwaysTrueGroups([&compare_graph, this](const auto & group)
{
/// remove always true groups from CNF
return !checkIfGroupAlwaysTrueFullMatch(group, metadata_snapshot->getConstraints()) && !checkIfGroupAlwaysTrueGraph(group, compare_graph);
return !checkIfGroupAlwaysTrueFullMatch(group, metadata_snapshot->getConstraints())
&& !checkIfGroupAlwaysTrueGraph(group, compare_graph) && !checkIfGroupAlwaysTrueAtoms(group);
})
.filterAlwaysFalseAtoms([&compare_graph, this](const auto & atom)
{

View File

@ -40,6 +40,8 @@ public:
case TableOverride: return "EXPLAIN TABLE OVERRIDE";
case CurrentTransaction: return "EXPLAIN CURRENT TRANSACTION";
}
UNREACHABLE();
}
static ExplainKind fromString(const String & str)

View File

@ -42,7 +42,7 @@ Token quotedString(const char *& pos, const char * const token_begin, const char
continue;
}
chassert(false);
UNREACHABLE();
}
}
@ -538,6 +538,8 @@ const char * getTokenName(TokenType type)
APPLY_FOR_TOKENS(M)
#undef M
}
UNREACHABLE();
}

View File

@ -657,6 +657,7 @@ DataTypePtr MsgPackSchemaReader::getDataType(const msgpack::object & object)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Msgpack extension type {:x} is not supported", object_ext.type());
}
}
UNREACHABLE();
}
std::optional<DataTypes> MsgPackSchemaReader::readRowAndGetDataTypes()

View File

@ -36,6 +36,8 @@ std::string IProcessor::statusToName(Status status)
case Status::ExpandPipeline:
return "ExpandPipeline";
}
UNREACHABLE();
}
}

View File

@ -381,7 +381,7 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas(
auto algorithm = std::make_unique<MergeTreeThreadSelectAlgorithm>(i);
auto processor = std::make_unique<MergeTreeSelectProcessor>(
pool, std::move(algorithm), storage_snapshot, prewhere_info,
pool, std::move(algorithm), prewhere_info,
actions_settings, block_size_copy, reader_settings);
auto source = std::make_shared<MergeTreeSource>(std::move(processor));
@ -480,7 +480,7 @@ Pipe ReadFromMergeTree::readFromPool(
auto algorithm = std::make_unique<MergeTreeThreadSelectAlgorithm>(i);
auto processor = std::make_unique<MergeTreeSelectProcessor>(
pool, std::move(algorithm), storage_snapshot, prewhere_info,
pool, std::move(algorithm), prewhere_info,
actions_settings, block_size_copy, reader_settings);
auto source = std::make_shared<MergeTreeSource>(std::move(processor));
@ -592,7 +592,7 @@ Pipe ReadFromMergeTree::readInOrder(
algorithm = std::make_unique<MergeTreeInOrderSelectAlgorithm>(i);
auto processor = std::make_unique<MergeTreeSelectProcessor>(
pool, std::move(algorithm), storage_snapshot, prewhere_info,
pool, std::move(algorithm), prewhere_info,
actions_settings, block_size, reader_settings);
processor->addPartLevelToChunk(isQueryWithFinal());
@ -1136,6 +1136,8 @@ static void addMergingFinal(
return std::make_shared<GraphiteRollupSortedTransform>(header, num_outputs,
sort_description, max_block_size_rows, /*max_block_size_bytes=*/0, merging_params.graphite_params, now);
}
UNREACHABLE();
};
pipe.addTransform(get_merging_processor());
@ -2123,6 +2125,8 @@ static const char * indexTypeToString(ReadFromMergeTree::IndexType type)
case ReadFromMergeTree::IndexType::Skip:
return "Skip";
}
UNREACHABLE();
}
static const char * readTypeToString(ReadFromMergeTree::ReadType type)
@ -2138,6 +2142,8 @@ static const char * readTypeToString(ReadFromMergeTree::ReadType type)
case ReadFromMergeTree::ReadType::ParallelReplicas:
return "Parallel";
}
UNREACHABLE();
}
void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const

View File

@ -86,6 +86,8 @@ static String totalsModeToString(TotalsMode totals_mode, double auto_include_thr
case TotalsMode::AFTER_HAVING_AUTO:
return "after_having_auto threshold " + std::to_string(auto_include_threshold);
}
UNREACHABLE();
}
void TotalsHavingStep::describeActions(FormatSettings & settings) const

View File

@ -67,6 +67,7 @@ static FillColumnDescription::StepFunction getStepFunction(
FOR_EACH_INTERVAL_KIND(DECLARE_CASE)
#undef DECLARE_CASE
}
UNREACHABLE();
}
static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & type)

View File

@ -898,6 +898,8 @@ static std::exception_ptr addStorageToException(std::exception_ptr ptr, const St
{
return std::current_exception();
}
UNREACHABLE();
}
void FinalizingViewsTransform::work()

View File

@ -93,6 +93,7 @@ String BackgroundJobsAssignee::toString(Type type)
case Type::Moving:
return "Moving";
}
UNREACHABLE();
}
void BackgroundJobsAssignee::start()

View File

@ -2964,6 +2964,8 @@ String KeyCondition::RPNElement::toString(std::string_view column_name, bool pri
case ALWAYS_TRUE:
return "true";
}
UNREACHABLE();
}

View File

@ -1177,6 +1177,8 @@ String MergeTreeData::MergingParams::getModeName() const
case Graphite: return "Graphite";
case VersionedCollapsing: return "VersionedCollapsing";
}
UNREACHABLE();
}
Int64 MergeTreeData::getMaxBlockNumber() const

View File

@ -360,6 +360,8 @@ Block MergeTreeDataWriter::mergeBlock(
return std::make_shared<GraphiteRollupSortedAlgorithm>(
block, 1, sort_description, block_size + 1, /*block_size_bytes=*/0, merging_params.graphite_params, time(nullptr));
}
UNREACHABLE();
};
auto merging_algorithm = get_merging_algorithm();

View File

@ -26,14 +26,12 @@ namespace ErrorCodes
MergeTreeSelectProcessor::MergeTreeSelectProcessor(
MergeTreeReadPoolPtr pool_,
MergeTreeSelectAlgorithmPtr algorithm_,
const StorageSnapshotPtr & storage_snapshot_,
const PrewhereInfoPtr & prewhere_info_,
const ExpressionActionsSettings & actions_settings_,
const MergeTreeReadTask::BlockSizeParams & block_size_params_,
const MergeTreeReaderSettings & reader_settings_)
: pool(std::move(pool_))
, algorithm(std::move(algorithm_))
, storage_snapshot(storage_snapshot_)
, prewhere_info(prewhere_info_)
, actions_settings(actions_settings_)
, prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps))

View File

@ -41,7 +41,6 @@ public:
MergeTreeSelectProcessor(
MergeTreeReadPoolPtr pool_,
MergeTreeSelectAlgorithmPtr algorithm_,
const StorageSnapshotPtr & storage_snapshot_,
const PrewhereInfoPtr & prewhere_info_,
const ExpressionActionsSettings & actions_settings_,
const MergeTreeReadTask::BlockSizeParams & block_size_params_,
@ -71,7 +70,6 @@ private:
const MergeTreeReadPoolPtr pool;
const MergeTreeSelectAlgorithmPtr algorithm;
const StorageSnapshotPtr storage_snapshot;
const PrewhereInfoPtr prewhere_info;
const ExpressionActionsSettings actions_settings;
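The two hunks above drop the `storage_snapshot_` constructor parameter together with the member it initialized, and the three `MergeTreeSelectProcessor` call sites in `ReadFromMergeTree` earlier in this diff drop the argument in lockstep. A sketch of the shape of the refactor, with stand-in types that are assumptions of this sketch rather than the real declarations:

```cpp
#include <memory>
#include <utility>

// Simplified stand-ins for the types in the hunks above.
struct PrewhereInfo {};
using PrewhereInfoPtr = std::shared_ptr<PrewhereInfo>;

// After the refactor the processor neither receives nor stores the storage
// snapshot; only the dependencies that are actually used remain.
class SelectProcessorSketch
{
public:
    explicit SelectProcessorSketch(PrewhereInfoPtr prewhere_info_)
        : prewhere_info(std::move(prewhere_info_))
    {
    }

private:
    const PrewhereInfoPtr prewhere_info;
};

int main()
{
    // Every call site shrinks in lockstep: the storage_snapshot argument is gone.
    auto processor = std::make_unique<SelectProcessorSketch>(std::make_shared<PrewhereInfo>());
    return 0;
}
```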

View File

@ -616,6 +616,8 @@ PartMovesBetweenShardsOrchestrator::Entry PartMovesBetweenShardsOrchestrator::st
}
}
}
UNREACHABLE();
}
void PartMovesBetweenShardsOrchestrator::removePins(const Entry & entry, zkutil::ZooKeeperPtr zk)

View File

@ -254,21 +254,17 @@ ReadBufferIterator::Data ReadBufferIterator::next()
}
}
LOG_TEST(getLogger("KSSENII"), "Will read columns from {}", current_object_info->getPath());
std::unique_ptr<ReadBuffer> read_buf;
CompressionMethod compression_method;
using ObjectInfoInArchive = StorageObjectStorageSource::ArchiveIterator::ObjectInfoInArchive;
if (const auto * object_info_in_archive = dynamic_cast<const ObjectInfoInArchive *>(current_object_info.get()))
{
LOG_TEST(getLogger("KSSENII"), "Will read columns from {} from archive", current_object_info->getPath());
compression_method = chooseCompressionMethod(filename, configuration->compression_method);
const auto & archive_reader = object_info_in_archive->archive_reader;
read_buf = archive_reader->readFile(object_info_in_archive->path_in_archive, /*throw_on_not_found=*/true);
}
else
{
LOG_TEST(getLogger("KSSENII"), "Will read columns from {} from s3", current_object_info->getPath());
compression_method = chooseCompressionMethod(filename, configuration->compression_method);
read_buf = object_storage->readObject(
StoredObject(current_object_info->getPath()),

View File

@ -136,7 +136,7 @@ ObjectStoragePtr StorageS3Configuration::createObjectStorage(ContextPtr context,
return std::make_shared<S3ObjectStorage>(
std::move(client), std::move(s3_settings), url, s3_capabilities,
key_generator, "StorageS3", false, headers_from_ast);
key_generator, "StorageS3", false);
}
void StorageS3Configuration::fromNamedCollection(const NamedCollection & collection)

View File

@ -297,6 +297,7 @@ namespace
CASE_WINDOW_KIND(Year)
#undef CASE_WINDOW_KIND
}
UNREACHABLE();
}
class AddingAggregatedChunkInfoTransform : public ISimpleTransform
@ -919,6 +920,7 @@ UInt32 StorageWindowView::getWindowLowerBound(UInt32 time_sec)
CASE_WINDOW_KIND(Year)
#undef CASE_WINDOW_KIND
}
UNREACHABLE();
}
UInt32 StorageWindowView::getWindowUpperBound(UInt32 time_sec)
@ -946,6 +948,7 @@ UInt32 StorageWindowView::getWindowUpperBound(UInt32 time_sec)
CASE_WINDOW_KIND(Year)
#undef CASE_WINDOW_KIND
}
UNREACHABLE();
}
void StorageWindowView::addFireSignal(std::set<UInt32> & signals)

View File

@ -401,30 +401,40 @@ class BuildResult:
@classmethod
def load_any(cls, build_name: str, pr_number: int, head_ref: str): # type: ignore
"""
loads report from suitable report file with the following priority:
1. report from PR with the same @pr_number
2. report from branch with the same @head_ref
3. report from the master
4. any other report
Loads the build report from one of the available report files (matching the job digest),
with the following priority:
1. report for the current PR @pr_number (may happen in a PR workflow, with or without job reuse)
2. report for the current branch @head_ref (may happen in a release/master workflow, with or without job reuse)
3. report for the master branch (may happen in any workflow in the case of job reuse)
4. any other report (job reuse from another PR, if the master report is not available yet)
"""
reports = []
pr_report = None
ref_report = None
master_report = None
any_report = None
for file in Path(REPORT_PATH).iterdir():
if f"{build_name}.json" in file.name:
reports.append(file)
if not reports:
return None
file_path = None
for file in reports:
if pr_number and f"_{pr_number}_" in file.name:
file_path = file
break
if f"_{head_ref}_" in file.name:
file_path = file
break
any_report = file
if "_master_" in file.name:
file_path = file
break
return cls.load_from_file(file_path or reports[-1])
master_report = file
elif f"_{head_ref}_" in file.name:
ref_report = file
elif pr_number and f"_{pr_number}_" in file.name:
pr_report = file
if not any_report:
return None
if pr_report:
file_path = pr_report
elif ref_report:
file_path = ref_report
elif master_report:
file_path = master_report
else:
file_path = any_report
return cls.load_from_file(file_path)
@classmethod
def load_from_file(cls, file: Union[Path, str]): # type: ignore

View File

@ -1,4 +1,4 @@
<clickhouse>
<aggregate_function_group_array_max_element_size>10</aggregate_function_group_array_max_element_size>
<aggregate_function_group_array_has_limit_size>false</aggregate_function_group_array_has_limit_size>
<aggregate_function_group_array_action_when_limit_is_reached>throw</aggregate_function_group_array_action_when_limit_is_reached>
</clickhouse>

View File

@ -80,8 +80,8 @@ def test_limit_size(started_cluster):
node2.replace_in_config(
"/etc/clickhouse-server/config.d/group_array_max_element_size.xml",
"false",
"true",
"throw",
"discard",
)
node2.restart_clickhouse()
@ -91,8 +91,8 @@ def test_limit_size(started_cluster):
node2.replace_in_config(
"/etc/clickhouse-server/config.d/group_array_max_element_size.xml",
"true",
"false",
"discard",
"throw",
)
node2.restart_clickhouse()
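The config and test hunks above replace the boolean `aggregate_function_group_array_has_limit_size` with a named policy, `aggregate_function_group_array_action_when_limit_is_reached`, whose values `throw` and `discard` are what the test now toggles between. A minimal sketch of the two policies, assuming a toy array state rather than the real aggregate-function internals:

```cpp
#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <vector>

enum class ActionWhenLimitReached { Throw, Discard };

// When the array state hits the configured maximum, either raise an error
// or silently keep the first max_size elements and drop the rest.
void insertWithLimit(std::vector<int> & state, int value, size_t max_size, ActionWhenLimitReached action)
{
    if (state.size() >= max_size)
    {
        if (action == ActionWhenLimitReached::Throw)
            throw std::length_error("groupArray: too many elements");
        return; // Discard.
    }
    state.push_back(value);
}

int main()
{
    std::vector<int> state;
    for (int i = 0; i < 20; ++i)
        insertWithLimit(state, i, 10, ActionWhenLimitReached::Discard);
    std::printf("%zu\n", state.size()); // Prints 10: later values were discarded.
    return 0;
}
```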

View File

@ -45,7 +45,13 @@ SELECT id, value, value_1, value_2 FROM test_table ARRAY JOIN [[1, 2, 3]] AS val
0 Value [1,2,3] 1
0 Value [1,2,3] 2
0 Value [1,2,3] 3
SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value; -- { serverError 179 }
SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value;
1
1
1
1
1
1
SELECT 'ARRAY JOIN with column';
ARRAY JOIN with column
SELECT id, value, test_table.value_array FROM test_table ARRAY JOIN value_array;
@ -84,7 +90,13 @@ SELECT id, value, value_array AS value_array_array_alias FROM test_table ARRAY J
0 Value [4,5,6]
SELECT '--';
--
SELECT id AS value FROM test_table ARRAY JOIN value_array AS value; -- { serverError 179 }
SELECT id AS value FROM test_table ARRAY JOIN value_array AS value;
0
0
0
0
0
0
SELECT '--';
--
SELECT id, value, value_array AS value_array_array_alias, value_array_array_alias_element FROM test_table ARRAY JOIN value_array_array_alias AS value_array_array_alias_element;
@ -120,3 +132,7 @@ WHERE NOT ignore(elem)
GROUP BY
sum(ignore(ignore(ignore(1., 1, 36, 8, 8), ignore(52, 37, 37, '03147_parquet_memory_tracking.parquet', 37, 37, toUInt256(37), 37, 37, toNullable(37), 37, 37), 1., 1, 36, 8, 8), emptyArrayToSingle(arrayMap(x -> toString(x), arrayMap(x -> nullIf(x, 2), arrayJoin([[1]])))))) IGNORE NULLS,
modulo(toLowCardinality('03147_parquet_memory_tracking.parquet'), number, toLowCardinality(3)); -- { serverError UNKNOWN_IDENTIFIER }
[1,2] 1
[1,2] 2
1
2

View File

@ -33,7 +33,7 @@ SELECT '--';
SELECT id, value, value_1, value_2 FROM test_table ARRAY JOIN [[1, 2, 3]] AS value_1 ARRAY JOIN value_1 AS value_2;
SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value; -- { serverError 179 }
SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value;
SELECT 'ARRAY JOIN with column';
@ -53,7 +53,7 @@ SELECT id, value, value_array AS value_array_array_alias FROM test_table ARRAY J
SELECT '--';
SELECT id AS value FROM test_table ARRAY JOIN value_array AS value; -- { serverError 179 }
SELECT id AS value FROM test_table ARRAY JOIN value_array AS value;
SELECT '--';
@ -80,3 +80,6 @@ GROUP BY
-- { echoOff }
DROP TABLE test_table;
select [1, 2] as arr, x from system.one array join arr as x;
select x + 1 as x from (select [number] as arr from numbers(2)) as s array join arr as x;

View File

@ -1,11 +1,10 @@
-- { echoOn }
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value;
0 [1,2,3] [1,2,3]
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value; -- { serverError UNKNOWN_IDENTIFIER }
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element ARRAY JOIN value_element AS value;
0 [1,2,3] 1
0 [1,2,3] 2
0 [1,2,3] 3
SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, arrayMap(x -> value_element, ['']) AS value;
1048577 [1048577]
SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 }
SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element ARRAY JOIN arrayMap(x -> value_element, ['']) AS value;
1048577 1048577
SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem ARRAY JOIN arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError ILLEGAL_COLUMN }

View File

@ -11,13 +11,13 @@ INSERT INTO test_table VALUES (0, 'Value');
-- { echoOn }
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value;
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value; -- { serverError UNKNOWN_IDENTIFIER }
SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element ARRAY JOIN value_element AS value;
SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, arrayMap(x -> value_element, ['']) AS value;
SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element ARRAY JOIN arrayMap(x -> value_element, ['']) AS value;
SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 }
SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem ARRAY JOIN arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError ILLEGAL_COLUMN }
-- { echoOff }

View File

@ -7,8 +7,7 @@ CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_suspicious_variant_types=1 --max_insert_threads 4 --group_by_two_level_threshold 752249 --group_by_two_level_threshold_bytes 15083870 --distributed_aggregation_memory_efficient 1 --fsync_metadata 1 --output_format_parallel_formatting 0 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 6583861 --max_read_buffer_size 640584 --prefer_localhost_replica 1 --max_block_size 38844 --max_threads 48 --optimize_append_index 0 --optimize_if_chain_to_multiif 1 --optimize_if_transform_strings_to_enum 0 --optimize_read_in_order 1 --optimize_or_like_chain 0 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 4 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 18284646 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 10737418240 --local_filesystem_read_method pread --remote_filesystem_read_method read --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 0 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 0 --filesystem_prefetch_max_memory_usage 128Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 16Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 31 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 1 --max_bytes_before_external_group_by 1 --max_bytes_before_remerge_sort 2640239625 --min_compress_block_size 3114155 --max_compress_block_size 226550 --merge_tree_compact_parts_min_granules_to_multibuffer_read 118 --optimize_sorting_by_input_stream_properties 0 --http_response_buffer_size 543038 --http_wait_end_of_query False --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 3 --min_count_to_compile_sort_description 0 --session_timezone America/Mazatlan --prefer_warmed_unmerged_parts_seconds 8 --use_page_cache_for_disks_without_file_cache False --page_cache_inject_eviction True --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.82 "
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_suspicious_variant_types=1"
function test()
{

View File

@ -0,0 +1,32 @@
MergeTree compact + horizontal merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + horizontal merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree compact + vertical merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + vertical merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2

View File

@ -7,36 +7,11 @@ CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --optimize_aggregation_in_order 0 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128"
# Fix some settings to avoid timeouts because of some settings randomization
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128 --optimize_aggregation_in_order 0"
function test()
{
echo "ReplacingMergeTree"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=ReplacingMergeTree order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "drop table test"
echo "SummingMergeTree"
$CH_CLIENT -q "create table test (id UInt64, sum UInt64, d Dynamic) engine=SummingMergeTree(sum) order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, 1, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, 1, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
$CH_CLIENT -q "drop table test"
echo "AggregatingMergeTree"
$CH_CLIENT -q "create table test (id UInt64, sum AggregateFunction(sum, UInt64), d Dynamic) engine=AggregatingMergeTree() order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, sumState(1::UInt64), number from numbers(100000) group by number"

View File

@ -1,88 +0,0 @@
MergeTree compact + horizontal merge
ReplacingMergeTree
100000 String
100000 UInt64
50000 UInt64
100000 String
SummingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
AggregatingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + horizontal merge
ReplacingMergeTree
100000 String
100000 UInt64
50000 UInt64
100000 String
SummingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
AggregatingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree compact + vertical merge
ReplacingMergeTree
100000 String
100000 UInt64
50000 UInt64
100000 String
SummingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
AggregatingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + vertical merge
ReplacingMergeTree
100000 String
100000 UInt64
50000 UInt64
100000 String
SummingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
AggregatingMergeTree
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2

View File

@ -1,44 +0,0 @@
MergeTree compact + horizontal merge
CollapsingMergeTree
100000 String
100000 UInt64
50000 String
50000 UInt64
VersionedCollapsingMergeTree
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree wide + horizontal merge
CollapsingMergeTree
100000 String
100000 UInt64
50000 String
50000 UInt64
VersionedCollapsingMergeTree
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree compact + vertical merge
CollapsingMergeTree
100000 String
100000 UInt64
50000 String
50000 UInt64
VersionedCollapsingMergeTree
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree wide + vertical merge
CollapsingMergeTree
100000 String
100000 UInt64
50000 String
50000 UInt64
VersionedCollapsingMergeTree
100000 String
100000 UInt64
75000 String
75000 UInt64

View File

@ -0,0 +1,20 @@
MergeTree compact + horizontal merge
100000 String
100000 UInt64
50000 String
50000 UInt64
MergeTree wide + horizontal merge
100000 String
100000 UInt64
50000 String
50000 UInt64
MergeTree compact + vertical merge
100000 String
100000 UInt64
50000 String
50000 UInt64
MergeTree wide + vertical merge
100000 String
100000 UInt64
50000 String
50000 UInt64

View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
# Fix some settings to avoid timeouts because of some settings randomization
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128"
function test()
{
$CH_CLIENT -q "create table test (id UInt64, sign Int8, d Dynamic) engine=CollapsingMergeTree(sign) order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, 1, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, -1, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "drop table test"
}
$CH_CLIENT -q "drop table if exists test;"
echo "MergeTree compact + horizontal merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000"
echo "MergeTree wide + horizontal merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1"
echo "MergeTree compact + vertical merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"
echo "MergeTree wide + vertical merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"

View File

@ -0,0 +1,20 @@
MergeTree compact + horizontal merge
100000 String
100000 UInt64
50000 UInt64
100000 String
MergeTree wide + horizontal merge
100000 String
100000 UInt64
50000 UInt64
100000 String
MergeTree compact + vertical merge
100000 String
100000 UInt64
50000 UInt64
100000 String
MergeTree wide + vertical merge
100000 String
100000 UInt64
50000 UInt64
100000 String

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
# Fix some settings to avoid timeouts because of some settings randomization
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128"
function test()
{
$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=ReplacingMergeTree order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "drop table test"
}
$CH_CLIENT -q "drop table if exists test;"
echo "MergeTree compact + horizontal merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=10000000000, vertical_merge_algorithm_min_columns_to_activate=100000000000"
echo "MergeTree wide + horizontal merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1,vertical_merge_algorithm_min_rows_to_activate=1000000000, vertical_merge_algorithm_min_columns_to_activate=1000000000000"
echo "MergeTree compact + vertical merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"
echo "MergeTree wide + vertical merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"

View File

@ -0,0 +1,32 @@
MergeTree compact + horizontal merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + horizontal merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree compact + vertical merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2
MergeTree wide + vertical merge
100000 String
100000 UInt64
200000 1
50000 String
100000 UInt64
100000 1
50000 2

View File

@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
# Fix some settings to avoid timeouts because of some settings randomization
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128"
function test()
{
$CH_CLIENT -q "create table test (id UInt64, sum UInt64, d Dynamic) engine=SummingMergeTree(sum) order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, 1, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, 1, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()"
$CH_CLIENT -q "drop table test"
}
$CH_CLIENT -q "drop table if exists test;"
echo "MergeTree compact + horizontal merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=10000000000, vertical_merge_algorithm_min_columns_to_activate=100000000000"
echo "MergeTree wide + horizontal merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1,vertical_merge_algorithm_min_rows_to_activate=1000000000, vertical_merge_algorithm_min_columns_to_activate=1000000000000"
echo "MergeTree compact + vertical merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"
echo "MergeTree wide + vertical merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1"

View File

@ -0,0 +1,20 @@
MergeTree compact + horizontal merge
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree wide + horizontal merge
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree compact + vertical merge
100000 String
100000 UInt64
75000 String
75000 UInt64
MergeTree wide + vertical merge
100000 String
100000 UInt64
75000 String
75000 UInt64

View File

@ -7,23 +7,12 @@ CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1"
# Fix some settings to avoid timeouts because of some settings randomization
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128"
function test()
{
echo "CollapsingMergeTree"
$CH_CLIENT -q "create table test (id UInt64, sign Int8, d Dynamic) engine=CollapsingMergeTree(sign) order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, 1, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, -1, 'str_' || toString(number) from numbers(50000, 100000)"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -nm -q "system start merges test; optimize table test final"
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)"
$CH_CLIENT -q "drop table test"
echo "VersionedCollapsingMergeTree"
$CH_CLIENT -q "create table test (id UInt64, sign Int8, version UInt8, d Dynamic) engine=VersionedCollapsingMergeTree(sign, version) order by id settings $1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, 1, 1, number from numbers(100000)"
@ -44,7 +33,7 @@ echo "MergeTree wide + horizontal merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1"
echo "MergeTree compact + vertical merge"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8128, merge_max_block_size=8128;"
test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"
echo "MergeTree wide + vertical merge"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8128, merge_max_block_size=8128;"
test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"

View File

@ -0,0 +1,12 @@
Hello [1,2] 1
Hello [1,2] 2
Hello [1,2] 1
Hello [1,2] 1
Hello [1,2] 2
Hello [1,2] 2
Hello 1
Hello 2
Hello 1
Hello 1
Hello 2
Hello 2

View File

@ -0,0 +1,10 @@
CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = MergeTree() ORDER BY (s);
INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
SELECT s, arr, a FROM remote('127.0.0.2', currentDatabase(), arrays_test) ARRAY JOIN arr AS a WHERE a < 3 ORDER BY a;
SELECT s, arr, a FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) ARRAY JOIN arr AS a WHERE a < 3 ORDER BY a;
SELECT s, arr FROM remote('127.0.0.2', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr;
SELECT s, arr FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr;

View File

@ -0,0 +1,23 @@
-- Expected plan with analyzer:
SELECT id
FROM `03161_table`
WHERE f
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1
-- Expected result with analyzer:
1
-- Expected plan w/o analyzer:
SELECT id
FROM `03161_table`
WHERE f
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0
-- Expected result w/o analyzer:
1
-- Reproducer from the issue with analyzer
2
-- Reproducer from the issue w/o analyzer
2
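The expected plans above reduce `WHERE f AND (NOT(f) OR f)` to `WHERE f` because the second conjunct is a tautology:

$$
f \land (\lnot f \lor f) \;\equiv\; f \land \top \;\equiv\; f.
$$

The reproducer's predicate collapses the same way: any disjunct containing both `c2` and `NOT c2`, or both `c8` and `NOT c8`, is identically false, which leaves `(NOT c2 AND c3 AND NOT c5) OR (NOT c7 AND NOT c8)`. Of the eight inserted rows, exactly the two with `c7 = c8 = 0` satisfy it, hence the expected count of 2.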

View File

@ -0,0 +1,72 @@
DROP TABLE IF EXISTS 03161_table;
CREATE TABLE 03161_table (id UInt32, f UInt8) ENGINE = Memory;
INSERT INTO 03161_table VALUES (0, 0), (1, 1), (2, 0);
SELECT '-- Expected plan with analyzer:';
EXPLAIN SYNTAX
SELECT id
FROM 03161_table
WHERE f AND (NOT(f) OR f)
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1;
SELECT '';
SELECT '-- Expected result with analyzer:';
SELECT id
FROM 03161_table
WHERE f AND (NOT(f) OR f)
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1;
SELECT '';
SELECT '-- Expected plan w/o analyzer:';
EXPLAIN SYNTAX
SELECT id
FROM 03161_table
WHERE f AND (NOT(f) OR f)
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0;
SELECT '';
SELECT '-- Expected result w/o analyzer:';
SELECT id
FROM 03161_table
WHERE f AND (NOT(f) OR f)
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0;
DROP TABLE IF EXISTS 03161_table;
-- Checking reproducer from GitHub issue
-- https://github.com/ClickHouse/ClickHouse/issues/57400
DROP TABLE IF EXISTS 03161_reproducer;
CREATE TABLE 03161_reproducer (c0 UInt8, c1 UInt8, c2 UInt8, c3 UInt8, c4 UInt8, c5 UInt8, c6 UInt8, c7 UInt8, c8 UInt8, c9 UInt8) ENGINE = Memory;
INSERT INTO 03161_reproducer VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 1, 1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 0, 0, 1, 0, 1), (0, 0, 0, 0, 0, 0, 0, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 1, 1, 1);
SELECT '';
SELECT '-- Reproducer from the issue with analyzer';
SELECT count()
FROM 03161_reproducer
WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7))
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1;
SELECT '';
SELECT '-- Reproducer from the issue w/o analyzer';
SELECT count()
FROM 03161_reproducer
WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7))
SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0;
DROP TABLE IF EXISTS 03161_reproducer;