Merge branch 'master' into adapting-parquet-block-size

LiuNeng 2024-06-07 11:00:45 +08:00 committed by GitHub
commit 6d20d66081
103 changed files with 1335 additions and 430 deletions

contrib/orc (vendored)

@@ -1 +1 @@
-Subproject commit e24f2c2a3ca0769c96704ab20ad6f512a83ea2ad
+Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f

View File

@@ -30,6 +30,7 @@ RUN pip3 install \
    mypy==1.8.0 \
    pylint==3.1.0 \
    python-magic==0.4.24 \
+   flake8==4.0.1 \
    requests \
    thefuzz \
    types-requests \

View File

@@ -9,6 +9,8 @@ echo "Check style" | ts
./check-style -n |& tee /test_output/style_output.txt
echo "Check python formatting with black" | ts
./check-black -n |& tee /test_output/black_output.txt
+echo "Check python with flake8" | ts
+./check-flake8 |& tee /test_output/flake8_output.txt
echo "Check python type hinting with mypy" | ts
./check-mypy -n |& tee /test_output/mypy_output.txt
echo "Check typos" | ts echo "Check typos" | ts

View File

@@ -25,7 +25,8 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
echo "Get previous release tag"
-previous_release_tag=$(dpkg --info package_folder/clickhouse-client*.deb | grep "Version: " | awk '{print $2}' | cut -f1 -d'+' | get_previous_release_tag)
+# shellcheck disable=SC2016
+previous_release_tag=$(dpkg-deb --showformat='${Version}' --show package_folder/clickhouse-client*.deb | get_previous_release_tag)
echo $previous_release_tag
echo "Clone previous release repository"

View File

@@ -91,6 +91,9 @@ cd ./utils/check-style
# Check python type hinting with mypy
./check-mypy
+# Check python with flake8
+./check-flake8
# Check code with codespell
./check-typos

View File

@@ -3172,7 +3172,7 @@ Default value: `0`.
## lightweight_deletes_sync {#lightweight_deletes_sync}
The same as 'mutation_sync', but controls only execution of lightweight deletes.
Possible values:
@@ -4616,6 +4616,16 @@ Read more about [memory overcommit](memory-overcommit.md).
Default value: `1GiB`.
+## max_untracked_memory {#max_untracked_memory}
+Small allocations and deallocations are grouped in a thread-local variable and tracked or profiled only when the amount (in absolute value) becomes larger than the specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.
+Default value: `4MiB`.
+## min_untracked_memory {#min_untracked_memory}
+Lower bound for the untracked memory limit, which is applied to threads with low memory consumption. The untracked memory limit equals the thread's memory usage divided by 16, clamped between `min_untracked_memory` and `max_untracked_memory` for every thread. This guarantees that total untracked memory does not exceed 10% of the current memory footprint even with a lot of small threads. To disable the dynamic limit for untracked memory, set the value to `4MiB`.
+Default value: `4KiB`.
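For illustration: a thread currently using 1 MiB of tracked memory gets an untracked limit of clamp(1 MiB / 16, 4 KiB, 4 MiB) = 64 KiB. A minimal sketch of tuning both bounds for a session (hypothetical values, not recommendations):

```sql
SET max_untracked_memory = 1048576; -- 1 MiB upper bound per thread
SET min_untracked_memory = 4096;    -- 4 KiB lower bound for low-usage threads
```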
## Schema Inference settings
See [schema inference](../../interfaces/schema-inference.md#schema-inference-modes) documentation for more details.

View File

@@ -24,6 +24,8 @@ Alias: `lttb`.
- `x` — x coordinate. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), [Decimal](../../../sql-reference/data-types/decimal.md), [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md).
- `y` — y coordinate. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), [Decimal](../../../sql-reference/data-types/decimal.md), [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md).
+NaNs are ignored in the provided series, meaning that any NaN values will be excluded from the analysis. This ensures that the function operates only on valid numerical data.
**Parameters**
- `n` — number of points in the resulting series. [UInt64](../../../sql-reference/data-types/int-uint.md).
@@ -61,7 +63,7 @@ Result:
``` text
┌────────largestTriangleThreeBuckets(4)(x, y)───────────┐
-│ [(1,10),(3,15),(5,40),(10,70)] │
+│ [(1,10),(3,15),(9,55),(10,70)] │
└───────────────────────────────────────────────────────┘
```

View File

@@ -2423,11 +2423,7 @@ Result:
## toUnixTimestamp64Milli
-## toUnixTimestamp64Micro
-## toUnixTimestamp64Nano
-Converts a `DateTime64` to a `Int64` value with fixed sub-second precision. Input value is scaled up or down appropriately depending on it precision.
+Converts a `DateTime64` to a `Int64` value with fixed millisecond precision. The input value is scaled up or down appropriately depending on its precision.
:::note
The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
@@ -2437,24 +2433,22 @@ The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
```sql
toUnixTimestamp64Milli(value)
-toUnixTimestamp64Micro(value)
-toUnixTimestamp64Nano(value)
```
**Arguments**
-- `value` — DateTime64 value with any precision.
+- `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
**Returned value**
-- `value` converted to the `Int64` data type.
+- `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
-**Examples**
+**Example**
Query:
```sql
-WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
+WITH toDateTime64('2009-02-13 23:31:31.011', 3, 'UTC') AS dt64
SELECT toUnixTimestamp64Milli(dt64);
```
@@ -2462,14 +2456,77 @@ Result:
```response
┌─toUnixTimestamp64Milli(dt64)─┐
-│                1568650812345 │
+│                1234567891011 │
└──────────────────────────────┘
```
+## toUnixTimestamp64Micro
+Converts a `DateTime64` to a `Int64` value with fixed microsecond precision. The input value is scaled up or down appropriately depending on its precision.
+:::note
+The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
+:::
+**Syntax**
+```sql
+toUnixTimestamp64Micro(value)
+```
+**Arguments**
+- `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
+**Returned value**
+- `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
+**Example**
Query:
-``` sql
-WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
+```sql
+WITH toDateTime64('1970-01-15 06:56:07.891011', 6, 'UTC') AS dt64
+SELECT toUnixTimestamp64Micro(dt64);
+```
+Result:
+```response
+┌─toUnixTimestamp64Micro(dt64)─┐
+│                1234567891011 │
+└──────────────────────────────┘
+```
+## toUnixTimestamp64Nano
+Converts a `DateTime64` to a `Int64` value with fixed nanosecond precision. The input value is scaled up or down appropriately depending on its precision.
+:::note
+The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
+:::
+**Syntax**
+```sql
+toUnixTimestamp64Nano(value)
+```
+**Arguments**
+- `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
+**Returned value**
+- `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
+**Example**
+Query:
+```sql
+WITH toDateTime64('1970-01-01 00:20:34.567891011', 9, 'UTC') AS dt64
SELECT toUnixTimestamp64Nano(dt64);
```
@@ -2477,34 +2534,32 @@ Result:
```response
┌─toUnixTimestamp64Nano(dt64)─┐
-│         1568650812345678000 │
+│               1234567891011 │
└─────────────────────────────┘
```
## fromUnixTimestamp64Milli
-## fromUnixTimestamp64Micro
-## fromUnixTimestamp64Nano
-Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and optional timezone. Input value is scaled up or down appropriately depending on its precision. Please note that input value is treated as UTC timestamp, not timestamp at given (or implicit) timezone.
+Converts an `Int64` to a `DateTime64` value with fixed millisecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
+:::note
+Please note that input value is treated as a UTC timestamp, not timestamp at the given (or implicit) timezone.
+:::
**Syntax**
``` sql
fromUnixTimestamp64Milli(value[, timezone])
-fromUnixTimestamp64Micro(value[, timezone])
-fromUnixTimestamp64Nano(value[, timezone])
```
**Arguments**
-- `value` — `Int64` value with any precision.
-- `timezone` — `String` (optional) timezone name of the result.
+- `value` — value with any precision. [Int64](../data-types/int-uint.md).
+- `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
**Returned value**
-- `value` converted to the `DateTime64` data type.
+- `value` converted to DateTime64 with precision `3`. [DateTime64](../data-types/datetime64.md).
**Example**
@@ -2512,15 +2567,101 @@ Query:
``` sql
WITH CAST(1234567891011, 'Int64') AS i64
-SELECT fromUnixTimestamp64Milli(i64, 'UTC');
+SELECT
+    fromUnixTimestamp64Milli(i64, 'UTC') AS x,
+    toTypeName(x);
```
Result:
```response
-┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐
-│              2009-02-13 23:31:31.011 │
-└──────────────────────────────────────┘
+┌───────────────────────x─┬─toTypeName(x)────────┐
+│ 2009-02-13 23:31:31.011 │ DateTime64(3, 'UTC') │
+└─────────────────────────┴──────────────────────┘
```
+## fromUnixTimestamp64Micro
+Converts an `Int64` to a `DateTime64` value with fixed microsecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
+:::note
+Please note that input value is treated as a UTC timestamp, not timestamp at the given (or implicit) timezone.
+:::
+**Syntax**
+``` sql
+fromUnixTimestamp64Micro(value[, timezone])
+```
+**Arguments**
+- `value` — value with any precision. [Int64](../data-types/int-uint.md).
+- `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
+**Returned value**
+- `value` converted to DateTime64 with precision `6`. [DateTime64](../data-types/datetime64.md).
+**Example**
+Query:
+``` sql
+WITH CAST(1234567891011, 'Int64') AS i64
+SELECT
+    fromUnixTimestamp64Micro(i64, 'UTC') AS x,
+    toTypeName(x);
+```
+Result:
+```response
+┌──────────────────────────x─┬─toTypeName(x)────────┐
+│ 1970-01-15 06:56:07.891011 │ DateTime64(6, 'UTC') │
+└────────────────────────────┴──────────────────────┘
+```
+## fromUnixTimestamp64Nano
+Converts an `Int64` to a `DateTime64` value with fixed nanosecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
+:::note
+Please note that input value is treated as a UTC timestamp, not timestamp at the given (or implicit) timezone.
+:::
+**Syntax**
+``` sql
+fromUnixTimestamp64Nano(value[, timezone])
+```
+**Arguments**
+- `value` — value with any precision. [Int64](../data-types/int-uint.md).
+- `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
+**Returned value**
+- `value` converted to DateTime64 with precision `9`. [DateTime64](../data-types/datetime64.md).
+**Example**
+Query:
+``` sql
+WITH CAST(1234567891011, 'Int64') AS i64
+SELECT
+    fromUnixTimestamp64Nano(i64, 'UTC') AS x,
+    toTypeName(x);
+```
+Result:
+```response
+┌─────────────────────────────x─┬─toTypeName(x)────────┐
+│ 1970-01-01 00:20:34.567891011 │ DateTime64(9, 'UTC') │
+└───────────────────────────────┴──────────────────────┘
```
## formatRow ## formatRow

View File

@@ -139,7 +139,7 @@ For the query to run successfully, the following conditions must be met:
ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
```
-This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`.
+This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. The operation is atomic.
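For illustration, a minimal sketch (the table and partition names are hypothetical):

```sql
-- Atomically replace table2's data for partition 202406 with a copy of
-- table1's partition; table1 itself is left unchanged.
ALTER TABLE table2 REPLACE PARTITION 202406 FROM table1;
```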
Note that:

View File

@@ -1,5 +1,6 @@
#include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/ColumnNode.h>
+#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
@@ -64,7 +65,12 @@ ASTPtr ArrayJoinNode::toASTImpl(const ConvertToASTOptions & options) const
auto * column_node = array_join_expression->as<ColumnNode>();
if (column_node && column_node->getExpression())
-    array_join_expression_ast = column_node->getExpression()->toAST(options);
+{
+    if (const auto * function_node = column_node->getExpression()->as<FunctionNode>(); function_node && function_node->getFunctionName() == "nested")
+        array_join_expression_ast = array_join_expression->toAST(options);
+    else
+        array_join_expression_ast = column_node->getExpression()->toAST(options);
+}
else
    array_join_expression_ast = array_join_expression->toAST(options);

View File

@@ -22,6 +22,7 @@ public:
if (query_node->hasOrderBy())
{
+    QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
    QueryTreeNodes result_nodes;
    auto & query_order_by_nodes = query_node->getOrderBy().getNodes();
@@ -45,10 +46,9 @@ public:
    query_order_by_nodes = std::move(result_nodes);
}
-unique_expressions_nodes_set.clear();
if (query_node->hasLimitBy())
{
+    QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
    QueryTreeNodes result_nodes;
    auto & query_limit_by_nodes = query_node->getLimitBy().getNodes();
@@ -63,9 +63,6 @@ public:
    query_limit_by_nodes = std::move(result_nodes);
}
}
-private:
-    QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
};
}

View File

@@ -57,6 +57,7 @@ AllocationTrace CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory
{
    auto res = memory_tracker->allocImpl(will_be, throw_if_memory_exceeded);
    current_thread->untracked_memory = 0;
+   current_thread->updateUntrackedMemoryLimit(memory_tracker->get());
    return res;
}
else
@@ -84,6 +85,13 @@ void CurrentMemoryTracker::check()
    std::ignore = memory_tracker->allocImpl(0, true);
}
+Int64 CurrentMemoryTracker::get()
+{
+    if (auto * memory_tracker = getMemoryTracker())
+        return memory_tracker->get();
+    return 0;
+}
AllocationTrace CurrentMemoryTracker::alloc(Int64 size)
{
    bool throw_if_memory_exceeded = true;
@@ -103,10 +111,12 @@ AllocationTrace CurrentMemoryTracker::free(Int64 size)
if (current_thread)
{
    current_thread->untracked_memory -= size;
-   if (current_thread->untracked_memory < -current_thread->untracked_memory_limit)
+   // Note that we use `max_untracked_memory` and not `untracked_memory_limit` to create hysteresis to avoid track/untrack cycles
+   if (current_thread->untracked_memory < -current_thread->max_untracked_memory)
    {
        Int64 untracked_memory = current_thread->untracked_memory;
        current_thread->untracked_memory = 0;
+       current_thread->updateUntrackedMemoryLimit(memory_tracker->get() + untracked_memory);
        return memory_tracker->free(-untracked_memory);
    }
}

View File

@@ -12,7 +12,9 @@ struct CurrentMemoryTracker
/// This function should be called after memory deallocation.
[[nodiscard]] static AllocationTrace free(Int64 size);
static void check();
+[[nodiscard]] static Int64 get();
/// Throws MEMORY_LIMIT_EXCEEDED (if it's allowed to throw exceptions)
static void injectFault();

View File

@@ -140,6 +140,18 @@ inline bool isPrintableASCII(char c)
    return uc >= 32 && uc <= 126; /// 127 is ASCII DEL.
}
+inline bool isCSIParameterByte(char c)
+{
+    uint8_t uc = c;
+    return uc >= 0x30 && uc <= 0x3F; /// ASCII 0-9:;<=>?
+}
+inline bool isCSIIntermediateByte(char c)
+{
+    uint8_t uc = c;
+    return uc >= 0x20 && uc <= 0x2F; /// ASCII !"#$%&'()*+,-./
+}
inline bool isCSIFinalByte(char c)
{
    uint8_t uc = c;

View File

@@ -183,6 +183,12 @@ public:
    Int64 untracked_memory = 0;
    /// Each thread could new/delete memory in range of (-untracked_memory_limit, untracked_memory_limit) without access to common counters.
    Int64 untracked_memory_limit = 4 * 1024 * 1024;
+   /// To keep total untracked memory limited to `untracked_memory_ratio * RSS` we have to account threads with small and large memory footprint differently.
+   /// For this purpose we dynamically change `untracked_memory_limit` after every tracking event using a simple formula:
+   /// untracked_memory_limit = clamp(untracked_memory_ratio * cur_memory_bytes, min_untracked_memory, max_untracked_memory)
+   /// Note that these values are updated when the thread is attached to a group
+   Int64 min_untracked_memory = 4 * 1024 * 1024; // Default value is kept 4MB mostly for tests and client (should be changed to 4KB as the setting's default value)
+   Int64 max_untracked_memory = 4 * 1024 * 1024;
    /// Statistics of read and write rows/bytes
    Progress progress_in;
@@ -309,6 +315,12 @@ public:
    void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period);
+   void updateUntrackedMemoryLimit(Int64 current)
+   {
+       constexpr Int64 untracked_memory_ratio_bits = 4; // untracked_memory_ratio = 1.0 / (1 << untracked_memory_ratio_bits) = 1.0 / 16 = 6.25%
+       untracked_memory_limit = std::clamp<Int64>(current >> untracked_memory_ratio_bits, min_untracked_memory, max_untracked_memory);
+   }
private:
    void applyGlobalSettings();
    void applyQuerySettings();

View File

@@ -103,7 +103,7 @@ template <ComputeWidthMode mode>
size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
{
    UTF8Decoder decoder;
-   int isEscapeSequence = false;
+   bool is_escape_sequence = false;
    size_t width = 0;
    size_t rollback = 0;
    for (size_t i = 0; i < size; ++i)
@@ -116,6 +116,9 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
    while (i + 15 < size)
    {
+       if (is_escape_sequence)
+           break;
        __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&data[i]));
        const uint16_t non_regular_width_mask = _mm_movemask_epi8(
@@ -132,25 +135,28 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
        }
        else
        {
-           if (isEscapeSequence)
-           {
-               break;
-           }
-           else
-           {
-               i += 16;
-               width += 16;
-           }
+           i += 16;
+           width += 16;
        }
    }
#endif
    while (i < size && isPrintableASCII(data[i]))
    {
-       if (!isEscapeSequence)
+       bool ignore_width = is_escape_sequence && (isCSIParameterByte(data[i]) || isCSIIntermediateByte(data[i]));
+       if (ignore_width || (data[i] == '[' && is_escape_sequence))
+       {
+           /// don't count the width
+       }
+       else if (is_escape_sequence && isCSIFinalByte(data[i]))
+       {
+           is_escape_sequence = false;
+       }
+       else
+       {
            ++width;
-       else if (isCSIFinalByte(data[i]) && data[i - 1] != '\x1b')
-           isEscapeSequence = false; /// end of CSI escape sequence reached
+       }
        ++i;
    }
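    /// Example of the branch above: for the ANSI color prefix "\x1b[31m", ESC sets
    /// is_escape_sequence, then '[' and the parameter bytes '3' and '1' add no width,
    /// and the final byte 'm' ends the sequence, so the prefix contributes zero width.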
@@ -178,7 +184,7 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
    // special treatment for '\t' and for ESC
    size_t next_width = width;
    if (decoder.codepoint == '\x1b')
-       isEscapeSequence = true;
+       is_escape_sequence = true;
    else if (decoder.codepoint == '\t')
        next_width += 8 - (prefix + width) % 8;
    else

View File

@@ -160,8 +160,8 @@ class IColumn;
    M(Bool, enable_multiple_prewhere_read_steps, true, "Move more conditions from WHERE to PREWHERE and do reads from disk and filtering in multiple steps if there are multiple conditions combined with AND", 0) \
    M(Bool, move_primary_key_columns_to_end_of_prewhere, true, "Move PREWHERE conditions containing primary key columns to the end of AND chain. It is likely that these conditions are taken into account during primary key analysis and thus will not contribute a lot to PREWHERE filtering.", 0) \
    \
-   M(Bool, allow_statistics_optimize, false, "Allows using statistics to optimize queries", 0) \
-   M(Bool, allow_experimental_statistics, false, "Allows using statistics", 0) \
+   M(Bool, allow_statistics_optimize, false, "Allows using statistics to optimize queries", 0) ALIAS(allow_statistic_optimize) \
+   M(Bool, allow_experimental_statistics, false, "Allows using statistics", 0) ALIAS(allow_experimental_statistic) \
    \
    M(UInt64, alter_sync, 1, "Wait for actions to manipulate the partitions. 0 - do not wait, 1 - wait for execution only of itself, 2 - wait for everyone.", 0) ALIAS(replication_alter_partitions_sync) \
    M(Int64, replication_wait_for_inactive_replica_timeout, 120, "Wait for inactive replica to execute ALTER/OPTIMIZE. Time in seconds, 0 - do not wait, negative - wait for unlimited time.", 0) \
@@ -491,6 +491,7 @@ class IColumn;
    M(UInt64, max_memory_usage_for_user, 0, "Maximum memory usage for processing all concurrently running queries for the user. Zero means unlimited.", 0) \
    M(UInt64, memory_overcommit_ratio_denominator_for_user, 1_GiB, "It represents soft memory limit on the global level. This value is used to compute query overcommit ratio.", 0) \
    M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \
+   M(UInt64, min_untracked_memory, (4 * 1024), "Lower bound for untracked memory limit which is applied to threads with low memory consumption. Untracked memory limit equals thread_memory_usage/16 and clamped between min_untracked_memory and max_untracked_memory for every thread.", 0) \
    M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \
    M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation (can be changed with `memory_profiler_sample_min_allocation_size` and `memory_profiler_sample_max_allocation_size`). Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
    M(UInt64, memory_profiler_sample_min_allocation_size, 0, "Collect random allocations of size greater or equal than specified value with probability equal to `memory_profiler_sample_probability`. 0 means disabled. You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0) \

View File

@@ -83,7 +83,7 @@ namespace SettingsChangesHistory
/// For newly added setting choose the most appropriate previous_value (for example, if new setting
/// controls new feature and it's 'true' by default, use 'false' as previous_value).
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
-static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
+static const std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
    {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
              {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},
@@ -98,7 +98,12 @@ static const std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
              {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"},
              {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."},
              {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."},
+             {"min_untracked_memory", 4_MiB, 4_KiB, "A new setting to enable more accurate memory tracking."},
              {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"},
+             {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."},
+             {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."},
+             {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."},
+             {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}
    }},
    {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"},
              {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."},

View File

@@ -543,6 +543,7 @@ template <typename DataType> constexpr bool IsDataTypeNumber = false;
template <typename DataType> constexpr bool IsDataTypeDateOrDateTime = false;
template <typename DataType> constexpr bool IsDataTypeDate = false;
template <typename DataType> constexpr bool IsDataTypeEnum = false;
+template <typename DataType> constexpr bool IsDataTypeStringOrFixedString = false;
template <typename DataType> constexpr bool IsDataTypeDecimalOrNumber = IsDataTypeDecimal<DataType> || IsDataTypeNumber<DataType>;
@@ -556,6 +557,8 @@ class DataTypeDate;
class DataTypeDate32;
class DataTypeDateTime;
class DataTypeDateTime64;
+class DataTypeString;
+class DataTypeFixedString;
template <is_decimal T> constexpr bool IsDataTypeDecimal<DataTypeDecimal<T>> = true;
@@ -572,6 +575,9 @@ template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDate32> = true;
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime> = true;
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime64> = true;
+template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeString> = true;
+template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeFixedString> = true;
template <typename T>
class DataTypeEnum;

View File

@@ -1,20 +1,21 @@
+#include <filesystem>
#include <Databases/DatabaseAtomic.h>
+#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseOnDisk.h>
#include <Databases/DatabaseReplicated.h>
-#include <Databases/DatabaseFactory.h>
+#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
-#include <IO/ReadBufferFromFile.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/DDLTask.h>
+#include <Interpreters/DatabaseCatalog.h>
+#include <Interpreters/ExternalDictionariesLoader.h>
#include <Parsers/formatAST.h>
+#include <Storages/StorageMaterializedView.h>
+#include "Common/logger_useful.h"
#include <Common/PoolId.h>
#include <Common/atomicRename.h>
#include <Common/filesystemHelpers.h>
-#include <Storages/StorageMaterializedView.h>
-#include <Interpreters/Context.h>
-#include <Interpreters/DatabaseCatalog.h>
-#include <Interpreters/ExternalDictionariesLoader.h>
-#include <filesystem>
-#include <Interpreters/DDLTask.h>
namespace fs = std::filesystem;
@@ -393,6 +394,7 @@ DatabaseAtomic::DetachedTables DatabaseAtomic::cleanupDetachedTables()
{
    DetachedTables not_in_use;
    auto it = detached_tables.begin();
+   LOG_DEBUG(log, "There are {} detached tables. Start searching non used tables.", detached_tables.size());
    while (it != detached_tables.end())
    {
        if (it->second.unique())
@@ -403,6 +405,7 @@ DatabaseAtomic::DetachedTables DatabaseAtomic::cleanupDetachedTables()
        else
            ++it;
    }
+   LOG_DEBUG(log, "Found {} non used tables in detached tables.", not_in_use.size());
    /// It should be destroyed in caller with released database mutex
    return not_in_use;
}

View File

@@ -709,7 +709,7 @@ bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl * local_time_zone, bool precise_float_parsing)
    else
        return tryReadFloatTextFast(x, rb);
}
-else /*if constexpr (is_integer_v<typename DataType::FieldType>)*/
+else /*if constexpr (is_integral_v<typename DataType::FieldType>)*/
    return tryReadIntText(x, rb);
}
@@ -814,6 +814,16 @@ enum class ConvertFromStringParsingMode : uint8_t
    BestEffortUS
};
+struct AccurateConvertStrategyAdditions
+{
+    UInt32 scale { 0 };
+};
+struct AccurateOrNullConvertStrategyAdditions
+{
+    UInt32 scale { 0 };
+};
template <typename FromDataType, typename ToDataType, typename Name,
    ConvertFromStringExceptionMode exception_mode, ConvertFromStringParsingMode parsing_mode>
struct ConvertThroughParsing
@@ -1020,7 +1030,13 @@ struct ConvertThroughParsing
                break;
            }
        }
-       parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing);
+       if constexpr (std::is_same_v<Additions, AccurateConvertStrategyAdditions>)
+       {
+           if (!tryParseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing))
+               throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse string to type {}", TypeName<typename ToDataType::FieldType>);
+       }
+       else
+           parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing);
    } while (false);
}
}
@@ -1120,16 +1136,6 @@ struct ConvertThroughParsing
/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type.
struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; };
-struct AccurateConvertStrategyAdditions
-{
-    UInt32 scale { 0 };
-};
-struct AccurateOrNullConvertStrategyAdditions
-{
-    UInt32 scale { 0 };
-};
enum class BehaviourOnErrorFromString : uint8_t
{
    ConvertDefaultBehaviorTag,
@@ -3174,8 +3180,11 @@ private:
{
    TypeIndex from_type_index = from_type->getTypeId();
    WhichDataType which(from_type_index);
+   TypeIndex to_type_index = to_type->getTypeId();
+   WhichDataType to(to_type_index);
    bool can_apply_accurate_cast = (cast_type == CastType::accurate || cast_type == CastType::accurateOrNull)
        && (which.isInt() || which.isUInt() || which.isFloat());
+   can_apply_accurate_cast |= cast_type == CastType::accurate && which.isStringOrFixedString() && to.isNativeInteger();
    FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior;
    if (context)
@@ -3260,6 +3269,20 @@ private:
            return true;
        }
    }
+   else if constexpr (IsDataTypeStringOrFixedString<LeftDataType>)
+   {
+       if constexpr (IsDataTypeNumber<RightDataType>)
+       {
+           chassert(wrapper_cast_type == CastType::accurate);
+           result_column = ConvertImpl<LeftDataType, RightDataType, FunctionCastName>::execute(
+               arguments,
+               result_type,
+               input_rows_count,
+               BehaviourOnErrorFromString::ConvertDefaultBehaviorTag,
+               AccurateConvertStrategyAdditions());
+       }
+       return true;
+   }
    return false;
});

View File

@@ -61,7 +61,7 @@ public:
    return std::make_shared<DataTypeTuple>(tuple_arg_types);
}
-ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override
+ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
    const size_t num_arguments = arguments.size();
    Columns columns;
@@ -92,6 +92,9 @@ public:
        columns.push_back(inner_col);
    }
+   if (columns.empty())
+       return ColumnTuple::create(input_rows_count);
    return ColumnTuple::create(columns);
}
};

View File

@@ -240,4 +240,34 @@ bool SplitTokenExtractor::nextInStringLike(const char * data, size_t length, size_t * pos, String & token) const
    return !bad_token && !token.empty();
}
+void SplitTokenExtractor::substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const
+{
+    size_t cur = 0;
+    size_t token_start = 0;
+    size_t token_len = 0;
+    while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
+        // In order to avoid filter updates with incomplete tokens,
+        // first token is ignored, unless substring is prefix and
+        // last token is ignored, unless substring is suffix
+        if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
+            bloom_filter.add(data + token_start, token_len);
+}
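// Example of the skipping rule above: for the needle "quick brown fox" used as
// '%needle%' (neither prefix nor suffix), only "brown" is added to the filter;
// "quick" and "fox" could be fragments of longer tokens in the indexed string.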
+void SplitTokenExtractor::substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const
+{
+    gin_filter.setQueryString(data, length);
+    size_t cur = 0;
+    size_t token_start = 0;
+    size_t token_len = 0;
+    while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
+        // In order to avoid filter updates with incomplete tokens,
+        // first token is ignored, unless substring is prefix and
+        // last token is ignored, unless substring is suffix
+        if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
+            gin_filter.addTerm(data + token_start, token_len);
+}
}

View File

@@ -28,8 +28,22 @@ struct ITokenExtractor
    /// It skips unescaped `%` and `_` and supports escaping symbols, but it is less lightweight.
    virtual bool nextInStringLike(const char * data, size_t length, size_t * pos, String & out) const = 0;
+   /// Updates Bloom filter from exact-match string filter value
    virtual void stringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
+   /// Updates Bloom filter from substring-match string filter value.
+   /// An `ITokenExtractor` implementation may decide to skip certain
+   /// tokens depending on whether the substring is a prefix or a suffix.
+   virtual void substringToBloomFilter(
+       const char * data,
+       size_t length,
+       BloomFilter & bloom_filter,
+       bool is_prefix [[maybe_unused]],
+       bool is_suffix [[maybe_unused]]) const
+   {
+       stringToBloomFilter(data, length, bloom_filter);
+   }
    virtual void stringPaddedToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const
    {
        stringToBloomFilter(data, length, bloom_filter);
@@ -37,8 +51,22 @@ struct ITokenExtractor
    virtual void stringLikeToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
+   /// Updates GIN filter from exact-match string filter value
    virtual void stringToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const = 0;
+   /// Updates GIN filter from substring-match string filter value.
+   /// An `ITokenExtractor` implementation may decide to skip certain
+   /// tokens depending on whether the substring is a prefix or a suffix.
+   virtual void substringToGinFilter(
+       const char * data,
+       size_t length,
+       GinFilter & gin_filter,
+       bool is_prefix [[maybe_unused]],
+       bool is_suffix [[maybe_unused]]) const
+   {
+       stringToGinFilter(data, length, gin_filter);
+   }
    virtual void stringPaddedToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const
    {
        stringToGinFilter(data, length, gin_filter);
@@ -148,6 +176,11 @@ struct SplitTokenExtractor final : public ITokenExtractorHelper<SplitTokenExtractor>
    bool nextInStringLike(const char * data, size_t length, size_t * __restrict pos, String & token) const override;
+   void substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const override;
+   void substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const override;
};
}
} }

View File

@@ -11,6 +11,7 @@
#include <Parsers/formatAST.h>
#include <Parsers/queryNormalization.h>
#include <Common/CurrentThread.h>
+#include <Common/CurrentMemoryTracker.h>
#include <Common/Exception.h>
#include <Common/ProfileEvents.h>
#include <Common/QueryProfiler.h>
@@ -210,9 +211,12 @@ void ThreadStatus::applyQuerySettings()
    query_id_from_query_context = query_context_ptr->getCurrentQueryId();
    initQueryProfiler();
-   untracked_memory_limit = settings.max_untracked_memory;
-   if (settings.memory_profiler_step && settings.memory_profiler_step < static_cast<UInt64>(untracked_memory_limit))
-       untracked_memory_limit = settings.memory_profiler_step;
+   max_untracked_memory = settings.max_untracked_memory;
+   if (settings.memory_profiler_step && settings.memory_profiler_step < static_cast<UInt64>(max_untracked_memory))
+       max_untracked_memory = settings.memory_profiler_step;
+   min_untracked_memory = std::min<Int64>(settings.min_untracked_memory, max_untracked_memory);
+   updateUntrackedMemoryLimit(CurrentMemoryTracker::get());
#if defined(OS_LINUX)
    /// Set "nice" value if required.

View File

@@ -59,9 +59,6 @@ Token quotedStringWithUnicodeQuotes(const char *& pos, const char * const token_begin, const char * const end,
    pos = find_first_symbols<'\xE2'>(pos, end);
    if (pos + 2 >= end)
        return Token(error_token, token_begin, end);
-   /// Empty identifiers are not allowed, while empty strings are.
-   if (success_token == TokenType::QuotedIdentifier && pos + 3 >= end)
-       return Token(error_token, token_begin, end);
    if (pos[0] == '\xE2' && pos[1] == '\x80' && pos[2] == expected_end_byte)
    {

View File

@@ -269,7 +269,12 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTypePtr type_hint)
    case orc::SHORT:
    case orc::INT:
    case orc::LONG: {
-       /// May throw exception
+       /// May throw exception.
+       ///
+       /// In particular, it'll throw if we request the column as unsigned, like this:
+       ///   SELECT * FROM file('t.orc', ORC, 'x UInt8') WHERE x > 10
+       /// We have to reject this, otherwise it would miss values > 127 (because
+       /// they're treated as negative by ORC).
        auto val = field.get<Int64>();
        return orc::Literal(val);
    }

View File

@@ -315,18 +315,20 @@ void ORCBlockOutputFormat::writeColumn(
    if (null_bytemap)
        orc_column.hasNulls = true;
+   /// ORC doesn't have unsigned types, so cast everything to signed and sign-extend to Int64 to
+   /// make the ORC library calculate min and max correctly.
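    /// (Illustration of the sign-extension: a UInt8 value of 200 is written as
    /// Int64(Int8(200)) == -56, the same value a signed reader sees for that byte.)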
    switch (type->getTypeId())
    {
        case TypeIndex::Enum8: [[fallthrough]];
        case TypeIndex::Int8:
        {
            /// Note: Explicit cast to avoid clang-tidy error: 'signed char' to 'long' conversion; consider casting to 'unsigned char' first.
-           writeNumbers<Int8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const Int8 & value){ return static_cast<int64_t>(value); });
+           writeNumbers<Int8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const Int8 & value){ return Int64(Int8(value)); });
            break;
        }
        case TypeIndex::UInt8:
        {
-           writeNumbers<UInt8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt8 & value){ return value; });
+           writeNumbers<UInt8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt8 & value){ return Int64(Int8(value)); });
            break;
        }
        case TypeIndex::Enum16: [[fallthrough]];
@@ -338,7 +340,7 @@ void ORCBlockOutputFormat::writeColumn(
        case TypeIndex::Date: [[fallthrough]];
        case TypeIndex::UInt16:
        {
-           writeNumbers<UInt16, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt16 & value){ return value; });
+           writeNumbers<UInt16, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt16 & value){ return Int64(Int16(value)); });
            break;
        }
        case TypeIndex::Date32: [[fallthrough]];
@@ -349,12 +351,12 @@ void ORCBlockOutputFormat::writeColumn(
        }
        case TypeIndex::UInt32:
        {
-           writeNumbers<UInt32, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt32 & value){ return value; });
+           writeNumbers<UInt32, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt32 & value){ return Int64(Int32(value)); });
            break;
        }
        case TypeIndex::IPv4:
        {
-           writeNumbers<IPv4, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const IPv4 & value){ return value.toUnderType(); });
+           writeNumbers<IPv4, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const IPv4 & value){ return Int64(Int32(value.toUnderType())); });
            break;
        }
        case TypeIndex::Int64:

View File

@@ -566,7 +566,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
    out.function = RPNElement::FUNCTION_EQUALS;
    out.bloom_filter = std::make_unique<BloomFilter>(params);
    const auto & value = const_value.get<String>();
-   token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
+   token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, true, false);
    return true;
}
else if (function_name == "endsWith")
@@ -575,7 +575,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
    out.function = RPNElement::FUNCTION_EQUALS;
    out.bloom_filter = std::make_unique<BloomFilter>(params);
    const auto & value = const_value.get<String>();
-   token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
+   token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, false, true);
    return true;
}
else if (function_name == "multiSearchAny"
@@ -596,7 +596,15 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
    bloom_filters.back().emplace_back(params);
    const auto & value = element.get<String>();
-   token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
+   if (function_name == "multiSearchAny")
+   {
+       token_extractor->substringToBloomFilter(value.data(), value.size(), bloom_filters.back().back(), false, false);
+   }
+   else
+   {
+       token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
+   }
}
out.set_bloom_filters = std::move(bloom_filters);
return true;
@@ -625,12 +633,12 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
    for (const auto & alternative : alternatives)
    {
        bloom_filters.back().emplace_back(params);
-       token_extractor->stringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back());
+       token_extractor->substringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back(), false, false);
    }
    out.set_bloom_filters = std::move(bloom_filters);
}
else
-   token_extractor->stringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter);
+   token_extractor->substringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter, false, false);
return true;
}

View File

@@ -595,7 +595,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
    out.function = RPNElement::FUNCTION_EQUALS;
    out.gin_filter = std::make_unique<GinFilter>(params);
    const auto & value = const_value.get<String>();
-   token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
+   token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, true, false);
    return true;
}
else if (function_name == "endsWith")
@@ -604,7 +604,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
    out.function = RPNElement::FUNCTION_EQUALS;
    out.gin_filter = std::make_unique<GinFilter>(params);
    const auto & value = const_value.get<String>();
-   token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
+   token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, false, true);
    return true;
}
else if (function_name == "multiSearchAny")
@@ -622,7 +622,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
    gin_filters.back().emplace_back(params);
    const auto & value = element.get<String>();
-   token_extractor->stringToGinFilter(value.data(), value.size(), gin_filters.back().back());
+   token_extractor->substringToGinFilter(value.data(), value.size(), gin_filters.back().back(), false, false);
}
out.set_gin_filters = std::move(gin_filters);
return true;
@@ -650,14 +650,14 @@ bool MergeTreeConditionFullText::traverseASTEquals(
    for (const auto & alternative : alternatives)
    {
        gin_filters.back().emplace_back(params);
-       token_extractor->stringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back());
+       token_extractor->substringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back(), false, false);
    }
    out.set_gin_filters = std::move(gin_filters);
}
else
{
    out.gin_filter = std::make_unique<GinFilter>(params);
-   token_extractor->stringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter);
+   token_extractor->substringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter, false, false);
}
return true;

View File

@ -212,6 +212,20 @@ FROM merge('system', '^asynchronous_metric_log')
WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'MaxPartCountForPartition' WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'MaxPartCountForPartition'
GROUP BY t GROUP BY t
ORDER BY t WITH FILL STEP {rounding:UInt32} ORDER BY t WITH FILL STEP {rounding:UInt32}
)EOQ") }
},
{
{ "dashboard", "Overview" },
{ "title", "Concurrent network connections" },
{ "query", trim(R"EOQ(
SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t,
sum(CurrentMetric_TCPConnection) AS TCP_Connections,
sum(CurrentMetric_MySQLConnection) AS MySQL_Connections,
sum(CurrentMetric_HTTPConnection) AS HTTP_Connections
FROM merge('system', '^metric_log')
WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}
GROUP BY t
ORDER BY t WITH FILL STEP {rounding:UInt32}
)EOQ") } )EOQ") }
}, },
/// Default dashboard for ClickHouse Cloud /// Default dashboard for ClickHouse Cloud
@ -349,6 +363,11 @@ ORDER BY t WITH FILL STEP {rounding:UInt32}
{ "dashboard", "Cloud overview" }, { "dashboard", "Cloud overview" },
{ "title", "Network send bytes/sec" }, { "title", "Network send bytes/sec" },
{ "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value)\nFROM (\n SELECT event_time, sum(value) AS value\n FROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n AND metric LIKE 'NetworkSendBytes%'\n GROUP BY event_time)\nGROUP BY t\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value)\nFROM (\n SELECT event_time, sum(value) AS value\n FROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n AND metric LIKE 'NetworkSendBytes%'\n GROUP BY event_time)\nGROUP BY t\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" }
},
{
{ "dashboard", "Cloud overview" },
{ "title", "Concurrent network connections" },
{ "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, max(TCP_Connections), max(MySQL_Connections), max(HTTP_Connections) FROM (SELECT event_time, sum(CurrentMetric_TCPConnection) AS TCP_Connections, sum(CurrentMetric_MySQLConnection) AS MySQL_Connections, sum(CurrentMetric_HTTPConnection) AS HTTP_Connections FROM clusterAllReplicas(default, merge('system', '^metric_log')) WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY event_time) GROUP BY t ORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" }
} }
}; };
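The new "Concurrent network connections" panel is a parameterized query. As a hedged usage sketch, it can be run directly over the HTTP interface by binding {rounding} and {seconds} with param_* arguments (assumes a local server on the default HTTP port):

    import requests

    QUERY = """
    SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t,
           sum(CurrentMetric_TCPConnection) AS TCP_Connections,
           sum(CurrentMetric_MySQLConnection) AS MySQL_Connections,
           sum(CurrentMetric_HTTPConnection) AS HTTP_Connections
    FROM merge('system', '^metric_log')
    WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}
    GROUP BY t
    ORDER BY t WITH FILL STEP {rounding:UInt32}
    """

    resp = requests.get(
        "http://localhost:8123/",
        params={
            "query": QUERY,
            "param_rounding": 60,   # bucket size in seconds
            "param_seconds": 3600,  # look one hour back
        },
    )
    print(resp.text)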

View File

@ -10,9 +10,21 @@ from typing import Any, Callable, List, Optional, Union
import requests import requests
import get_robot_token as grt # we need an updated ROBOT_TOKEN
from ci_config import CI_CONFIG from ci_config import CI_CONFIG
try:
# A workaround for scripts using this download module without the required deps
# A workaround for scripts using this download module without the required deps
import get_robot_token as grt # we need an updated ROBOT_TOKEN
except ImportError:
class grt: # type: ignore
ROBOT_TOKEN = None
@staticmethod
def get_best_robot_token() -> str:
return ""
DOWNLOAD_RETRIES_COUNT = 5 DOWNLOAD_RETRIES_COUNT = 5
@ -63,15 +75,10 @@ def get_gh_api(
""" """
def set_auth_header(): def set_auth_header():
if "headers" in kwargs: headers = kwargs.get("headers", {})
if "Authorization" not in kwargs["headers"]: if "Authorization" not in headers:
kwargs["headers"][ headers["Authorization"] = f"Bearer {grt.get_best_robot_token()}"
"Authorization" kwargs["headers"] = headers
] = f"Bearer {grt.get_best_robot_token()}"
else:
kwargs["headers"] = {
"Authorization": f"Bearer {grt.get_best_robot_token()}"
}
if grt.ROBOT_TOKEN is not None: if grt.ROBOT_TOKEN is not None:
set_auth_header() set_auth_header()
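Taken together, the two hunks above give this module a guarded import plus a flatter header helper. A self-contained sketch of the pattern, with names mirroring the diff (set_auth_header takes kwargs explicitly here; in the module it is a closure over get_gh_api's kwargs):

    try:
        import get_robot_token as grt  # may be absent outside CI
    except ImportError:
        class grt:  # type: ignore
            ROBOT_TOKEN = None

            @staticmethod
            def get_best_robot_token() -> str:
                return ""

    def set_auth_header(kwargs: dict) -> None:
        # Fetch the existing headers (or an empty dict), add the token only
        # if the caller did not supply one, and write the dict back.
        headers = kwargs.get("headers", {})
        if "Authorization" not in headers:
            headers["Authorization"] = f"Bearer {grt.get_best_robot_token()}"
        kwargs["headers"] = headers

    kwargs = {}
    set_auth_header(kwargs)
    print(kwargs)  # {'headers': {'Authorization': 'Bearer ...'}}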

View File

@ -442,7 +442,11 @@ def _configure_jobs(
# filter jobs in accordance with ci settings # filter jobs in accordance with ci settings
job_configs = ci_settings.apply( job_configs = ci_settings.apply(
job_configs, pr_info.is_release, is_pr=pr_info.is_pr, labels=pr_info.labels job_configs,
pr_info.is_release,
is_pr=pr_info.is_pr,
is_mq=pr_info.is_merge_queue,
labels=pr_info.labels,
) )
# check jobs in ci cache # check jobs in ci cache

View File

@ -134,6 +134,7 @@ class CiSettings:
job_config: JobConfig, job_config: JobConfig,
is_release: bool, is_release: bool,
is_pr: bool, is_pr: bool,
is_mq: bool,
labels: Iterable[str], labels: Iterable[str],
) -> bool: # type: ignore #too-many-return-statements ) -> bool: # type: ignore #too-many-return-statements
if self.do_not_test: if self.do_not_test:
@ -189,7 +190,7 @@ class CiSettings:
if job_config.release_only and not is_release: if job_config.release_only and not is_release:
return False return False
elif job_config.pr_only and not is_pr: elif job_config.pr_only and not is_pr and not is_mq:
return False return False
return not to_deny return not to_deny
@ -199,6 +200,7 @@ class CiSettings:
job_configs: Dict[str, JobConfig], job_configs: Dict[str, JobConfig],
is_release: bool, is_release: bool,
is_pr: bool, is_pr: bool,
is_mq: bool,
labels: Iterable[str], labels: Iterable[str],
) -> Dict[str, JobConfig]: ) -> Dict[str, JobConfig]:
""" """
@ -207,16 +209,24 @@ class CiSettings:
res = {} res = {}
for job, job_config in job_configs.items(): for job, job_config in job_configs.items():
if self._check_if_selected( if self._check_if_selected(
job, job_config, is_release=is_release, is_pr=is_pr, labels=labels job,
job_config,
is_release=is_release,
is_pr=is_pr,
is_mq=is_mq,
labels=labels,
): ):
res[job] = job_config res[job] = job_config
add_parents = []
for job in list(res): for job in list(res):
parent_jobs = CI_CONFIG.get_job_parents(job) parent_jobs = CI_CONFIG.get_job_parents(job)
for parent_job in parent_jobs: for parent_job in parent_jobs:
if parent_job not in res: if parent_job not in res:
add_parents.append(parent_job)
print(f"Job [{job}] requires [{parent_job}] - add") print(f"Job [{job}] requires [{parent_job}] - add")
res[parent_job] = job_configs[parent_job] for job in add_parents:
res[job] = job_configs[job]
for job, job_config in res.items(): for job, job_config in res.items():
batches = [] batches = []
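Besides threading is_mq through, the apply() hunk separates collecting missing parent jobs from inserting them, so the result dict is never grown while it is being walked. A standalone sketch of that two-pass shape (parents_of stands in for CI_CONFIG.get_job_parents; job names are illustrative):

    def with_parents(selected: dict, all_jobs: dict, parents_of) -> dict:
        # Pass 1: collect missing parents without touching `selected`.
        to_add = []
        for job in list(selected):
            for parent in parents_of(job):
                if parent not in selected and parent not in to_add:
                    to_add.append(parent)
        # Pass 2: insert them once the walk is over.
        for job in to_add:
            selected[job] = all_jobs[job]
        return selected

    jobs = {"Stateless tests (asan)": {}, "package_asan": {}}
    deps = {"Stateless tests (asan)": ["package_asan"]}
    print(with_parents({"Stateless tests (asan)": {}}, jobs, lambda j: deps.get(j, [])))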

View File

@ -1,79 +1,38 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
import logging import logging
import os from pathlib import Path
import requests from build_download_helper import DownloadException, download_build_with_progress
from requests.adapters import HTTPAdapter # type: ignore from get_previous_release_tag import (
from urllib3.util.retry import Retry # type: ignore ReleaseInfo,
get_previous_release,
from get_previous_release_tag import ReleaseInfo, get_previous_release get_release_by_tag,
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags"
DOWNLOAD_PREFIX = (
"https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/"
) )
CLICKHOUSE_COMMON_STATIC_PACKAGE_NAME = "clickhouse-common-static_{version}_amd64.deb"
CLICKHOUSE_COMMON_STATIC_DBG_PACKAGE_NAME = (
"clickhouse-common-static-dbg_{version}_amd64.deb"
)
CLICKHOUSE_CLIENT_PACKAGE_NAME = "clickhouse-client_{version}_amd64.deb"
CLICKHOUSE_LIBRARY_BRIDGE_PACKAGE_NAME = "clickhouse-library-bridge_{version}_amd64.deb"
CLICKHOUSE_ODBC_BRIDGE_PACKAGE_NAME = "clickhouse-odbc-bridge_{version}_amd64.deb"
CLICKHOUSE_SERVER_PACKAGE_NAME = "clickhouse-server_{version}_amd64.deb"
PACKAGES_DIR = "previous_release_package_folder/" PACKAGES_DIR = Path("previous_release_package_folder")
VERSION_PATTERN = r"((?:\d+\.)?(?:\d+\.)?(?:\d+\.)?\d+-[a-zA-Z]*)"
def download_package(url, out_path, retries=10, backoff_factor=0.3): def download_packages(release: ReleaseInfo, dest_path: Path = PACKAGES_DIR) -> None:
session = requests.Session() dest_path.mkdir(parents=True, exist_ok=True)
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=[500, 502, 503, 504],
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
response = session.get(url)
response.raise_for_status()
print(f"Download {url} to {out_path}")
with open(out_path, "wb") as fd:
fd.write(response.content)
def download_packages(release, dest_path=PACKAGES_DIR):
if not os.path.exists(dest_path):
os.makedirs(dest_path)
logging.info("Will download %s", release) logging.info("Will download %s", release)
def get_dest_path(pkg_name): for pkg, url in release.assets.items():
return os.path.join(dest_path, pkg_name) if not pkg.endswith("_amd64.deb") or "-dbg_" in pkg:
continue
for pkg in ( pkg_name = dest_path / pkg
CLICKHOUSE_COMMON_STATIC_PACKAGE_NAME, download_build_with_progress(url, pkg_name)
CLICKHOUSE_COMMON_STATIC_DBG_PACKAGE_NAME,
CLICKHOUSE_CLIENT_PACKAGE_NAME,
CLICKHOUSE_LIBRARY_BRIDGE_PACKAGE_NAME,
CLICKHOUSE_ODBC_BRIDGE_PACKAGE_NAME,
CLICKHOUSE_SERVER_PACKAGE_NAME,
):
url = (DOWNLOAD_PREFIX + pkg).format(version=release.version, type=release.type)
pkg_name = get_dest_path(pkg.format(version=release.version))
download_package(url, pkg_name)
def download_last_release(dest_path): def download_last_release(dest_path: Path) -> None:
current_release = get_previous_release(None) current_release = get_previous_release(None)
if current_release is None:
raise DownloadException("The current release is not found")
download_packages(current_release, dest_path=dest_path) download_packages(current_release, dest_path=dest_path)
if __name__ == "__main__": if __name__ == "__main__":
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
release = ReleaseInfo(input()) release = get_release_by_tag(input())
download_packages(release) download_packages(release)
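The rewritten downloader no longer builds package URLs from name templates; it filters the release's uploaded assets by name. A sketch of the new selection rule, with an illustrative (made-up) asset map:

    def packages_to_fetch(assets: dict) -> dict:
        # Keep every amd64 .deb asset except the debug-symbol package.
        return {
            name: url
            for name, url in assets.items()
            if name.endswith("_amd64.deb") and "-dbg_" not in name
        }

    assets = {
        "clickhouse-client_24.1.1_amd64.deb": "https://example.com/a",
        "clickhouse-common-static-dbg_24.1.1_amd64.deb": "https://example.com/b",
        "clickhouse-server_24.1.1_arm64.deb": "https://example.com/c",
    }
    print(sorted(packages_to_fetch(assets)))  # only the client package survives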

View File

@ -2,47 +2,37 @@
import logging import logging
import re import re
from typing import List, Optional, Tuple from typing import Dict, List, Optional, Tuple
import requests from build_download_helper import get_gh_api
from git_helper import TAG_REGEXP
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags" from version_helper import (
CLICKHOUSE_PACKAGE_URL = ( ClickHouseVersion,
"https://github.com/ClickHouse/ClickHouse/releases/download/" get_version_from_string,
"v{version}-{type}/clickhouse-common-static_{version}_amd64.deb" get_version_from_tag,
) )
VERSION_PATTERN = r"(v(?:\d+\.)?(?:\d+\.)?(?:\d+\.)?\d+-[a-zA-Z]*)"
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/releases"
PACKAGE_REGEXP = r"\Aclickhouse-common-static_.+[.]deb"
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class Version:
def __init__(self, version: str):
self.version = version
def __lt__(self, other: "Version") -> bool:
return list(map(int, self.version.split("."))) < list(
map(int, other.version.split("."))
)
def __str__(self):
return self.version
class ReleaseInfo: class ReleaseInfo:
def __init__(self, release_tag: str): def __init__(self, release_tag: str, assets: Dict[str, str]):
self.version = Version(release_tag[1:].split("-")[0]) self.version = get_version_from_tag(release_tag)
self.type = release_tag[1:].split("-")[1] self.type = self.version.description
self.assets = assets
def __str__(self): def __str__(self):
return f"v{self.version}-{self.type}" return self.version.describe
def __repr__(self): def __repr__(self):
return f"ReleaseInfo: {self.version}-{self.type}" return f"ReleaseInfo: {self.version.describe}"
def find_previous_release( def find_previous_release(
server_version: Optional[Version], releases: List[ReleaseInfo] server_version: Optional[ClickHouseVersion], releases: List[ReleaseInfo]
) -> Tuple[bool, Optional[ReleaseInfo]]: ) -> Tuple[bool, Optional[ReleaseInfo]]:
releases.sort(key=lambda x: x.version, reverse=True) releases.sort(key=lambda x: x.version, reverse=True)
@ -54,15 +44,7 @@ def find_previous_release(
# Check if the artifact exists on GitHub. # Check if the artifact exists on GitHub.
# It can be not true for a short period of time # It can be not true for a short period of time
# after creating a tag for a new release before uploading the packages. # after creating a tag for a new release before uploading the packages.
if ( if any(re.match(PACKAGE_REGEXP, name) for name in release.assets.keys()):
requests.head(
CLICKHOUSE_PACKAGE_URL.format(
version=release.version, type=release.type
),
timeout=10,
).status_code
!= 404
):
return True, release return True, release
logger.debug( logger.debug(
@ -74,12 +56,14 @@ def find_previous_release(
return False, None return False, None
def get_previous_release(server_version: Optional[Version]) -> Optional[ReleaseInfo]: def get_previous_release(
server_version: Optional[ClickHouseVersion],
) -> Optional[ReleaseInfo]:
page = 1 page = 1
found = False found = False
while not found: while not found:
response = requests.get( response = get_gh_api(
CLICKHOUSE_TAGS_URL, {"page": page, "per_page": 100}, timeout=10 CLICKHOUSE_TAGS_URL, params={"page": page, "per_page": 100}, timeout=10
) )
if not response.ok: if not response.ok:
logger.error( logger.error(
@ -87,24 +71,42 @@ def get_previous_release(server_version: Optional[Version]) -> Optional[ReleaseI
) )
response.raise_for_status() response.raise_for_status()
releases_str = set(re.findall(VERSION_PATTERN, response.text)) releases = response.json()
if len(releases_str) == 0:
raise ValueError(
"Cannot find previous release for "
+ str(server_version)
+ " server version"
)
releases = [ReleaseInfo(release) for release in releases_str] release_infos = [] # type: List[ReleaseInfo]
found, previous_release = find_previous_release(server_version, releases) for r in releases:
if re.match(TAG_REGEXP, r["tag_name"]):
assets = {
a["name"]: a["browser_download_url"]
for a in r["assets"]
if a["state"] == "uploaded"
}
release_infos.append(ReleaseInfo(r["tag_name"], assets))
found, previous_release = find_previous_release(server_version, release_infos)
page += 1 page += 1
return previous_release return previous_release
def get_release_by_tag(tag: str) -> ReleaseInfo:
response = get_gh_api(f"{CLICKHOUSE_TAGS_URL}/tags/{tag}", timeout=10)
release = response.json()
assets = {
a["name"]: a["browser_download_url"]
for a in release["assets"]
if a["state"] == "uploaded"
}
return ReleaseInfo(release["tag_name"], assets)
def main(): def main():
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
server_version = Version(input()) version_string = input()
version_string = version_string.split("+", maxsplit=1)[0]
try:
server_version = get_version_from_string(version_string)
except ValueError:
server_version = get_version_from_tag(version_string)
print(get_previous_release(server_version)) print(get_previous_release(server_version))
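The discovery flow now pages through the GitHub releases API and accepts the first release whose clickhouse-common-static package is already uploaded, instead of HEAD-probing a constructed download URL. A hedged sketch (get_page stands in for a get_gh_api call returning parsed JSON):

    import re
    from typing import Callable, List, Optional

    PACKAGE_REGEXP = r"\Aclickhouse-common-static_.+[.]deb"

    def has_package(release: dict) -> bool:
        # The artifact may lag the tag for a short time, so check that it
        # is actually uploaded before trusting the release.
        return any(
            re.match(PACKAGE_REGEXP, a["name"])
            for a in release["assets"]
            if a["state"] == "uploaded"
        )

    def first_release_with_package(
        get_page: Callable[[int], List[dict]]
    ) -> Optional[dict]:
        page = 1
        while True:
            releases = get_page(page)  # e.g. get_gh_api(URL, params={"page": page}).json()
            if not releases:
                return None
            for r in releases:
                if has_package(r):
                    return r
            page += 1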

View File

@ -23,7 +23,7 @@ from lambda_shared_package.lambda_shared.pr import (
check_pr_description, check_pr_description,
) )
from pr_info import PRInfo from pr_info import PRInfo
from report import FAILURE, PENDING, SUCCESS from report import FAILURE, PENDING, SUCCESS, StatusType
TRUSTED_ORG_IDS = { TRUSTED_ORG_IDS = {
54801242, # clickhouse 54801242, # clickhouse
@ -58,7 +58,7 @@ def pr_is_by_trusted_user(pr_user_login, pr_user_orgs):
# Returns can_run, description # Returns can_run, description
def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str]: def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str]:
# Consider the labels and whether the user is trusted. # Consider the labels and whether the user is trusted.
print("Got labels", pr_info.labels) logging.info("Got labels: %s", pr_info.labels)
if OK_SKIP_LABELS.intersection(pr_info.labels): if OK_SKIP_LABELS.intersection(pr_info.labels):
return True, "Don't try new checks for release/backports/cherry-picks" return True, "Don't try new checks for release/backports/cherry-picks"
@ -66,9 +66,10 @@ def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str]:
if Labels.CAN_BE_TESTED not in pr_info.labels and not pr_is_by_trusted_user( if Labels.CAN_BE_TESTED not in pr_info.labels and not pr_is_by_trusted_user(
pr_info.user_login, pr_info.user_orgs pr_info.user_login, pr_info.user_orgs
): ):
print( logging.info(
f"PRs by untrusted users need the '{Labels.CAN_BE_TESTED}' label - " "PRs by untrusted users need the '%s' label - "
"please contact a member of the core team" "please contact a member of the core team",
Labels.CAN_BE_TESTED,
) )
return False, "Needs 'can be tested' label" return False, "Needs 'can be tested' label"
@ -93,6 +94,7 @@ def main():
description = format_description(description) description = format_description(description)
gh = Github(get_best_robot_token(), per_page=100) gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha) commit = get_commit(gh, pr_info.sha)
status = SUCCESS # type: StatusType
description_error, category = check_pr_description(pr_info.body, GITHUB_REPOSITORY) description_error, category = check_pr_description(pr_info.body, GITHUB_REPOSITORY)
pr_labels_to_add = [] pr_labels_to_add = []
@ -125,13 +127,16 @@ def main():
f"::notice :: Add backport labels [{backport_labels}] for a given PR category" f"::notice :: Add backport labels [{backport_labels}] for a given PR category"
) )
print(f"Change labels: add {pr_labels_to_add}, remove {pr_labels_to_remove}") logging.info(
"Change labels: add %s, remove %s", pr_labels_to_add, pr_labels_to_remove
)
if pr_labels_to_add: if pr_labels_to_add:
post_labels(gh, pr_info, pr_labels_to_add) post_labels(gh, pr_info, pr_labels_to_add)
if pr_labels_to_remove: if pr_labels_to_remove:
remove_labels(gh, pr_info, pr_labels_to_remove) remove_labels(gh, pr_info, pr_labels_to_remove)
# 1. The next three IFs are in the correct order. First: a fatal error
if description_error: if description_error:
print( print(
"::error ::Cannot run, PR description does not match the template: " "::error ::Cannot run, PR description does not match the template: "
@ -146,9 +151,10 @@ def main():
f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/" f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/"
"blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1" "blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1"
) )
status = FAILURE
post_commit_status( post_commit_status(
commit, commit,
FAILURE, status,
url, url,
format_description(description_error), format_description(description_error),
PR_CHECK, PR_CHECK,
@ -156,41 +162,38 @@ def main():
) )
sys.exit(1) sys.exit(1)
# 2. Then we check whether the documentation is missing, to fail the Mergeable check
if ( if (
Labels.PR_FEATURE in pr_info.labels Labels.PR_FEATURE in pr_info.labels
and not pr_info.has_changes_in_documentation() and not pr_info.has_changes_in_documentation()
): ):
print( print(
f"The '{Labels.PR_FEATURE}' in the labels, " f"::error ::The '{Labels.PR_FEATURE}' in the labels, "
"but there's no changed documentation" "but there's no changed documentation"
) )
post_commit_status( status = FAILURE
commit, description = f"expect adding docs for {Labels.PR_FEATURE}"
FAILURE, # 3. But we allow the workflow to continue
"",
f"expect adding docs for {Labels.PR_FEATURE}",
PR_CHECK,
pr_info,
)
# allow the workflow to continue
# 4. And post only a single commit status on a failure
if not can_run: if not can_run:
post_commit_status( post_commit_status(
commit, commit,
FAILURE, status,
"", "",
description, description,
PR_CHECK, PR_CHECK,
pr_info, pr_info,
) )
print("::notice ::Cannot run") print("::error ::Cannot run")
sys.exit(1) sys.exit(1)
# The status for continuing can be posted only once
post_commit_status( post_commit_status(
commit, commit,
SUCCESS, status,
"", "",
"ok", description,
PR_CHECK, PR_CHECK,
pr_info, pr_info,
) )
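The run_check.py rewrite converges on one pattern: start the status at SUCCESS, downgrade it (and the description) as checks fail, and post a single commit status at the end. The shape in isolation, with toy check tuples:

    SUCCESS, FAILURE = "success", "failure"

    def run_checks(checks):
        # Start optimistic; any failing check downgrades the final status.
        status, description = SUCCESS, "ok"
        for ok, message in checks:
            if not ok:
                status, description = FAILURE, message
        return status, description

    print(run_checks([(True, ""), (False, "expect adding docs for pr-feature")]))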

View File

@ -179,7 +179,11 @@ class TestCIOptions(unittest.TestCase):
) )
filtered_jobs = list( filtered_jobs = list(
ci_options.apply( ci_options.apply(
jobs_configs, is_release=False, is_pr=True, labels=["TEST_LABEL"] jobs_configs,
is_release=False,
is_pr=True,
is_mq=False,
labels=["TEST_LABEL"],
) )
) )
self.assertCountEqual( self.assertCountEqual(
@ -212,7 +216,9 @@ class TestCIOptions(unittest.TestCase):
jobs_configs["fuzzers"].run_by_label = "TEST_LABEL" jobs_configs["fuzzers"].run_by_label = "TEST_LABEL"
# no settings are set # no settings are set
filtered_jobs = list( filtered_jobs = list(
CiSettings().apply(jobs_configs, is_release=False, is_pr=True, labels=[]) CiSettings().apply(
jobs_configs, is_release=False, is_pr=False, is_mq=True, labels=[]
)
) )
self.assertCountEqual( self.assertCountEqual(
filtered_jobs, filtered_jobs,
@ -220,9 +226,21 @@ class TestCIOptions(unittest.TestCase):
"Fast test", "Fast test",
], ],
) )
filtered_jobs = list( filtered_jobs = list(
CiSettings().apply(jobs_configs, is_release=True, is_pr=False, labels=[]) CiSettings().apply(
jobs_configs, is_release=False, is_pr=True, is_mq=False, labels=[]
)
)
self.assertCountEqual(
filtered_jobs,
[
"Fast test",
],
)
filtered_jobs = list(
CiSettings().apply(
jobs_configs, is_release=True, is_pr=False, is_mq=False, labels=[]
)
) )
self.assertCountEqual( self.assertCountEqual(
filtered_jobs, filtered_jobs,
@ -240,7 +258,11 @@ class TestCIOptions(unittest.TestCase):
# no settings are set # no settings are set
filtered_jobs = list( filtered_jobs = list(
ci_settings.apply( ci_settings.apply(
jobs_configs, is_release=False, is_pr=True, labels=["TEST_LABEL"] jobs_configs,
is_release=False,
is_pr=True,
is_mq=False,
labels=["TEST_LABEL"],
) )
) )
self.assertCountEqual( self.assertCountEqual(
@ -253,7 +275,11 @@ class TestCIOptions(unittest.TestCase):
ci_settings.include_keywords = ["Fast"] ci_settings.include_keywords = ["Fast"]
filtered_jobs = list( filtered_jobs = list(
ci_settings.apply( ci_settings.apply(
jobs_configs, is_release=True, is_pr=False, labels=["TEST_LABEL"] jobs_configs,
is_release=True,
is_pr=False,
is_mq=False,
labels=["TEST_LABEL"],
) )
) )
self.assertCountEqual( self.assertCountEqual(
@ -277,7 +303,11 @@ class TestCIOptions(unittest.TestCase):
jobs_configs["Integration tests (asan)"].release_only = True jobs_configs["Integration tests (asan)"].release_only = True
filtered_jobs = list( filtered_jobs = list(
ci_options.apply( ci_options.apply(
jobs_configs, is_release=False, is_pr=True, labels=["TEST_LABEL"] jobs_configs,
is_release=False,
is_pr=True,
is_mq=False,
labels=["TEST_LABEL"],
) )
) )
self.assertCountEqual( self.assertCountEqual(

View File

@ -110,10 +110,9 @@ class HDFSApi(object):
logging.debug( logging.debug(
"Stdout:\n{}\n".format(res.stdout.decode("utf-8")) "Stdout:\n{}\n".format(res.stdout.decode("utf-8"))
) )
logging.debug("Env:\n{}\n".format(env))
raise Exception( raise Exception(
"Command {} return non-zero code {}: {}".format( "Command {} return non-zero code {}: {}".format(
args, res.returncode, res.stderr.decode("utf-8") cmd, res.returncode, res.stderr.decode("utf-8")
) )
) )

View File

@ -8,7 +8,7 @@ sys.path.insert(0, os.path.join(CURDIR))
from . import uexpect from . import uexpect
prompt = ":\) " prompt = ":\\) "
end_of_block = r".*\r\n.*\r\n" end_of_block = r".*\r\n.*\r\n"
@ -21,7 +21,7 @@ class client(object):
self.client.eol("\r") self.client.eol("\r")
self.client.logger(log, prefix=name) self.client.logger(log, prefix=name)
self.client.timeout(20) self.client.timeout(20)
self.client.expect("[#\$] ", timeout=2) self.client.expect("[#\\$] ", timeout=2)
self.client.send(command) self.client.send(command)
def __enter__(self): def __enter__(self):
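The escaping changes in this file (and in the window-view tests further down) all address the same thing: ")" or "$" preceded by a single backslash inside a plain string literal is an invalid escape sequence, which newer CPython flags (a DeprecationWarning, and a SyntaxWarning since 3.12) and flake8 reports as W605. Doubling the backslash is equivalent to the raw-string spelling:

    import re

    assert re.match(":\\) ", ":) x")   # what the diff switches to
    assert re.match(r":\) ", ":) x")   # raw-string spelling of the same pattern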

View File

@ -1474,7 +1474,7 @@ def test_backup_all(exclude_system_log_tables):
restore_settings = [] restore_settings = []
if not exclude_system_log_tables: if not exclude_system_log_tables:
restore_settings.append("allow_non_empty_tables=true") restore_settings.append("allow_non_empty_tables=true")
restore_command = f"RESTORE ALL FROM {backup_name} {'SETTINGS '+ ', '.join(restore_settings) if restore_settings else ''}" restore_command = f"RESTORE ALL FROM {backup_name} {'SETTINGS ' + ', '.join(restore_settings) if restore_settings else ''}"
session_id = new_session_id() session_id = new_session_id()
instance.http_query( instance.http_query(

View File

@ -161,13 +161,13 @@ def wait_for_fail_restore(node, restore_id):
elif status == "RESTORING": elif status == "RESTORING":
assert_eq_with_retry( assert_eq_with_retry(
node, node,
f"SELECT status FROM system.backups WHERE id = '{backup_id}'", f"SELECT status FROM system.backups WHERE id = '{restore_id}'",
"RESTORE_FAILED", "RESTORE_FAILED",
sleep_time=2, sleep_time=2,
retry_count=50, retry_count=50,
) )
error = node.query( error = node.query(
f"SELECT error FROM system.backups WHERE id == '{backup_id}'" f"SELECT error FROM system.backups WHERE id == '{restore_id}'"
).rstrip("\n") ).rstrip("\n")
assert re.search( assert re.search(
"Cannot restore the table default.tbl because it already contains some data", "Cannot restore the table default.tbl because it already contains some data",

View File

@ -187,7 +187,7 @@ def check_convert_all_dbs_to_atomic():
# 6 tables, MVs contain 2 rows (inner tables does not match regexp) # 6 tables, MVs contain 2 rows (inner tables does not match regexp)
assert "8\t{}\n".format(8 * len("atomic")) == node.query( assert "8\t{}\n".format(8 * len("atomic")) == node.query(
"SELECT count(), sum(n) FROM atomic.merge".format(db) "SELECT count(), sum(n) FROM atomic.merge"
) )
node.query("DETACH TABLE ordinary.detached PERMANENTLY") node.query("DETACH TABLE ordinary.detached PERMANENTLY")

View File

@ -89,7 +89,7 @@ def test_aggregate_states(start_cluster):
logging.info("Skipping %s", aggregate_function) logging.info("Skipping %s", aggregate_function)
skipped += 1 skipped += 1
continue continue
logging.exception("Failed %s", function) logging.exception("Failed %s", aggregate_function)
failed += 1 failed += 1
continue continue

View File

@ -116,7 +116,7 @@ def test_usage(cluster, node_name):
(id Int32) ENGINE = MergeTree() ORDER BY id (id Int32) ENGINE = MergeTree() ORDER BY id
SETTINGS storage_policy = 'web'; SETTINGS storage_policy = 'web';
""".format( """.format(
i, uuids[i], i, i i, uuids[i]
) )
) )
@ -338,7 +338,7 @@ def test_page_cache(cluster):
(id Int32) ENGINE = MergeTree() ORDER BY id (id Int32) ENGINE = MergeTree() ORDER BY id
SETTINGS storage_policy = 'web'; SETTINGS storage_policy = 'web';
""".format( """.format(
i, uuids[i], i, i i, uuids[i]
) )
) )

View File

@ -45,9 +45,7 @@ def test_failed_async_inserts(started_cluster):
ignore_error=True, ignore_error=True,
) )
select_query = ( select_query = "SELECT value FROM system.events WHERE event == 'FailedAsyncInsertQuery' SETTINGS min_untracked_memory = '4Mi'"
"SELECT value FROM system.events WHERE event == 'FailedAsyncInsertQuery'"
)
assert node.query(select_query) == "4\n" assert node.query(select_query) == "4\n"
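The test now pins min_untracked_memory, presumably so that per-thread batching of small allocations does not perturb memory accounting (see the min_untracked_memory setting documented above). As a hedged usage sketch, the same query over HTTP against a local server:

    import requests

    q = (
        "SELECT value FROM system.events "
        "WHERE event == 'FailedAsyncInsertQuery' "
        "SETTINGS min_untracked_memory = '4Mi'"
    )
    print(requests.get("http://localhost:8123/", params={"query": q}).text)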

View File

@ -90,7 +90,7 @@ def wait_until_fully_merged(node, table):
except: except:
return return
raise Exception(f"There are still merges on-going after {retry} assignments") raise Exception(f"There are still merges on-going after {i} assignments")
def test_jbod_balanced_merge(start_cluster): def test_jbod_balanced_merge(start_cluster):

View File

@ -91,7 +91,7 @@ def test_jdbc_insert(started_cluster):
""" """
CREATE TABLE test.test_insert ENGINE = Memory AS CREATE TABLE test.test_insert ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable; SELECT * FROM test.ClickHouseTable;
SELECT * SELECT *
FROM jdbc('{0}?mutation', 'INSERT INTO test.test_insert VALUES({1}, ''{1}'', ''{1}'')'); FROM jdbc('{0}?mutation', 'INSERT INTO test.test_insert VALUES({1}, ''{1}'', ''{1}'')');
""".format( """.format(
datasource, records datasource, records
@ -115,7 +115,7 @@ def test_jdbc_update(started_cluster):
""" """
CREATE TABLE test.test_update ENGINE = Memory AS CREATE TABLE test.test_update ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable; SELECT * FROM test.ClickHouseTable;
SELECT * SELECT *
FROM jdbc( FROM jdbc(
'{}?mutation', '{}?mutation',
'SET mutations_sync = 1; ALTER TABLE test.test_update UPDATE Str=''{}'' WHERE Num = {} - 1;' 'SET mutations_sync = 1; ALTER TABLE test.test_update UPDATE Str=''{}'' WHERE Num = {} - 1;'
@ -145,7 +145,7 @@ def test_jdbc_delete(started_cluster):
""" """
CREATE TABLE test.test_delete ENGINE = Memory AS CREATE TABLE test.test_delete ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable; SELECT * FROM test.ClickHouseTable;
SELECT * SELECT *
FROM jdbc( FROM jdbc(
'{}?mutation', '{}?mutation',
'SET mutations_sync = 1; ALTER TABLE test.test_delete DELETE WHERE Num < {} - 1;' 'SET mutations_sync = 1; ALTER TABLE test.test_delete DELETE WHERE Num < {} - 1;'
@ -158,7 +158,7 @@ def test_jdbc_delete(started_cluster):
expected = records - 1 expected = records - 1
actual = instance.query( actual = instance.query(
"SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_delete')".format( "SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_delete')".format(
datasource, records datasource
) )
) )
assert int(actual) == expected, "expecting {} but got {}".format(expected, actual) assert int(actual) == expected, "expecting {} but got {}".format(expected, actual)

View File

@ -1,5 +1,5 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
##!/usr/bin/env python3
import pytest import pytest
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster
import helpers.keeper_utils as keeper_utils import helpers.keeper_utils as keeper_utils

View File

@ -1,6 +1,5 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
#!/usr/bin/env python3
import pytest import pytest
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster
import helpers.keeper_utils as keeper_utils import helpers.keeper_utils as keeper_utils

View File

@ -1,6 +1,5 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
#!/usr/bin/env python3
import pytest import pytest
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster
import random import random

View File

@ -260,8 +260,9 @@ def test_create_table():
"CREATE TABLE table16 (`x` int) ENGINE = DeltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", "CREATE TABLE table16 (`x` int) ENGINE = DeltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')",
"CREATE TABLE table17 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV') settings mode = 'ordered'", "CREATE TABLE table17 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV') settings mode = 'ordered'",
"CREATE TABLE table18 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip') settings mode = 'ordered'", "CREATE TABLE table18 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip') settings mode = 'ordered'",
"CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') settings mode = 'ordered'", # due to sensitive data substituion the query will be normalized, so not "settings" but "SETTINGS"
"CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') settings mode = 'ordered'", "CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') SETTINGS mode = 'ordered'",
"CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') SETTINGS mode = 'ordered'",
], ],
must_not_contain=[password], must_not_contain=[password],
) )

View File

@ -537,10 +537,7 @@ def test_freeze_unfreeze(cluster):
def test_apply_new_settings(cluster): def test_apply_new_settings(cluster):
node = cluster.instances[NODE_NAME] node = cluster.instances[NODE_NAME]
create_table(node, TABLE_NAME) create_table(node, TABLE_NAME)
config_path = os.path.join( config_path = os.path.join(SCRIPT_DIR, "./_gen/disk_storage_conf.xml")
SCRIPT_DIR,
"./_gen/disk_storage_conf.xml".format(cluster.instances_dir_name),
)
azure_query( azure_query(
node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}"

View File

@ -179,9 +179,7 @@ def test_different_data_types(started_cluster):
for i in range(10): for i in range(10):
col = random.choice(["a", "b", "c"]) col = random.choice(["a", "b", "c"])
cursor.execute("UPDATE test_data_types SET {} = {};".format(col, i)) cursor.execute("UPDATE test_data_types SET {} = {};".format(col, i))
cursor.execute( cursor.execute("UPDATE test_data_types SET i = '2020-12-12';")
"""UPDATE test_data_types SET i = '2020-12-12';""".format(col, i)
)
check_tables_are_synchronized(instance, "test_data_types", "id") check_tables_are_synchronized(instance, "test_data_types", "id")
@ -452,7 +450,7 @@ def test_many_concurrent_queries(started_cluster):
# also change primary key value # also change primary key value
print("try update primary key {}".format(thread_id)) print("try update primary key {}".format(thread_id))
cursor.execute( cursor.execute(
"UPDATE {table}_{} SET key=key%100000+100000*{} WHERE key%{}=0".format( "UPDATE {} SET key=key%100000+100000*{} WHERE key%{}=0".format(
table_name, i + 1, i + 1 table_name, i + 1, i + 1
) )
) )

View File

@ -28,7 +28,7 @@ def parse_response_line(line):
if line.startswith("#"): if line.startswith("#"):
return {} return {}
match = re.match("^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? -?(\d)", line) match = re.match(r"^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? -?(\d)", line)
assert match, line assert match, line
name, _, val = match.groups() name, _, val = match.groups()
return {name: int(val)} return {name: int(val)}
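The pattern only changes spelling (raw string), not behaviour; a quick self-check that it still pulls the metric name and value out of a Prometheus exposition line (the sample line is illustrative):

    import re

    def parse_response_line(line: str) -> dict:
        if line.startswith("#"):
            return {}
        match = re.match(r"^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? -?(\d)", line)
        assert match, line
        name, _, val = match.groups()
        return {name: int(val)}

    print(parse_response_line("clickhouse_event_Query 7"))  # {'clickhouse_event_Query': 7}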

View File

@ -6,6 +6,7 @@ import time
import threading import threading
import pytest import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__) cluster = ClickHouseCluster(__file__)

View File

@ -136,7 +136,10 @@ def test_select_clamps_settings():
) )
assert ( assert (
distributed.query(query, settings={"max_memory_usage": 1}) distributed.query(
query,
settings={"max_memory_usage": 1, "min_untracked_memory": 4 * 1024 * 1024},
)
== "node1\tmax_memory_usage\t11111111\n" == "node1\tmax_memory_usage\t11111111\n"
"node1\treadonly\t0\n" "node1\treadonly\t0\n"
"node2\tmax_memory_usage\t0\n" "node2\tmax_memory_usage\t0\n"

View File

@ -4,7 +4,7 @@ import os
import json import json
import helpers.client import helpers.client
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster, ClickHouseInstance
from helpers.test_tools import TSV from helpers.test_tools import TSV
from helpers.s3_tools import prepare_s3_bucket, upload_directory, get_file_contents from helpers.s3_tools import prepare_s3_bucket, upload_directory, get_file_contents

View File

@ -1,5 +1,5 @@
import helpers.client import helpers.client
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster, ClickHouseInstance
from helpers.test_tools import TSV from helpers.test_tools import TSV
import pyspark import pyspark

View File

@ -702,7 +702,7 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
assert ( assert (
int(result1) == messages_num * threads_num int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result) ), "ClickHouse lost some messages: {}".format(result1)
assert int(result2) == 10 assert int(result2) == 10
@ -1516,7 +1516,7 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster):
assert ( assert (
int(result1) == messages_num * threads_num int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result) ), "ClickHouse lost some messages: {}".format(result1)
assert int(result2) == 4 * num_tables assert int(result2) == 4 * num_tables
@ -1966,7 +1966,7 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster):
assert ( assert (
int(result1) == messages_num * threads_num int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result) ), "ClickHouse lost some messages: {}".format(result1)
# 4 tables, 2 consumers for each table => 8 consumer tags # 4 tables, 2 consumers for each table => 8 consumer tags
assert int(result2) == 8 assert int(result2) == 8
@ -2427,9 +2427,7 @@ def test_rabbitmq_drop_table_properly(rabbitmq_cluster):
time.sleep(30) time.sleep(30)
try: try:
exists = channel.queue_declare( exists = channel.queue_declare(queue="rabbit_queue_drop", passive=True)
callback, queue="rabbit_queue_drop", passive=True
)
except Exception as e: except Exception as e:
exists = False exists = False
@ -3364,7 +3362,7 @@ def test_rabbitmq_flush_by_block_size(rabbitmq_cluster):
routing_key="", routing_key="",
body=json.dumps({"key": 0, "value": 0}), body=json.dumps({"key": 0, "value": 0}),
) )
except e: except Exception as e:
logging.debug(f"Got error: {str(e)}") logging.debug(f"Got error: {str(e)}")
produce_thread = threading.Thread(target=produce) produce_thread = threading.Thread(target=produce)
@ -3442,7 +3440,7 @@ def test_rabbitmq_flush_by_time(rabbitmq_cluster):
) )
logging.debug("Produced a message") logging.debug("Produced a message")
time.sleep(0.8) time.sleep(0.8)
except e: except Exception as e:
logging.debug(f"Got error: {str(e)}") logging.debug(f"Got error: {str(e)}")
produce_thread = threading.Thread(target=produce) produce_thread = threading.Thread(target=produce)
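Both fixed handlers had the same bug: `except e:` expects an exception class, and `e` is unbound at that point, so the handler itself dies with a NameError instead of logging the publish error. The corrected shape, in isolation:

    import logging

    def safe_call(fn):
        try:
            fn()
        except Exception as e:  # was: `except e:` - a NameError at runtime
            logging.debug("Got error: %s", e)

    safe_call(lambda: 1 / 0)  # logged, not raised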

View File

@ -1850,7 +1850,7 @@ class TestCancelBackgroundMoving:
config = inspect.cleandoc( config = inspect.cleandoc(
f""" f"""
<clickhouse> <clickhouse>
<max_local_write_bandwidth_for_server>{ 256 * 1024 }</max_local_write_bandwidth_for_server> <max_local_write_bandwidth_for_server>{256 * 1024}</max_local_write_bandwidth_for_server>
</clickhouse> </clickhouse>
""" """
) )

View File

@ -325,7 +325,7 @@ def optimize_with_retry(node, table_name, retry=20):
settings={"optimize_throw_if_noop": "1"}, settings={"optimize_throw_if_noop": "1"},
) )
break break
except e: except:
time.sleep(0.5) time.sleep(0.5)

View File

@ -7,7 +7,8 @@
-- sizeof(HLL) is (2^K * 6 / 8) -- sizeof(HLL) is (2^K * 6 / 8)
-- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400 -- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400
SET use_uncompressed_cache = 0; SET use_uncompressed_cache = 0;
SET min_untracked_memory = '4Mi';
-- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements -- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements
SELECT 'UInt32'; SELECT 'UInt32';

View File

@ -49,16 +49,16 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01056_window_view_proc_hop_watch.wv") client1.send("WATCH 01056_window_view_proc_hop_watch.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01056_window_view_proc_hop_watch.mt VALUES (1, now('US/Samoa') + 3)" "INSERT INTO 01056_window_view_proc_hop_watch.mt VALUES (1, now('US/Samoa') + 3)"
) )
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -47,7 +47,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH db_01059_event_hop_watch_strict_asc.wv") client1.send("WATCH db_01059_event_hop_watch_strict_asc.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" "INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));"
) )
@ -57,7 +57,7 @@ with client(name="client1>", log=log) as client1, client(
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1*1990-01-01 12:00:02" + end_of_block) client1.expect("1*1990-01-01 12:00:02" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));" "INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));"
@ -65,11 +65,11 @@ with client(name="client1>", log=log) as client1, client(
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1*1990-01-01 12:00:06" + end_of_block) client1.expect("1*1990-01-01 12:00:06" + end_of_block)
client1.expect("1*1990-01-01 12:00:08" + end_of_block) client1.expect("1*1990-01-01 12:00:08" + end_of_block)
client1.expect("Progress: 3.00 rows.*\)") client1.expect("Progress: 3.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -49,7 +49,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01062_window_view_event_hop_watch_asc.wv") client1.send("WATCH 01062_window_view_event_hop_watch_asc.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));"
) )
@ -69,11 +69,11 @@ with client(name="client1>", log=log) as client1, client(
client2.expect(prompt) client2.expect(prompt)
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("2" + end_of_block) client1.expect("2" + end_of_block)
client1.expect("Progress: 3.00 rows.*\)") client1.expect("Progress: 3.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -50,7 +50,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01065_window_view_event_hop_watch_bounded.wv") client1.send("WATCH 01065_window_view_event_hop_watch_bounded.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01065_window_view_event_hop_watch_bounded.mt VALUES (1, '1990/01/01 12:00:00');" "INSERT INTO 01065_window_view_event_hop_watch_bounded.mt VALUES (1, '1990/01/01 12:00:00');"
) )
@ -72,7 +72,7 @@ with client(name="client1>", log=log) as client1, client(
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -49,23 +49,23 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01069_window_view_proc_tumble_watch.wv") client1.send("WATCH 01069_window_view_proc_tumble_watch.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)" "INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)"
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)" "INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)"
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("Progress: 2.00 rows.*\)") client1.expect("Progress: 2.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -49,7 +49,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01070_window_view_watch_events.wv EVENTS") client1.send("WATCH 01070_window_view_watch_events.wv EVENTS")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01070_window_view_watch_events.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" "INSERT INTO 01070_window_view_watch_events.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));"
) )
@ -59,11 +59,11 @@ with client(name="client1>", log=log) as client1, client(
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1990-01-01 12:00:05" + end_of_block) client1.expect("1990-01-01 12:00:05" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client1.send("\x03", eol="") client1.send("\x03", eol="")
match = client1.expect("(%s)|([#\$] )" % prompt) match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client1.send(client1.command) client1.send(client1.command)
client1.expect(prompt) client1.expect(prompt)

View File

@ -55,7 +55,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01078_window_view_alter_query_watch.wv") client1.send("WATCH 01078_window_view_alter_query_watch.wv")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01078_window_view_alter_query_watch.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" "INSERT INTO 01078_window_view_alter_query_watch.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));"
) )
@ -65,7 +65,7 @@ with client(name="client1>", log=log) as client1, client(
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
client2.send( client2.send(
"ALTER TABLE 01078_window_view_alter_query_watch.wv MODIFY QUERY SELECT count(a) * 2 AS count, hopEnd(wid) AS w_end FROM 01078_window_view_alter_query_watch.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid" "ALTER TABLE 01078_window_view_alter_query_watch.wv MODIFY QUERY SELECT count(a) * 2 AS count, hopEnd(wid) AS w_end FROM 01078_window_view_alter_query_watch.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid"
) )
@ -75,7 +75,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt) client1.expect(prompt)
client3.send("WATCH 01078_window_view_alter_query_watch.wv") client3.send("WATCH 01078_window_view_alter_query_watch.wv")
client3.expect("Query id" + end_of_block) client3.expect("Query id" + end_of_block)
client3.expect("Progress: 0.00 rows.*\)") client3.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01078_window_view_alter_query_watch.mt VALUES (1, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));" "INSERT INTO 01078_window_view_alter_query_watch.mt VALUES (1, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));"
) )
@ -85,11 +85,11 @@ with client(name="client1>", log=log) as client1, client(
) )
client2.expect("Ok.") client2.expect("Ok.")
client3.expect("2" + end_of_block) client3.expect("2" + end_of_block)
client3.expect("Progress: 1.00 rows.*\)") client3.expect("Progress: 1.00 rows.*\\)")
# send Ctrl-C # send Ctrl-C
client3.send("\x03", eol="") client3.send("\x03", eol="")
match = client3.expect("(%s)|([#\$] )" % prompt) match = client3.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]: if match.groups()[1]:
client3.send(client3.command) client3.send(client3.command)
client3.expect(prompt) client3.expect(prompt)

View File

@ -49,7 +49,7 @@ with client(name="client1>", log=log) as client1, client(
client1.send("WATCH 01082_window_view_watch_limit.wv LIMIT 1") client1.send("WATCH 01082_window_view_watch_limit.wv LIMIT 1")
client1.expect("Query id" + end_of_block) client1.expect("Query id" + end_of_block)
client1.expect("Progress: 0.00 rows.*\)") client1.expect("Progress: 0.00 rows.*\\)")
client2.send( client2.send(
"INSERT INTO 01082_window_view_watch_limit.mt VALUES (1, '1990/01/01 12:00:00');" "INSERT INTO 01082_window_view_watch_limit.mt VALUES (1, '1990/01/01 12:00:00');"
) )
@ -59,7 +59,7 @@ with client(name="client1>", log=log) as client1, client(
) )
client2.expect("Ok.") client2.expect("Ok.")
client1.expect("1" + end_of_block) client1.expect("1" + end_of_block)
client1.expect("Progress: 1.00 rows.*\)") client1.expect("Progress: 1.00 rows.*\\)")
client1.expect("1 row" + end_of_block) client1.expect("1 row" + end_of_block)
client1.expect(prompt) client1.expect(prompt)

View File

@ -4,6 +4,11 @@
5 5
5 5
5 5
5
5
5
5
5
1 1
12 12
2023-05-30 14:38:20 2023-05-30 14:38:20

View File

@ -16,6 +16,21 @@ SELECT accurateCast(-129, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE }
SELECT accurateCast(5, 'Int8'); SELECT accurateCast(5, 'Int8');
SELECT accurateCast(128, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(128, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE }
SELECT accurateCast('-1', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt8');
SELECT accurateCast('257', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt16');
SELECT accurateCast('65536', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt32');
SELECT accurateCast('4294967296', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt64'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt64');
SELECT accurateCast('-129', 'Int8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'Int8');
SELECT accurateCast('128', 'Int8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW }
SELECT accurateCast(1, 'Decimal32(9)'); SELECT accurateCast(1, 'Decimal32(9)');
SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW }
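The added cases pin down accurateCast's string-to-integer range checking: out-of-range text raises CANNOT_PARSE_TEXT rather than wrapping. The expected accept/reject boundaries, sketched in plain Python:

    def fits(value: str, lo: int, hi: int) -> bool:
        # accurateCast accepts the string only if it parses into range.
        n = int(value)
        return lo <= n <= hi

    assert fits("5", 0, 255)            # accurateCast('5', 'UInt8')
    assert not fits("257", 0, 255)      # CANNOT_PARSE_TEXT
    assert not fits("-1", 0, 255)       # CANNOT_PARSE_TEXT
    assert not fits("65536", 0, 65535)  # UInt16 overflow
    assert not fits("-129", -128, 127)  # Int8 underflow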

View File

@ -15,6 +15,6 @@ log = None
with client(name="client1>", log=log) as client1: with client(name="client1>", log=log) as client1:
client1.expect(prompt) client1.expect(prompt)
client1.send("SELECT number FROM numbers(1000) FORMAT Null") client1.send("SELECT number FROM numbers(1000) FORMAT Null")
client1.expect("Progress: 1\.00 thousand rows, 8\.00 KB .*" + end_of_block) client1.expect("Progress: 1\\.00 thousand rows, 8\\.00 KB .*" + end_of_block)
client1.expect("0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec.") client1.expect("0 rows in set. Elapsed: [\\w]{1}\\.[\\w]{3} sec.")
client1.expect("Peak memory usage: .*B" + end_of_block) client1.expect("Peak memory usage: .*B" + end_of_block)

View File

@ -1,19 +1,19 @@
1 Hello ClickHouse 1 Well, Hello ClickHouse !
2 Hello World 2 Well, Hello World !
Granules: 6/6 Granules: 6/6
Granules: 2/6 Granules: 2/6
Granules: 6/6 Granules: 6/6
Granules: 2/6 Granules: 2/6
--- ---
1 Hello ClickHouse 1 Well, Hello ClickHouse !
2 Hello World 2 Well, Hello World !
6 World Champion 6 True World Champion
Granules: 6/6 Granules: 6/6
Granules: 3/6 Granules: 3/6
Granules: 6/6 Granules: 6/6
Granules: 3/6 Granules: 3/6
--- ---
5 OLAP Database 5 Its An OLAP Database
Granules: 6/6 Granules: 6/6
Granules: 1/6 Granules: 1/6
Granules: 6/6 Granules: 6/6

View File

@@ -14,19 +14,19 @@ ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;
-INSERT INTO tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
-SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
+INSERT INTO tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion');
+SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id;
-- Read 2/6 granules
--- Required string: 'Hello '
--- Alternatives: 'Hello ClickHouse', 'Hello World'
+-- Required string: ' Hello '
+-- Alternatives: ' Hello ClickHouse ', ' Hello World '
SELECT *
FROM
(
EXPLAIN PLAN indexes=1
-SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
+SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -37,7 +37,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes=1
-SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
+SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -46,17 +46,17 @@ SETTINGS
SELECT '---';
-SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
+SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id;
-- Read 3/6 granules
-- Required string: -
--- Alternatives: 'ClickHouse', 'World'
+-- Alternatives: ' ClickHouse ', ' World '
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
+SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -67,7 +67,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
+SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -76,17 +76,17 @@ SETTINGS
SELECT '---';
-SELECT * FROM tab WHERE match(str, 'OLAP.*') ORDER BY id;
+SELECT * FROM tab WHERE match(str, ' OLAP .*') ORDER BY id;
-- Read 1/6 granules
--- Required string: 'OLAP'
+-- Required string: ' OLAP '
-- Alternatives: -
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
+SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -97,7 +97,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
+SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'


@@ -13,19 +13,19 @@ af full_text
1
Test full_text()
af full_text
-101 Alick a01
-106 Alick a06
-111 Alick b01
-116 Alick b06
-101 Alick a01
-106 Alick a06
+101 x Alick a01 y
+106 x Alick a06 y
+111 x Alick b01 y
+116 x Alick b06 y
+101 x Alick a01 y
+106 x Alick a06 y
1
-101 Alick a01
-111 Alick b01
+101 x Alick a01 y
+111 x Alick b01 y
1
Test on array columns
af full_text
-3 ['Click a03','Click b03']
+3 ['x Click a03 y','x Click b03 y']
1
Test on map columns
af full_text


@@ -67,7 +67,7 @@ CREATE TABLE tab_x(k UInt64, s String, INDEX af(s) TYPE full_text())
ENGINE = MergeTree() ORDER BY k
SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';
-INSERT INTO tab_x VALUES (101, 'Alick a01'), (102, 'Blick a02'), (103, 'Click a03'), (104, 'Dlick a04'), (105, 'Elick a05'), (106, 'Alick a06'), (107, 'Blick a07'), (108, 'Click a08'), (109, 'Dlick a09'), (110, 'Elick a10'), (111, 'Alick b01'), (112, 'Blick b02'), (113, 'Click b03'), (114, 'Dlick b04'), (115, 'Elick b05'), (116, 'Alick b06'), (117, 'Blick b07'), (118, 'Click b08'), (119, 'Dlick b09'), (120, 'Elick b10');
+INSERT INTO tab_x VALUES (101, 'x Alick a01 y'), (102, 'x Blick a02 y'), (103, 'x Click a03 y'), (104, 'x Dlick a04 y'), (105, 'x Elick a05 y'), (106, 'x Alick a06 y'), (107, 'x Blick a07 y'), (108, 'x Click a08 y'), (109, 'x Dlick a09 y'), (110, 'x Elick a10 y'), (111, 'x Alick b01 y'), (112, 'x Blick b02 y'), (113, 'x Click b03 y'), (114, 'x Dlick b04 y'), (115, 'x Elick b05 y'), (116, 'x Alick b06 y'), (117, 'x Blick b07 y'), (118, 'x Click b08 y'), (119, 'x Dlick b09 y'), (120, 'x Elick b10 y');
-- check full_text index was created
SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab_x' AND database = currentDatabase() LIMIT 1;
@@ -86,27 +86,27 @@ SELECT read_rows==8 from system.query_log
LIMIT 1;
-- search full_text index with IN operator
-SELECT * FROM tab_x WHERE s IN ('Alick a01', 'Alick a06') ORDER BY k;
+SELECT * FROM tab_x WHERE s IN ('x Alick a01 y', 'x Alick a06 y') ORDER BY k;
-- check the query only read 2 granules (4 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==4 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
-AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE s IN (\'Alick a01\', \'Alick a06\') ORDER BY k;')
+AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE s IN (\'x Alick a01 y\', \'x Alick a06 y\') ORDER BY k;')
AND type='QueryFinish'
AND result_rows==2
LIMIT 1;
-- search full_text index with multiSearch
-SELECT * FROM tab_x WHERE multiSearchAny(s, ['a01', 'b01']) ORDER BY k;
+SELECT * FROM tab_x WHERE multiSearchAny(s, [' a01 ', ' b01 ']) ORDER BY k;
-- check the query only read 2 granules (4 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==4 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
-AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE multiSearchAny(s, [\'a01\', \'b01\']) ORDER BY k;')
+AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE multiSearchAny(s, [\' a01 \', \' b01 \']) ORDER BY k;')
AND type='QueryFinish'
AND result_rows==2
LIMIT 1;
@@ -126,14 +126,14 @@ INSERT INTO tab SELECT rowNumberInBlock(), groupArray(s) FROM tab_x GROUP BY k%1
SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1;
-- search full_text index with has
-SELECT * FROM tab WHERE has(s, 'Click a03') ORDER BY k;
+SELECT * FROM tab WHERE has(s, 'x Click a03 y') ORDER BY k;
-- check the query must read all 10 granules (20 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==2 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
-AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE has(s, \'Click a03\') ORDER BY k;')
+AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE has(s, \'x Click a03 y\') ORDER BY k;')
AND type='QueryFinish'
AND result_rows==1
LIMIT 1;


@@ -32,12 +32,12 @@ with client(
)
client1.expect(prompt)
client1.send(f"INSERT INTO test.infile_progress FROM INFILE '{filename}'")
-client1.expect("Progress: 5.00 rows, 10.00 B.*\)")
+client1.expect("Progress: 5.00 rows, 10.00 B.*\\)")
client1.expect(prompt)
# send Ctrl-C
client1.send("\x03", eol="")
-match = client1.expect("(%s)|([#\$] )" % prompt)
+match = client1.expect("(%s)|([#\\$] )" % prompt)
if match.groups()[1]:
client1.send(client1.command)
client1.expect(prompt)


@@ -1,8 +1,4 @@
number Nullable(Int64)
-u8 Nullable(Int8)
-u16 Nullable(Int16)
-u32 Nullable(Int32)
-u64 Nullable(Int64)
i8 Nullable(Int8)
i16 Nullable(Int16)
i32 Nullable(Int32)
@@ -22,34 +18,34 @@ d64 Nullable(Decimal(18, 10))
d128 Nullable(Decimal(38, 20))
-- Go over all types individually
-- { echoOn }
-select count(), sum(number) from file('02892.orc') where indexHint(u8 in (10, 15, 250));
-800 4229600
-select count(1), min(u8), max(u8) from file('02892.orc') where u8 in (10, 15, 250);
-66 10 15
+select count(), sum(number) from file('02892.orc') where indexHint(i8 in (10, 15, -6));
+1100 5744450
+select count(1), min(i8), max(i8) from file('02892.orc') where i8 in (10, 15, -6);
+99 -6 15
select count(), sum(number) from file('02892.orc') where indexHint(i8 between -3 and 2);
1000 4999500
select count(1), min(i8), max(i8) from file('02892.orc') where i8 between -3 and 2;
208 -3 2
-select count(), sum(number) from file('02892.orc') where indexHint(u16 between 4000 and 61000 or u16 == 42);
-1800 6479100
-select count(1), min(u16), max(u16) from file('02892.orc') where u16 between 4000 and 61000 or u16 == 42;
+select count(), sum(number) from file('02892.orc') where indexHint(i16 between 4000 and 61000 or i16 == 42);
+1200 1099400
+select count(1), min(i16), max(i16) from file('02892.orc') where i16 between 4000 and 61000 or i16 == 42;
1002 42 5000
select count(), sum(number) from file('02892.orc') where indexHint(i16 between -150 and 250);
500 2474750
select count(1), min(i16), max(i16) from file('02892.orc') where i16 between -150 and 250;
401 -150 250
-select count(), sum(number) from file('02892.orc') where indexHint(u32 in (42, 4294966296));
-200 999900
-select count(1), min(u32), max(u32) from file('02892.orc') where u32 in (42, 4294966296);
-1 42 42
+select count(), sum(number) from file('02892.orc') where indexHint(i32 in (42, -1000));
+200 1099900
+select count(1), min(i32), max(i32) from file('02892.orc') where i32 in (42, -1000);
+2 -1000 42
select count(), sum(number) from file('02892.orc') where indexHint(i32 between -150 and 250);
500 2474750
select count(1), min(i32), max(i32) from file('02892.orc') where i32 between -150 and 250;
401 -150 250
-select count(), sum(number) from file('02892.orc') where indexHint(u64 in (42, 18446744073709550616));
-100 494950
-select count(1), min(u64), max(u64) from file('02892.orc') where u64 in (42, 18446744073709550616);
-1 42 42
+select count(), sum(number) from file('02892.orc') where indexHint(i64 in (42, -1000));
+200 1099900
+select count(1), min(i64), max(i64) from file('02892.orc') where i64 in (42, -1000);
+2 -1000 42
select count(), sum(number) from file('02892.orc') where indexHint(i64 between -150 and 250);
500 2474750
select count(1), min(i64), max(i64) from file('02892.orc') where i64 between -150 and 250;
@@ -111,21 +107,21 @@ select count(), sum(number) from file('02892.orc') where indexHint(0);
0 \N
select count(), min(number), max(number) from file('02892.orc') where indexHint(0);
0 \N \N
-select count(), sum(number) from file('02892.orc') where indexHint(s like '99%' or u64 == 2000);
+select count(), sum(number) from file('02892.orc') where indexHint(s like '99%' or i64 == 2000);
300 1204850
-select count(), min(s), max(s) from file('02892.orc') where (s like '99%' or u64 == 2000);
+select count(), min(s), max(s) from file('02892.orc') where (s like '99%' or i64 == 2000);
12 2000 999
select count(), sum(number) from file('02892.orc') where indexHint(s like 'z%');
0 \N
select count(), min(s), max(s) from file('02892.orc') where (s like 'z%');
0 \N \N
-select count(), sum(number) from file('02892.orc') where indexHint(u8 == 10 or 1 == 1);
+select count(), sum(number) from file('02892.orc') where indexHint(i8 == 10 or 1 == 1);
10000 49995000
-select count(), min(u8), max(u8) from file('02892.orc') where (u8 == 10 or 1 == 1);
+select count(), min(i8), max(i8) from file('02892.orc') where (i8 == 10 or 1 == 1);
10000 -128 127
-select count(), sum(number) from file('02892.orc') where indexHint(u8 < 0);
+select count(), sum(number) from file('02892.orc') where indexHint(i8 < 0);
5300 26042350
-select count(), min(u8), max(u8) from file('02892.orc') where (u8 < 0);
+select count(), min(i8), max(i8) from file('02892.orc') where (i8 < 0);
5001 -128 -1
-- { echoOn }
select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null is NULL);


@@ -1,4 +1,4 @@
--- Tags: no-fasttest, no-parallel, no-cpu-aarch64
+-- Tags: no-fasttest, no-parallel
set output_format_orc_string_as_string = 1;
set output_format_orc_row_index_stride = 100;
@@ -16,15 +16,9 @@ SET session_timezone = 'UTC';
-- Try all the types.
insert into function file('02892.orc')
--- Use negative numbers to test sign extension for signed types and lack of sign extension for
--- unsigned types.
with 5000 - number as n
select
number,
-intDiv(n, 11)::UInt8 as u8,
-n::UInt16 u16,
-n::UInt32 as u32,
-n::UInt64 as u64,
intDiv(n, 11)::Int8 as i8,
n::Int16 i16,
n::Int32 as i32,
@@ -50,26 +44,26 @@ desc file('02892.orc');
-- Go over all types individually
-- { echoOn }
-select count(), sum(number) from file('02892.orc') where indexHint(u8 in (10, 15, 250));
-select count(1), min(u8), max(u8) from file('02892.orc') where u8 in (10, 15, 250);
+select count(), sum(number) from file('02892.orc') where indexHint(i8 in (10, 15, -6));
+select count(1), min(i8), max(i8) from file('02892.orc') where i8 in (10, 15, -6);
select count(), sum(number) from file('02892.orc') where indexHint(i8 between -3 and 2);
select count(1), min(i8), max(i8) from file('02892.orc') where i8 between -3 and 2;
-select count(), sum(number) from file('02892.orc') where indexHint(u16 between 4000 and 61000 or u16 == 42);
-select count(1), min(u16), max(u16) from file('02892.orc') where u16 between 4000 and 61000 or u16 == 42;
+select count(), sum(number) from file('02892.orc') where indexHint(i16 between 4000 and 61000 or i16 == 42);
+select count(1), min(i16), max(i16) from file('02892.orc') where i16 between 4000 and 61000 or i16 == 42;
select count(), sum(number) from file('02892.orc') where indexHint(i16 between -150 and 250);
select count(1), min(i16), max(i16) from file('02892.orc') where i16 between -150 and 250;
-select count(), sum(number) from file('02892.orc') where indexHint(u32 in (42, 4294966296));
-select count(1), min(u32), max(u32) from file('02892.orc') where u32 in (42, 4294966296);
+select count(), sum(number) from file('02892.orc') where indexHint(i32 in (42, -1000));
+select count(1), min(i32), max(i32) from file('02892.orc') where i32 in (42, -1000);
select count(), sum(number) from file('02892.orc') where indexHint(i32 between -150 and 250);
select count(1), min(i32), max(i32) from file('02892.orc') where i32 between -150 and 250;
-select count(), sum(number) from file('02892.orc') where indexHint(u64 in (42, 18446744073709550616));
-select count(1), min(u64), max(u64) from file('02892.orc') where u64 in (42, 18446744073709550616);
+select count(), sum(number) from file('02892.orc') where indexHint(i64 in (42, -1000));
+select count(1), min(i64), max(i64) from file('02892.orc') where i64 in (42, -1000);
select count(), sum(number) from file('02892.orc') where indexHint(i64 between -150 and 250);
select count(1), min(i64), max(i64) from file('02892.orc') where i64 between -150 and 250;
@@ -117,17 +111,17 @@ select count(1), min(d128), max(128) from file('02892.orc') where (d128 between
select count(), sum(number) from file('02892.orc') where indexHint(0);
select count(), min(number), max(number) from file('02892.orc') where indexHint(0);
-select count(), sum(number) from file('02892.orc') where indexHint(s like '99%' or u64 == 2000);
-select count(), min(s), max(s) from file('02892.orc') where (s like '99%' or u64 == 2000);
+select count(), sum(number) from file('02892.orc') where indexHint(s like '99%' or i64 == 2000);
+select count(), min(s), max(s) from file('02892.orc') where (s like '99%' or i64 == 2000);
select count(), sum(number) from file('02892.orc') where indexHint(s like 'z%');
select count(), min(s), max(s) from file('02892.orc') where (s like 'z%');
-select count(), sum(number) from file('02892.orc') where indexHint(u8 == 10 or 1 == 1);
-select count(), min(u8), max(u8) from file('02892.orc') where (u8 == 10 or 1 == 1);
-select count(), sum(number) from file('02892.orc') where indexHint(u8 < 0);
-select count(), min(u8), max(u8) from file('02892.orc') where (u8 < 0);
+select count(), sum(number) from file('02892.orc') where indexHint(i8 == 10 or 1 == 1);
+select count(), min(i8), max(i8) from file('02892.orc') where (i8 == 10 or 1 == 1);
+select count(), sum(number) from file('02892.orc') where indexHint(i8 < 0);
+select count(), min(i8), max(i8) from file('02892.orc') where (i8 < 0);
-- { echoOff }
-- Nullable and LowCardinality.


@@ -1,5 +1,5 @@
-1 Hello ClickHouse
-2 Hello World
+1 Well, Hello ClickHouse !
+2 Well, Hello World !
1 Hello ClickHouse
2 Hello World
Granules: 6/6
@@ -11,9 +11,9 @@
Granules: 6/6
Granules: 2/6
---
-1 Hello ClickHouse
-2 Hello World
-6 World Champion
+1 Well, Hello ClickHouse !
+2 Well, Hello World !
+6 True World Champion
1 Hello ClickHouse
2 Hello World
6 World Champion
@@ -26,7 +26,7 @@
Granules: 6/6
Granules: 3/6
---
-5 OLAP Database
+5 Its An OLAP Database
5 OLAP Database
Granules: 6/6
Granules: 1/6


@@ -21,21 +21,22 @@ ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;
-INSERT INTO tokenbf_tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
+INSERT INTO tokenbf_tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion');
INSERT INTO ngrambf_tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
-SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
+SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
-- Read 2/6 granules
-- Required string: 'Hello '
-- Alternatives: 'Hello ClickHouse', 'Hello World'
+-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes=1
-SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -46,7 +47,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes=1
-SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -78,18 +79,19 @@ SETTINGS
SELECT '---';
-SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
+SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
-- Read 3/6 granules
-- Required string: -
-- Alternatives: 'ClickHouse', 'World'
+-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -100,7 +102,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -131,18 +133,19 @@ SETTINGS
SELECT '---';
-SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP.*') ORDER BY id;
+SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP .*') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, 'OLAP.*') ORDER BY id;
-- Read 1/6 granules
-- Required string: 'OLAP'
-- Alternatives: -
+-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@@ -152,7 +155,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
-SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
+SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'


@@ -13,6 +13,8 @@ create table dist_out as data engine=Distributed(test_shard_localhost, currentDa
set prefer_localhost_replica=0;
+set min_untracked_memory='4Mi'; -- Disable precise memory tracking
insert into dist_in select number/100, number from system.numbers limit 1e6 settings max_memory_usage='20Mi';
system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED }
system flush distributed dist_in settings max_memory_usage=0;


@@ -1,5 +1,10 @@
┏━━━┓
┃ x ┃
┡━━━┩
1. │ █ │
└───┘
+┏━━━━━━━━━┳━━━━━━━━━━┓
+┃ 'Hello' ┃ x ┃
+┡━━━━━━━━━╇━━━━━━━━━━┩
+1. │ Hello │ █ test █ │
+└─────────┴──────────┘


@@ -1 +1,2 @@
-SELECT format('\x1b[38;2;{0};{1};{2}m█\x1b[0m', 255, 128, 0) AS x FORMAT Pretty;
+SELECT format('\x1b[38;2;{0};{1};{2}m█\x1b[0m', 255, 128, 128) AS x FORMAT Pretty;
+SELECT 'Hello', format('\x1b[38;2;{0};{1};{2}m█\x1b[0m test \x1b[38;2;{0};{1};{2}m█\x1b[0m', 255, 128, 128) AS x FORMAT Pretty;


@@ -10,3 +10,5 @@ Hello 1
Hello 1
Hello 2
Hello 2
+2020-01-01 a 2
+2020-01-01 b 4


@@ -8,3 +8,21 @@ SELECT s, arr, a FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) AR
SELECT s, arr FROM remote('127.0.0.2', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr;
SELECT s, arr FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr;
+create table hourly(
+hour datetime,
+`metric.names` Array(String),
+`metric.values` Array(Int64)
+) Engine=Memory
+as select '2020-01-01', ['a', 'b'], [1,2];
+SELECT
+toDate(hour) AS day,
+`metric.names`,
+sum(`metric.values`)
+FROM remote('127.0.0.{1,2}', currentDatabase(), hourly)
+ARRAY JOIN metric
+GROUP BY
+day,
+metric.names
+ORDER BY metric.names;


@@ -0,0 +1,41 @@
-- { echoOn }
select x from file('i8.orc') where indexHint(x = -128);
-128
select x from file('i8.orc') where indexHint(x = 128);
select x from file('u8.orc') where indexHint(x = -128);
-128
select x from file('u8.orc') where indexHint(x = 128);
select x from file('i16.orc') where indexHint(x = -32768);
-32768
select x from file('i16.orc') where indexHint(x = 32768);
select x from file('u16.orc') where indexHint(x = -32768);
-32768
select x from file('u16.orc') where indexHint(x = 32768);
select x from file('i32.orc') where indexHint(x = -2147483648);
-2147483648
select x from file('i32.orc') where indexHint(x = 2147483648);
select x from file('u32.orc') where indexHint(x = -2147483648);
-2147483648
select x from file('u32.orc') where indexHint(x = 2147483648);
select x from file('i64.orc') where indexHint(x = -9223372036854775808);
-9223372036854775808
select x from file('i64.orc') where indexHint(x = 9223372036854775808);
-9223372036854775808
select x from file('u64.orc') where indexHint(x = -9223372036854775808);
-9223372036854775808
select x from file('u64.orc') where indexHint(x = 9223372036854775808);
-9223372036854775808
select x from file('u8.orc', ORC, 'x UInt8') where indexHint(x > 10);
128
select x from file('u8.orc', ORC, 'x UInt64') where indexHint(x > 10);
18446744073709551488
select x from file('u16.orc', ORC, 'x UInt16') where indexHint(x > 10);
32768
select x from file('u16.orc', ORC, 'x UInt64') where indexHint(x > 10);
18446744073709518848
select x from file('u32.orc', ORC, 'x UInt32') where indexHint(x > 10);
2147483648
select x from file('u32.orc', ORC, 'x UInt64') where indexHint(x > 10);
18446744071562067968
select x from file('u64.orc', ORC, 'x UInt64') where indexHint(x > 10);
9223372036854775808


@@ -0,0 +1,42 @@
-- Tags: no-fasttest, no-parallel
set input_format_orc_filter_push_down = 1;
set engine_file_truncate_on_insert = 1;
insert into function file('i8.orc') select materialize(-128)::Int8 as x;
insert into function file('u8.orc') select materialize(128)::UInt8 as x;
insert into function file('i16.orc') select materialize(-32768)::Int16 as x;
insert into function file('u16.orc') select materialize(32768)::UInt16 as x;
insert into function file('i32.orc') select materialize(-2147483648)::Int32 as x;
insert into function file('u32.orc') select materialize(2147483648)::UInt32 as x;
insert into function file('i64.orc') select materialize(-9223372036854775808)::Int64 as x;
insert into function file('u64.orc') select materialize(9223372036854775808)::UInt64 as x;
-- { echoOn }
select x from file('i8.orc') where indexHint(x = -128);
select x from file('i8.orc') where indexHint(x = 128);
select x from file('u8.orc') where indexHint(x = -128);
select x from file('u8.orc') where indexHint(x = 128);
select x from file('i16.orc') where indexHint(x = -32768);
select x from file('i16.orc') where indexHint(x = 32768);
select x from file('u16.orc') where indexHint(x = -32768);
select x from file('u16.orc') where indexHint(x = 32768);
select x from file('i32.orc') where indexHint(x = -2147483648);
select x from file('i32.orc') where indexHint(x = 2147483648);
select x from file('u32.orc') where indexHint(x = -2147483648);
select x from file('u32.orc') where indexHint(x = 2147483648);
select x from file('i64.orc') where indexHint(x = -9223372036854775808);
select x from file('i64.orc') where indexHint(x = 9223372036854775808);
select x from file('u64.orc') where indexHint(x = -9223372036854775808);
select x from file('u64.orc') where indexHint(x = 9223372036854775808);
select x from file('u8.orc', ORC, 'x UInt8') where indexHint(x > 10);
select x from file('u8.orc', ORC, 'x UInt64') where indexHint(x > 10);
select x from file('u16.orc', ORC, 'x UInt16') where indexHint(x > 10);
select x from file('u16.orc', ORC, 'x UInt64') where indexHint(x > 10);
select x from file('u32.orc', ORC, 'x UInt32') where indexHint(x > 10);
select x from file('u32.orc', ORC, 'x UInt64') where indexHint(x > 10);
select x from file('u64.orc', ORC, 'x UInt64') where indexHint(x > 10);
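The expected values in the reference above follow from two's-complement sign extension: ORC stores these columns as signed integers (note that 128 written as UInt8 reads back as -128), so forcing a wider unsigned schema yields the sign-extended value reinterpreted as unsigned. A quick check of that arithmetic, illustrative only and not part of the commit:

# -128 sign-extended to 64 bits and read back as unsigned:
assert -128 % (1 << 64) == 18446744073709551488
# the Int32 minimum behaves the same way:
assert -2147483648 % (1 << 64) == 18446744071562067968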


@@ -0,0 +1,39 @@
QUERY id: 0
PROJECTION COLUMNS
id UInt64
PROJECTION
LIST id: 1, nodes: 1
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.test, final: 1
WHERE
FUNCTION id: 4, function_name: in, function_type: ordinary, result_type: UInt8
ARGUMENTS
LIST id: 5, nodes: 2
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
QUERY id: 6, is_subquery: 1, is_distinct: 1
PROJECTION COLUMNS
id UInt64
PROJECTION
LIST id: 7, nodes: 1
COLUMN id: 8, column_name: id, result_type: UInt64, source_id: 9
JOIN TREE
TABLE id: 9, alias: __table1, table_name: default.test, final: 1
ORDER BY
LIST id: 10, nodes: 1
SORT id: 11, sort_direction: ASCENDING, with_fill: 0
EXPRESSION
COLUMN id: 8, column_name: id, result_type: UInt64, source_id: 9
LIMIT
CONSTANT id: 12, constant_value: UInt64_4, constant_value_type: UInt64
ORDER BY
LIST id: 13, nodes: 1
SORT id: 14, sort_direction: ASCENDING, with_fill: 0
EXPRESSION
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
LIMIT BY LIMIT
CONSTANT id: 15, constant_value: UInt64_1, constant_value_type: UInt64
LIMIT BY
LIST id: 16, nodes: 1
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
SETTINGS allow_experimental_analyzer=1


@@ -0,0 +1,16 @@
CREATE TABLE test
ENGINE = ReplacingMergeTree
PRIMARY KEY id
AS SELECT number AS id FROM numbers(100);
EXPLAIN QUERY TREE SELECT id
FROM test FINAL
WHERE id IN (
SELECT DISTINCT id
FROM test FINAL
ORDER BY id ASC
LIMIT 4
)
ORDER BY id ASC
LIMIT 1 BY id
SETTINGS allow_experimental_analyzer = 1;


@@ -0,0 +1,83 @@
-------- Bloom filter --------
-- No skip for prefix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for prefix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for suffix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for suffix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for substring
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for substring with complete token
Parts: 1/1
Parts: 0/1
-- No skip for multiple substrings
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for multiple substrings with complete tokens
Parts: 1/1
Parts: 0/1
-- No skip for multiple non-existing substrings, only one with complete token
Parts: 1/1
Parts: 1/1
-------- GIN filter --------
-- No skip for prefix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for prefix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for suffix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for suffix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for substring
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for substring with complete token
Parts: 1/1
Parts: 0/1
-- No skip for multiple substrings
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for multiple substrings with complete tokens
Parts: 1/1
Parts: 0/1
-- No skip for multiple non-existing substrings, only one with complete token
Parts: 1/1
Parts: 1/1


@@ -0,0 +1,229 @@
SELECT '-------- Bloom filter --------';
SELECT '';
DROP TABLE IF EXISTS 03165_token_bf;
SET allow_experimental_full_text_index=1;
CREATE TABLE 03165_token_bf
(
id Int64,
message String,
INDEX idx_message message TYPE tokenbf_v1(32768, 3, 2) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
INSERT INTO 03165_token_bf VALUES(1, 'Service is not ready');
SELECT '-- No skip for prefix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv');
SELECT '';
SELECT '-- Skip for prefix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i');
SELECT '';
SELECT '-- No skip for suffix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady');
SELECT '';
SELECT '-- Skip for suffix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady');
SELECT '';
SELECT '-- No skip for substring';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, 'no')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE match(message, 'no');
SELECT '';
SELECT '-- Skip for substring with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, ' xyz ')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE match(message, ' xyz ');
SELECT '';
SELECT '-- No skip for multiple substrings';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no']);
SELECT '';
SELECT '-- Skip for multiple substrings with complete tokens';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz '])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz ']);
SELECT '';
SELECT '-- No skip for multiple non-existing substrings, only one with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz']);
DROP TABLE IF EXISTS 03165_token_bf;
SELECT '';
SELECT '-------- GIN filter --------';
SELECT '';
SET allow_experimental_inverted_index=1;
DROP TABLE IF EXISTS 03165_token_ft;
CREATE TABLE 03165_token_ft
(
id Int64,
message String,
INDEX idx_message message TYPE full_text() GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
INSERT INTO 03165_token_ft VALUES(1, 'Service is not ready');
SELECT '-- No skip for prefix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv');
SELECT '';
SELECT '-- Skip for prefix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i');
SELECT '';
SELECT '-- No skip for suffix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady');
SELECT '';
SELECT '-- Skip for suffix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady');
SELECT '';
SELECT '-- No skip for substring';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, 'no')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE match(message, 'no');
SELECT '';
SELECT '-- Skip for substring with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, ' xyz ')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE match(message, ' xyz ');
SELECT '';
SELECT '-- No skip for multiple substrings';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, ['ce', 'no'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, ['ce', 'no']);
SELECT '';
SELECT '-- Skip for multiple substrings with complete tokens';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', ' yz '])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', ' yz ']);
SELECT '';
SELECT '-- No skip for multiple non-existing substrings, only one with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', 'yz'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', 'yz']);


@@ -0,0 +1 @@
()


@@ -0,0 +1 @@
SELECT ()||();


@@ -0,0 +1 @@
test


@@ -0,0 +1 @@
SELECT test AS column


@@ -8,7 +8,7 @@ sys.path.insert(0, os.path.join(CURDIR))
import uexpect
-prompt = ":\) "
+prompt = ":\\) "
end_of_block = r".*\r\n.*\r\n"
@@ -21,7 +21,7 @@ class client(object):
self.client.eol("\r")
self.client.logger(log, prefix=name)
self.client.timeout(120)
-self.client.expect("[#\$] ", timeout=60)
+self.client.expect("[#\\$] ", timeout=60)
self.client.send(command)
def __enter__(self):


@@ -10,7 +10,7 @@ import uexpect
class shell(object):
-def __init__(self, command=None, name="", log=None, prompt="[#\$] "):
+def __init__(self, command=None, name="", log=None, prompt="[#\\$] "):
if command is None:
command = ["/bin/bash", "--noediting"]
self.prompt = prompt

Some files were not shown because too many files have changed in this diff.