Merge branch 'master' into variant_inference
This commit is contained in: commit 23e89074c2
.github/workflows/backport_branches.yml (vendored): 7 changes
@@ -260,13 +260,18 @@ jobs:
      - name: Finish label
        if: ${{ !failure() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
-         python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+         python3 merge_pr.py --set-ci-status
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
.github/workflows/jepsen.yml (vendored): 1 change
@@ -64,6 +64,7 @@ jobs:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
.github/workflows/merge_queue.yml (vendored): 7 changes
@@ -103,9 +103,14 @@ jobs:
      - name: Check and set merge status
        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          cd "$GITHUB_WORKSPACE/tests/ci"
-         python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+         python3 merge_pr.py --set-ci-status
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
.github/workflows/nightly.yml (vendored): 1 change
@@ -52,6 +52,7 @@ jobs:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
.github/workflows/pull_request.yml (vendored): 6 changes
@@ -170,7 +170,11 @@ jobs:
        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
-         python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
+         python3 merge_pr.py --set-ci-status
      - name: Check Workflow results
        uses: ./.github/actions/check_workflow
        with:
.github/workflows/release_branches.yml (vendored): 4 changes
@@ -481,12 +481,10 @@ jobs:
      - name: Finish label
        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
@@ -218,6 +218,6 @@ function stop_logs_replication
    clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
        tee /dev/stderr
    } | {
-       xargs -n1 -r -i clickhouse-client --query "drop table {}"
+       timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}"
    }
}
@@ -28,9 +28,9 @@
    </table_function_remote_max_addresses>

    <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-   <allow_experimental_analyzer>
+   <enable_analyzer>
        <readonly/>
-   </allow_experimental_analyzer>
+   </enable_analyzer>

    <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
    <allow_experimental_object_type>
@@ -139,9 +139,9 @@ EOL
    </table_function_remote_max_addresses>

    <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-   <allow_experimental_analyzer>
+   <enable_analyzer>
        <readonly/>
-   </allow_experimental_analyzer>
+   </enable_analyzer>

    <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
    <allow_experimental_object_type>
docs/changelogs/v24.6.3.95-stable.md (new file): 67 additions
@@ -0,0 +1,67 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.3.95-stable (8325c920d11) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)

#### Improvement
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
@@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query

`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.

-To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag.
+To address problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline`, called `QueryTree`. It is fully production-ready, but just in case it can be turned off by setting `enable_analyzer` to `false`.
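A quick sketch of the switch described above (the analyzer is on by default):

```sql
-- Fall back to the old interpreter for a single query:
SELECT 1 + 1 AS x SETTINGS enable_analyzer = 0;

-- Or disable it for the whole session:
SET enable_analyzer = 0;
```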

## Functions {#functions}
@@ -123,7 +123,7 @@ To ensure consistent and expected results, especially when migrating old queries
In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.

- `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.

- `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.

**Example:**
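A minimal sketch of the two supertype rules (the table and column names are illustrative, not from this diff):

```sql
CREATE TABLE t1 (id UInt64, s LowCardinality(String)) ENGINE = Memory;
CREATE TABLE t2 (id Nullable(UInt64), s String) ENGINE = Memory;

-- Expected under the standardized rules:
--   s  -> String           (the LowCardinality modifier is discarded)
--   id -> Nullable(UInt64) (the nullable property is preserved)
SELECT toTypeName(s), toTypeName(id)
FROM t1
JOIN t2 USING (id, s);
```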
@@ -144,7 +144,7 @@ During projection names computation, aliases are not substituted.

SELECT
    1 + 1 AS x,
    x + 1
-SETTINGS allow_experimental_analyzer = 0
+SETTINGS enable_analyzer = 0
FORMAT PrettyCompact

┌─x─┬─plus(plus(1, 1), 1)─┐
@@ -154,7 +154,7 @@ FORMAT PrettyCompact

SELECT
    1 + 1 AS x,
    x + 1
-SETTINGS allow_experimental_analyzer = 1
+SETTINGS enable_analyzer = 1
FORMAT PrettyCompact

┌─x─┬─plus(x, 1)─┐
@@ -177,7 +177,7 @@ SELECT toTypeName(if(0, [2, 3, 4], 'String'))

### Heterogeneous clusters

-The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
+The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `enable_analyzer` setting values.

### Mutations are interpreted by previous analyzer
@@ -4051,7 +4051,7 @@ Rewrite aggregate functions with if expression as argument when logically equivalent
For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance.

:::note
-Supported only with experimental analyzer (`allow_experimental_analyzer = 1`).
+Supported only with experimental analyzer (`enable_analyzer = 1`).
:::

## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
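A sketch of the equivalence the note refers to, with `numbers()` as a stand-in table; note that in executable form the `-OrNullIf` combinator takes the condition as its last argument:

```sql
-- Logically equivalent; the rewrite turns the first form into the second.
SELECT avg(if(number % 2 = 0, number, NULL)) FROM numbers(10);
SELECT avgOrNullIf(number, number % 2 = 0) FROM numbers(10);
```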
@@ -115,7 +115,7 @@ ClickHouse is a full-fledged column-oriented DBMS. Data

`InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is written rather messily and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.

-To solve the current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline`, called `QueryTree`. It is not yet ready for production use, but it can be tested with the `allow_experimental_analyzer` flag.
+To solve the problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline`, called `QueryTree`. It is fully ready for production use, but just in case it can be turned off by setting `enable_analyzer` to `false`.

## Functions {#functions}
@@ -33,12 +33,12 @@ ConnectionEstablisher::ConnectionEstablisher(
{
}

-void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message)
+void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message, bool force_connected)
{
    try
    {
        ProfileEvents::increment(ProfileEvents::DistributedConnectionTries);
-       result.entry = pool->get(*timeouts, settings, /* force_connected = */ false);
+       result.entry = pool->get(*timeouts, settings, force_connected);
        AsyncCallbackSetter async_setter(&*result.entry, std::move(async_callback));

        UInt64 server_revision = 0;
@@ -24,7 +24,13 @@ public:
        const QualifiedTableName * table_to_check = nullptr);

    /// Establish connection and save it in result, write possible exception message in fail_message.
-   void run(TryResult & result, std::string & fail_message);
+   /// The connection is returned from the connection pool and can be stale. Use the force_connected flag to ensure that the connection is a working one.
+   /// NOTE: force_connected is false by default due to the following consideration ...
+   /// When true, it implies sending a Ping packet to the other peer and, if that fails, re-establishing the connection.
+   /// The Ping-Pong round trip can be unnecessary if the connection is still alive.
+   /// So, the optimistic approach is used by default. In this case, stale connections can be handled by retrying,
+   /// - see ConnectionPoolWithFailover, for example.
+   void run(TryResult & result, std::string & fail_message, bool force_connected = false);

    /// Set async callback that will be called when reading from socket blocks.
    void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); }
@@ -943,8 +943,7 @@ class IColumn;
    \
    M(Bool, allow_experimental_join_condition, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y.", 0) \
    \
    /* Analyzer: It's not experimental anymore (WIP) */ \
-   M(Bool, allow_experimental_analyzer, true, "Allow new query analyzer.", IMPORTANT) \
+   M(Bool, allow_experimental_analyzer, true, "Allow new query analyzer.", IMPORTANT) ALIAS(enable_analyzer) \
    M(Bool, analyzer_compatibility_join_using_top_level_identifier, false, "Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather than `t1.b = t2.b`).", 0) \
    \
    M(Bool, allow_experimental_live_view, false, "Enable LIVE VIEW. Not mature enough.", 0) \
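Since `enable_analyzer` is declared as an alias here, both spellings set the same underlying setting; a quick sketch:

```sql
SET enable_analyzer = 1;              -- new name
SET allow_experimental_analyzer = 1;  -- old name, still accepted through the alias
```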
@@ -80,6 +80,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
    {"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},
    {"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
    {"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
+   {"enable_analyzer", 1, 1, "Added an alias to a setting `allow_experimental_analyzer`."},
}
},
{"24.7",
@@ -690,6 +690,12 @@ void validateAnalyzerSettings(ASTPtr ast, bool context_value)
            if (top_level != value->safeGet<bool>())
                throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'allow_experimental_analyzer' is changed in the subquery. Top level value: {}", top_level);
        }

+       if (auto * value = set_query->changes.tryGet("enable_analyzer"))
+       {
+           if (top_level != value->safeGet<bool>())
+               throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'enable_analyzer' is changed in the subquery. Top level value: {}", top_level);
+       }
    }

    for (auto child : node->children)
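In SQL terms, the duplicated check above rejects queries that flip the analyzer setting inside a subquery; a sketch of a query that would trigger it:

```sql
-- Expected to fail with INCORRECT_QUERY: the subquery changes 'enable_analyzer'
-- while the top-level query uses a different value.
SELECT *
FROM (SELECT 1 SETTINGS enable_analyzer = 0)
SETTINGS enable_analyzer = 1;
```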
@@ -442,6 +442,15 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes

    const auto & params = aggregating->getParams();
    const auto & keys = params.keys;

+   /** The filter is applied either to aggregation keys or to the aggregation result
+     * (columns under aggregation are not available in the outer scope, so we can't have a filter for them).
+     * The filter for the aggregation result is not pushed down, so the only valid case is filtering aggregation keys.
+     * In case keys are empty, do not push down the filter.
+     * Also, with empty keys we can have an issue with `empty_result_for_aggregation_by_empty_set`,
+     * since we can generate a result row when everything is filtered.
+     */
+   if (keys.empty())
+       return 0;

    const bool filter_column_is_not_among_aggregation_keys
        = std::find(keys.begin(), keys.end(), filter->getFilterColumnName()) == keys.end();
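A SQL sketch of the two cases the new comment distinguishes (the table and column names are illustrative):

```sql
-- Filter on an aggregation key: safe to push below the aggregation.
SELECT k, count() FROM t GROUP BY k HAVING k = 1;

-- Aggregation with no keys: there is no key column to filter on,
-- so the optimization bails out (the `return 0` above).
SELECT count() FROM t HAVING count() > 10;
```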
@@ -89,12 +89,12 @@ RemoteQueryExecutor::RemoteQueryExecutor(
        auto table_name = main_table.getQualifiedName();

        ConnectionEstablisher connection_establisher(pool, &timeouts, current_settings, log, &table_name);
-       connection_establisher.run(result, fail_message);
+       connection_establisher.run(result, fail_message, /*force_connected=*/ true);
    }
    else
    {
        ConnectionEstablisher connection_establisher(pool, &timeouts, current_settings, log, nullptr);
-       connection_establisher.run(result, fail_message);
+       connection_establisher.run(result, fail_message, /*force_connected=*/ true);
    }

    std::vector<IConnectionPool::Entry> connection_entries;
@@ -1,4 +1,5 @@
#include <Storages/MergeTree/IMergeTreeDataPartWriter.h>
+#include <Common/MemoryTrackerBlockerInThread.h>

namespace DB
{
@@ -71,9 +72,21 @@ IMergeTreeDataPartWriter::IMergeTreeDataPartWriter(

Columns IMergeTreeDataPartWriter::releaseIndexColumns()
{
-   return Columns(
-       std::make_move_iterator(index_columns.begin()),
-       std::make_move_iterator(index_columns.end()));
+   /// The memory for the index was allocated without the thread memory tracker.
+   /// We need to deallocate it in shrinkToFit without the memory tracker as well.
+   MemoryTrackerBlockerInThread temporarily_disable_memory_tracker;
+
+   Columns result;
+   result.reserve(index_columns.size());
+
+   for (auto & column : index_columns)
+   {
+       column->shrinkToFit();
+       result.push_back(std::move(column));
+   }
+
+   index_columns.clear();
+   return result;
}

SerializationPtr IMergeTreeDataPartWriter::getSerialization(const String & column_name) const
@@ -255,6 +255,12 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex()
        index_compressor_stream = std::make_unique<CompressedWriteBuffer>(*index_file_hashing_stream, primary_key_compression_codec, settings.primary_key_compress_block_size);
        index_source_hashing_stream = std::make_unique<HashingWriteBuffer>(*index_compressor_stream);
    }

+   const auto & primary_key_types = metadata_snapshot->getPrimaryKey().data_types;
+   index_serializations.reserve(primary_key_types.size());
+
+   for (const auto & type : primary_key_types)
+       index_serializations.push_back(type->getDefaultSerialization());
    }
}
@@ -300,22 +306,30 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices()
            store = std::make_shared<GinIndexStore>(stream_name, data_part_storage, data_part_storage, storage_settings->max_digestion_size_per_segment);
            gin_index_stores[stream_name] = store;
        }

        skip_indices_aggregators.push_back(skip_index->createIndexAggregatorForPart(store, settings));
        skip_index_accumulated_marks.push_back(0);
    }
}

+void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndexRow(const Block & index_block, size_t row)
+{
+   chassert(index_block.columns() == index_serializations.size());
+   auto & index_stream = compress_primary_key ? *index_source_hashing_stream : *index_file_hashing_stream;
+
+   for (size_t i = 0; i < index_block.columns(); ++i)
+   {
+       const auto & column = index_block.getByPosition(i).column;
+
+       index_columns[i]->insertFrom(*column, row);
+       index_serializations[i]->serializeBinary(*column, row, index_stream, {});
+   }
+}

void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Block & primary_index_block, const Granules & granules_to_write)
{
-   size_t primary_columns_num = primary_index_block.columns();
-   if (index_columns.empty())
-   {
-       index_types = primary_index_block.getDataTypes();
-       index_columns.resize(primary_columns_num);
-       last_block_index_columns.resize(primary_columns_num);
-       for (size_t i = 0; i < primary_columns_num; ++i)
-           index_columns[i] = primary_index_block.getByPosition(i).column->cloneEmpty();
-   }
+   if (!metadata_snapshot->hasPrimaryKey())
+       return;

    {
        /** While filling index (index_columns), disable memory tracker.
@@ -326,25 +340,20 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Bloc
        */
        MemoryTrackerBlockerInThread temporarily_disable_memory_tracker;

+       if (index_columns.empty())
+           index_columns = primary_index_block.cloneEmptyColumns();

        /// Write index. The index contains Primary Key value for each `index_granularity` row.
        for (const auto & granule : granules_to_write)
        {
-           if (metadata_snapshot->hasPrimaryKey() && granule.mark_on_start)
-           {
-               for (size_t j = 0; j < primary_columns_num; ++j)
-               {
-                   const auto & primary_column = primary_index_block.getByPosition(j);
-                   index_columns[j]->insertFrom(*primary_column.column, granule.start_row);
-                   primary_column.type->getDefaultSerialization()->serializeBinary(
-                       *primary_column.column, granule.start_row, compress_primary_key ? *index_source_hashing_stream : *index_file_hashing_stream, {});
-               }
-           }
+           if (granule.mark_on_start)
+               calculateAndSerializePrimaryIndexRow(primary_index_block, granule.start_row);
        }
    }

-   /// store last index row to write final mark at the end of column
-   for (size_t j = 0; j < primary_columns_num; ++j)
-       last_block_index_columns[j] = primary_index_block.getByPosition(j).column;
+   /// Store block with last index row to write final mark at the end of column
+   if (with_final_mark)
+       last_index_block = primary_index_block;
}

void MergeTreeDataPartWriterOnDisk::calculateAndSerializeStatistics(const Block & block)
@@ -421,19 +430,14 @@ void MergeTreeDataPartWriterOnDisk::fillPrimaryIndexChecksums(MergeTreeData::Dat

    if (index_file_hashing_stream)
    {
-       if (write_final_mark)
+       if (write_final_mark && last_index_block)
        {
-           for (size_t j = 0; j < index_columns.size(); ++j)
-           {
-               const auto & column = *last_block_index_columns[j];
-               size_t last_row_number = column.size() - 1;
-               index_columns[j]->insertFrom(column, last_row_number);
-               index_types[j]->getDefaultSerialization()->serializeBinary(
-                   column, last_row_number, compress_primary_key ? *index_source_hashing_stream : *index_file_hashing_stream, {});
-           }
-           last_block_index_columns.clear();
+           MemoryTrackerBlockerInThread temporarily_disable_memory_tracker;
+           calculateAndSerializePrimaryIndexRow(last_index_block, last_index_block.rows() - 1);
        }

+       last_index_block.clear();

        if (compress_primary_key)
        {
            index_source_hashing_stream->finalize();
@@ -173,10 +173,10 @@ protected:
    std::unique_ptr<HashingWriteBuffer> index_source_hashing_stream;
    bool compress_primary_key;

-   DataTypes index_types;
-   /// Index columns from the last block
-   /// It's written to index file in the `writeSuffixAndFinalizePart` method
-   Columns last_block_index_columns;
+   /// Last block with index columns.
+   /// It's written to index file in the `writeSuffixAndFinalizePart` method.
+   Block last_index_block;
+   Serializations index_serializations;

    bool data_written = false;
@@ -193,6 +193,7 @@ private:
    void initStatistics();

    virtual void fillIndexGranularity(size_t index_granularity_for_block, size_t rows_in_block) = 0;
+   void calculateAndSerializePrimaryIndexRow(const Block & index_block, size_t row);

    struct ExecutionStatistics
    {
@@ -371,67 +371,78 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre

bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent)
{
    bool maybe_useful = false;
    if (!node.isFunction())
        return false;

    if (node.isFunction())
    const auto function = node.toFunctionNode();
    auto arguments_size = function.getArgumentsSize();
    auto function_name = function.getFunctionName();

    if (parent == nullptr)
    {
        const auto function = node.toFunctionNode();
        auto arguments_size = function.getArgumentsSize();
        auto function_name = function.getFunctionName();

        /// Recurse a little bit for indexOf().
        for (size_t i = 0; i < arguments_size; ++i)
        {
            auto argument = function.getArgumentAt(i);
            if (traverseFunction(argument, out, &node))
                maybe_useful = true;
        }

        if (arguments_size != 2)
            return false;

        auto lhs_argument = function.getArgumentAt(0);
        auto rhs_argument = function.getArgumentAt(1);

        if (functionIsInOrGlobalInOperator(function_name))
        {
            if (auto future_set = rhs_argument.tryGetPreparedSet(); future_set)
            {
                if (auto prepared_set = future_set->buildOrderedSetInplace(rhs_argument.getTreeContext().getQueryContext()); prepared_set)
                {
                    if (prepared_set->hasExplicitSetElements())
                    {
                        const auto prepared_info = getPreparedSetInfo(prepared_set);
                        if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out))
                            maybe_useful = true;
                    }
                }
            }
        }
        else if (function_name == "equals" ||
                 function_name == "notEquals" ||
                 function_name == "has" ||
                 function_name == "mapContains" ||
                 function_name == "indexOf" ||
                 function_name == "hasAny" ||
                 function_name == "hasAll")
        {
            Field const_value;
            DataTypePtr const_type;

            if (rhs_argument.tryGetConstant(const_value, const_type))
            {
                if (traverseTreeEquals(function_name, lhs_argument, const_type, const_value, out, parent))
                    maybe_useful = true;
            }
            else if (lhs_argument.tryGetConstant(const_value, const_type))
            {
                if (traverseTreeEquals(function_name, rhs_argument, const_type, const_value, out, parent))
                    maybe_useful = true;
            }
            return true;
        }
    }

    return maybe_useful;
    if (arguments_size != 2)
        return false;

    /// indexOf() should be inside comparison function, e.g. greater(indexOf(key, 42), 0).
    /// Other conditions should be at top level, e.g. equals(key, 42), not equals(equals(key, 42), 1).
    if ((function_name == "indexOf") != (parent != nullptr))
        return false;

    auto lhs_argument = function.getArgumentAt(0);
    auto rhs_argument = function.getArgumentAt(1);

    if (functionIsInOrGlobalInOperator(function_name))
    {
        if (auto future_set = rhs_argument.tryGetPreparedSet(); future_set)
        {
            if (auto prepared_set = future_set->buildOrderedSetInplace(rhs_argument.getTreeContext().getQueryContext()); prepared_set)
            {
                if (prepared_set->hasExplicitSetElements())
                {
                    const auto prepared_info = getPreparedSetInfo(prepared_set);
                    if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out))
                        return true;
                }
            }
        }
        return false;
    }

    if (function_name == "equals" ||
        function_name == "notEquals" ||
        function_name == "has" ||
        function_name == "mapContains" ||
        function_name == "indexOf" ||
        function_name == "hasAny" ||
        function_name == "hasAll")
    {
        Field const_value;
        DataTypePtr const_type;

        if (rhs_argument.tryGetConstant(const_value, const_type))
        {
            if (traverseTreeEquals(function_name, lhs_argument, const_type, const_value, out, parent))
                return true;
        }
        else if (lhs_argument.tryGetConstant(const_value, const_type) && (function_name == "equals" || function_name == "notEquals"))
        {
            if (traverseTreeEquals(function_name, rhs_argument, const_type, const_value, out, parent))
                return true;
        }

        return false;
    }

    return false;
}

bool MergeTreeIndexConditionBloomFilter::traverseTreeIn(
@@ -682,6 +682,34 @@ class CI:
        assert res, f"not a build [{build_name}] or invalid JobConfig"
        return res

+    @classmethod
+    def is_workflow_ok(cls) -> bool:
+        # TODO: temporary method to make Mergeable check working
+        res = cls.GH.get_workflow_results()
+        if not res:
+            print("ERROR: no workflow results found")
+            return False
+        for workflow_job, workflow_data in res.items():
+            status = workflow_data["result"]
+            if status in (
+                cls.GH.ActionStatuses.SUCCESS,
+                cls.GH.ActionStatuses.SKIPPED,
+            ):
+                print(f"Workflow status for [{workflow_job}] is [{status}] - continue")
+            elif status in (cls.GH.ActionStatuses.FAILURE,):
+                if workflow_job in (
+                    WorkflowStages.TESTS_2,
+                    WorkflowStages.TESTS_2_WW,
+                ):
+                    print(
+                        f"Failed Workflow status for [{workflow_job}], it's not required - continue"
+                    )
+                    continue
+
+                print(f"Failed Workflow status for [{workflow_job}]")
+                return False
+        return True


if __name__ == "__main__":
    parser = ArgumentParser(
@@ -44,9 +44,10 @@ class GH:
        FAILURE = "failure"
        PENDING = "pending"
        SUCCESS = "success"
+       SKIPPED = "skipped"

    @classmethod
-   def _get_workflow_results(cls):
+   def get_workflow_results(cls):
        if not Path(Envs.WORKFLOW_RESULT_FILE).exists():
            print(
                f"ERROR: Failed to get workflow results from file [{Envs.WORKFLOW_RESULT_FILE}]"
@@ -65,13 +66,13 @@ class GH:

    @classmethod
    def print_workflow_results(cls):
-       res = cls._get_workflow_results()
+       res = cls.get_workflow_results()
        results = [f"{job}: {data['result']}" for job, data in res.items()]
        cls.print_in_group("Workflow results", results)

    @classmethod
    def is_workflow_ok(cls) -> bool:
-       res = cls._get_workflow_results()
+       res = cls.get_workflow_results()
        for _job, data in res.items():
            if data["result"] == "failure":
                return False
@@ -79,7 +80,7 @@ class GH:

    @classmethod
    def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]:
-       res = cls._get_workflow_results()
+       res = cls.get_workflow_results()
        if wf_job_name in res:
            return res[wf_job_name]["result"]  # type: ignore
        else:
@@ -23,7 +23,7 @@ from commit_status_helper import (
from get_robot_token import get_best_robot_token
from github_helper import GitHub, NamedUser, PullRequest, Repository
from pr_info import PRInfo
-from report import SUCCESS, FAILURE
+from report import SUCCESS
from env_helper import GITHUB_UPSTREAM_REPOSITORY, GITHUB_REPOSITORY
from synchronizer_utils import SYNC_BRANCH_PREFIX
from ci_config import CI
@@ -248,23 +248,27 @@ def main():
    repo = gh.get_repo(args.repo)

    if args.set_ci_status:
+       CI.GH.print_workflow_results()
        # set Mergeable check status and exit
-       assert args.wf_status in (FAILURE, SUCCESS)
        commit = get_commit(gh, args.pr_info.sha)
        statuses = get_commit_filtered_statuses(commit)

        has_failed_statuses = False
-       has_native_failed_status = False
        for status in statuses:
            print(f"Check status [{status.context}], [{status.state}]")
-           if CI.is_required(status.context) and status.state != SUCCESS:
-               print(f"WARNING: Failed status [{status.context}], [{status.state}]")
+           if (
+               CI.is_required(status.context)
+               and status.state != SUCCESS
+               and status.context != CI.StatusNames.SYNC
+           ):
+               print(
+                   f"WARNING: Not success status [{status.context}], [{status.state}]"
+               )
                has_failed_statuses = True
-               if status.context != CI.StatusNames.SYNC:
-                   has_native_failed_status = True

-       if args.wf_status == SUCCESS or has_failed_statuses:
-           # set Mergeable check if workflow is successful (green)
+       workflow_ok = CI.is_workflow_ok()
+       if workflow_ok or has_failed_statuses:
+           # set Mergeable Check if workflow is successful (green)
            # or if we have GH statuses with failures (red)
            # to avoid false-green on a died runner
            state = trigger_mergeable_check(
@@ -283,7 +287,7 @@ def main():
            print(
                "Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status"
            )
-       if args.wf_status == SUCCESS and not has_native_failed_status:
+       if workflow_ok and not has_failed_statuses:
            sys.exit(0)
        else:
            sys.exit(1)
@@ -4484,7 +4484,7 @@ class ClickHouseInstance:

        use_old_analyzer = os.environ.get("CLICKHOUSE_USE_OLD_ANALYZER") is not None
        # If specific version was used there can be no
-       # allow_experimental_analyzer setting, so do this only if it was
+       # enable_analyzer setting, so do this only if it was
        # explicitly requested.
        if self.tag:
            use_old_analyzer = False
@@ -1,4 +1,5 @@
+import uuid
import time

import pytest
from helpers.cluster import ClickHouseCluster
@@ -58,12 +59,12 @@ ORDER BY h;"""
    == TSV([["backward", "true"], ["current", "true"]])
)

-# Should be enabled everywhere
-analyzer_enabled = current.query(
+# Should be enabled explicitly on the old instance.
+analyzer_enabled = backward.query(
    f"""
SELECT
DISTINCT Settings['allow_experimental_analyzer']
-FROM clusterAllReplicas('test_cluster_mixed', system.query_log)
+FROM system.query_log
WHERE initial_query_id = '{query_id}';"""
)
@@ -78,6 +79,8 @@ WHERE initial_query_id = '{query_id}';"""
    current.query("SYSTEM FLUSH LOGS")
    backward.query("SYSTEM FLUSH LOGS")

+   # The old version doesn't know about the alias.
+   # For this we will ask about the old experimental name.
    assert (
        backward.query(
            """
@@ -98,3 +101,26 @@ WHERE initial_query_id = '{query_id}';"""
    )

    assert TSV(analyzer_enabled) == TSV("0")

+   # Only the new version knows about the alias,
+   # and it will send the old setting `allow_experimental_analyzer`
+   # to the remote server.
+   query_id = str(uuid.uuid4())
+   current.query(
+       "SELECT * FROM clusterAllReplicas('test_cluster_mixed', system.tables) SETTINGS enable_analyzer = 1;",
+       query_id=query_id,
+   )
+
+   current.query("SYSTEM FLUSH LOGS")
+   backward.query("SYSTEM FLUSH LOGS")
+
+   # Should be enabled explicitly everywhere.
+   analyzer_enabled = current.query(
+       f"""
+SELECT
+DISTINCT Settings['allow_experimental_analyzer']
+FROM system.query_log
+WHERE initial_query_id = '{query_id}';"""
+   )
+
+   assert TSV(analyzer_enabled) == TSV("1")
@@ -31,6 +31,9 @@ def started_cluster():


def test_distributed_type_object(started_cluster):
+   node1.query("TRUNCATE TABLE local_table")
+   node2.query("TRUNCATE TABLE local_table")
+
    node1.query(
        'INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": 10}}'
    )
@@ -89,7 +92,7 @@ def test_distributed_type_object(started_cluster):
    assert (
        TSV(
            node1.query(
-               "SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS allow_experimental_analyzer = 0"
+               "SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS enable_analyzer = 0"
            )
        )
        == expected
@@ -1,7 +1,7 @@
<clickhouse>
    <profiles>
        <default>
-           <allow_experimental_analyzer>1</allow_experimental_analyzer>
+           <enable_analyzer>1</enable_analyzer>
            <allow_experimental_parallel_reading_from_replicas>1</allow_experimental_parallel_reading_from_replicas>
            <cluster_for_parallel_replicas>default</cluster_for_parallel_replicas>
            <max_parallel_replicas>100</max_parallel_replicas>
@@ -459,7 +459,7 @@ def test_show_profiles():

    query_possible_response = [
        "CREATE SETTINGS PROFILE `default`\n",
-       "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n",
+       "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n",
    ]
    assert (
        instance.query("SHOW CREATE SETTINGS PROFILE default")
@@ -470,7 +470,7 @@ def test_show_profiles():
        "CREATE SETTINGS PROFILE `default`\n"
        "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
        "CREATE SETTINGS PROFILE `xyz`\n",
-       "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n"
+       "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n"
        "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
        "CREATE SETTINGS PROFILE `xyz`\n",
    ]
@@ -482,7 +482,7 @@ def test_show_profiles():
        "CREATE SETTINGS PROFILE `xyz`\n"
    )
    expected_access_analyzer = (
-       "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n"
+       "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n"
        "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
        "CREATE SETTINGS PROFILE `xyz`\n"
    )
@@ -16,4 +16,4 @@
    <query>SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null;</query>
    <query>SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS
    allow_experimental_analyzer=1</query>
</test>
</test>
@@ -28,8 +28,8 @@ RENAME TABLE set2 TO set;
SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set;

create table tab (x String) engine = MergeTree order by x as select 'Hello';
-SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=0;
-SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=1;
+SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=0;
+SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=1;
DROP TABLE tab;

DROP TABLE set;
@@ -3,5 +3,6 @@ SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN
SET join_algorithm = 'auto';
SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2;

+-- Just to test that we preserved the old setting name, we use `allow_experimental_analyzer` instead of `enable_analyzer` here.
SET allow_experimental_analyzer = 1;
SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2;
@@ -1,6 +1,6 @@
-- Tags: shard

-set allow_experimental_analyzer = 1;
+set enable_analyzer = 1;
set enable_positional_arguments = 0;

select 40 as z from (select * from system.numbers limit 3) group by z;
@@ -4,10 +4,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1";
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON";
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact";

-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1";
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON";
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON";
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact";
@ -1,8 +1,8 @@
|
||||
SET allow_experimental_analyzer = 1;
|
||||
SET enable_analyzer = 1;
|
||||
|
||||
-- https://github.com/ClickHouse/ClickHouse/issues/45804
|
||||
|
||||
CREATE TABLE myRMT(
|
||||
CREATE TABLE myRMT(
|
||||
key Int64,
|
||||
someCol String,
|
||||
ver DateTime
|
||||
|
@ -1,6 +1,6 @@
|
||||
SET any_join_distinct_right_table_keys = 1;
|
||||
SET joined_subquery_requires_alias = 0;
|
||||
SET allow_experimental_analyzer = 1;
|
||||
SET enable_analyzer = 1;
|
||||
|
||||
select x, y from (select 1 as x, 2 as y, x, y);
|
||||
select x, y from (select 1 as x, 1 as y, x, y);
|
||||
|

@ -2,7 +2,7 @@

SET output_format_write_statistics = 0;
SET extremes = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SET output_format_json_quote_64bit_integers = 1;
SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
SET join_use_nulls = 0;
SET any_join_distinct_right_table_keys = 1;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2);
select `pow(four, 2)`, `pow(two, 2)` from (with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2));

@ -390,7 +390,7 @@ ANY LEFT JOIN
) USING (id)
WHERE id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
id,
@ -454,7 +454,7 @@ FROM
)
WHERE id = 1
2000-01-01 1 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
date,
@ -484,7 +484,7 @@ ANY LEFT JOIN
) AS b USING (id)
WHERE b.id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
id,
@ -510,7 +510,7 @@ ANY LEFT JOIN
) AS b USING (date, id)
WHERE b.date = toDate(\'2000-01-01\')
1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
2000-01-01 1 test string 1 1
SELECT
date,
@ -593,7 +593,7 @@ SEMI LEFT JOIN
) AS r USING (id)
WHERE r.id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT value + t1.value AS expr
FROM

@ -110,9 +110,9 @@ SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHER

-- Optimize predicate expression with join query
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=1;

EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;
SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;
@ -123,30 +123,30 @@ SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING

-- Optimize predicate expression with join and nested subquery
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=1;

-- Optimize predicate expression with join query and qualified
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=1;

-- Compatibility test
EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=1;

EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;

-- Explain with join subquery
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=1;

-- issue 20497
EXPLAIN SYNTAX SELECT value + t1.value AS expr FROM (SELECT t0.value, t1.value FROM test_00597 AS t0 FULL JOIN test_00597 AS t1 USING date) WHERE expr < 3;

@ -17,7 +17,7 @@ QUERY id: 0
LIST id: 5, nodes: 2
COLUMN id: 6, column_name: g, result_type: String, source_id: 3
CONSTANT id: 7, constant_value: Tuple_(\'5\', \'6\'), constant_value_type: Tuple(String, String)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
2
2
QUERY id: 0
@ -42,4 +42,4 @@ QUERY id: 0
LIST id: 11, nodes: 2
COLUMN id: 8, column_name: g, result_type: String, source_id: 3
CONSTANT id: 12, constant_value: \'6\', constant_value_type: String
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1

@ -12,13 +12,13 @@ SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5','6');

SET optimize_min_equality_disjunction_chain_length = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6';
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;

SET optimize_min_equality_disjunction_chain_length = 3;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6';
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;

DROP TABLE regression_for_in_operator_view;
DROP TABLE regression_for_in_operator;
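
The optimize_min_equality_disjunction_chain_length setting above controls when a chain of equality ORs is folded into a single IN; the CONSTANT Tuple_(\'5\', \'6\') node in the EXPLAIN QUERY TREE output earlier is that fold made visible. A minimal sketch of the expected equivalence, under that assumption:

-- With the threshold at 1, g = '5' OR g = '6' is expected to be planned as:
SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5', '6');
-- With the threshold at 3, a two-element chain is expected to stay as plain ORs.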

@ -1,5 +1,5 @@
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

drop table if exists tab1;
drop table if exists tab2;

@ -27,7 +27,7 @@ SELECT a > 0, b > 0, g > 0 FROM decimal ORDER BY a DESC;
SELECT a, g > toInt8(0), g > toInt16(0), g > toInt32(0), g > toInt64(0) FROM decimal ORDER BY a;
SELECT a, g > toUInt8(0), g > toUInt16(0), g > toUInt32(0), g > toUInt64(0) FROM decimal ORDER BY a;
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42);
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS allow_experimental_analyzer = 1;
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS enable_analyzer = 1;
SELECT a, b, g FROM decimal WHERE a > 0 AND a <= 42 AND b <= 42 AND g <= 42;

SELECT d, e, f from decimal WHERE d > 0 AND d < 1 AND e > 0 AND e < 1 AND f > 0 AND f < 1;

@ -1,6 +1,6 @@
-- Tags: no-parallel

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS one;
CREATE TABLE one(dummy UInt8) ENGINE = Memory;

@ -49,7 +49,7 @@ QUERY id: 0
LIST id: 12, nodes: 2
COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23
@ -99,7 +99,7 @@ QUERY id: 0
LIST id: 14, nodes: 2
COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 1 21 1 1 1
1 1 22 0 1 1
1 1 23 0 0 1
@ -152,7 +152,7 @@ QUERY id: 0
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1
@ -185,7 +185,7 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23
@ -237,7 +237,7 @@ QUERY id: 0
LIST id: 12, nodes: 2
COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23
@ -287,7 +287,7 @@ QUERY id: 0
LIST id: 14, nodes: 2
COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 1 21 1 1 1
1 1 22 0 1 1
1 1 23 0 0 1
@ -348,7 +348,7 @@ QUERY id: 0
CONSTANT id: 21, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1
@ -381,7 +381,7 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1
@ -414,4 +414,4 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1

@ -7,43 +7,43 @@ set optimize_min_equality_disjunction_chain_length = 2;
select * from bug;

select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23);
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;

select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23);
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;

select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug;
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;

select s, (s=21 or s=22 or s=23) from bug;
select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;

set optimize_min_equality_disjunction_chain_length = 3;

select * from bug;

select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23);
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;

select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23);
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;

select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug;
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;

select s, (s=21 or s=22 or s=23) from bug;
select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;

select s, (s=21 or 22=s or 23=s) from bug;
select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;;
explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;;
select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;;
explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;;

DROP TABLE bug;

@ -1,3 +1,3 @@
SET allow_experimental_analyzer=0;
SET enable_analyzer=0;
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;

@ -1,3 +1,3 @@
SET allow_experimental_analyzer=1;
SET enable_analyzer=1;
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;

@ -1,4 +1,4 @@
set allow_experimental_analyzer = 1;
set enable_analyzer = 1;
set joined_subquery_requires_alias = 0;

select * from (select dummy as val from system.one) any left join (select dummy as val from system.one) using val;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

{% for join_algorithm in ['partial_merge', 'full_sorting_merge', 'grace_hash'] -%}

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

select s.a as a, s.a + 1 as b from (select 10 as a) s;
select s.a + 1 as a, s.a as b from (select 10 as a) s;

@ -1,5 +1,5 @@
SET output_format_pretty_color = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -1,6 +1,6 @@
SET any_join_distinct_right_table_keys = 1;
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT * FROM (SELECT 1 AS a, 'x' AS b) join (SELECT 1 as a, 'y' as b) using a;
SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -13,7 +13,7 @@ SELECT joinGet('kv_overwrite', 'v', toUInt32(1));
CREATE TABLE t2 (k UInt32, v UInt32) ENGINE = Memory;
INSERT INTO t2 VALUES (1, 2), (1, 3);

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 0;
SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 1;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z;
SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z;

@ -1,5 +1,5 @@
SET any_join_distinct_right_table_keys = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS t1_00848;
DROP TABLE IF EXISTS t2_00848;

@ -12,7 +12,7 @@ CREATE TABLE t2 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
CREATE TABLE t3 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
CREATE TABLE t4 (a UInt32, b Nullable(Int32)) ENGINE = Memory;

SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;

--- EXPLAIN SYNTAX (old AST based optimization)
SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (

@ -62,56 +62,56 @@ SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explai

--- EXPLAIN QUERY TREE
SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS enable_analyzer = 1;

INSERT INTO t1 values (1,1), (2,2), (3,3), (4,4);
INSERT INTO t2 values (1,1), (1, Null);
INSERT INTO t3 values (1,1), (1, Null);
INSERT INTO t4 values (1,1), (1, Null);

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT 'SELECT * FROM t1, t2';
SELECT * FROM t1, t2
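
The countIf probes above grep the EXPLAIN output to assert that comma and CROSS joins with an equality predicate are rewritten to INNER joins. A minimal sketch of the equivalence being counted, assuming the rewrite fires as the test expects:

-- A comma join plus an equality predicate...
SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a;
-- ...is expected to be planned like the explicit INNER JOIN form,
-- which the countIf(explain like '%INNER%') probe detects:
SELECT t1.a FROM t1 INNER JOIN t2 ON t1.a = t2.a;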

@ -1,5 +1,5 @@
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c);
SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax;

@ -1,4 +1,4 @@
set allow_experimental_analyzer = 1;
set enable_analyzer = 1;
set distributed_product_mode = 'local';

drop table if exists shard1;

@ -2,7 +2,7 @@ SELECT flatten(arrayJoin([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]));
SELECT arrayFlatten(arrayJoin([[[[]], [[1], [], [2, 3]]], [[[4]]]]));
SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(x), range(x)), range(x)), range(number))) FROM numbers(6);
SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(z), range(y)), range(x)), range(number))) FROM numbers(6);
SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS allow_experimental_analyzer=1;
SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS enable_analyzer=1;
SELECT arrayFlatten([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]);
SELECT flatten([[[]]]);
SELECT arrayFlatten([]);

@ -28,6 +28,8 @@
"rows_read": 3,
8 aбвгдеёж
"rows_read": 2,
13
1
1 column-oriented
2 column-oriented
"rows_read": 4,

@ -103,6 +103,10 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filte
$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE (s, lower(s)) IN (('aбвгдеёж', 'aбвгдеёж'), ('abc', 'cba')) ORDER BY k"
$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE (s, lower(s)) IN (('aбвгдеёж', 'aбвгдеёж'), ('abc', 'cba')) ORDER BY k FORMAT JSON" | grep "rows_read"

# Weird conditions not supported by the index.
$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom_filter_idx WHERE (s = 'asd') = (s = 'asd')"
$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom_filter_idx WHERE has(['asd', 'some string'], s)"


# TOKEN BF
$CLICKHOUSE_CLIENT -n --query="

@ -1,4 +1,4 @@
SET optimize_read_in_order = 1, query_plan_read_in_order = 1, allow_experimental_analyzer = 0;
SET optimize_read_in_order = 1, query_plan_read_in_order = 1, enable_analyzer = 0;

drop table if exists tab;
drop table if exists tab2;

@ -14,6 +14,11 @@
0
2
2
18
100
100
3
100
1
1
1

@ -25,6 +25,15 @@ WITH ((1, 2), (2, 3)) AS liter_prepared_set SELECT COUNT() FROM single_column_bl
WITH ((1, 1), (2, 2)) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) IN liter_prepared_set SETTINGS max_rows_to_read = 6;
WITH ((1, (1, 1)), (2, (2, 2))) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) IN liter_prepared_set SETTINGS max_rows_to_read = 6;

-- Check that indexHint() works (but it doesn't work with COUNT()).
SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint(i32 in (3, 15, 50));

-- The index doesn't understand expressions like these, but it shouldn't break the query.
SELECT COUNT() FROM single_column_bloom_filter WHERE (i32 = 200) = (i32 = 200);
SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint((i32 = 200) != (i32 = 200));
SELECT COUNT() FROM single_column_bloom_filter WHERE indexOf([10, 20, 30], i32) != 0;
SELECT COUNT() FROM single_column_bloom_filter WHERE has([100, 200, 300], 200);

DROP TABLE IF EXISTS single_column_bloom_filter;
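
A note on the indexHint() queries above: indexHint(cond) only feeds cond to the skip index for granule pruning and always evaluates to true, so it does not filter rows; SUM(ignore(*) + 1) is used instead of COUNT() presumably because COUNT() can be answered without actually reading the probed rows. A minimal sketch, assuming a hypothetical table t with a skip index on x:

-- Prunes granules via the index on x, but returns every row of the surviving
-- granules, so neighbouring rows may appear in the result.
SELECT * FROM t WHERE indexHint(x = 1);
-- Counts the rows actually read after pruning, which is what the test measures.
SELECT SUM(ignore(*) + 1) FROM t WHERE indexHint(x = 1);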

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
SET joined_subquery_requires_alias = 1;

SELECT * FROM (SELECT 1 as A, 2 as B) X

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT 11 AS n GROUP BY n WITH TOTALS;
SELECT 12 AS n GROUP BY n WITH ROLLUP;

@ -1,5 +1,5 @@
SET output_format_pretty_color=1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

select * from system.one cross join system.one;
select * from system.one cross join system.one r;

@ -1,5 +1,5 @@
SET send_logs_level = 'fatal';
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier};
set allow_deprecated_database_ordinary=1;

@ -1,7 +1,7 @@
-- Tags: no-parallel

SET send_logs_level = 'fatal';
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS test_01048;
set allow_deprecated_database_ordinary=1;

@ -1,5 +1,5 @@
-- { echoOn }
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
SET join_use_nulls = 0;
@ -313,7 +313,7 @@ SELECT r.lc, materialize(r.lc), toTypeName(r.lc), toTypeName(materialize(r.lc))
str str LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
str_r str_r LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
\N \N LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
SET join_use_nulls = 0;

@ -23,9 +23,9 @@ INSERT INTO nl VALUES (0, 'str'), (2, 'str_l');
INSERT INTO l_lc VALUES (0, 'str'), (2, 'str_l');

-- { echoOn }
{% for allow_experimental_analyzer in [0, 1] -%}
{% for enable_analyzer in [0, 1] -%}

SET allow_experimental_analyzer = {{ allow_experimental_analyzer }};
SET enable_analyzer = {{ enable_analyzer }};

{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
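
These .sql.j2 files are Jinja templates that the test harness expands before running; a minimal sketch of what the renamed loop above is assumed to expand to:

-- {% for enable_analyzer in [0, 1] -%} produces one block per value:
SET enable_analyzer = 0;
SET join_algorithm = 'default';
-- ... body repeated for 'partial_merge' and 'parallel_hash' ...
SET enable_analyzer = 1;
SET join_algorithm = 'default';
-- ... and again for the remaining join algorithms.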

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;

DROP TABLE IF EXISTS mt;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;

DROP TABLE IF EXISTS mt;

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -20,7 +20,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)

client1.send("SET allow_experimental_analyzer = 0")
client1.send("SET enable_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
@ -28,7 +28,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.send("SET allow_experimental_window_view = 1")
client2.expect(prompt)
client2.send("SET allow_experimental_analyzer = 0")
client2.send("SET enable_analyzer = 0")
client2.expect(prompt)

client1.send("CREATE DATABASE IF NOT EXISTS 01056_window_view_proc_hop_watch")

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -20,13 +20,13 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)

client1.send("SET allow_experimental_analyzer = 0")
client1.send("SET enable_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")
client1.expect(prompt)
client2.send("SET allow_experimental_analyzer = 0")
client2.send("SET enable_analyzer = 0")
client2.expect(prompt)

client1.send("CREATE DATABASE IF NOT EXISTS db_01059_event_hop_watch_strict_asc")

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -20,7 +20,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)

client1.send("SET allow_experimental_analyzer = 0")
client1.send("SET enable_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
@ -28,7 +28,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.send("SET allow_experimental_window_view = 1")
client2.expect(prompt)
client2.send("SET allow_experimental_analyzer = 0")
client2.send("SET enable_analyzer = 0")
client2.expect(prompt)

client1.send(f"DROP TABLE IF EXISTS {database_name}.mt")

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -19,7 +19,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)

client1.send("SET allow_experimental_analyzer = 0")
client1.send("SET enable_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
@ -27,7 +27,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.send("SET allow_experimental_window_view = 1")
client2.expect(prompt)
client2.send("SET allow_experimental_analyzer = 0")
client2.send("SET enable_analyzer = 0")
client2.expect(prompt)

client1.send(

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
"--allow_experimental_analyzer=0"
"--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -20,7 +20,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)

client1.send("SET allow_experimental_analyzer = 0")
client1.send("SET enable_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
@ -28,7 +28,7 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.send("SET allow_experimental_window_view = 1")
client2.expect(prompt)
client2.send("SET allow_experimental_analyzer = 0")
client2.send("SET enable_analyzer = 0")
client2.expect(prompt)

client1.send("CREATE DATABASE 01069_window_view_proc_tumble_watch")

Some files were not shown because too many files have changed in this diff.