diff --git a/contrib/liburing b/contrib/liburing
index f5a48392c4e..f4e42a515cd 160000
--- a/contrib/liburing
+++ b/contrib/liburing
@@ -1 +1 @@
-Subproject commit f5a48392c4ea33f222cbebeb2e2fc31620162949
+Subproject commit f4e42a515cd78c8c9cac2be14222834be5f8df2b
diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh
index 69422992cfb..b4376fe2409 100755
--- a/docker/test/fuzzer/run-fuzzer.sh
+++ b/docker/test/fuzzer/run-fuzzer.sh
@@ -86,7 +86,7 @@ function download
chmod +x clickhouse
# clickhouse may be compressed - run once to decompress
- ./clickhouse ||:
+ ./clickhouse --query "SELECT 1" ||:
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client
ln -s ./clickhouse ./clickhouse-local
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index f185c11bab3..228b2c8884f 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -870,6 +870,11 @@ Tags:
- `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.
- `least_used_ttl_ms` - Configure the timeout (in milliseconds) for updating the available space on all disks (`0` - update always, `-1` - never update, default is `60000`). Note: if the disk is used by ClickHouse only and is not subject to an online filesystem resize/shrink, you can use `-1`; in all other cases it is not recommended, since it will eventually lead to incorrect space distribution.
- `prefer_not_to_merge` — You should not use this setting. Disables merging of data parts on this volume (this is harmful and leads to performance degradation). When this setting is enabled (don't do it), merging data on this volume is not allowed (which is bad). This allows (but you don't need it) controlling (if you want to control something, you're making a mistake) how ClickHouse works with slow disks (but ClickHouse knows better, so please don't use this setting).
+- `volume_priority` — Defines the priority (order) in which volumes are filled. A lower value means a higher priority. The parameter values must be natural numbers that collectively cover the range from 1 to N (where N is the largest value specified, i.e. the lowest priority) without skipping any numbers (see the example sketch below).
+ * If _all_ volumes have this parameter, they are prioritized in the given order.
+ * If only _some_ volumes have it, the volumes without it have the lowest priority and are prioritized in the order they are defined in the config.
+ * If _no_ volume has it, volume priority corresponds to the order in which the volumes are declared in the configuration.
+ * Two volumes cannot have the same priority value.
Configuration examples:
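+
+For illustration, a minimal sketch of a policy in which `volume_priority` overrides the declaration order (the policy, volume, and disk names here are hypothetical):
+
+```xml
+<storage_configuration>
+    <policies>
+        <prioritized_policy>
+            <volumes>
+                <volume_declared_first>
+                    <disk>disk1</disk>
+                    <volume_priority>2</volume_priority> <!-- filled second -->
+                </volume_declared_first>
+                <volume_declared_second>
+                    <disk>disk2</disk>
+                    <volume_priority>1</volume_priority> <!-- filled first, despite being declared later -->
+                </volume_declared_second>
+            </volumes>
+        </prioritized_policy>
+    </policies>
+</storage_configuration>
+```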
@@ -919,7 +924,8 @@ In given example, the `hdd_in_order` policy implements the [round-robin](https:/
If there are different kinds of disks available in the system, `moving_from_ssd_to_hdd` policy can be used instead. The volume `hot` consists of an SSD disk (`fast_ssd`), and the maximum size of a part that can be stored on this volume is 1GB. All the parts with the size larger than 1GB will be stored directly on the `cold` volume, which contains an HDD disk `disk1`.
Also, once the disk `fast_ssd` gets filled by more than 80%, data will be transferred to the `disk1` by a background process.
-The order of volume enumeration within a storage policy is important. Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns.
+The order of volume enumeration within a storage policy is important if at least one of the listed volumes has no explicit `volume_priority` parameter.
+Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns.
When creating a table, one can apply one of the configured storage policies to it:
diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md
index c3b8a2f2048..4e0ee9bfcc9 100644
--- a/docs/en/engines/table-engines/special/distributed.md
+++ b/docs/en/engines/table-engines/special/distributed.md
@@ -74,6 +74,10 @@ Specifying the `sharding_key` is necessary for the following:
`fsync_directories` - do the `fsync` for directories. Guarantees that the OS refreshed directory metadata after operations related to background inserts on Distributed table (after insert, after sending the data to shard, etc.).
+#### skip_unavailable_shards
+
+`skip_unavailable_shards` - If true, ClickHouse silently skips unavailable shards. A shard is marked as unavailable when: 1) the shard cannot be reached due to a connection failure; 2) the shard cannot be resolved through DNS; 3) the table does not exist on the shard. Default false.
+
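+For example, a sketch of skipping unreachable shards for a single query (the table name is illustrative):
+
+```sql
+SELECT count() FROM dist_hits SETTINGS skip_unavailable_shards = 1;
+```
+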
#### bytes_to_throw_insert
`bytes_to_throw_insert` - if more than this number of compressed bytes will be pending for background INSERT, an exception will be thrown. 0 - do not throw. Default 0.
@@ -102,6 +106,10 @@ Specifying the `sharding_key` is necessary for the following:
`background_insert_max_sleep_time_ms` - same as [distributed_background_insert_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_max_sleep_time_ms)
+#### flush_on_detach
+
+`flush_on_detach` - Flush data to remote nodes on DETACH/DROP/server shutdown. Default true.
+
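+For example, a sketch of disabling it when the table is created (the cluster and table names are illustrative):
+
+```sql
+CREATE TABLE dist_logs AS default.logs
+ENGINE = Distributed(my_cluster, default, logs)
+SETTINGS flush_on_detach = 0;
+```
+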
:::note
**Durability settings** (`fsync_...`):
diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md
index e8662ec16fa..234420de374 100644
--- a/docs/en/getting-started/install.md
+++ b/docs/en/getting-started/install.md
@@ -79,10 +79,7 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun
#### Setup the Debian repository
``` bash
sudo apt-get install -y apt-transport-https ca-certificates dirmngr
-GNUPGHOME=$(mktemp -d)
-sudo GNUPGHOME="$GNUPGHOME" gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754
-sudo rm -rf "$GNUPGHOME"
-sudo chmod +r /usr/share/keyrings/clickhouse-keyring.gpg
+sudo gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754
echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \
/etc/apt/sources.list.d/clickhouse.list
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 238621ec49b..30dfde7c80b 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -199,6 +199,20 @@ Type: Bool
Default: 0
+
+## dns_cache_max_size
+
+Maximum number of entries in the internal DNS cache.
+
+:::note
+ClickHouse also has a reverse cache, so the actual memory usage could be twice as much.
+:::
+
+Type: UInt64
+
+Default: 1024
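+
+For example, a sketch of raising the limit in the server configuration (the value is illustrative; this assumes the setting sits at the top level of the config, like the other `dns_*` settings):
+
+```xml
+<clickhouse>
+    <dns_cache_max_size>10000</dns_cache_max_size>
+</clickhouse>
+```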
+
+
## dns_cache_update_period
Internal DNS cache update period in seconds.
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 06219d21311..5433a2866a2 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1776,7 +1776,7 @@ Default value: 0 (no restriction).
## insert_quorum {#insert_quorum}
:::note
-`insert_quorum` does not apply when using the [`SharedMergeTree` table engine](/en/cloud/reference/shared-merge-tree) in ClickHouse Cloud as all inserts are quorum inserted.
+This setting is not applicable to SharedMergeTree; see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information.
:::
Enables the quorum writes.
@@ -1819,7 +1819,7 @@ See also:
## insert_quorum_parallel {#insert_quorum_parallel}
:::note
-`insert_quorum_parallel` does not apply when using the [`SharedMergeTree` table engine](/en/cloud/reference/shared-merge-tree) in ClickHouse Cloud as all inserts are quorum inserted.
+This setting is not applicable to SharedMergeTree; see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information.
:::
Enables or disables parallelism for quorum `INSERT` queries. If enabled, additional `INSERT` queries can be sent while previous queries have not yet finished. If disabled, additional writes to the same table will be rejected.
@@ -1839,6 +1839,10 @@ See also:
## select_sequential_consistency {#select_sequential_consistency}
+:::note
+This setting differs in behavior between SharedMergeTree and ReplicatedMergeTree; see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information about the behavior of `select_sequential_consistency` in SharedMergeTree.
+:::
+
Enables or disables sequential consistency for `SELECT` queries. Requires `insert_quorum_parallel` to be disabled (enabled by default).
Possible values:
@@ -2037,7 +2041,7 @@ Possible values:
- 0 — Disabled.
- 1 — Enabled.
-Default value: 1.
+Default value: 0.
By default, blocks inserted into replicated tables by the `INSERT` statement with [async_insert](#async-insert) enabled are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
For the replicated tables, by default, only 10000 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-async-inserts), [replicated_deduplication_window_seconds_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-seconds-async-inserts)).
@@ -3445,7 +3449,7 @@ Has an effect only when the connection is made through the MySQL wire protocol.
- 0 - Use `BLOB`.
- 1 - Use `TEXT`.
-Default value: `0`.
+Default value: `1`.
## mysql_map_fixed_string_to_text_in_show_columns {#mysql_map_fixed_string_to_text_in_show_columns}
@@ -3456,7 +3460,7 @@ Has an effect only when the connection is made through the MySQL wire protocol.
- 0 - Use `BLOB`.
- 1 - Use `TEXT`.
-Default value: `0`.
+Default value: `1`.
## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}
@@ -3706,7 +3710,7 @@ Default value: `0`.
## allow_experimental_live_view {#allow-experimental-live-view}
-Allows creation of experimental [live views](../../sql-reference/statements/create/view.md/#live-view).
+Allows creation of a deprecated LIVE VIEW.
Possible values:
@@ -3717,21 +3721,15 @@ Default value: `0`.
## live_view_heartbeat_interval {#live-view-heartbeat-interval}
-Sets the heartbeat interval in seconds to indicate [live view](../../sql-reference/statements/create/view.md/#live-view) is alive .
-
-Default value: `15`.
+Deprecated.
## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}
-Sets the maximum number of inserted blocks after which mergeable blocks are dropped and query for [live view](../../sql-reference/statements/create/view.md/#live-view) is re-executed.
-
-Default value: `64`.
+Deprecated.
## periodic_live_view_refresh {#periodic-live-view-refresh}
-Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md/#live-view) is forced to refresh.
-
-Default value: `60`.
+Deprecated.
## http_connection_timeout {#http_connection_timeout}
diff --git a/docs/en/operations/system-tables/dns_cache.md b/docs/en/operations/system-tables/dns_cache.md
new file mode 100644
index 00000000000..824ce016a70
--- /dev/null
+++ b/docs/en/operations/system-tables/dns_cache.md
@@ -0,0 +1,38 @@
+---
+slug: /en/operations/system-tables/dns_cache
+---
+# dns_cache
+
+Contains information about cached DNS records.
+
+Columns:
+
+- `hostname` ([String](../../sql-reference/data-types/string.md)) — cached hostname
+- `ip_address` ([String](../../sql-reference/data-types/string.md)) — ip address for the hostname
+- `ip_family` ([Enum](../../sql-reference/data-types/enum.md)) — family of the ip address, possible values:
+ - 'IPv4'
+ - 'IPv6'
+ - 'UNIX_LOCAL'
+- `cached_at` ([DateTime](../../sql-reference/data-types/datetime.md)) - when the record was cached
+
+**Example**
+
+Query:
+
+```sql
+SELECT * FROM system.dns_cache;
+```
+
+Result:
+
+| hostname | ip\_address | ip\_family | cached\_at |
+| :--- | :--- | :--- | :--- |
+| localhost | ::1 | IPv6 | 2024-02-11 17:04:40 |
+| localhost | 127.0.0.1 | IPv4 | 2024-02-11 17:04:40 |
+
+**See also**
+
+- [disable_internal_dns_cache setting](../../operations/server-configuration-parameters/settings.md#disable_internal_dns_cache)
+- [dns_cache_max_size setting](../../operations/server-configuration-parameters/settings.md#dns_cache_max_size)
+- [dns_cache_update_period setting](../../operations/server-configuration-parameters/settings.md#dns_cache_update_period)
+- [dns_max_consecutive_failures setting](../../operations/server-configuration-parameters/settings.md#dns_max_consecutive_failures)
diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md
index 757afff599c..d0da4d37d8d 100644
--- a/docs/en/operations/tips.md
+++ b/docs/en/operations/tips.md
@@ -111,6 +111,14 @@ On newer Linux kernels transparent huge pages are alright.
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
+If you want to change the transparent huge pages setting permanently, edit `/etc/default/grub` and add `transparent_hugepage=madvise` to the `GRUB_CMDLINE_LINUX_DEFAULT` option:
+
+```bash
+GRUB_CMDLINE_LINUX_DEFAULT="transparent_hugepage=madvise ..."
+```
+
+After that, run `sudo update-grub` and reboot for the change to take effect.
+
## Hypervisor configuration
If you are using OpenStack, set
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md
new file mode 100644
index 00000000000..5cac88be073
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md
@@ -0,0 +1,50 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/grouparrayintersect
+sidebar_position: 115
+---
+
+# groupArrayIntersect
+
+Returns the intersection of the given arrays (all items of the arrays that are present in every given array).
+
+**Syntax**
+
+``` sql
+groupArrayIntersect(x)
+```
+
+**Arguments**
+
+- `x` — Argument (column name or expression).
+
+**Returned values**
+
+- Array that contains elements that are in all arrays.
+
+Type: [Array](../../data-types/array.md).
+
+**Examples**
+
+Consider table `numbers`:
+
+``` text
+┌─a──────────────┐
+│ [1,2,4] │
+│ [1,5,2,8,-1,0] │
+│ [1,5,7,5,8,2] │
+└────────────────┘
+```
+
+Query with column name as argument:
+
+``` sql
+SELECT groupArrayIntersect(a) as intersection FROM numbers;
+```
+
+Result:
+
+```text
+┌─intersection──────┐
+│ [1, 2] │
+└───────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md
index 93d4282c32b..b99d4b06d55 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/index.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/index.md
@@ -55,6 +55,7 @@ ClickHouse-specific aggregate functions:
- [groupArrayMovingSum](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
- [groupArraySample](./grouparraysample.md)
- [groupArraySorted](/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md)
+- [groupArrayIntersect](./grouparrayintersect.md)
- [groupBitAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md)
- [groupBitOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md)
- [groupBitXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md)
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
index f7615d90790..ddac82a0977 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
@@ -5,25 +5,25 @@ sidebar_position: 221
# stochasticLinearRegression
-This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size and has few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
+This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), and [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
### Parameters
There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four - default values will be used. However, a good model requires some parameter tuning.
``` text
-stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
+stochasticLinearRegression(0.00001, 0.1, 15, 'Adam')
```
-1. `learning rate` is the coefficient on step length, when gradient descent step is performed. Too big learning rate may cause infinite weights of the model. Default is `0.00001`.
+1. `learning rate` is the coefficient on the step length when the gradient descent step is performed. A learning rate that is too big may cause the weights of the model to become infinite. Default is `0.00001`.
2. `l2 regularization coefficient` which may help to prevent overfitting. Default is `0.1`.
-3. `mini-batch size` sets the number of elements, which gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element, however having small batches(about 10 elements) make gradient steps more stable. Default is `15`.
-4. `method for updating weights`, they are: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require little bit more computations and memory, however they happen to be useful in terms of speed of convergence and stability of stochastic gradient methods.
+3. `mini-batch size` sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, small batches (about 10 elements) make gradient steps more stable. Default is `15`.
+4. `method for updating weights`: one of `Adam` (default), `SGD`, `Momentum`, or `Nesterov`. `Momentum` and `Nesterov` require a bit more computation and memory, but they tend to be useful in terms of convergence speed and stability of stochastic gradient methods.
### Usage
-`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage we use `-State` combinator, which basically saves the state (model weights, etc).
-To predict we use function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
+`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (e.g. model weights).
+To predict, we use the function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
@@ -44,12 +44,12 @@ stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```
-Here we also need to insert data into `train_data` table. The number of parameters is not fixed, it depends only on number of arguments, passed into `linearRegressionState`. They all must be numeric values.
-Note that the column with target value(which we would like to learn to predict) is inserted as the first argument.
+Here, we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `stochasticLinearRegressionState`. They all must be numeric values.
+Note that the column with target value (which we would like to learn to predict) is inserted as the first argument.
**2.** Predicting
-After saving a state into the table, we may use it multiple times for prediction, or even merge with other states and create new even better models.
+After saving a state into the table, we may use it multiple times for prediction or even merge with other states and create new, even better models.
``` sql
WITH (SELECT state FROM your_model) AS model SELECT
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index d05e7bbfe51..739b688a0d2 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -780,8 +780,52 @@ If executed in the context of a distributed table, this function generates a nor
## version()
-Returns the server version as a string.
-If executed in the context of a distributed table, this function generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
+Returns the current version of ClickHouse as a string in the form of:
+
+- Major version
+- Minor version
+- Patch version
+- Number of commits since the previous stable release.
+
+```plaintext
+major_version.minor_version.patch_version.number_of_commits_since_the_previous_stable_release
+```
+
+If executed in the context of a distributed table, this function generates a normal column with values relevant to each shard. Otherwise, it produces a constant value.
+
+**Syntax**
+
+```sql
+version()
+```
+
+**Arguments**
+
+None.
+
+**Returned value**
+
+- The current version of ClickHouse. Type: [String](../data-types/string).
+
+**Implementation details**
+
+None.
+
+**Example**
+
+Query:
+
+```sql
+SELECT version()
+```
+
+Result:
+
+```response
+┌─version()─┐
+│ 24.2.1.1 │
+└───────────┘
+```
## buildId()
diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md
index f5544f96750..a76692cf291 100644
--- a/docs/en/sql-reference/statements/insert-into.md
+++ b/docs/en/sql-reference/statements/insert-into.md
@@ -176,7 +176,7 @@ INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV;
```
:::
-## Inserting into Table Function
+## Inserting using a Table Function
Data can be inserted into tables referenced by [table functions](../../sql-reference/table-functions/index.md).
@@ -204,7 +204,7 @@ Result:
└─────┴───────────────────────┘
```
-## Inserts into ClickHouse Cloud
+## Inserting into ClickHouse Cloud
By default, services on ClickHouse Cloud provide multiple replicas for high availability. When you connect to a service, a connection is established to one of these replicas.
@@ -218,6 +218,12 @@ SELECT .... SETTINGS select_sequential_consistency = 1;
Note that using `select_sequential_consistency` will increase the load on ClickHouse Keeper (used by ClickHouse Cloud internally) and may result in slower performance depending on the load on the service. We recommend against enabling this setting unless necessary. The recommended approach is to execute read/writes in the same session or to use a client driver that uses the native protocol (and thus supports sticky connections).
+## Inserting into a replicated setup
+
+In a replicated setup, data will be visible on other replicas after it has been replicated. Data begins being replicated (downloaded on other replicas) immediately after an `INSERT`. This differs from ClickHouse Cloud, where data is immediately written to shared storage and replicas subscribe to metadata changes.
+
+Note that in replicated setups, an `INSERT` can sometimes take a considerable amount of time (on the order of one second) because it requires a commit to ClickHouse Keeper for distributed consensus. Using S3 for storage also adds latency.
+
## Performance Considerations
`INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:
@@ -230,7 +236,15 @@ Performance will not decrease if:
- Data is added in real time.
- You upload data that is usually sorted by time.
-It's also possible to asynchronously insert data in small but frequent inserts. The data from such insertions is combined into batches and then safely inserted into a table. To enable the asynchronous mode, switch on the [async_insert](../../operations/settings/settings.md#async-insert) setting. Note that asynchronous insertions are supported only over HTTP protocol, and deduplication is not supported for them.
+### Asynchronous inserts
+
+It is possible to asynchronously insert data in small but frequent inserts. The data from such insertions is combined into batches and then safely inserted into a table. To use asynchronous inserts, enable the [`async_insert`](../../operations/settings/settings.md#async-insert) setting.
+
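+A minimal sketch (the table and values are placeholders; `wait_for_async_insert = 1` is shown explicitly, so the statement returns only after the buffered data is flushed):
+
+```sql
+INSERT INTO my_table SETTINGS async_insert = 1, wait_for_async_insert = 1
+VALUES (1, 'a');
+```
+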
+Using `async_insert` or the [`Buffer` table engine](/en/engines/table-engines/special/buffer) results in additional buffering.
+
+### Large or long-running inserts
+
+When you are inserting large amounts of data, ClickHouse optimizes write performance through a process called "squashing". Small blocks of inserted data in memory are merged and squashed into larger blocks before being written to disk. Squashing reduces the overhead associated with each write operation. In this process, inserted data becomes available to query after ClickHouse writes each block of [`max_insert_block_size`](/en/operations/settings/settings#max_insert_block_size) rows.
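+
+A sketch of adjusting this per query (the table names are placeholders and the value is illustrative):
+
+```sql
+INSERT INTO target_table
+SETTINGS max_insert_block_size = 1048449
+SELECT * FROM source_table;
+```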
**See Also**
diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md
index 5d416dfffb3..868571f3bb2 100644
--- a/docs/en/sql-reference/statements/system.md
+++ b/docs/en/sql-reference/statements/system.md
@@ -68,7 +68,7 @@ RELOAD FUNCTION [ON CLUSTER cluster_name] function_name
Clears ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
-For more convenient (automatic) cache management, see disable_internal_dns_cache, dns_cache_update_period parameters.
+For more convenient (automatic) cache management, see the `disable_internal_dns_cache`, `dns_cache_max_size`, and `dns_cache_update_period` parameters.
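+
+For example:
+
+```sql
+SYSTEM DROP DNS CACHE;
+```
+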
## DROP MARK CACHE
diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 9f223157ea7..faa492d4d85 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -679,11 +679,20 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
Тэги:
-- `policy_name_N` — название политики. Названия политик должны быть уникальны.
-- `volume_name_N` — название тома. Названия томов должны быть уникальны.
-- `disk` — диск, находящийся внутри тома.
-- `max_data_part_size_bytes` — максимальный размер куска данных, который может находиться на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
-- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты.
+- `policy_name_N` — название политики. Названия политик должны быть уникальны.
+- `volume_name_N` — название тома. Названия томов должны быть уникальны.
+- `disk` — диск, находящийся внутри тома.
+- `max_data_part_size_bytes` — максимальный размер куска данных, который может находиться на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
+- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты.
+- `perform_ttl_move_on_insert` — отключает перемещение данных с истекшим TTL при вставке. По умолчанию (если включено), если мы вставляем часть данных, которая уже просрочилась по правилу перемещения по сроку жизни, она немедленно перемещается на том / диск, указанный в правиле перемещения. Это может значительно замедлить вставку в случае, если целевой том / диск медленный (например, S3). Если отключено, то просроченная часть данных записывается на том по умолчанию, а затем сразу перемещается на том, указанный в правиле для истёкшего TTL.
+- `load_balancing` - политика балансировки дисков, `round_robin` или `least_used`.
+- `least_used_ttl_ms` - устанавливает таймаут (в миллисекундах) для обновления доступного пространства на всех дисках (`0` - обновлять всегда, `-1` - никогда не обновлять, значение по умолчанию - `60000`). Обратите внимание, если диск используется только ClickHouse и не будет подвергаться изменению размеров файловой системы на лету, можно использовать значение `-1`. Во всех остальных случаях это не рекомендуется, так как в конечном итоге это приведет к неправильному распределению пространства.
+- `prefer_not_to_merge` — эту настройку лучше не использовать. Она отключает слияние частей данных на этом томе (что потенциально вредно и может привести к замедлению). Когда эта настройка включена (не делайте этого), объединение данных на этом томе запрещено (что плохо). Это позволяет (но вам это не нужно) контролировать (если вы хотите что-то контролировать, вы делаете ошибку), как ClickHouse взаимодействует с медленными дисками (но ClickHouse лучше знает, поэтому, пожалуйста, не используйте эту настройку).
+- `volume_priority` — Определяет приоритет (порядок), в котором заполняются тома. Чем меньше значение — тем выше приоритет. Значения параметра должны быть натуральными числами и охватывать диапазон от 1 до N (N — наибольшее значение параметра из указанных) без пропусков.
+ * Если _все_ тома имеют этот параметр, они приоритизируются в указанном порядке.
+ * Если его имеют лишь _некоторые_, то не имеющие этого параметра тома имеют самый низкий приоритет. Те, у которых он указан, приоритизируются в соответствии со значением тега, приоритет остальных определяется порядком описания в конфигурационном файле относительно друг друга.
+ * Если _ни одному_ тому не присвоен этот параметр, их порядок определяется порядком описания в конфигурационном файле.
+ * Приоритет нескольких томов не может быть одинаковым.
Примеры конфигураций:
@@ -733,7 +742,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
Если система содержит диски различных типов, то может пригодиться политика `moving_from_ssd_to_hdd`. В томе `hot` находится один SSD-диск (`fast_ssd`), а также задается ограничение на максимальный размер куска, который может храниться на этом томе (1GB). Все куски такой таблицы больше 1GB будут записываться сразу на том `cold`, в котором содержится один HDD-диск `disk1`. Также при заполнении диска `fast_ssd` более чем на 80% данные будут переноситься на диск `disk1` фоновым процессом.
-Порядок томов в политиках хранения важен, при достижении условий на переполнение тома данные переносятся на следующий. Порядок дисков в томах так же важен, данные пишутся по очереди на каждый из них.
+Порядок томов в политиках хранения важен в случае, если приоритеты томов (`volume_priority`) не указаны явно: при достижении условий на переполнение тома данные переносятся на следующий. Порядок дисков в томах так же важен, данные пишутся по очереди на каждый из них.
После задания конфигурации политик хранения их можно использовать, как настройку при создании таблиц:
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index cd949e9e6b1..a56afda641b 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -3258,7 +3258,7 @@ SELECT * FROM test2;
## allow_experimental_live_view {#allow-experimental-live-view}
-Включает экспериментальную возможность использования [LIVE-представлений](../../sql-reference/statements/create/view.md#live-view).
+Включает устаревшую возможность использования [LIVE-представлений](../../sql-reference/statements/create/view.md#live-view).
Возможные значения:
- 0 — живые представления не поддерживаются.
@@ -3268,21 +3268,15 @@ SELECT * FROM test2;
## live_view_heartbeat_interval {#live-view-heartbeat-interval}
-Задает интервал в секундах для периодической проверки существования [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view).
-
-Значение по умолчанию: `15`.
+Устарело.
## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}
-Задает наибольшее число вставок, после которых запрос на формирование [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) исполняется снова.
-
-Значение по умолчанию: `64`.
+Устарело.
## periodic_live_view_refresh {#periodic-live-view-refresh}
-Задает время в секундах, по истечении которого [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) с установленным автообновлением обновляется.
-
-Значение по умолчанию: `60`.
+Устарело.
## check_query_single_value_result {#check_query_single_value_result}
diff --git a/docs/zh/sql-reference/statements/grant.md b/docs/zh/sql-reference/statements/grant.md
index 7e7cdbff350..fea51d590d5 100644
--- a/docs/zh/sql-reference/statements/grant.md
+++ b/docs/zh/sql-reference/statements/grant.md
@@ -280,9 +280,6 @@ GRANT INSERT(x,y) ON db.table TO john
- `ALTER MOVE PARTITION`. 级别: `TABLE`. 别名: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. 级别: `TABLE`. 别名: `FETCH PARTITION`
- `ALTER FREEZE PARTITION`. 级别: `TABLE`. 别名: `FREEZE PARTITION`
- - `ALTER VIEW` 级别: `GROUP`
- - `ALTER VIEW REFRESH`. 级别: `VIEW`. 别名: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- - `ALTER VIEW MODIFY QUERY`. 级别: `VIEW`. 别名: `ALTER TABLE MODIFY QUERY`
如何对待该层级的示例:
- `ALTER` 权限包含所有其它 `ALTER *` 的权限
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index c9945fbfae5..6dc33042a05 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -1774,6 +1774,8 @@ try
}
else
{
+ DNSResolver::instance().setCacheMaxSize(server_settings.dns_cache_max_size);
+
/// Initialize a watcher periodically updating DNS cache
dns_cache_updater = std::make_unique<DNSCacheUpdater>(
global_context, server_settings.dns_cache_update_period, server_settings.dns_max_consecutive_failures);
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 96036fe4896..f8c4f2bf9fa 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -1392,13 +1392,27 @@
-
+
+
+
+
+
+
+
assumption ->
DROP TABLE constraint_test_transitivity;
-
CREATE TABLE constraint_test_strong_connectivity (a String, b String, c String, d String, CONSTRAINT c1 ASSUME a <= b AND b <= c AND c <= d AND d <= a) ENGINE = TinyLog;
INSERT INTO constraint_test_strong_connectivity (a, b, c, d) VALUES ('1', '2', '3', '4');
@@ -71,7 +73,6 @@ SELECT count() FROM constraint_test_transitivity3 WHERE b >= a; ---> assumption
DROP TABLE constraint_test_transitivity3;
-
CREATE TABLE constraint_test_constants_repl (a Int64, b Int64, c Int64, d Int64, CONSTRAINT c1 ASSUME a - b = 10 AND c + d = 20) ENGINE = TinyLog;
INSERT INTO constraint_test_constants_repl (a, b, c, d) VALUES (1, 2, 3, 4);
diff --git a/tests/queries/0_stateless/01625_constraints_index_append.reference b/tests/queries/0_stateless/01625_constraints_index_append.reference
index 591d8a85897..1b1a48c0e29 100644
--- a/tests/queries/0_stateless/01625_constraints_index_append.reference
+++ b/tests/queries/0_stateless/01625_constraints_index_append.reference
@@ -1,23 +1,15 @@
-SELECT i AS i
-FROM index_append_test_test
-PREWHERE a = 0
-WHERE (a = 0) AND indexHint((i + 40) > 0)
-SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, optimize_move_to_prewhere = 1, optimize_substitute_columns = 1, optimize_append_index = 1
-1
-SELECT i AS i
-FROM index_append_test_test
-PREWHERE a < 0
-SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, optimize_move_to_prewhere = 1, optimize_substitute_columns = 1, optimize_append_index = 1
-0
-SELECT i AS i
-FROM index_append_test_test
-PREWHERE a >= 0
-WHERE (a >= 0) AND indexHint((i + 40) > 0)
-SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, optimize_move_to_prewhere = 1, optimize_substitute_columns = 1, optimize_append_index = 1
-1
-SELECT i AS i
-FROM index_append_test_test
-PREWHERE (2 * b) < 100
-WHERE ((2 * b) < 100) AND indexHint(i < 100)
-SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, optimize_move_to_prewhere = 1, optimize_substitute_columns = 1, optimize_append_index = 1
-1
+ Filter column: and(equals(a, 0), indexHint(greater(plus(i, 40), 0))) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: equals(a, 0)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: less(a, 0) (removed)
+ Filter column: and(greaterOrEquals(a, 0), indexHint(greater(plus(i, 40), 0))) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greaterOrEquals(a, 0)
+ Filter column: and(less(multiply(2, b), 100), indexHint(less(i, 100))) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: less(multiply(2, b), 100)
diff --git a/tests/queries/0_stateless/01625_constraints_index_append.sh b/tests/queries/0_stateless/01625_constraints_index_append.sh
deleted file mode 100755
index acceedbb1d1..00000000000
--- a/tests/queries/0_stateless/01625_constraints_index_append.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
-# shellcheck source=../shell_config.sh
-. "$CURDIR"/../shell_config.sh
-
-# We should have correct env vars from shell_config.sh to run this test
-
-$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS index_append_test_test;"
-
-$CLICKHOUSE_CLIENT --query "CREATE TABLE index_append_test_test (i Int64, a UInt32, b UInt64, CONSTRAINT c1 ASSUME i <= 2 * b AND i + 40 > a) ENGINE = MergeTree() ORDER BY i;"
-$CLICKHOUSE_CLIENT --query "INSERT INTO index_append_test_test VALUES (1, 10, 1), (2, 20, 2);"
-
-function run_with_settings()
-{
- query="$1 SETTINGS convert_query_to_cnf = 1\
- , optimize_using_constraints = 1\
- , optimize_move_to_prewhere = 1\
- , optimize_substitute_columns = 1\
- , optimize_append_index = 1"
-
- if [[ $query =~ "EXPLAIN QUERY TREE" ]]; then query="${query}, allow_experimental_analyzer = 1"; fi
-
- $CLICKHOUSE_CLIENT --query="$query"
-
-}
-
-run_with_settings "EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a = 0"
-run_with_settings "EXPLAIN QUERY TREE SELECT i FROM index_append_test_test WHERE a = 0" | grep -Fac "indexHint"
-run_with_settings "EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a < 0"
-run_with_settings "EXPLAIN QUERY TREE SELECT i FROM index_append_test_test WHERE a < 0" | grep -Fac "indexHint"
-run_with_settings "EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a >= 0"
-run_with_settings "EXPLAIN QUERY TREE SELECT i FROM index_append_test_test WHERE a >= 0" | grep -Fac "indexHint"
-run_with_settings "EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE 2 * b < 100"
-run_with_settings "EXPLAIN QUERY TREE SELECT i FROM index_append_test_test WHERE 2 * b < 100" | grep -Fac "indexHint"
-
-$CLICKHOUSE_CLIENT --query "DROP TABLE index_append_test_test;"
diff --git a/tests/queries/0_stateless/01625_constraints_index_append.sql b/tests/queries/0_stateless/01625_constraints_index_append.sql
new file mode 100644
index 00000000000..482cd325bb7
--- /dev/null
+++ b/tests/queries/0_stateless/01625_constraints_index_append.sql
@@ -0,0 +1,26 @@
+-- Tags: no-parallel
+
+-- CNF optimization uses QueryNodeHash to order conditions. We need fixed database.table.column identifier name to stabilize result
+DROP DATABASE IF EXISTS db_memory_01625;
+CREATE DATABASE db_memory_01625 ENGINE = Memory;
+USE db_memory_01625;
+
+DROP TABLE IF EXISTS index_append_test_test;
+
+CREATE TABLE index_append_test_test (i Int64, a UInt32, b UInt64, CONSTRAINT c1 ASSUME i <= 2 * b AND i + 40 > a) ENGINE = MergeTree() ORDER BY i;
+
+INSERT INTO index_append_test_test VALUES (1, 10, 1), (2, 20, 2);
+
+SET convert_query_to_cnf = 1;
+SET optimize_using_constraints = 1;
+SET optimize_move_to_prewhere = 1;
+SET optimize_substitute_columns = 1;
+SET optimize_append_index = 1;
+
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
+
+DROP TABLE index_append_test_test;
+DROP DATABASE db_memory_01625;
diff --git a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference
index 98c76cc2a50..26a0e97729c 100644
--- a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference
+++ b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference
@@ -1,114 +1,38 @@
optimize_move_to_prewhere_if_final = 1
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-PREWHERE x > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE x > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-PREWHERE y > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE y > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-PREWHERE (x + y) > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE (x + y) > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-WHERE z > 400
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE y > 100
-WHERE (y > 100) AND (z > 400)
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE x > 50
-WHERE (x > 50) AND (z > 400)
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-PREWHERE (x + y) > 50
-WHERE ((x + y) > 50) AND (z > 400)
-
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(x, 100) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(x, 100) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(y, 100) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(y, 100) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(plus(x, y), 100) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(plus(x, y), 100) (removed)
+ Filter
+ Filter column: and(greater(y, 100), greater(z, 400)) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(y, 100)
+ Filter
+ Filter column: and(greater(x, 50), greater(z, 400)) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(x, 50)
+ Filter
+ Filter column: and(greater(plus(x, y), 50), greater(z, 400)) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(plus(x, y), 50)
optimize_move_to_prewhere_if_final = 0
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-PREWHERE y > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-WHERE y > 100
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-WHERE z > 400
-
-SELECT
- x,
- y,
- z
-FROM prewhere_move_select_final
-FINAL
-WHERE (y > 100) AND (z > 400)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: greater(y, 100) (removed)
diff --git a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql
index ede15738c5b..d4830e9e357 100644
--- a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql
+++ b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql
@@ -10,42 +10,27 @@ select 'optimize_move_to_prewhere_if_final = 1';
SET optimize_move_to_prewhere_if_final = 1;
-- order key can be pushed down with final
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE x > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE x > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE y > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE x + y > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 100;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE x > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE y > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE x + y > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 100) WHERE explain LIKE '%Prewhere%';
-- can not be pushed down
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400;
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400) WHERE explain LIKE '%Prewhere filter';
-- only condition with x/y can be pushed down
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE x > 50 and z > 400;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 50 and z > 400;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x > 50 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 50 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%';
-select '';
select 'optimize_move_to_prewhere_if_final = 0';
SET optimize_move_to_prewhere_if_final = 0;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE y > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400;
-select '';
-EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE y > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400) WHERE explain LIKE '%Prewhere%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400) WHERE explain LIKE '%Prewhere%';
DROP TABLE prewhere_move_select_final;
diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference
index 80bd7dfd8c0..19018a610b7 100644
--- a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference
+++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference
@@ -9,7 +9,7 @@ String1_0 String2_0 String3_0 String4_0 1
Expression ((Projection + Before ORDER BY))
Filter (WHERE)
Join (JOIN FillRightFirst)
- Filter (( + Before JOIN))
+ Expression
ReadFromMergeTree (default.t1)
Indexes:
PrimaryKey
diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference
index fd1bc713b08..3a015d32539 100644
--- a/tests/queries/0_stateless/01786_explain_merge_tree.reference
+++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference
@@ -1,77 +1,79 @@
- ReadFromMergeTree (default.test_index)
- Indexes:
- MinMax
- Keys:
- y
- Parts: 4/5
- Granules: 11/12
- Partition
- Keys:
- y
- bitAnd(z, 3)
- Parts: 3/4
- Granules: 10/11
- PrimaryKey
- Keys:
- x
- y
- Parts: 2/3
- Granules: 6/10
- Skip
- Name: t_minmax
- Description: minmax GRANULARITY 2
- Parts: 1/2
- Granules: 3/6
- Skip
- Name: t_set
- Description: set GRANULARITY 2
- Parts: 1/1
- Granules: 2/3
+ ReadFromMergeTree (default.test_index)
+ Indexes:
+ MinMax
+ Keys:
+ y
+ Parts: 4/5
+ Granules: 11/12
+ Partition
+ Keys:
+ y
+ bitAnd(z, 3)
+ Parts: 3/4
+ Granules: 10/11
+ PrimaryKey
+ Keys:
+ x
+ y
+ Parts: 2/3
+ Granules: 6/10
+ Skip
+ Name: t_minmax
+ Description: minmax GRANULARITY 2
+ Parts: 1/2
+ Granules: 3/6
+ Skip
+ Name: t_set
+ Description: set GRANULARITY 2
+ Parts: 1/1
+ Granules: 2/3
-----------------
- "Node Type": "ReadFromMergeTree",
- "Description": "default.test_index",
- "Indexes": [
- {
- "Type": "MinMax",
- "Keys": ["y"],
- "Initial Parts": 5,
- "Selected Parts": 4,
- "Initial Granules": 12,
- "Selected Granules": 11
- },
- {
- "Type": "Partition",
- "Keys": ["y", "bitAnd(z, 3)"],
- "Initial Parts": 4,
- "Selected Parts": 3,
- "Initial Granules": 11,
- "Selected Granules": 10
- },
- {
- "Type": "PrimaryKey",
- "Keys": ["x", "y"],
- "Initial Parts": 3,
- "Selected Parts": 2,
- "Initial Granules": 10,
- "Selected Granules": 6
- },
- {
- "Type": "Skip",
- "Name": "t_minmax",
- "Description": "minmax GRANULARITY 2",
- "Initial Parts": 2,
- "Selected Parts": 1,
- "Initial Granules": 6,
- "Selected Granules": 3
- },
- {
- "Type": "Skip",
- "Name": "t_set",
- "Description": "set GRANULARITY 2",
- "Initial Parts": 1,
- "Selected Parts": 1,
- "Initial Granules": 3,
- "Selected Granules": 2
+ "Node Type": "ReadFromMergeTree",
+ "Description": "default.test_index",
+ "Indexes": [
+ {
+ "Type": "MinMax",
+ "Keys": ["y"],
+ "Initial Parts": 5,
+ "Selected Parts": 4,
+ "Initial Granules": 12,
+ "Selected Granules": 11
+ },
+ {
+ "Type": "Partition",
+ "Keys": ["y", "bitAnd(z, 3)"],
+ "Initial Parts": 4,
+ "Selected Parts": 3,
+ "Initial Granules": 11,
+ "Selected Granules": 10
+ },
+ {
+ "Type": "PrimaryKey",
+ "Keys": ["x", "y"],
+ "Initial Parts": 3,
+ "Selected Parts": 2,
+ "Initial Granules": 10,
+ "Selected Granules": 6
+ },
+ {
+ "Type": "Skip",
+ "Name": "t_minmax",
+ "Description": "minmax GRANULARITY 2",
+ "Initial Parts": 2,
+ "Selected Parts": 1,
+ "Initial Granules": 6,
+ "Selected Granules": 3
+ },
+ {
+ "Type": "Skip",
+ "Name": "t_set",
+ "Description": "set GRANULARITY 2",
+ "Initial Parts": 1,
+ "Selected Parts": 1,
+ "Initial Granules": 3,
+ "Selected Granules": 2
+ }
+ ]
}
]
}
@@ -89,15 +91,15 @@
ReadType: InReverseOrder
Parts: 1
Granules: 3
- ReadFromMergeTree (default.idx)
- Indexes:
- PrimaryKey
- Keys:
- x
- plus(x, y)
- Condition: or((x in 2-element set), (plus(plus(x, y), 1) in (-Inf, 2]))
- Parts: 1/1
- Granules: 1/1
+ ReadFromMergeTree (default.idx)
+ Indexes:
+ PrimaryKey
+ Keys:
+ x
+ plus(x, y)
+ Condition: or((x in 2-element set), (plus(plus(x, y), 1) in (-Inf, 2]))
+ Parts: 1/1
+ Granules: 1/1
ReadFromMergeTree (default.test_index)
Indexes:
MinMax
diff --git a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.reference b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.reference
index 686a864f222..ccd51cba776 100644
--- a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.reference
+++ b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.reference
@@ -1,12 +1,19 @@
1 Wide
2 Compact
35
-SELECT count()
-FROM t_move_to_prewhere
-PREWHERE a AND b AND c AND (NOT ignore(fat_string))
+ Filter
+ Filter column: and(a, b, c, not(ignore(fat_string))) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(a, b, c) (removed)
1 Compact
2 Compact
35
SELECT count()
FROM t_move_to_prewhere
-PREWHERE a AND b AND c AND (NOT ignore(fat_string))
+WHERE a AND b AND c AND (NOT ignore(fat_string))
+ Filter
+ Filter column: and(a, b, c, not(ignore(fat_string))) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: a
diff --git a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
index 2987c541aef..6ad804ac1b3 100644
--- a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
+++ b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
@@ -2,6 +2,7 @@
SET optimize_move_to_prewhere = 1;
SET convert_query_to_cnf = 0;
+SET move_all_conditions_to_prewhere = 0;
DROP TABLE IF EXISTS t_move_to_prewhere;
@@ -17,7 +18,7 @@ WHERE table = 't_move_to_prewhere' AND database = currentDatabase()
ORDER BY partition;
SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string);
-EXPLAIN SYNTAX SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string);
+SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%';
DROP TABLE IF EXISTS t_move_to_prewhere;
@@ -38,5 +39,6 @@ ORDER BY partition;
SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string);
EXPLAIN SYNTAX SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string);
+SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%';
DROP TABLE IF EXISTS t_move_to_prewhere;
diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference
index e60fb844de8..7382b24afbc 100644
--- a/tests/queries/0_stateless/02117_show_create_table_system.reference
+++ b/tests/queries/0_stateless/02117_show_create_table_system.reference
@@ -686,9 +686,6 @@ CREATE TABLE system.projection_parts
`rows_where_ttl_info.expression` Array(String),
`rows_where_ttl_info.min` Array(DateTime),
`rows_where_ttl_info.max` Array(DateTime),
- `is_broken` UInt8,
- `exception_code` Int32,
- `exception` String,
`bytes` UInt64 ALIAS bytes_on_disk,
`marks_size` UInt64 ALIAS marks_bytes,
`part_name` String ALIAS name
diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference
index f3415a34823..d608364e01b 100644
--- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference
+++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference
@@ -64,8 +64,10 @@ ExpressionTransform
(Sorting)
(Expression)
ExpressionTransform
- (ReadFromMergeTree)
- MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
+ (Expression)
+ ExpressionTransform
+ (ReadFromMergeTree)
+ MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
(Expression)
ExpressionTransform
(Limit)
@@ -91,8 +93,10 @@ ExpressionTransform
PartialSortingTransform
(Expression)
ExpressionTransform
- (ReadFromMergeTree)
- MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
+ (Expression)
+ ExpressionTransform
+ (ReadFromMergeTree)
+ MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
(Expression)
ExpressionTransform
(Limit)
@@ -115,7 +119,7 @@ SELECT
date,
i
FROM t_read_in_order
-PREWHERE date = \'2020-10-12\'
+WHERE date = \'2020-10-12\'
ORDER BY i DESC
LIMIT 5
(Expression)
@@ -125,9 +129,11 @@ ExpressionTransform
(Sorting)
(Expression)
ExpressionTransform
- (ReadFromMergeTree)
- ReverseTransform
- MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1
+ (Expression)
+ ExpressionTransform
+ (ReadFromMergeTree)
+ ReverseTransform
+ MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1
(Expression)
ExpressionTransform
(Limit)
diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere.reference b/tests/queries/0_stateless/02156_storage_merge_prewhere.reference
index 74ba452d783..86a36a9392c 100644
--- a/tests/queries/0_stateless/02156_storage_merge_prewhere.reference
+++ b/tests/queries/0_stateless/02156_storage_merge_prewhere.reference
@@ -1,12 +1,15 @@
-SELECT count()
-FROM t_02156_merge1
-PREWHERE notEmpty(v) AND (k = 3)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed)
2
-SELECT count()
-FROM t_02156_merge2
-WHERE (k = 3) AND notEmpty(v)
+ Filter column: and(equals(k, 3), notEmpty(v)) (removed)
2
-SELECT count()
-FROM t_02156_merge3
-WHERE (k = 3) AND notEmpty(v)
+ Filter column: and(equals(k, 3), notEmpty(v)) (removed)
+ Filter column: and(equals(k, 3), notEmpty(v)) (removed)
2
diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere.sql b/tests/queries/0_stateless/02156_storage_merge_prewhere.sql
index 83d88a68d9b..ca61a8f2d57 100644
--- a/tests/queries/0_stateless/02156_storage_merge_prewhere.sql
+++ b/tests/queries/0_stateless/02156_storage_merge_prewhere.sql
@@ -1,5 +1,6 @@
SET optimize_move_to_prewhere = 1;
SET enable_multiple_prewhere_read_steps = 1;
+SET prefer_localhost_replica = 1; -- Make sure the query plan is deterministic
DROP TABLE IF EXISTS t_02156_mt1;
DROP TABLE IF EXISTS t_02156_mt2;
@@ -23,13 +24,13 @@ INSERT INTO t_02156_mt1 SELECT number, toString(number) FROM numbers(10000);
INSERT INTO t_02156_mt2 SELECT number, toString(number) FROM numbers(10000);
INSERT INTO t_02156_log SELECT number, toString(number) FROM numbers(10000);
-EXPLAIN SYNTAX SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v);
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v);
-EXPLAIN SYNTAX SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v);
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v);
-EXPLAIN SYNTAX SELECT count() FROM t_02156_merge3 WHERE k = 3 AND notEmpty(v);
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge3 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count() FROM t_02156_merge3 WHERE k = 3 AND notEmpty(v);
DROP TABLE IF EXISTS t_02156_mt1;
diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.reference b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.reference
index 69571551c2b..85e8a802bdc 100644
--- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.reference
+++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.reference
@@ -56,6 +56,7 @@ algorithm: Thread
Sorting (Stream): a ASC, b ASC
Sorting (Stream): a ASC, b ASC
Sorting (Stream): a ASC, b ASC
+Sorting (Stream): a ASC, b ASC
-- check that reading in order optimization for ORDER BY and DISTINCT applied correctly in the same query
-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns
Sorting (Stream): a ASC
diff --git a/tests/queries/0_stateless/02346_inverted_index_match_predicate.reference b/tests/queries/0_stateless/02346_inverted_index_match_predicate.reference
index 9dc8d5b76d9..84fc422379c 100644
--- a/tests/queries/0_stateless/02346_inverted_index_match_predicate.reference
+++ b/tests/queries/0_stateless/02346_inverted_index_match_predicate.reference
@@ -1,20 +1,20 @@
1 Hello ClickHouse
2 Hello World
- Granules: 6/6
- Granules: 2/6
+ Granules: 6/6
+ Granules: 2/6
Granules: 6/6
Granules: 2/6
---
1 Hello ClickHouse
2 Hello World
6 World Champion
- Granules: 6/6
- Granules: 3/6
+ Granules: 6/6
+ Granules: 3/6
Granules: 6/6
Granules: 3/6
---
5 OLAP Database
- Granules: 6/6
- Granules: 1/6
+ Granules: 6/6
+ Granules: 1/6
Granules: 6/6
Granules: 1/6
diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference
index befa6af5a08..41c1915ecc3 100644
--- a/tests/queries/0_stateless/02354_vector_search_queries.reference
+++ b/tests/queries/0_stateless/02354_vector_search_queries.reference
@@ -18,17 +18,18 @@ ARRAY, 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule,
- Annoy: WHERE-type, EXPLAIN
Expression ((Projection + Before ORDER BY))
Limit (preliminary LIMIT (without OFFSET))
- ReadFromMergeTree (default.tab_annoy)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 1/1
- Skip
- Name: idx
- Description: annoy GRANULARITY 100000000
- Parts: 1/1
- Granules: 1/1
+ Expression
+ ReadFromMergeTree (default.tab_annoy)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 1/1
+ Skip
+ Name: idx
+ Description: annoy GRANULARITY 100000000
+ Parts: 1/1
+ Granules: 1/1
- Annoy: ORDER-BY-type, EXPLAIN
Expression (Projection)
Limit (preliminary LIMIT (without OFFSET))
@@ -48,17 +49,18 @@ Expression (Projection)
- Usearch: WHERE-type, EXPLAIN
Expression ((Projection + Before ORDER BY))
Limit (preliminary LIMIT (without OFFSET))
- ReadFromMergeTree (default.tab_usearch)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 1/1
- Skip
- Name: idx
- Description: usearch GRANULARITY 100000000
- Parts: 1/1
- Granules: 1/1
+ Expression
+ ReadFromMergeTree (default.tab_usearch)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 1/1
+ Skip
+ Name: idx
+ Description: usearch GRANULARITY 100000000
+ Parts: 1/1
+ Granules: 1/1
- Usearch: ORDER-BY-type, EXPLAIN
Expression (Projection)
Limit (preliminary LIMIT (without OFFSET))
@@ -95,17 +97,18 @@ ARRAY vectors, 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2
- Annoy: WHERE-type, EXPLAIN
Expression ((Projection + Before ORDER BY))
Limit (preliminary LIMIT (without OFFSET))
- ReadFromMergeTree (default.tab_annoy)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 4/4
- Skip
- Name: idx
- Description: annoy GRANULARITY 2
- Parts: 1/1
- Granules: 1/4
+ Expression
+ ReadFromMergeTree (default.tab_annoy)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 4/4
+ Skip
+ Name: idx
+ Description: annoy GRANULARITY 2
+ Parts: 1/1
+ Granules: 1/4
- Annoy: ORDER-BY-type, EXPLAIN
Expression (Projection)
Limit (preliminary LIMIT (without OFFSET))
@@ -125,17 +128,18 @@ Expression (Projection)
- Usearch: WHERE-type, EXPLAIN
Expression ((Projection + Before ORDER BY))
Limit (preliminary LIMIT (without OFFSET))
- ReadFromMergeTree (default.tab_usearch)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 4/4
- Skip
- Name: idx
- Description: usearch GRANULARITY 2
- Parts: 1/1
- Granules: 1/4
+ Expression
+ ReadFromMergeTree (default.tab_usearch)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 4/4
+ Skip
+ Name: idx
+ Description: usearch GRANULARITY 2
+ Parts: 1/1
+ Granules: 1/4
- Usearch: ORDER-BY-type, EXPLAIN
Expression (Projection)
Limit (preliminary LIMIT (without OFFSET))
diff --git a/tests/queries/0_stateless/02402_merge_engine_with_view.sql b/tests/queries/0_stateless/02402_merge_engine_with_view.sql
index ae9de1426e7..81c2d67d05b 100644
--- a/tests/queries/0_stateless/02402_merge_engine_with_view.sql
+++ b/tests/queries/0_stateless/02402_merge_engine_with_view.sql
@@ -11,4 +11,4 @@ SELECT * FROM m2 WHERE id > 1 AND id < 5 ORDER BY id SETTINGS force_primary_key=
-- #40706
CREATE VIEW v AS SELECT 1;
-SELECT 1 FROM merge(currentDatabase(), '^v$');
\ No newline at end of file
+SELECT 1 FROM merge(currentDatabase(), '^v$');
diff --git a/tests/queries/0_stateless/02481_aggregation_in_order_plan.reference b/tests/queries/0_stateless/02481_aggregation_in_order_plan.reference
index 969ec320790..ec3d1c15690 100644
--- a/tests/queries/0_stateless/02481_aggregation_in_order_plan.reference
+++ b/tests/queries/0_stateless/02481_aggregation_in_order_plan.reference
@@ -4,7 +4,7 @@
0 1 2 200
Aggregating
Order: a ASC, c ASC
- ReadFromMergeTree (default.tab)
+ ReadFromMergeTree (default.tab)
Aggregating
Order: __table1.a ASC, __table1.c ASC
ReadFromMergeTree (default.tab)
diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference
index 7e43f249a74..9bb0c022752 100644
--- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference
+++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference
@@ -13,7 +13,7 @@ FROM
day_,
type_1
FROM test_grouping_sets_predicate
- PREWHERE day_ = \'2023-01-05\'
+ WHERE day_ = \'2023-01-05\'
GROUP BY
GROUPING SETS (
(day_, type_1),
@@ -39,8 +39,10 @@ ExpressionTransform × 2
Copy 1 → 2
(Expression)
ExpressionTransform
- (ReadFromMergeTree)
- MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
+ (Expression)
+ ExpressionTransform
+ (ReadFromMergeTree)
+ MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
(Expression)
ExpressionTransform × 2
(Filter)
@@ -70,8 +72,8 @@ ExpressionTransform × 2
FilterTransform
(Filter)
FilterTransform
- (Filter)
- FilterTransform
+ (Expression)
+ ExpressionTransform
(ReadFromMergeTree)
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
(Expression)
diff --git a/tests/queries/0_stateless/02559_multiple_read_steps_in_prewhere.sql b/tests/queries/0_stateless/02559_multiple_read_steps_in_prewhere.sql
index 1e969afac33..805186edcbd 100644
--- a/tests/queries/0_stateless/02559_multiple_read_steps_in_prewhere.sql
+++ b/tests/queries/0_stateless/02559_multiple_read_steps_in_prewhere.sql
@@ -1,6 +1,6 @@
DROP TABLE IF EXISTS test_02559;
-CREATE TABLE test_02559 (id1 UInt64, id2 UInt64) ENGINE=MergeTree ORDER BY id1;
+CREATE TABLE test_02559 (id1 UInt64, id2 UInt64) ENGINE=MergeTree ORDER BY id1 SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO test_02559 SELECT number, number FROM numbers(10);
diff --git a/tests/queries/0_stateless/02725_agg_projection_resprect_PK.reference b/tests/queries/0_stateless/02725_agg_projection_resprect_PK.reference
index e6b95502e1e..80bff2c12b3 100644
--- a/tests/queries/0_stateless/02725_agg_projection_resprect_PK.reference
+++ b/tests/queries/0_stateless/02725_agg_projection_resprect_PK.reference
@@ -1,2 +1,2 @@
ReadFromMergeTree (p1)
- Granules: 1/12
+ Granules: 1/12
diff --git a/tests/queries/0_stateless/02771_ignore_data_skipping_indices.reference b/tests/queries/0_stateless/02771_ignore_data_skipping_indices.reference
index 33df18c8801..e23e3094ca3 100644
--- a/tests/queries/0_stateless/02771_ignore_data_skipping_indices.reference
+++ b/tests/queries/0_stateless/02771_ignore_data_skipping_indices.reference
@@ -1,43 +1,43 @@
1 2 3
1 2 3
1 2 3
- ReadFromMergeTree (default.data_02771)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 1/1
- Skip
- Name: x_idx
- Description: minmax GRANULARITY 1
- Parts: 0/1
- Granules: 0/1
- Skip
- Name: y_idx
- Description: minmax GRANULARITY 1
- Parts: 0/0
- Granules: 0/0
- Skip
- Name: xy_idx
- Description: minmax GRANULARITY 1
- Parts: 0/0
- Granules: 0/0
- ReadFromMergeTree (default.data_02771)
- Indexes:
- PrimaryKey
- Condition: true
- Parts: 1/1
- Granules: 1/1
- Skip
- Name: x_idx
- Description: minmax GRANULARITY 1
- Parts: 0/1
- Granules: 0/1
- Skip
- Name: y_idx
- Description: minmax GRANULARITY 1
- Parts: 0/0
- Granules: 0/0
+ ReadFromMergeTree (default.data_02771)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 1/1
+ Skip
+ Name: x_idx
+ Description: minmax GRANULARITY 1
+ Parts: 0/1
+ Granules: 0/1
+ Skip
+ Name: y_idx
+ Description: minmax GRANULARITY 1
+ Parts: 0/0
+ Granules: 0/0
+ Skip
+ Name: xy_idx
+ Description: minmax GRANULARITY 1
+ Parts: 0/0
+ Granules: 0/0
+ ReadFromMergeTree (default.data_02771)
+ Indexes:
+ PrimaryKey
+ Condition: true
+ Parts: 1/1
+ Granules: 1/1
+ Skip
+ Name: x_idx
+ Description: minmax GRANULARITY 1
+ Parts: 0/1
+ Granules: 0/1
+ Skip
+ Name: y_idx
+ Description: minmax GRANULARITY 1
+ Parts: 0/0
+ Granules: 0/0
ReadFromMergeTree (default.data_02771)
Indexes:
PrimaryKey
diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect
index 8ba5774820e..3798acf2a93 100755
--- a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect
+++ b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect
@@ -123,7 +123,7 @@ expect -- "| dt_tz2 | DATETIME | NO | | NULL | |
expect -- "| enm | TEXT | NO | | NULL | |"
expect -- "| f32 | FLOAT | NO | | NULL | |"
expect -- "| f64 | DOUBLE | NO | | NULL | |"
-expect -- "| fs | BLOB | NO | | NULL | |"
+expect -- "| fs | TEXT | NO | | NULL | |"
expect -- "| i128 | TEXT | NO | | NULL | |"
expect -- "| i16 | SMALLINT | NO | | NULL | |"
expect -- "| i256 | TEXT | NO | | NULL | |"
@@ -132,74 +132,8 @@ expect -- "| i64 | BIGINT | NO | | NULL | |
expect -- "| i8 | TINYINT | NO | | NULL | |"
expect -- "| ip4 | TEXT | NO | | NULL | |"
expect -- "| ip6 | TEXT | NO | | NULL | |"
-expect -- "| lfs | BLOB | NO | | NULL | |"
-expect -- "| lnfs | BLOB | YES | | NULL | |"
-expect -- "| lns | BLOB | YES | | NULL | |"
-expect -- "| ls | BLOB | NO | | NULL | |"
-expect -- "| m | JSON | NO | | NULL | |"
-expect -- "| m_complex | JSON | NO | | NULL | |"
-expect -- "| mpg | TEXT | NO | | NULL | |"
-expect -- "| ndt64 | DATETIME | YES | | NULL | |"
-expect -- "| ndt64_tz | DATETIME | YES | | NULL | |"
-expect -- "| nested.col1 | TEXT | NO | | NULL | |"
-expect -- "| nested.col2 | TEXT | NO | | NULL | |"
-expect -- "| nfs | BLOB | YES | | NULL | |"
-expect -- "| ns | BLOB | YES | | NULL | |"
-expect -- "| o | JSON | NO | | NULL | |"
-expect -- "| p | TEXT | NO | | NULL | |"
-expect -- "| pg | TEXT | NO | | NULL | |"
-expect -- "| r | TEXT | NO | | NULL | |"
-expect -- "| s | BLOB | NO | | NULL | |"
-expect -- "| sagg | TEXT | NO | | NULL | |"
-expect -- "| t | JSON | NO | | NULL | |"
-expect -- "| ui128 | TEXT | NO | | NULL | |"
-expect -- "| ui16 | SMALLINT UNSIGNED | NO | | NULL | |"
-expect -- "| ui256 | TEXT | NO | | NULL | |"
-expect -- "| ui32 | INTEGER UNSIGNED | NO | | NULL | |"
-expect -- "| ui64 | BIGINT UNSIGNED | NO | | NULL | |"
-expect -- "| ui8 | TINYINT UNSIGNED | NO | | NULL | |"
-expect -- "| uuid | CHAR | NO | | NULL | |"
-expect -- "+---------------+-------------------+------+------+---------+-------+"
-
-send -- "SHOW COLUMNS FROM tab SETTINGS mysql_map_string_to_text_in_show_columns=1;\r"
-expect -- "+---------------+-------------------+------+------+---------+-------+"
-expect -- "| field | type | null | key | default | extra |"
-expect -- "+---------------+-------------------+------+------+---------+-------+"
-expect -- "| a | TEXT | NO | | NULL | |"
-expect -- "| agg | TEXT | NO | | NULL | |"
-expect -- "| b | TINYINT | NO | | NULL | |"
-expect -- "| d | DATE | NO | | NULL | |"
-expect -- "| d32 | DATE | NO | | NULL | |"
-expect -- "| dec128 | DECIMAL(38, 2) | NO | | NULL | |"
-expect -- "| dec128_native | DECIMAL(35, 30) | NO | | NULL | |"
-expect -- "| dec128_text | TEXT | NO | | NULL | |"
-expect -- "| dec256 | TEXT | NO | | NULL | |"
-expect -- "| dec256_native | DECIMAL(65, 2) | NO | | NULL | |"
-expect -- "| dec256_text | TEXT | NO | | NULL | |"
-expect -- "| dec32 | DECIMAL(9, 2) | NO | | NULL | |"
-expect -- "| dec64 | DECIMAL(18, 2) | NO | | NULL | |"
-expect -- "| dt | DATETIME | NO | | NULL | |"
-expect -- "| dt64 | DATETIME | NO | | NULL | |"
-expect -- "| dt64_3_tz1 | DATETIME | NO | | NULL | |"
-expect -- "| dt64_3_tz2 | DATETIME | NO | | NULL | |"
-expect -- "| dt64_6 | DATETIME | NO | | NULL | |"
-expect -- "| dt64_9 | DATETIME | NO | | NULL | |"
-expect -- "| dt_tz1 | DATETIME | NO | | NULL | |"
-expect -- "| dt_tz2 | DATETIME | NO | | NULL | |"
-expect -- "| enm | TEXT | NO | | NULL | |"
-expect -- "| f32 | FLOAT | NO | | NULL | |"
-expect -- "| f64 | DOUBLE | NO | | NULL | |"
-expect -- "| fs | BLOB | NO | | NULL | |"
-expect -- "| i128 | TEXT | NO | | NULL | |"
-expect -- "| i16 | SMALLINT | NO | | NULL | |"
-expect -- "| i256 | TEXT | NO | | NULL | |"
-expect -- "| i32 | INTEGER | NO | | NULL | |"
-expect -- "| i64 | BIGINT | NO | | NULL | |"
-expect -- "| i8 | TINYINT | NO | | NULL | |"
-expect -- "| ip4 | TEXT | NO | | NULL | |"
-expect -- "| ip6 | TEXT | NO | | NULL | |"
-expect -- "| lfs | BLOB | NO | | NULL | |"
-expect -- "| lnfs | BLOB | YES | | NULL | |"
+expect -- "| lfs | TEXT | NO | | NULL | |"
+expect -- "| lnfs | TEXT | YES | | NULL | |"
expect -- "| lns | TEXT | YES | | NULL | |"
expect -- "| ls | TEXT | NO | | NULL | |"
expect -- "| m | JSON | NO | | NULL | |"
@@ -209,7 +143,7 @@ expect -- "| ndt64 | DATETIME | YES | | NULL | |
expect -- "| ndt64_tz | DATETIME | YES | | NULL | |"
expect -- "| nested.col1 | TEXT | NO | | NULL | |"
expect -- "| nested.col2 | TEXT | NO | | NULL | |"
-expect -- "| nfs | BLOB | YES | | NULL | |"
+expect -- "| nfs | TEXT | YES | | NULL | |"
expect -- "| ns | TEXT | YES | | NULL | |"
expect -- "| o | JSON | NO | | NULL | |"
expect -- "| p | TEXT | NO | | NULL | |"
@@ -227,7 +161,7 @@ expect -- "| ui8 | TINYINT UNSIGNED | NO | | NULL | |
expect -- "| uuid | CHAR | NO | | NULL | |"
expect -- "+---------------+-------------------+------+------+---------+-------+"
-send -- "SHOW COLUMNS FROM tab SETTINGS mysql_map_fixed_string_to_text_in_show_columns=1;\r"
+send -- "SHOW COLUMNS FROM tab SETTINGS mysql_map_string_to_text_in_show_columns=0;\r"
expect -- "+---------------+-------------------+------+------+---------+-------+"
expect -- "| field | type | null | key | default | extra |"
expect -- "+---------------+-------------------+------+------+---------+-------+"
@@ -293,6 +227,73 @@ expect -- "| ui8 | TINYINT UNSIGNED | NO | | NULL | |
expect -- "| uuid | CHAR | NO | | NULL | |"
expect -- "+---------------+-------------------+------+------+---------+-------+"
+send -- "SHOW COLUMNS FROM tab SETTINGS mysql_map_fixed_string_to_text_in_show_columns=0;\r"
+expect -- "+---------------+-------------------+------+------+---------+-------+"
+expect -- "| field | type | null | key | default | extra |"
+expect -- "+---------------+-------------------+------+------+---------+-------+"
+expect -- "| a | TEXT | NO | | NULL | |"
+expect -- "| agg | TEXT | NO | | NULL | |"
+expect -- "| b | TINYINT | NO | | NULL | |"
+expect -- "| d | DATE | NO | | NULL | |"
+expect -- "| d32 | DATE | NO | | NULL | |"
+expect -- "| dec128 | DECIMAL(38, 2) | NO | | NULL | |"
+expect -- "| dec128_native | DECIMAL(35, 30) | NO | | NULL | |"
+expect -- "| dec128_text | TEXT | NO | | NULL | |"
+expect -- "| dec256 | TEXT | NO | | NULL | |"
+expect -- "| dec256_native | DECIMAL(65, 2) | NO | | NULL | |"
+expect -- "| dec256_text | TEXT | NO | | NULL | |"
+expect -- "| dec32 | DECIMAL(9, 2) | NO | | NULL | |"
+expect -- "| dec64 | DECIMAL(18, 2) | NO | | NULL | |"
+expect -- "| dt | DATETIME | NO | | NULL | |"
+expect -- "| dt64 | DATETIME | NO | | NULL | |"
+expect -- "| dt64_3_tz1 | DATETIME | NO | | NULL | |"
+expect -- "| dt64_3_tz2 | DATETIME | NO | | NULL | |"
+expect -- "| dt64_6 | DATETIME | NO | | NULL | |"
+expect -- "| dt64_9 | DATETIME | NO | | NULL | |"
+expect -- "| dt_tz1 | DATETIME | NO | | NULL | |"
+expect -- "| dt_tz2 | DATETIME | NO | | NULL | |"
+expect -- "| enm | TEXT | NO | | NULL | |"
+expect -- "| f32 | FLOAT | NO | | NULL | |"
+expect -- "| f64 | DOUBLE | NO | | NULL | |"
+expect -- "| fs | BLOB | NO | | NULL | |"
+expect -- "| i128 | TEXT | NO | | NULL | |"
+expect -- "| i16 | SMALLINT | NO | | NULL | |"
+expect -- "| i256 | TEXT | NO | | NULL | |"
+expect -- "| i32 | INTEGER | NO | | NULL | |"
+expect -- "| i64 | BIGINT | NO | | NULL | |"
+expect -- "| i8 | TINYINT | NO | | NULL | |"
+expect -- "| ip4 | TEXT | NO | | NULL | |"
+expect -- "| ip6 | TEXT | NO | | NULL | |"
+expect -- "| lfs | BLOB | NO | | NULL | |"
+expect -- "| lnfs | BLOB | YES | | NULL | |"
+expect -- "| lns | TEXT | YES | | NULL | |"
+expect -- "| ls | TEXT | NO | | NULL | |"
+expect -- "| m | JSON | NO | | NULL | |"
+expect -- "| m_complex | JSON | NO | | NULL | |"
+expect -- "| mpg | TEXT | NO | | NULL | |"
+expect -- "| ndt64 | DATETIME | YES | | NULL | |"
+expect -- "| ndt64_tz | DATETIME | YES | | NULL | |"
+expect -- "| nested.col1 | TEXT | NO | | NULL | |"
+expect -- "| nested.col2 | TEXT | NO | | NULL | |"
+expect -- "| nfs | BLOB | YES | | NULL | |"
+expect -- "| ns | TEXT | YES | | NULL | |"
+expect -- "| o | JSON | NO | | NULL | |"
+expect -- "| p | TEXT | NO | | NULL | |"
+expect -- "| pg | TEXT | NO | | NULL | |"
+expect -- "| r | TEXT | NO | | NULL | |"
+expect -- "| s | TEXT | NO | | NULL | |"
+expect -- "| sagg | TEXT | NO | | NULL | |"
+expect -- "| t | JSON | NO | | NULL | |"
+expect -- "| ui128 | TEXT | NO | | NULL | |"
+expect -- "| ui16 | SMALLINT UNSIGNED | NO | | NULL | |"
+expect -- "| ui256 | TEXT | NO | | NULL | |"
+expect -- "| ui32 | INTEGER UNSIGNED | NO | | NULL | |"
+expect -- "| ui64 | BIGINT UNSIGNED | NO | | NULL | |"
+expect -- "| ui8 | TINYINT UNSIGNED | NO | | NULL | |"
+expect -- "| uuid | CHAR | NO | | NULL | |"
+expect -- "+---------------+-------------------+------+------+---------+-------+"
+
+
send -- "DROP TABLE tab;"
send -- "quit;\r"
diff --git a/tests/queries/0_stateless/02809_prewhere_and_in.reference b/tests/queries/0_stateless/02809_prewhere_and_in.reference
index 3080ae862bb..54ea18b3eab 100644
--- a/tests/queries/0_stateless/02809_prewhere_and_in.reference
+++ b/tests/queries/0_stateless/02809_prewhere_and_in.reference
@@ -1,8 +1,8 @@
-PREWHERE a IN
-PREWHERE a IN
-PREWHERE a IN
-PREWHERE a IN
-PREWHERE b NOT IN
-PREWHERE b NOT IN
-PREWHERE b NOT IN
-PREWHERE b NOT IN
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
+ Prewhere filter
diff --git a/tests/queries/0_stateless/02809_prewhere_and_in.sql b/tests/queries/0_stateless/02809_prewhere_and_in.sql
index 345577d6c7c..448f9512cf6 100644
--- a/tests/queries/0_stateless/02809_prewhere_and_in.sql
+++ b/tests/queries/0_stateless/02809_prewhere_and_in.sql
@@ -16,40 +16,16 @@ AS SELECT * FROM numbers(10);
SET optimize_move_to_prewhere=1;
-- Queries with 'IN'
-SELECT substring(explain, 1, 13) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE a IN (SELECT * FROM system.one)
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 13) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE a IN (1,2,3)
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 13) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE a IN t_02809_set
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 13) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE a IN t_02809_aux
-) WHERE explain LIKE '%WHERE%';
-
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN (SELECT * FROM system.one)) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN (1,2,3)) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN t_02809_set) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN t_02809_aux) WHERE explain LIKE '%Prewhere filter';
-- Queries with 'NOT IN'
-SELECT substring(explain, 1, 17) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE b NOT IN (SELECT * FROM system.one)
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 17) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE b NOT IN (1,2,3)
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 17) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE b NOT IN t_02809_set
-) WHERE explain LIKE '%WHERE%';
-
-SELECT substring(explain, 1, 17) FROM (EXPLAIN SYNTAX
- SELECT * FROM t_02809 WHERE b NOT IN t_02809_aux
-) WHERE explain LIKE '%WHERE%';
-
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN (SELECT * FROM system.one)) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN (1,2,3)) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN t_02809_set) WHERE explain LIKE '%Prewhere filter';
+SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN t_02809_aux) WHERE explain LIKE '%Prewhere filter';
DROP TABLE t_02809;
DROP TABLE t_02809_set;
diff --git a/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.reference b/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.reference
index 6e04d969e67..b91a4dd2f68 100644
--- a/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.reference
+++ b/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.reference
@@ -1,20 +1,15 @@
-SELECT count()
-FROM t_02848_mt1
-PREWHERE notEmpty(v) AND (k = 3)
+ Prewhere filter
+ Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed)
1
-SELECT count()
-FROM t_02848_mt2
-PREWHERE (d LIKE \'%es%\') AND (c < 20) AND (b = \'3\') AND (a = 3)
+ Prewhere filter
+ Prewhere filter column: and(like(d, \'%es%\'), less(c, 20), equals(b, \'3\'), equals(a, 3)) (removed)
1
-SELECT count()
-FROM t_02848_mt2
-PREWHERE (d LIKE \'%es%\') AND (c < 20) AND (c > 0) AND (a = 3)
+ Prewhere filter
+ Prewhere filter column: and(like(d, \'%es%\'), less(c, 20), greater(c, 0), equals(a, 3)) (removed)
1
-SELECT count()
-FROM t_02848_mt2
-PREWHERE (d LIKE \'%es%\') AND (b = \'3\') AND (c < 20)
+ Prewhere filter
+ Prewhere filter column: and(like(d, \'%es%\'), equals(b, \'3\'), less(c, 20)) (removed)
1
-SELECT count()
-FROM t_02848_mt2
-PREWHERE (d LIKE \'%es%\') AND (b = \'3\') AND (a = 3)
+ Prewhere filter
+ Prewhere filter column: and(like(d, \'%es%\'), equals(b, \'3\'), equals(a, 3)) (removed)
1
diff --git a/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.sql b/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.sql
index bc9d7e5664e..f863d765798 100644
--- a/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.sql
+++ b/tests/queries/0_stateless/02842_move_pk_to_end_of_prewhere.sql
@@ -8,7 +8,7 @@ CREATE TABLE t_02848_mt1 (k UInt32, v String) ENGINE = MergeTree ORDER BY k SETT
INSERT INTO t_02848_mt1 SELECT number, toString(number) FROM numbers(100);
-EXPLAIN SYNTAX SELECT count() FROM t_02848_mt1 WHERE k = 3 AND notEmpty(v);
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%';
SELECT count() FROM t_02848_mt1 WHERE k = 3 AND notEmpty(v);
CREATE TABLE t_02848_mt2 (a UInt32, b String, c Int32, d String) ENGINE = MergeTree ORDER BY (a,b,c) SETTINGS min_bytes_for_wide_part=0;
@@ -18,16 +18,16 @@ INSERT INTO t_02848_mt2 SELECT number, toString(number), number, 'aaaabbbbccccdd
-- the estimated column sizes are: {a: 428, b: 318, c: 428, d: 73}
-- it's not correct but let's fix it in the future.
-EXPLAIN SYNTAX SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND c < 20 AND d like '%es%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND c < 20 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%';
SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND c < 20 AND d like '%es%';
-EXPLAIN SYNTAX SELECT count() FROM t_02848_mt2 WHERE a = 3 AND c < 20 AND c > 0 AND d like '%es%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND c < 20 AND c > 0 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%';
SELECT count() FROM t_02848_mt2 WHERE a = 3 AND c < 20 AND c > 0 AND d like '%es%';
-EXPLAIN SYNTAX SELECT count() FROM t_02848_mt2 WHERE b == '3' AND c < 20 AND d like '%es%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE b == '3' AND c < 20 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%';
SELECT count() FROM t_02848_mt2 WHERE b == '3' AND c < 20 AND d like '%es%';
-EXPLAIN SYNTAX SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND d like '%es%';
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%';
SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND d like '%es%';
DROP TABLE t_02848_mt1;
diff --git a/tests/queries/0_stateless/02864_statistic_operate.reference b/tests/queries/0_stateless/02864_statistic_operate.reference
index 7fad7c810c1..3e291485031 100644
--- a/tests/queries/0_stateless/02864_statistic_operate.reference
+++ b/tests/queries/0_stateless/02864_statistic_operate.reference
@@ -1,31 +1,31 @@
CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `b` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
After insert
-SELECT count()
-FROM t1
-PREWHERE (a < 10) AND (b < 10)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(less(a, 10), less(b, 10)) (removed)
10
0
After drop statistic
-SELECT count()
-FROM t1
-PREWHERE (b < 10) AND (a < 10)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(less(b, 10), less(a, 10)) (removed)
10
CREATE TABLE default.t1\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
After add statistic
CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `b` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
After materialize statistic
-SELECT count()
-FROM t1
-PREWHERE (a < 10) AND (b < 10)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(less(a, 10), less(b, 10)) (removed)
20
After merge
-SELECT count()
-FROM t1
-PREWHERE (a < 10) AND (b < 10)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(less(a, 10), less(b, 10)) (removed)
20
CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `c` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
After rename
-SELECT count()
-FROM t1
-PREWHERE (a < 10) AND (c < 10)
+ Prewhere info
+ Prewhere filter
+ Prewhere filter column: and(less(a, 10), less(c, 10)) (removed)
20
diff --git a/tests/queries/0_stateless/02864_statistic_operate.sql b/tests/queries/0_stateless/02864_statistic_operate.sql
index 29bd213f04a..5f1c30f8eec 100644
--- a/tests/queries/0_stateless/02864_statistic_operate.sql
+++ b/tests/queries/0_stateless/02864_statistic_operate.sql
@@ -3,7 +3,7 @@ DROP TABLE IF EXISTS t1;
SET allow_experimental_statistic = 1;
SET allow_statistic_optimize = 1;
-CREATE TABLE t1
+CREATE TABLE t1
(
a Float64 STATISTIC(tdigest),
b Int64 STATISTIC(tdigest),
@@ -16,14 +16,14 @@ SHOW CREATE TABLE t1;
INSERT INTO t1 select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000;
SELECT 'After insert';
-EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
SELECT count(*) FROM t1 WHERE b < NULL and a < '10';
ALTER TABLE t1 DROP STATISTIC a, b TYPE tdigest;
SELECT 'After drop statistic';
-EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
SHOW CREATE TABLE t1;
@@ -38,20 +38,20 @@ ALTER TABLE t1 MATERIALIZE STATISTIC a, b TYPE tdigest;
INSERT INTO t1 select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000;
SELECT 'After materialize statistic';
-EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
OPTIMIZE TABLE t1 FINAL;
SELECT 'After merge';
-EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count(*) FROM t1 WHERE b < 10 and a < 10;
ALTER TABLE t1 RENAME COLUMN b TO c;
SHOW CREATE TABLE t1;
SELECT 'After rename';
-EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE c < 10 and a < 10;
+SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT count(*) FROM t1 WHERE c < 10 and a < 10;
DROP TABLE IF EXISTS t1;
diff --git a/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.reference b/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.reference
index 7a5e798359b..8999deae9f6 100644
--- a/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.reference
+++ b/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.reference
@@ -1,47 +1,51 @@
-CreatingSets
- Expression
- ReadFromMergeTree
- Indexes:
- PrimaryKey
- Keys:
- id
- value
- Condition: and((id in (-Inf, 10]), (value in 1-element set))
- Parts: 1/1
- Granules: 1/1
-CreatingSets
- Expression
- ReadFromMergeTree
- Indexes:
- PrimaryKey
- Keys:
- id
- value
- Condition: and((id in (-Inf, 10]), (value in 1-element set))
- Parts: 1/1
- Granules: 1/1
-CreatingSets
- Expression
- ReadFromMergeTree
- Indexes:
- PrimaryKey
- Keys:
- id
- value
- Condition: and((id in (-Inf, 10]), (value in 5-element set))
- Parts: 1/1
- Granules: 1/1
-CreatingSets
- Expression
- ReadFromMergeTree
- Indexes:
- PrimaryKey
- Keys:
- id
- value
- Condition: and((id in (-Inf, 10]), (value in 5-element set))
- Parts: 1/1
- Granules: 1/1
+CreatingSets
+ Expression
+ Expression
+ ReadFromMergeTree
+ Indexes:
+ PrimaryKey
+ Keys:
+ id
+ value
+ Condition: and((value in 1-element set), (id in (-Inf, 10]))
+ Parts: 1/1
+ Granules: 1/1
+CreatingSets
+ Expression
+ Expression
+ ReadFromMergeTree
+ Indexes:
+ PrimaryKey
+ Keys:
+ id
+ value
+ Condition: and((value in 1-element set), (id in (-Inf, 10]))
+ Parts: 1/1
+ Granules: 1/1
+CreatingSets
+ Expression
+ Expression
+ ReadFromMergeTree
+ Indexes:
+ PrimaryKey
+ Keys:
+ id
+ value
+ Condition: and((value in 5-element set), (id in (-Inf, 10]))
+ Parts: 1/1
+ Granules: 1/1
+CreatingSets
+ Expression
+ Expression
+ ReadFromMergeTree
+ Indexes:
+ PrimaryKey
+ Keys:
+ id
+ value
+ Condition: and((value in 5-element set), (id in (-Inf, 10]))
+ Parts: 1/1
+ Granules: 1/1
CreatingSets
Expression
Expression
diff --git a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference
index 3f5700b6d63..786a6b3bf25 100644
--- a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference
+++ b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference
@@ -7,3 +7,6 @@ Expression ((Projection + Before ORDER BY))
Aggregating
Expression (Before GROUP BY)
ReadFromMerge
+ ReadFromMergeTree (default.mt1)
+ ReadFromMergeTree (default.mt2)
+ ReadFromStorage (TinyLog)
diff --git a/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.reference b/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.reference
index 1cf1644fe0a..0e1954cde62 100644
--- a/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.reference
+++ b/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.reference
@@ -2,12 +2,12 @@
2 Hello World
1 Hello ClickHouse
2 Hello World
- Granules: 6/6
- Granules: 2/6
Granules: 6/6
Granules: 2/6
- Granules: 6/6
- Granules: 2/6
+ Granules: 6/6
+ Granules: 2/6
+ Granules: 6/6
+ Granules: 2/6
Granules: 6/6
Granules: 2/6
---
@@ -17,22 +17,22 @@
1 Hello ClickHouse
2 Hello World
6 World Champion
- Granules: 6/6
- Granules: 3/6
Granules: 6/6
Granules: 3/6
- Granules: 6/6
- Granules: 3/6
+ Granules: 6/6
+ Granules: 3/6
+ Granules: 6/6
+ Granules: 3/6
Granules: 6/6
Granules: 3/6
---
5 OLAP Database
5 OLAP Database
- Granules: 6/6
- Granules: 1/6
Granules: 6/6
Granules: 1/6
- Granules: 6/6
- Granules: 1/6
+ Granules: 6/6
+ Granules: 1/6
+ Granules: 6/6
+ Granules: 1/6
Granules: 6/6
Granules: 1/6
diff --git a/tests/queries/0_stateless/02961_storage_config_volume_priority.reference b/tests/queries/0_stateless/02961_storage_config_volume_priority.reference
new file mode 100644
index 00000000000..ba48e75ae25
--- /dev/null
+++ b/tests/queries/0_stateless/02961_storage_config_volume_priority.reference
@@ -0,0 +1,9 @@
+vol2_02961 1
+vol1_02961 2
+vol_untagged2_02961 3
+vol_untagged1_02961 4
+check non-unique values don't work
+1
+check no gaps in range allowed
+1
+restore valid config
diff --git a/tests/queries/0_stateless/02961_storage_config_volume_priority.sh b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh
new file mode 100755
index 00000000000..4e085541a8d
--- /dev/null
+++ b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest, no-parallel, no-random-settings
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+
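+# Show the volume priorities configured for policy_02961, then break the config
+# in two ways and check that SYSTEM RELOAD CONFIG rejects each variant.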
+$CLICKHOUSE_CLIENT --query "
+SELECT
+ volume_name,
+ volume_priority
+FROM system.storage_policies
+WHERE policy_name = 'policy_02961'
+ORDER BY volume_priority ASC;
+"
+
+config_path=/etc/clickhouse-server/config.d/storage_conf_02961.xml
+config_path_tmp=$config_path.tmp
+
+echo "check non-unique values don't work"
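+# Give the volume that has priority 2 priority 1 as well, producing a duplicate value.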
+cat $config_path \
+| sed "s|2<\/volume_priority>|1<\/volume_priority>|" \
+> $config_path_tmp
+mv $config_path_tmp $config_path
+
+$CLICKHOUSE_CLIENT -nm --query "
+set send_logs_level='error';
+SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy'
+
+# First, restore the original values
+cat $config_path \
+| sed '0,/1<\/volume_priority>/s//2<\/volume_priority>/' \
+> $config_path_tmp
+mv $config_path_tmp $config_path
+
+echo 'check no gaps in range allowed'
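+# Replace the first priority 1 with 3; the remaining values no longer cover the range 1..N without a gap.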
+cat $config_path \
+| sed '0,/1<\/volume_priority>/s//3<\/volume_priority>/' \
+> $config_path_tmp
+mv $config_path_tmp $config_path
+
+$CLICKHOUSE_CLIENT -nm --query "
+set send_logs_level='error';
+SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps'
+
+echo 'restore valid config'
+cat $config_path \
+| sed '0,/3<\/volume_priority>/s//1<\/volume_priority>/' \
+> $config_path_tmp
+mv $config_path_tmp $config_path
diff --git a/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.reference b/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.reference
new file mode 100644
index 00000000000..2491b55b493
--- /dev/null
+++ b/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.reference
@@ -0,0 +1,2 @@
+b count()
+2 1
diff --git a/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.sh b/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.sh
new file mode 100755
index 00000000000..4457aafb8b2
--- /dev/null
+++ b/tests/queries/0_stateless/02968_mysql_prefer_column_name_to_alias.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+# Tag no-fasttest: requires mysql client
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+# Some BI tools which connect to ClickHouse's MySQL port run queries which succeed only with the analyzer enabled,
+# or without the analyzer and with setting prefer_column_name_to_alias = 1. Since the setting is too impactful to enable it
+# globally, it is enabled only by the MySQL handler internally as a workaround. Run the query from Bug 56173 to verify.
+#
+# When the analyzer is the new default, the test and the workaround can be deleted.
+${MYSQL_CLIENT} --execute "select a + b as b, count() from (select 1 as a, 1 as b) group by a + b";
diff --git a/tests/queries/0_stateless/02987_group_array_intersect.reference b/tests/queries/0_stateless/02987_group_array_intersect.reference
new file mode 100644
index 00000000000..7ec64a889f5
--- /dev/null
+++ b/tests/queries/0_stateless/02987_group_array_intersect.reference
@@ -0,0 +1,21 @@
+[]
+[]
+[NULL]
+[NULL]
+[]
+[[1,2,4,5]]
+[]
+[1,4,5]
+[]
+[]
+1000000
+999999
+[9]
+['a','c']
+1000000
+999999
+['1']
+[]
+['2023-01-01 00:00:00']
+['2023-01-01']
+['2023-01-01']
diff --git a/tests/queries/0_stateless/02987_group_array_intersect.sql b/tests/queries/0_stateless/02987_group_array_intersect.sql
new file mode 100644
index 00000000000..703914e464d
--- /dev/null
+++ b/tests/queries/0_stateless/02987_group_array_intersect.sql
@@ -0,0 +1,91 @@
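+-- groupArrayIntersect returns the elements that occur in every array aggregated
+-- across the input rows.
+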
+DROP TABLE IF EXISTS test_empty;
+CREATE TABLE test_empty (a Array(Int64)) engine=MergeTree ORDER BY a;
+INSERT INTO test_empty VALUES ([]);
+SELECT groupArrayIntersect(*) FROM test_empty;
+INSERT INTO test_empty VALUES ([1]);
+SELECT groupArrayIntersect(*) FROM test_empty;
+DROP TABLE test_empty;
+
+DROP TABLE IF EXISTS test_null;
+CREATE TABLE test_null (a Array(Nullable(Int64))) engine=MergeTree ORDER BY a SETTINGS allow_nullable_key=1;
+INSERT INTO test_null VALUES ([NULL, NULL]);
+SELECT groupArrayIntersect(*) FROM test_null;
+INSERT INTO test_null VALUES ([NULL]);
+SELECT groupArrayIntersect(*) FROM test_null;
+INSERT INTO test_null VALUES ([1,2]);
+SELECT groupArrayIntersect(*) FROM test_null;
+DROP TABLE test_null;
+
+DROP TABLE IF EXISTS test_nested_arrays;
+CREATE TABLE test_nested_arrays (a Array(Array(Int64))) engine=MergeTree ORDER BY a;
+INSERT INTO test_nested_arrays VALUES ([[1,2,3,4,5,6], [1,2,4,5]]);
+INSERT INTO test_nested_arrays VALUES ([[1,2,4,5]]);
+SELECT groupArrayIntersect(*) FROM test_nested_arrays;
+INSERT INTO test_nested_arrays VALUES ([[1,4,3,0,5,5,5]]);
+SELECT groupArrayIntersect(*) FROM test_nested_arrays;
+DROP TABLE test_nested_arrays;
+
+DROP TABLE IF EXISTS test_numbers;
+CREATE TABLE test_numbers (a Array(Int64)) engine=MergeTree ORDER BY a;
+INSERT INTO test_numbers VALUES ([1,2,3,4,5,6]);
+INSERT INTO test_numbers VALUES ([1,2,4,5]);
+INSERT INTO test_numbers VALUES ([1,4,3,0,5,5,5]);
+SELECT groupArrayIntersect(*) FROM test_numbers;
+INSERT INTO test_numbers VALUES ([9]);
+SELECT groupArrayIntersect(*) FROM test_numbers;
+DROP TABLE test_numbers;
+
+DROP TABLE IF EXISTS test_big_numbers_sep;
+CREATE TABLE test_big_numbers_sep (a Array(Int64)) engine=MergeTree ORDER BY a;
+INSERT INTO test_big_numbers_sep SELECT array(number) FROM numbers_mt(1000000);
+SELECT groupArrayIntersect(*) FROM test_big_numbers_sep;
+DROP TABLE test_big_numbers_sep;
+
+DROP TABLE IF EXISTS test_big_numbers;
+CREATE TABLE test_big_numbers (a Array(Int64)) engine=MergeTree ORDER BY a;
+INSERT INTO test_big_numbers SELECT range(1000000);
+SELECT length(groupArrayIntersect(*)) FROM test_big_numbers;
+INSERT INTO test_big_numbers SELECT range(999999);
+SELECT length(groupArrayIntersect(*)) FROM test_big_numbers;
+INSERT INTO test_big_numbers VALUES ([9]);
+SELECT groupArrayIntersect(*) FROM test_big_numbers;
+DROP TABLE test_big_numbers;
+
+DROP TABLE IF EXISTS test_string;
+CREATE TABLE test_string (a Array(String)) engine=MergeTree ORDER BY a;
+INSERT INTO test_string VALUES (['a', 'b', 'c', 'd', 'e', 'f']);
+INSERT INTO test_string VALUES (['a', 'aa', 'b', 'bb', 'c', 'cc', 'd', 'dd', 'f', 'ff']);
+INSERT INTO test_string VALUES (['ae', 'ab', 'a', 'bb', 'c']);
+SELECT groupArrayIntersect(*) FROM test_string;
+DROP TABLE test_string;
+
+DROP TABLE IF EXISTS test_big_string;
+CREATE TABLE test_big_string (a Array(String)) engine=MergeTree ORDER BY a;
+INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(1000000);
+SELECT length(groupArrayIntersect(*)) FROM test_big_string;
+INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(999999);
+SELECT length(groupArrayIntersect(*)) FROM test_big_string;
+INSERT INTO test_big_string VALUES (['1']);
+SELECT groupArrayIntersect(*) FROM test_big_string;
+INSERT INTO test_big_string VALUES (['a']);
+SELECT groupArrayIntersect(*) FROM test_big_string;
+DROP TABLE test_big_string;
+
+DROP TABLE IF EXISTS test_datetime;
+CREATE TABLE test_datetime (a Array(DateTime)) engine=MergeTree ORDER BY a;
+INSERT INTO test_datetime VALUES ([toDateTime('2023-01-01 00:00:00'), toDateTime('2023-01-01 01:02:03'), toDateTime('2023-01-01 02:03:04')]);
+INSERT INTO test_datetime VALUES ([toDateTime('2023-01-01 00:00:00'), toDateTime('2023-01-01 01:02:04'), toDateTime('2023-01-01 02:03:05')]);
+SELECT groupArrayIntersect(*) FROM test_datetime;
+DROP TABLE test_datetime;
+
+DROP TABLE IF EXISTS test_date32;
+CREATE TABLE test_date32 (a Array(Date32)) engine=MergeTree ORDER BY a;
+INSERT INTO test_date32 VALUES ([toDate32('2023-01-01 00:00:00'), toDate32('2023-01-01 00:00:01')]);
+SELECT groupArrayIntersect(*) FROM test_date32;
+DROP TABLE test_date32;
+
+DROP TABLE IF EXISTS test_date;
+CREATE TABLE test_date (a Array(Date)) engine=MergeTree ORDER BY a;
+INSERT INTO test_date VALUES ([toDate('2023-01-01 00:00:00'), toDate('2023-01-01 00:00:01')]);
+SELECT groupArrayIntersect(*) FROM test_date;
+DROP TABLE test_date;
diff --git a/tests/queries/0_stateless/02998_system_dns_cache_table.reference b/tests/queries/0_stateless/02998_system_dns_cache_table.reference
new file mode 100644
index 00000000000..ed6cb000142
--- /dev/null
+++ b/tests/queries/0_stateless/02998_system_dns_cache_table.reference
@@ -0,0 +1 @@
+localhost 127.0.0.1 IPv4 1
diff --git a/tests/queries/0_stateless/02998_system_dns_cache_table.sh b/tests/queries/0_stateless/02998_system_dns_cache_table.sh
new file mode 100755
index 00000000000..41d2386fe9c
--- /dev/null
+++ b/tests/queries/0_stateless/02998_system_dns_cache_table.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+# Retries are necessary because the DNS cache may be flushed before the second statement is executed
+i=0 retries=3
+while [[ $i -lt $retries ]]; do
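+  # The url() call forces a DNS lookup of 'localhost'; the second query then reads the cached entry.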
+ ${CLICKHOUSE_CURL} -sS --fail --data "SELECT * FROM url('http://localhost:8123/ping', CSV, 'auto', headers())" "${CLICKHOUSE_URL}" | grep -oP -q 'Ok.' && \
+ RECORDS=$(${CLICKHOUSE_CURL} -sS --fail --data "SELECT hostname, ip_address, ip_family, (isNotNull(cached_at) AND cached_at > '1970-01-01 00:00:00') FROM system.dns_cache WHERE hostname = 'localhost' and ip_family = 'IPv4';" "${CLICKHOUSE_URL}")
+
+ if [ "${RECORDS}" != "" ]; then
+ echo "${RECORDS}"
+ exit 0
+ fi
+
+ ((++i))
+ sleep 0.2
+done
+
+echo "All tries to fetch entries for localhost failed, no rows returned.
+Probably the DNS cache is disabled or the ClickHouse instance not responds to ping."
+exit 1
diff --git a/tests/queries/0_stateless/02999_ulid_short_circuit.reference b/tests/queries/0_stateless/02999_ulid_short_circuit.reference
new file mode 100644
index 00000000000..51460c40e48
--- /dev/null
+++ b/tests/queries/0_stateless/02999_ulid_short_circuit.reference
@@ -0,0 +1,2 @@
+2024-02-20 16:53:57.105
+2024-02-21 12:00:00.000
diff --git a/tests/queries/0_stateless/02999_ulid_short_circuit.sql b/tests/queries/0_stateless/02999_ulid_short_circuit.sql
new file mode 100644
index 00000000000..4453d9dbe47
--- /dev/null
+++ b/tests/queries/0_stateless/02999_ulid_short_circuit.sql
@@ -0,0 +1,5 @@
+-- Tags: no-fasttest
+
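+-- 'if' must short-circuit: ULIDStringToDateTime would fail for the second row,
+-- whose string is only 10 characters long, so only the taken branch may be evaluated.
+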
+SET session_timezone='Europe/Madrid'; -- disable time zone randomization in CI
+SELECT if(length(x) = 26, ULIDStringToDateTime(x, 'Europe/Madrid'), toDateTime('2024-02-21 12:00:00', 'Europe/Madrid')) AS datetime
+FROM values('x String', '01HQ3KJJKHRWP357YVYBX32WHY', '01HQ3KJJKH')
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 4d5fbd46dd8..e05d8ea81ab 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -772,6 +772,7 @@ ReferenceKeyed
Refreshable
RegexpTree
RemoteRead
+SharedMergeTree
ReplacingMergeTree
ReplicasMaxAbsoluteDelay
ReplicasMaxInsertsInQueue
@@ -1614,6 +1615,8 @@ greaterorequals
greenspace
groupArray
groupArrayInsertAt
+grouparrayintersect
+groupArrayIntersect
groupArrayLast
groupArrayMovingAvg
groupArrayMovingSum
@@ -1741,6 +1744,7 @@ isValidJSON
isValidUTF
isZeroOrNull
iteratively
+iTerm
jaccard
jaccardIndex
jaroSimilarity
@@ -2313,6 +2317,7 @@ shardNum
sharded
sharding
shortcircuit
+Shortkeys
shortkeys
shoutout
simdjson