Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)
Merge branch 'master' into mv
This commit is contained in:
commit 524d53199d
142	CHANGELOG.md
@ -1,4 +1,5 @@
|
||||
### Table of Contents
|
||||
**[ClickHouse release v23.12, 2023-12-28](#2312)**<br/>
|
||||
**[ClickHouse release v23.11, 2023-12-06](#2311)**<br/>
|
||||
**[ClickHouse release v23.10, 2023-11-02](#2310)**<br/>
|
||||
**[ClickHouse release v23.9, 2023-09-28](#239)**<br/>
|
||||
@ -14,6 +15,147 @@
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### <a id="2312"></a> ClickHouse release 23.12, 2023-12-28
|
||||
|
||||
#### Backward Incompatible Change
|
||||
* Fix check for non-deterministic functions in TTL expressions. Previously, you could create a TTL expression with non-deterministic functions in some cases, which could lead to undefined behavior later. This fixes [#37250](https://github.com/ClickHouse/ClickHouse/issues/37250). Disallow TTL expressions that don't depend on any columns of a table by default. It can be allowed back by `SET allow_suspicious_ttl_expressions = 1` or `SET compatibility = '23.11'`. Closes [#37286](https://github.com/ClickHouse/ClickHouse/issues/37286). [#51858](https://github.com/ClickHouse/ClickHouse/pull/51858) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
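As a hedged illustration of the stricter TTL check (the table and column names below are hypothetical), a table-level TTL that does not reference any column would now be rejected by default:

```sql
-- Rejected by default in 23.12, because the TTL expression
-- does not depend on any column of the table:
CREATE TABLE ttl_demo
(
    d Date,
    x UInt32
)
ENGINE = MergeTree
ORDER BY x
TTL now() + INTERVAL 1 MONTH;

-- Restore the previous behaviour if such expressions are really intended:
SET allow_suspicious_ttl_expressions = 1;
```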
* The MergeTree setting `clean_deleted_rows` is deprecated, it has no effect anymore. The `CLEANUP` keyword for the `OPTIMIZE` is not allowed by default (it can be unlocked with the `allow_experimental_replacing_merge_with_cleanup` setting). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)). This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### New Feature
|
||||
* Introduce `PASTE JOIN`, which allows users to join tables without `ON` clause simply by row numbers. Example: `SELECT * FROM (SELECT number AS a FROM numbers(2)) AS t1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2`. [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* The `ORDER BY` clause now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL`. [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||
* Support negative positional arguments. Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#57741](https://github.com/ClickHouse/ClickHouse/pull/57741) ([flynn](https://github.com/ucasfl)).
|
||||
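A minimal sketch of how a negative positional argument might be used, assuming that a negative position counts backwards from the end of the `SELECT` list (the data here is synthetic):

```sql
SELECT number % 3 AS g, count() AS c
FROM numbers(10)
GROUP BY 1        -- positional argument: the first SELECT expression (g)
ORDER BY -1 DESC; -- negative positional argument: the last SELECT expression (c)
```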
* Added functions for punycode encoding/decoding: `punycodeEncode()` and `punycodeDecode()`. [#57969](https://github.com/ClickHouse/ClickHouse/pull/57969) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
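For example (the output shown in the comment is the standard Punycode encoding of this string; exact rendering may differ):

```sql
SELECT punycodeEncode('München') AS encoded,
       punycodeDecode('Mnchen-3ya') AS decoded;
-- encoded: 'Mnchen-3ya', decoded: 'München'
```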
* Added a new mutation command `ALTER TABLE <table> APPLY DELETED MASK`, which enforces applying the mask written by lightweight deletes and removes rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
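A sketch of the intended workflow (the table `hits` and the filter are hypothetical):

```sql
-- Mark rows as deleted with a lightweight delete:
DELETE FROM hits WHERE UserID = 42;

-- Enforce the mask and physically remove the marked rows from disk:
ALTER TABLE hits APPLY DELETED MASK;
```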
* A handler `/binary` opens a visual viewer of symbols inside the ClickHouse binary. [#58211](https://github.com/ClickHouse/ClickHouse/pull/58211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57512](https://github.com/ClickHouse/ClickHouse/pull/57512) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Add a new function `seriesPeriodDetectFFT` to detect series period using FFT. [#57574](https://github.com/ClickHouse/ClickHouse/pull/57574) ([Bhavna Jindal](https://github.com/bhavnajindal)).
|
||||
* Add an HTTP endpoint for checking if Keeper is ready to accept traffic. [#55876](https://github.com/ClickHouse/ClickHouse/pull/55876) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||
* Add 'union' mode for schema inference. In this mode the resulting table schema is the union of the schemas of all files (the schema is inferred from each file separately). The mode of schema inference is controlled by the setting `schema_inference_mode` with two possible values - `default` and `union`. Closes [#55428](https://github.com/ClickHouse/ClickHouse/issues/55428). [#55892](https://github.com/ClickHouse/ClickHouse/pull/55892) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
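A sketch of the 'union' mode, assuming two hypothetical files `data1.jsonl` containing `{"a": 1}` and `data2.jsonl` containing `{"b": "x"}`:

```sql
SET schema_inference_mode = 'union';

-- The resulting schema is the union of the per-file schemas:
DESCRIBE file('data*.jsonl', JSONEachRow);
-- a  Nullable(Int64)
-- b  Nullable(String)
```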
* Add a new setting `input_format_csv_try_infer_numbers_from_strings` that allows inferring numbers from strings in the CSV format. Closes [#56455](https://github.com/ClickHouse/ClickHouse/issues/56455). [#56859](https://github.com/ClickHouse/ClickHouse/pull/56859) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
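A minimal sketch using the `format` table function (the column names `c1`/`c2` are the defaults assigned during inference):

```sql
SET input_format_csv_try_infer_numbers_from_strings = 1;

-- '42' is quoted in the CSV, but can now be inferred as a number:
DESCRIBE format(CSV, '"42","hello"');
-- c1  Nullable(Int64)
-- c2  Nullable(String)
```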
* When the number of databases or tables exceeds a configurable threshold, show a warning to the user. [#57375](https://github.com/ClickHouse/ClickHouse/pull/57375) ([凌涛](https://github.com/lingtaolf)).
|
||||
* Dictionary with `HASHED_ARRAY` (and `COMPLEX_KEY_HASHED_ARRAY`) layout supports `SHARDS` similarly to `HASHED`. [#57544](https://github.com/ClickHouse/ClickHouse/pull/57544) ([vdimir](https://github.com/vdimir)).
|
||||
* Add asynchronous metrics for total primary key bytes and total allocated primary key bytes in memory. [#57551](https://github.com/ClickHouse/ClickHouse/pull/57551) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Add `SHA512_256` function. [#57645](https://github.com/ClickHouse/ClickHouse/pull/57645) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Add `FORMAT_BYTES` as an alias for `formatReadableSize`. [#57592](https://github.com/ClickHouse/ClickHouse/pull/57592) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
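For example (the exact rendering of the result is an assumption):

```sql
SELECT formatReadableSize(123456789) AS original,
       FORMAT_BYTES(123456789)       AS alias;
-- both return approximately '117.74 MiB'
```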
* Allow passing optional session token to the `s3` table function. [#57850](https://github.com/ClickHouse/ClickHouse/pull/57850) ([Shani Elharrar](https://github.com/shanielh)).
|
||||
* Introduce a new setting `http_make_head_request`. If it is turned off, the URL table engine will not do a HEAD request to determine the file size. This is needed to support inefficient, misconfigured, or incapable HTTP servers. [#54602](https://github.com/ClickHouse/ClickHouse/pull/54602) ([Fionera](https://github.com/fionera)).
|
||||
* It is now possible to refer to ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57546](https://github.com/ClickHouse/ClickHouse/pull/57546) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Added a new setting `readonly` which can be used to specify that an S3 disk is read-only. It can be useful to create a table on a disk of `s3_plain` type while having read-only access to the underlying S3 bucket. [#57977](https://github.com/ClickHouse/ClickHouse/pull/57977) ([Pengyuan Bian](https://github.com/bianpengyuan)).
|
||||
* The primary key analysis in MergeTree tables will now be applied to predicates that include the virtual column `_part_offset` (optionally with `_part`). This feature can serve as a special kind of a secondary index. [#58224](https://github.com/ClickHouse/ClickHouse/pull/58224) ([Amos Bird](https://github.com/amosbird)).
|
||||
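A sketch of how this might be used (the table and part name are hypothetical): a row is addressed by its physical position inside a known part, and the predicate can now be pruned by primary key analysis instead of a full scan.

```sql
SELECT *
FROM events
WHERE _part = 'all_1_1_0' AND _part_offset = 100;
```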
|
||||
#### Performance Improvement
|
||||
* Copying between S3 disks now uses S3 server-side copy instead of copying through a buffer. This improves `BACKUP/RESTORE` operations and the `clickhouse-disks copy` command. [#56744](https://github.com/ClickHouse/ClickHouse/pull/56744) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||
* Hash JOIN respects the setting `max_joined_block_size_rows` and does not produce large blocks for `ALL JOIN`. [#56996](https://github.com/ClickHouse/ClickHouse/pull/56996) ([vdimir](https://github.com/vdimir)).
|
||||
* Release memory for aggregation earlier. This may avoid unnecessary external aggregation. [#57691](https://github.com/ClickHouse/ClickHouse/pull/57691) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Improve performance of string serialization. [#57717](https://github.com/ClickHouse/ClickHouse/pull/57717) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Support trivial count optimization for `Merge`-engine tables. [#57867](https://github.com/ClickHouse/ClickHouse/pull/57867) ([skyoct](https://github.com/skyoct)).
|
||||
* Optimized aggregation in some cases. [#57872](https://github.com/ClickHouse/ClickHouse/pull/57872) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* The `hasAny` function can now take advantage of the full-text skipping indices. [#57878](https://github.com/ClickHouse/ClickHouse/pull/57878) ([Jpnock](https://github.com/Jpnock)).
|
||||
* Function `if(cond, then, else)` (and its alias `cond ? then : else`) was optimized to use branch-free evaluation. [#57885](https://github.com/ClickHouse/ClickHouse/pull/57885) ([zhanglistar](https://github.com/zhanglistar)).
|
||||
* Extract non-intersecting part ranges from MergeTree tables during FINAL processing, so the additional FINAL logic can be skipped for those ranges. When the number of duplicate values with the same primary key is low, performance is almost the same as without FINAL. This improves reading performance for MergeTree FINAL when the `do_not_merge_across_partitions_select_final` setting is set. [#58120](https://github.com/ClickHouse/ClickHouse/pull/58120) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* MergeTree automatically derives the `do_not_merge_across_partitions_select_final` setting if the partition key expression contains only columns from the primary key expression. [#58218](https://github.com/ClickHouse/ClickHouse/pull/58218) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Speedup `MIN` and `MAX` for native types. [#58231](https://github.com/ClickHouse/ClickHouse/pull/58231) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Implement `SLRU` cache policy for filesystem cache. [#57076](https://github.com/ClickHouse/ClickHouse/pull/57076) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* The limit for the number of connections per endpoint for background fetches was raised from `15` to the value of the `background_fetches_pool_size` setting. The MergeTree-level setting `replicated_max_parallel_fetches_for_host` became obsolete. The MergeTree-level settings `replicated_fetches_http_connection_timeout`, `replicated_fetches_http_send_timeout` and `replicated_fetches_http_receive_timeout` were moved to the server level. The setting `keep_alive_timeout` was added to the list of server-level settings. [#57523](https://github.com/ClickHouse/ClickHouse/pull/57523) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Make querying `system.filesystem_cache` not memory intensive. [#57687](https://github.com/ClickHouse/ClickHouse/pull/57687) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Reduce memory usage on strings deserialization. [#57787](https://github.com/ClickHouse/ClickHouse/pull/57787) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* More efficient constructor for Enum - it makes sense when Enum has a boatload of values. [#57887](https://github.com/ClickHouse/ClickHouse/pull/57887) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* An improvement for reading from the filesystem cache: always use `pread` method. [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Add optimization for AND notEquals chain in logical expression optimizer [#58214](https://github.com/ClickHouse/ClickHouse/pull/58214) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
|
||||
|
||||
#### Improvement
|
||||
* Support for soft memory limit in Keeper. It will refuse requests if the memory usage is close to the maximum. [#57271](https://github.com/ClickHouse/ClickHouse/pull/57271) ([Han Fei](https://github.com/hanfei1991)). [#57699](https://github.com/ClickHouse/ClickHouse/pull/57699) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Make inserts into distributed tables handle an updated cluster configuration properly. When the list of cluster nodes is dynamically updated, the Directory Monitor of the Distributed table will pick it up. [#42826](https://github.com/ClickHouse/ClickHouse/pull/42826) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||
* Do not allow creating a replicated table with inconsistent merge parameters. [#56833](https://github.com/ClickHouse/ClickHouse/pull/56833) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Show uncompressed size in `system.tables`. [#56618](https://github.com/ClickHouse/ClickHouse/issues/56618). [#57186](https://github.com/ClickHouse/ClickHouse/pull/57186) ([Chen Lixiang](https://github.com/chenlx0)).
|
||||
* Add `skip_unavailable_shards` as a setting for `Distributed` tables that is similar to the corresponding query-level setting. Closes [#43666](https://github.com/ClickHouse/ClickHouse/issues/43666). [#57218](https://github.com/ClickHouse/ClickHouse/pull/57218) ([Gagan Goel](https://github.com/tntnatbry)).
|
||||
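A sketch of the table-level setting for `skip_unavailable_shards` (the cluster, database, and table names are placeholders):

```sql
CREATE TABLE dist_hits AS hits
ENGINE = Distributed(my_cluster, default, hits)
SETTINGS skip_unavailable_shards = 1;  -- per-table analogue of the query-level setting
```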
* The function `substring` (aliases: `substr`, `mid`) can now be used with `Enum` types. Previously, the first function argument had to be a value of type `String` or `FixedString`. This improves compatibility with 3rd party tools such as Tableau via MySQL interface. [#57277](https://github.com/ClickHouse/ClickHouse/pull/57277) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||
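For example (the enum definition is hypothetical):

```sql
SELECT substring(CAST('active' AS Enum('active' = 1, 'deleted' = 2)), 1, 3) AS s;
-- 'act'
```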
* Function `format` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). This is important to calculate `SELECT format('The {0} to all questions is {1}', 'answer', 42)`. [#57549](https://github.com/ClickHouse/ClickHouse/pull/57549) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Allow using the `date_trunc` function with a case-insensitive first argument. Both forms are now supported: `SELECT date_trunc('day', now())` and `SELECT date_trunc('DAY', now())`. [#57624](https://github.com/ClickHouse/ClickHouse/pull/57624) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Better hints when a table doesn't exist. [#57342](https://github.com/ClickHouse/ClickHouse/pull/57342) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Allow overriding the `max_partition_size_to_drop` and `max_table_size_to_drop` server settings at query time. [#57452](https://github.com/ClickHouse/ClickHouse/pull/57452) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Slightly better inference of unnamed tuples in JSON formats. [#57751](https://github.com/ClickHouse/ClickHouse/pull/57751) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add support for read-only flag when connecting to Keeper (fixes [#53749](https://github.com/ClickHouse/ClickHouse/issues/53749)). [#57479](https://github.com/ClickHouse/ClickHouse/pull/57479) ([Mikhail Koviazin](https://github.com/mkmkme)).
|
||||
* Fix possible distributed sends stuck due to "No such file or directory" (during recovering a batch from disk). Fix possible issues with `error_count` from `system.distribution_queue` (in case of `distributed_directory_monitor_max_sleep_time_ms` >5min). Introduce profile event to track async INSERT failures - `DistributedAsyncInsertionFailures`. [#57480](https://github.com/ClickHouse/ClickHouse/pull/57480) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Support PostgreSQL generated columns and default column values in `MaterializedPostgreSQL` (experimental feature). Closes [#40449](https://github.com/ClickHouse/ClickHouse/issues/40449). [#57568](https://github.com/ClickHouse/ClickHouse/pull/57568) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Allow to apply some filesystem cache config settings changes without server restart. [#57578](https://github.com/ClickHouse/ClickHouse/pull/57578) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Properly handle PostgreSQL table structures with empty arrays. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot](https://github.com/myrrc)).
|
||||
* Expose the total number of errors that have occurred since the last server restart as the `ClickHouseErrorMetric_ALL` metric. [#57627](https://github.com/ClickHouse/ClickHouse/pull/57627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Allow nodes in the configuration file with `from_env`/`from_zk` reference and non empty element with replace=1. [#57628](https://github.com/ClickHouse/ClickHouse/pull/57628) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* A table function `fuzzJSON` which allows generating a lot of malformed JSON for fuzzing. [#57646](https://github.com/ClickHouse/ClickHouse/pull/57646) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||
* Allow IPv6 to UInt128 conversion and binary arithmetic. [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
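For example, converting an IPv6 address into its 128-bit integer representation:

```sql
SELECT CAST(toIPv6('2001:db8::1') AS UInt128) AS as_uint128;
```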
* Add a setting for the async inserts deduplication cache that controls how long to wait for a cache update. Deprecate the setting `async_block_ids_cache_min_update_interval_ms`; the cache is now updated only in case of conflicts. [#57743](https://github.com/ClickHouse/ClickHouse/pull/57743) ([alesapin](https://github.com/alesapin)).
|
||||
* The `sleep()` function can now be cancelled with `KILL QUERY`. [#57746](https://github.com/ClickHouse/ClickHouse/pull/57746) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
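A sketch of cancelling a sleeping query (run the two statements from different sessions; the `WHERE` filter on `KILL QUERY` is hypothetical):

```sql
-- Session 1: a blocking call that previously could not be interrupted mid-sleep
SELECT sleep(3);

-- Session 2: cancel it
KILL QUERY WHERE query LIKE 'SELECT sleep(3)%' SYNC;
```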
* Forbid `CREATE TABLE ... AS SELECT` queries for `Replicated` table engines in the experimental `Replicated` database because they are not supported. Reference [#35408](https://github.com/ClickHouse/ClickHouse/issues/35408). [#57796](https://github.com/ClickHouse/ClickHouse/pull/57796) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix and improve transforming queries for external databases, to recursively obtain all compatible predicates. [#57888](https://github.com/ClickHouse/ClickHouse/pull/57888) ([flynn](https://github.com/ucasfl)).
|
||||
* Support dynamic reloading of the filesystem cache size. Closes [#57866](https://github.com/ClickHouse/ClickHouse/issues/57866). [#57897](https://github.com/ClickHouse/ClickHouse/pull/57897) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Correctly support `system.stack_trace` for threads with blocked SIGRTMIN (these threads can exist in low-quality external libraries such as Apache rdkafka). [#57907](https://github.com/ClickHouse/ClickHouse/pull/57907) ([Azat Khuzhin](https://github.com/azat)). Also send the signal to a thread only if it is not blocked, to avoid waiting `storage_system_stack_trace_pipe_read_timeout_ms` when it does not make any sense. [#58136](https://github.com/ClickHouse/ClickHouse/pull/58136) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Tolerate keeper failures in the quorum inserts' check. [#57986](https://github.com/ClickHouse/ClickHouse/pull/57986) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Add max/peak RSS (`MemoryResidentMax`) into system.asynchronous_metrics. [#58095](https://github.com/ClickHouse/ClickHouse/pull/58095) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Allow using s3-style links (`https://` and `s3://`) without specifying the region if it is not the default one. Also, find the correct region if the user specified the wrong one. [#58148](https://github.com/ClickHouse/ClickHouse/pull/58148) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* `clickhouse-format --obfuscate` will know about Settings, MergeTreeSettings, and time zones and keep their names unchanged. [#58179](https://github.com/ClickHouse/ClickHouse/pull/58179) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Added explicit `finalize()` function in `ZipArchiveWriter`. Simplify too complicated code in `ZipArchiveWriter`. This fixes [#58074](https://github.com/ClickHouse/ClickHouse/issues/58074). [#58202](https://github.com/ClickHouse/ClickHouse/pull/58202) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Make caches with the same path use the same cache objects. This behaviour existed before but was broken in 23.4. If caches with the same path have different sets of cache settings, an exception is thrown stating that this is not allowed. [#58264](https://github.com/ClickHouse/ClickHouse/pull/58264) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Parallel replicas (experimental feature): friendly settings [#57542](https://github.com/ClickHouse/ClickHouse/pull/57542) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Parallel replicas (experimental feature): announcement response handling improvement [#57749](https://github.com/ClickHouse/ClickHouse/pull/57749) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Parallel replicas (experimental feature): give more respect to `min_number_of_marks` in `ParallelReplicasReadingCoordinator` [#57763](https://github.com/ClickHouse/ClickHouse/pull/57763) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Parallel replicas (experimental feature): disable parallel replicas with IN (subquery) [#58133](https://github.com/ClickHouse/ClickHouse/pull/58133) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Parallel replicas (experimental feature): add profile event 'ParallelReplicasUsedCount' [#58173](https://github.com/ClickHouse/ClickHouse/pull/58173) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Non-POST requests such as HEAD will be read-only, similar to GET. [#58060](https://github.com/ClickHouse/ClickHouse/pull/58060) ([San](https://github.com/santrancisco)).
|
||||
* Add `bytes_uncompressed` column to `system.part_log` [#58167](https://github.com/ClickHouse/ClickHouse/pull/58167) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Add base backup name to `system.backups` and `system.backup_log` tables [#58178](https://github.com/ClickHouse/ClickHouse/pull/58178) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
* Add support for specifying query parameters in the command line in clickhouse-local [#58210](https://github.com/ClickHouse/ClickHouse/pull/58210) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* Randomize more settings [#39663](https://github.com/ClickHouse/ClickHouse/pull/39663) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Randomize disabled optimizations in CI [#57315](https://github.com/ClickHouse/ClickHouse/pull/57315) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Allow usage of Azure-related table engines/functions on macOS. [#51866](https://github.com/ClickHouse/ClickHouse/pull/51866) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* ClickHouse Fast Test now uses Musl instead of GLibc. [#57711](https://github.com/ClickHouse/ClickHouse/pull/57711) ([Alexey Milovidov](https://github.com/alexey-milovidov)). The fully-static Musl build is available to download from the CI.
|
||||
* Run ClickBench for every commit. This closes [#57708](https://github.com/ClickHouse/ClickHouse/issues/57708). [#57712](https://github.com/ClickHouse/ClickHouse/pull/57712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove the usage of a harmful C/POSIX `select` function from external libraries. [#57467](https://github.com/ClickHouse/ClickHouse/pull/57467) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Settings only available in ClickHouse Cloud will be also present in the open-source ClickHouse build for convenience. [#57638](https://github.com/ClickHouse/ClickHouse/pull/57638) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Fixed a possibility of sorting order breakage in TTL GROUP BY [#49103](https://github.com/ClickHouse/ClickHouse/pull/49103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Fix: in the split `lttb` bucket strategy, the first and last buckets should contain only a single point [#57003](https://github.com/ClickHouse/ClickHouse/pull/57003) ([FFish](https://github.com/wxybear)).
|
||||
* Fix possible deadlock in the `Template` format during sync after error [#57004](https://github.com/ClickHouse/ClickHouse/pull/57004) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix early stop while parsing a file with skipping lots of errors [#57006](https://github.com/ClickHouse/ClickHouse/pull/57006) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Prevent dictionary's ACL bypass via the `dictionary` table function [#57362](https://github.com/ClickHouse/ClickHouse/pull/57362) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Fix another case of a "non-ready set" error found by Fuzzer. [#57423](https://github.com/ClickHouse/ClickHouse/pull/57423) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix several issues regarding PostgreSQL `array_ndims` usage. [#57436](https://github.com/ClickHouse/ClickHouse/pull/57436) ([Ryan Jacobs](https://github.com/ryanmjacobs)).
|
||||
* Fix RWLock inconsistency after write lock timeout [#57454](https://github.com/ClickHouse/ClickHouse/pull/57454) ([Vitaly Baranov](https://github.com/vitlibar)). Fix RWLock inconsistency after write lock timeout (again) [#57733](https://github.com/ClickHouse/ClickHouse/pull/57733) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix: don't exclude ephemeral column when building pushing to view chain [#57461](https://github.com/ClickHouse/ClickHouse/pull/57461) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* MaterializedPostgreSQL (experimental feature): fix issue [#41922](https://github.com/ClickHouse/ClickHouse/issues/41922), add test for [#41923](https://github.com/ClickHouse/ClickHouse/issues/41923) [#57515](https://github.com/ClickHouse/ClickHouse/pull/57515) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||
* Fix crash in clickhouse-local [#57553](https://github.com/ClickHouse/ClickHouse/pull/57553) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* A fix for Hash JOIN. [#57564](https://github.com/ClickHouse/ClickHouse/pull/57564) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix possible error in PostgreSQL source [#57567](https://github.com/ClickHouse/ClickHouse/pull/57567) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix type correction in Hash JOIN for nested LowCardinality. [#57614](https://github.com/ClickHouse/ClickHouse/pull/57614) ([vdimir](https://github.com/vdimir)).
|
||||
* Avoid hangs of `system.stack_trace` by correctly prohibiting parallel reading from it. [#57641](https://github.com/ClickHouse/ClickHouse/pull/57641) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix an error for aggregation of sparse columns with `any(...) RESPECT NULL` [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix unary operators parsing [#57713](https://github.com/ClickHouse/ClickHouse/pull/57713) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix dependency loading for the experimental table engine `MaterializedPostgreSQL`. [#57754](https://github.com/ClickHouse/ClickHouse/pull/57754) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix retries for disconnected nodes for BACKUP/RESTORE ON CLUSTER [#57764](https://github.com/ClickHouse/ClickHouse/pull/57764) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix result of external aggregation in case of partially materialized projection [#57790](https://github.com/ClickHouse/ClickHouse/pull/57790) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix merge in aggregation functions with `*Map` combinator [#57795](https://github.com/ClickHouse/ClickHouse/pull/57795) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Disable `system.kafka_consumers` because it has a bug. [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix LowCardinality keys support in Merge JOIN. [#57827](https://github.com/ClickHouse/ClickHouse/pull/57827) ([vdimir](https://github.com/vdimir)).
|
||||
* A fix for `InterpreterCreateQuery` related to the sample block. [#57855](https://github.com/ClickHouse/ClickHouse/pull/57855) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* `addresses_expr` was ignored for named collections from PostgreSQL. [#57874](https://github.com/ClickHouse/ClickHouse/pull/57874) ([joelynch](https://github.com/joelynch)).
|
||||
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)). Then it was rewritten from Rust to C++ for better [memory-safety](https://www.memorysafety.org/). [#57994](https://github.com/ClickHouse/ClickHouse/pull/57994) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Normalize function names in `CREATE INDEX` [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix handling of unavailable replicas before first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix literal alias misclassification [#57988](https://github.com/ClickHouse/ClickHouse/pull/57988) ([Chen768959](https://github.com/Chen768959)).
|
||||
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix integer overflow in the `Poco` library, related to `UTF32Encoding` [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
|
||||
* Fix parallel replicas (experimental feature) in presence of a scalar subquery with a big integer value [#58118](https://github.com/ClickHouse/ClickHouse/pull/58118) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix `accurateCastOrNull` for out-of-range `DateTime` [#58139](https://github.com/ClickHouse/ClickHouse/pull/58139) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Fix possible `PARAMETER_OUT_OF_BOUND` error during subcolumns reading from a wide part in MergeTree [#58175](https://github.com/ClickHouse/ClickHouse/pull/58175) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix a slow-down of CREATE VIEW with an enormous number of subqueries [#58220](https://github.com/ClickHouse/ClickHouse/pull/58220) ([Tao Wang](https://github.com/wangtZJU)).
|
||||
* Fix parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
|
||||
|
||||
### <a id="2311"></a> ClickHouse release 23.11, 2023-12-06
|
||||
|
||||
#### Backward Incompatible Change
|
||||
|
@ -25,7 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
[ORDER BY expr]
|
||||
[PRIMARY KEY expr]
|
||||
[SAMPLE BY expr]
|
||||
[SETTINGS name=value, clean_deleted_rows=value, ...]
|
||||
[SETTINGS name=value, ...]
|
||||
```
|
||||
|
||||
For a description of request parameters, see [statement description](../../../sql-reference/statements/create/table.md).
|
||||
@ -88,53 +88,6 @@ SELECT * FROM mySecondReplacingMT FINAL;
|
||||
└─────┴─────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
### is_deleted
|
||||
|
||||
`is_deleted` — Name of a column used during a merge to determine whether the data in this row represents the state or is to be deleted; `1` is a “deleted” row, `0` is a “state” row.
|
||||
|
||||
Column data type — `UInt8`.
|
||||
|
||||
:::note
|
||||
`is_deleted` can only be enabled when `ver` is used.
|
||||
|
||||
The row is deleted when `OPTIMIZE ... FINAL CLEANUP` or `OPTIMIZE ... FINAL` is used, or if the engine setting `clean_deleted_rows` has been set to `Always`.
|
||||
|
||||
No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is the one kept.
|
||||
|
||||
:::
|
||||
|
||||
Example:
|
||||
```sql
|
||||
-- with ver and is_deleted
|
||||
CREATE OR REPLACE TABLE myThirdReplacingMT
|
||||
(
|
||||
`key` Int64,
|
||||
`someCol` String,
|
||||
`eventTime` DateTime,
|
||||
`is_deleted` UInt8
|
||||
)
|
||||
ENGINE = ReplacingMergeTree(eventTime, is_deleted)
|
||||
ORDER BY key;
|
||||
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 0);
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 1);
|
||||
|
||||
select * from myThirdReplacingMT final;
|
||||
|
||||
0 rows in set. Elapsed: 0.003 sec.
|
||||
|
||||
-- delete rows with is_deleted
|
||||
OPTIMIZE TABLE myThirdReplacingMT FINAL CLEANUP;
|
||||
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 00:00:00', 0);
|
||||
|
||||
select * from myThirdReplacingMT final;
|
||||
|
||||
┌─key─┬─someCol─┬───────────eventTime─┬─is_deleted─┐
|
||||
│ 1 │ first │ 2020-01-01 00:00:00 │ 0 │
|
||||
└─────┴─────────┴─────────────────────┴────────────┘
|
||||
```
|
||||
|
||||
## Query clauses
|
||||
|
||||
When creating a `ReplacingMergeTree` table the same [clauses](../../../engines/table-engines/mergetree-family/mergetree.md) are required, as when creating a `MergeTree` table.
|
||||
|
@ -852,16 +852,6 @@ If the file name for column is too long (more than `max_file_name_length` bytes)
|
||||
|
||||
The maximal length of the file name to keep it as is without hashing. Takes effect only if setting `replace_long_file_name_to_hash` is enabled. The value of this setting does not include the length of file extension. So, it is recommended to set it below the maximum filename length (usually 255 bytes) with some gap to avoid filesystem errors. Default value: 127.
|
||||
|
||||
## clean_deleted_rows
|
||||
|
||||
Enables/disables automatic deletion of rows flagged as `is_deleted` when performing `OPTIMIZE ... FINAL` on a table using the ReplacingMergeTree engine. When disabled, the `CLEANUP` keyword has to be added to `OPTIMIZE ... FINAL` to get the same behaviour.
|
||||
|
||||
Possible values:
|
||||
|
||||
- `Always` or `Never`.
|
||||
|
||||
Default value: `Never`
|
||||
|
||||
## allow_experimental_block_number_column
|
||||
|
||||
Persists virtual column `_block_number` on merges.
|
||||
|
@ -86,59 +86,6 @@ SELECT * FROM mySecondReplacingMT FINAL;
|
||||
│ 1 │ first │ 2020-01-01 01:01:01 │
|
||||
└─────┴─────────┴─────────────────────┘
|
||||
```
|
||||
### is_deleted
|
||||
|
||||
`is_deleted` — the name of the column used during a merge to determine whether the data in the row represents the current state or should be deleted; `1` marks a “deleted” row, `0` marks a “state” row.

Column data type — `UInt8`.

:::note
`is_deleted` can only be used when `ver` is used.

A row is deleted in the following cases:

- the `OPTIMIZE ... FINAL CLEANUP` statement is used
- the `OPTIMIZE ... FINAL` statement is used
- the engine setting `clean_deleted_rows` is set to `Always` (the default is `Never`)
- there are newer versions of the row

It is not recommended to run `FINAL CLEANUP` or to use the engine setting `clean_deleted_rows` with the value `Always`; this can lead to unexpected results, for example deleted rows can reappear.

Regardless of the changes made to the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is kept.
:::
|
||||
|
||||
Example:
|
||||
|
||||
```sql
|
||||
-- with ver and is_deleted
|
||||
CREATE OR REPLACE TABLE myThirdReplacingMT
|
||||
(
|
||||
`key` Int64,
|
||||
`someCol` String,
|
||||
`eventTime` DateTime,
|
||||
`is_deleted` UInt8
|
||||
)
|
||||
ENGINE = ReplacingMergeTree(eventTime, is_deleted)
|
||||
ORDER BY key;
|
||||
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 0);
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 01:01:01', 1);
|
||||
|
||||
select * from myThirdReplacingMT final;
|
||||
|
||||
0 rows in set. Elapsed: 0.003 sec.
|
||||
|
||||
-- delete rows with is_deleted
|
||||
OPTIMIZE TABLE myThirdReplacingMT FINAL CLEANUP;
|
||||
|
||||
INSERT INTO myThirdReplacingMT Values (1, 'first', '2020-01-01 00:00:00', 0);
|
||||
|
||||
select * from myThirdReplacingMT final;
|
||||
|
||||
┌─key─┬─someCol─┬───────────eventTime─┬─is_deleted─┐
|
||||
│ 1 │ first │ 2020-01-01 00:00:00 │ 0 │
|
||||
└─────┴─────────┴─────────────────────┴────────────┘
|
||||
```
|
||||
|
||||
## Query clauses
|
||||
|
||||
|
1	programs/server/config.d/graphite_alternative.xml (symbolic link)
@ -0,0 +1 @@
|
||||
../../../tests/config/config.d/graphite_alternative.xml
|
@ -98,8 +98,6 @@ IMPLEMENT_SETTING_AUTO_ENUM(DefaultDatabaseEngine, ErrorCodes::BAD_ARGUMENTS)
|
||||
|
||||
IMPLEMENT_SETTING_AUTO_ENUM(DefaultTableEngine, ErrorCodes::BAD_ARGUMENTS)
|
||||
|
||||
IMPLEMENT_SETTING_AUTO_ENUM(CleanDeletedRows, ErrorCodes::BAD_ARGUMENTS)
|
||||
|
||||
IMPLEMENT_SETTING_MULTI_ENUM(MySQLDataTypesSupport, ErrorCodes::UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL,
|
||||
{{"decimal", MySQLDataTypesSupport::DECIMAL},
|
||||
{"datetime64", MySQLDataTypesSupport::DATETIME64},
|
||||
|
@ -140,14 +140,6 @@ enum class DefaultTableEngine
|
||||
|
||||
DECLARE_SETTING_ENUM(DefaultTableEngine)
|
||||
|
||||
enum class CleanDeletedRows
|
||||
{
|
||||
Never = 0, /// Disable.
|
||||
Always,
|
||||
};
|
||||
|
||||
DECLARE_SETTING_ENUM(CleanDeletedRows)
|
||||
|
||||
enum class MySQLDataTypesSupport
|
||||
{
|
||||
DECIMAL, // convert MySQL's decimal and number to ClickHouse Decimal when applicable
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <Common/FieldVisitorToString.h>
|
||||
#include <Common/getMultipleKeysFromConfig.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
#include <Common/callOnce.h>
|
||||
#include <Common/SharedLockGuard.h>
|
||||
#include <Coordination/KeeperDispatcher.h>
|
||||
@ -33,7 +32,6 @@
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
#include <Disks/DiskLocal.h>
|
||||
#include <Disks/ObjectStorages/DiskObjectStorage.h>
|
||||
#include <Disks/ObjectStorages/IObjectStorage.h>
|
||||
#include <Disks/StoragePolicy.h>
|
||||
#include <Disks/IO/IOUringReader.h>
|
||||
#include <IO/SynchronousReader.h>
|
||||
@ -45,7 +43,6 @@
|
||||
#include <Interpreters/Cache/FileCacheFactory.h>
|
||||
#include <Interpreters/SessionTracker.h>
|
||||
#include <Core/ServerSettings.h>
|
||||
#include <Interpreters/PreparedSets.h>
|
||||
#include <Core/Settings.h>
|
||||
#include <Core/SettingsQuirks.h>
|
||||
#include <Access/AccessControl.h>
|
||||
|
@ -79,7 +79,7 @@ BlockIO InterpreterOptimizeQuery::execute()
|
||||
if (auto * snapshot_data = dynamic_cast<MergeTreeData::SnapshotData *>(storage_snapshot->data.get()))
|
||||
snapshot_data->parts = {};
|
||||
|
||||
table->optimize(query_ptr, metadata_snapshot, ast.partition, ast.final, ast.deduplicate, column_names, ast.cleanup, getContext());
|
||||
table->optimize(query_ptr, metadata_snapshot, ast.partition, ast.final, ast.deduplicate, column_names, getContext());
|
||||
|
||||
return {};
|
||||
}
|
||||
|
@ -24,9 +24,6 @@ void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatSt
|
||||
if (deduplicate)
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " DEDUPLICATE" << (settings.hilite ? hilite_none : "");
|
||||
|
||||
if (cleanup)
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " CLEANUP" << (settings.hilite ? hilite_none : "");
|
||||
|
||||
if (deduplicate_by_columns)
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " BY " << (settings.hilite ? hilite_none : "");
|
||||
|
@ -21,12 +21,10 @@ public:
|
||||
bool deduplicate = false;
|
||||
/// Deduplicate by columns.
|
||||
ASTPtr deduplicate_by_columns;
|
||||
/// Delete 'is_deleted' data
|
||||
bool cleanup = false;
|
||||
/** Get the text that identifies this element. */
|
||||
String getID(char delim) const override
|
||||
{
|
||||
return "OptimizeQuery" + (delim + getDatabase()) + delim + getTable() + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : "")+ (cleanup ? "_cleanup" : "");
|
||||
return "OptimizeQuery" + (delim + getDatabase()) + delim + getTable() + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : "");
|
||||
}
|
||||
|
||||
ASTPtr clone() const override
|
||||
|
@ -39,7 +39,6 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
|
||||
ASTPtr partition;
|
||||
bool final = false;
|
||||
bool deduplicate = false;
|
||||
bool cleanup = false;
|
||||
String cluster_str;
|
||||
|
||||
if (!s_optimize_table.ignore(pos, expected))
|
||||
@ -70,9 +69,6 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
|
||||
if (s_deduplicate.ignore(pos, expected))
|
||||
deduplicate = true;
|
||||
|
||||
if (s_cleanup.ignore(pos, expected))
|
||||
cleanup = true;
|
||||
|
||||
ASTPtr deduplicate_by_columns;
|
||||
if (deduplicate && s_by.ignore(pos, expected))
|
||||
{
|
||||
@ -81,6 +77,9 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Obsolete feature, ignored for backward compatibility.
|
||||
s_cleanup.ignore(pos, expected);
|
||||
|
||||
auto query = std::make_shared<ASTOptimizeQuery>();
|
||||
node = query;
|
||||
|
||||
@ -90,7 +89,6 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
|
||||
query->final = final;
|
||||
query->deduplicate = deduplicate;
|
||||
query->deduplicate_by_columns = deduplicate_by_columns;
|
||||
query->cleanup = cleanup;
|
||||
query->database = database;
|
||||
query->table = table;
|
||||
|
||||
|
@ -3,30 +3,22 @@
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int INCORRECT_DATA;
|
||||
}
|
||||
|
||||
ReplacingSortedAlgorithm::ReplacingSortedAlgorithm(
|
||||
const Block & header_,
|
||||
size_t num_inputs,
|
||||
SortDescription description_,
|
||||
const String & is_deleted_column,
|
||||
const String & version_column,
|
||||
size_t max_block_size_rows,
|
||||
size_t max_block_size_bytes,
|
||||
WriteBuffer * out_row_sources_buf_,
|
||||
bool use_average_block_sizes,
|
||||
bool cleanup_)
|
||||
bool use_average_block_sizes)
|
||||
: IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs)
|
||||
, merged_data(header_.cloneEmptyColumns(), use_average_block_sizes, max_block_size_rows, max_block_size_bytes), cleanup(cleanup_)
|
||||
, merged_data(header_.cloneEmptyColumns(), use_average_block_sizes, max_block_size_rows, max_block_size_bytes)
|
||||
{
|
||||
if (!is_deleted_column.empty())
|
||||
is_deleted_column_number = header_.getPositionByName(is_deleted_column);
|
||||
if (!version_column.empty())
|
||||
version_column_number = header_.getPositionByName(version_column);
|
||||
}
|
||||
@ -73,15 +65,7 @@ IMergingAlgorithm::Status ReplacingSortedAlgorithm::merge()
|
||||
|
||||
/// Write the data for the previous primary key.
|
||||
if (!selected_row.empty())
|
||||
{
|
||||
if (is_deleted_column_number!=-1)
|
||||
{
|
||||
if (!(cleanup && assert_cast<const ColumnUInt8 &>(*(*selected_row.all_columns)[is_deleted_column_number]).getData()[selected_row.row_num]))
|
||||
insertRow();
|
||||
}
|
||||
else
|
||||
insertRow();
|
||||
}
|
||||
insertRow();
|
||||
|
||||
selected_row.clear();
|
||||
}
|
||||
@ -91,13 +75,6 @@ IMergingAlgorithm::Status ReplacingSortedAlgorithm::merge()
|
||||
if (out_row_sources_buf)
|
||||
current_row_sources.emplace_back(current.impl->order, true);
|
||||
|
||||
if ((is_deleted_column_number!=-1))
|
||||
{
|
||||
const UInt8 is_deleted = assert_cast<const ColumnUInt8 &>(*current->all_columns[is_deleted_column_number]).getData()[current->getRow()];
|
||||
if ((is_deleted != 1) && (is_deleted != 0))
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect data: is_deleted = {} (must be 1 or 0).", toString(is_deleted));
|
||||
}
|
||||
|
||||
/// A non-strict comparison, since we select the last row for the same version values.
|
||||
if (version_column_number == -1
|
||||
|| selected_row.empty()
|
||||
@ -128,15 +105,7 @@ IMergingAlgorithm::Status ReplacingSortedAlgorithm::merge()
|
||||
|
||||
/// We will write the data for the last primary key.
|
||||
if (!selected_row.empty())
|
||||
{
|
||||
if (is_deleted_column_number!=-1)
|
||||
{
|
||||
if (!(cleanup && assert_cast<const ColumnUInt8 &>(*(*selected_row.all_columns)[is_deleted_column_number]).getData()[selected_row.row_num]))
|
||||
insertRow();
|
||||
}
|
||||
else
|
||||
insertRow();
|
||||
}
|
||||
insertRow();
|
||||
|
||||
return Status(merged_data.pull(), true);
|
||||
}
|
||||
|
@ -21,13 +21,11 @@ public:
|
||||
ReplacingSortedAlgorithm(
|
||||
const Block & header, size_t num_inputs,
|
||||
SortDescription description_,
|
||||
const String & is_deleted_column,
|
||||
const String & version_column,
|
||||
size_t max_block_size_rows,
|
||||
size_t max_block_size_bytes,
|
||||
WriteBuffer * out_row_sources_buf_ = nullptr,
|
||||
bool use_average_block_sizes = false,
|
||||
bool cleanup = false);
|
||||
bool use_average_block_sizes = false);
|
||||
|
||||
const char * getName() const override { return "ReplacingSortedAlgorithm"; }
|
||||
Status merge() override;
|
||||
@ -35,9 +33,7 @@ public:
|
||||
private:
|
||||
MergedData merged_data;
|
||||
|
||||
ssize_t is_deleted_column_number = -1;
|
||||
ssize_t version_column_number = -1;
|
||||
bool cleanup = false;
|
||||
|
||||
using RowRef = detail::RowRefWithOwnedChunk;
|
||||
static constexpr size_t max_row_refs = 2; /// last, current.
|
||||
|
@ -14,24 +14,21 @@ public:
|
||||
ReplacingSortedTransform(
|
||||
const Block & header, size_t num_inputs,
|
||||
SortDescription description_,
|
||||
const String & is_deleted_column, const String & version_column,
|
||||
const String & version_column,
|
||||
size_t max_block_size_rows,
|
||||
size_t max_block_size_bytes,
|
||||
WriteBuffer * out_row_sources_buf_ = nullptr,
|
||||
bool use_average_block_sizes = false,
|
||||
bool cleanup = false)
|
||||
bool use_average_block_sizes = false)
|
||||
: IMergingTransform(
|
||||
num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false,
|
||||
header,
|
||||
num_inputs,
|
||||
std::move(description_),
|
||||
is_deleted_column,
|
||||
version_column,
|
||||
max_block_size_rows,
|
||||
max_block_size_bytes,
|
||||
out_row_sources_buf_,
|
||||
use_average_block_sizes,
|
||||
cleanup)
|
||||
use_average_block_sizes)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -1025,7 +1025,7 @@ static void addMergingFinal(
|
||||
|
||||
case MergeTreeData::MergingParams::Replacing:
|
||||
return std::make_shared<ReplacingSortedTransform>(header, num_outputs,
|
||||
sort_description, merging_params.is_deleted_column, merging_params.version_column, max_block_size_rows, /*max_block_size_bytes=*/0, /*out_row_sources_buf_*/ nullptr, /*use_average_block_sizes*/ false, /*cleanup*/ !merging_params.is_deleted_column.empty());
|
||||
sort_description, merging_params.version_column, max_block_size_rows, /*max_block_size_bytes=*/0, /*out_row_sources_buf_*/ nullptr, /*use_average_block_sizes*/ false);
|
||||
|
||||
case MergeTreeData::MergingParams::VersionedCollapsing:
|
||||
return std::make_shared<VersionedCollapsingTransform>(header, num_outputs,
|
||||
@ -1128,8 +1128,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
|
||||
/// can use parallel select on such parts.
|
||||
bool no_merging_final = do_not_merge_across_partitions_select_final &&
|
||||
std::distance(parts_to_merge_ranges[range_index], parts_to_merge_ranges[range_index + 1]) == 1 &&
|
||||
parts_to_merge_ranges[range_index]->data_part->info.level > 0 &&
|
||||
data.merging_params.is_deleted_column.empty();
|
||||
parts_to_merge_ranges[range_index]->data_part->info.level > 0;
|
||||
|
||||
if (no_merging_final)
|
||||
{
|
||||
@ -1839,8 +1838,6 @@ Pipe ReadFromMergeTree::spreadMarkRanges(
|
||||
}
|
||||
}
|
||||
|
||||
if (!data.merging_params.is_deleted_column.empty() && !names.contains(data.merging_params.is_deleted_column))
|
||||
column_names_to_read.push_back(data.merging_params.is_deleted_column);
|
||||
if (!data.merging_params.sign_column.empty() && !names.contains(data.merging_params.sign_column))
|
||||
column_names_to_read.push_back(data.merging_params.sign_column);
|
||||
if (!data.merging_params.version_column.empty() && !names.contains(data.merging_params.version_column))
|
||||
|
@ -515,7 +515,6 @@ public:
|
||||
bool /*final*/,
|
||||
bool /*deduplicate*/,
|
||||
const Names & /* deduplicate_by_columns */,
|
||||
bool /*cleanup*/,
|
||||
ContextPtr /*context*/)
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method optimize is not supported by storage {}", getName());
|
||||
|
@ -312,7 +312,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
reserved_space,
|
||||
entry.deduplicate,
|
||||
entry.deduplicate_by_columns,
|
||||
entry.cleanup,
|
||||
storage.merging_params,
|
||||
NO_TRANSACTION_PTR);
|
||||
|
||||
|
@ -131,7 +131,6 @@ void MergePlainMergeTreeTask::prepare()
|
||||
merge_mutate_entry->tagger->reserved_space,
|
||||
deduplicate,
|
||||
deduplicate_by_columns,
|
||||
cleanup,
|
||||
storage.merging_params,
|
||||
txn);
|
||||
}
|
||||
|
@ -20,7 +20,6 @@ public:
|
||||
StorageMetadataPtr metadata_snapshot_,
|
||||
bool deduplicate_,
|
||||
Names deduplicate_by_columns_,
|
||||
bool cleanup_,
|
||||
MergeMutateSelectedEntryPtr merge_mutate_entry_,
|
||||
TableLockHolder table_lock_holder_,
|
||||
IExecutableTask::TaskResultCallback & task_result_callback_)
|
||||
@ -28,7 +27,6 @@ public:
|
||||
, metadata_snapshot(std::move(metadata_snapshot_))
|
||||
, deduplicate(deduplicate_)
|
||||
, deduplicate_by_columns(std::move(deduplicate_by_columns_))
|
||||
, cleanup(cleanup_)
|
||||
, merge_mutate_entry(std::move(merge_mutate_entry_))
|
||||
, table_lock_holder(std::move(table_lock_holder_))
|
||||
, task_result_callback(task_result_callback_)
|
||||
@ -69,7 +67,6 @@ private:
|
||||
StorageMetadataPtr metadata_snapshot;
|
||||
bool deduplicate;
|
||||
Names deduplicate_by_columns;
|
||||
bool cleanup;
|
||||
MergeMutateSelectedEntryPtr merge_mutate_entry{nullptr};
|
||||
TableLockHolder table_lock_holder;
|
||||
FutureMergedMutatedPartPtr future_part{nullptr};
|
||||
|
@ -42,7 +42,6 @@ namespace ErrorCodes
|
||||
extern const int ABORTED;
|
||||
extern const int DIRECTORY_ALREADY_EXISTS;
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int SUPPORT_IS_DISABLED;
|
||||
}
|
||||
|
||||
|
||||
@ -70,10 +69,7 @@ static void extractMergingAndGatheringColumns(
|
||||
|
||||
/// Force version column for Replacing mode
|
||||
if (merging_params.mode == MergeTreeData::MergingParams::Replacing)
|
||||
{
|
||||
key_columns.emplace(merging_params.is_deleted_column);
|
||||
key_columns.emplace(merging_params.version_column);
|
||||
}
|
||||
|
||||
/// Force sign column for VersionedCollapsing mode. Version is already in primary key.
|
||||
if (merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing)
|
||||
@ -510,12 +506,13 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
|
||||
/// In special case, when there is only one source part, and no rows were skipped, we may have
|
||||
/// skipped writing rows_sources file. Otherwise rows_sources_count must be equal to the total
|
||||
/// number of input rows.
|
||||
if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered)
|
||||
if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1)
|
||||
&& sum_input_rows_exact != rows_sources_count + input_rows_filtered)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Number of rows in source parts ({}) excluding filtered rows ({}) differs from number "
"of bytes written to rows_sources file ({}). It is a bug.",
sum_input_rows_exact, input_rows_filtered, rows_sources_count);
ErrorCodes::LOGICAL_ERROR,
"Number of rows in source parts ({}) excluding filtered rows ({}) differs from number "
"of bytes written to rows_sources file ({}). It is a bug.",
sum_input_rows_exact, input_rows_filtered, rows_sources_count);

/// TemporaryDataOnDisk::createRawStream returns WriteBufferFromFile implementing IReadableWriteBuffer
/// and we expect to get ReadBufferFromFile here.

@@ -759,7 +756,6 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
global_ctx->space_reservation,
global_ctx->deduplicate,
global_ctx->deduplicate_by_columns,
global_ctx->cleanup,
projection_merging_params,
global_ctx->need_prefix,
global_ctx->new_data_part.get(),

@@ -1023,13 +1019,9 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream()
break;

case MergeTreeData::MergingParams::Replacing:
if (global_ctx->cleanup && !data_settings->allow_experimental_replacing_merge_with_cleanup)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental merges with CLEANUP are not allowed");

merged_transform = std::make_shared<ReplacingSortedTransform>(
header, pipes.size(), sort_description, ctx->merging_params.is_deleted_column, ctx->merging_params.version_column,
merge_block_size_rows, merge_block_size_bytes, ctx->rows_sources_write_buf.get(), ctx->blocks_are_granules_size,
global_ctx->cleanup);
header, pipes.size(), sort_description, ctx->merging_params.version_column,
merge_block_size_rows, merge_block_size_bytes, ctx->rows_sources_write_buf.get(), ctx->blocks_are_granules_size);
break;

case MergeTreeData::MergingParams::Graphite:

@@ -1118,8 +1110,6 @@ MergeAlgorithm MergeTask::ExecuteAndFinalizeHorizontalPart::chooseMergeAlgorithm
return MergeAlgorithm::Horizontal;
if (global_ctx->future_part->part_format.storage_type != MergeTreeDataPartStorageType::Full)
return MergeAlgorithm::Horizontal;
if (global_ctx->cleanup)
return MergeAlgorithm::Horizontal;

if (!data_settings->allow_vertical_merges_from_compact_to_wide_parts)
{

@@ -67,7 +67,6 @@ public:
ReservationSharedPtr space_reservation_,
bool deduplicate_,
Names deduplicate_by_columns_,
bool cleanup_,
MergeTreeData::MergingParams merging_params_,
bool need_prefix,
IMergeTreeDataPart * parent_part_,
@@ -91,7 +90,6 @@ public:
global_ctx->space_reservation = std::move(space_reservation_);
global_ctx->deduplicate = std::move(deduplicate_);
global_ctx->deduplicate_by_columns = std::move(deduplicate_by_columns_);
global_ctx->cleanup = std::move(cleanup_);
global_ctx->parent_part = std::move(parent_part_);
global_ctx->data = std::move(data_);
global_ctx->mutator = std::move(mutator_);
@@ -160,7 +158,6 @@ private:
ReservationSharedPtr space_reservation{nullptr};
bool deduplicate{false};
Names deduplicate_by_columns{};
bool cleanup{false};

NamesAndTypesList gathering_columns{};
NamesAndTypesList merging_columns{};

@@ -846,10 +846,6 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat
{
const auto columns = metadata.getColumns().getAllPhysical();

if (!is_deleted_column.empty() && mode != MergingParams::Replacing)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"is_deleted column for MergeTree cannot be specified in modes except Replacing.");

if (!sign_column.empty() && mode != MergingParams::Collapsing && mode != MergingParams::VersionedCollapsing)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Sign column for MergeTree cannot be specified "
@@ -919,41 +915,6 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat
throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "Version column {} does not exist in table declaration.", version_column);
};

/// Check that if the is_deleted column is needed, it exists and is of type UInt8. If exist, version column must be defined too but version checks are not done here.
auto check_is_deleted_column = [this, & columns](bool is_optional, const std::string & storage)
{
if (is_deleted_column.empty())
{
if (is_optional)
return;

throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: is_deleted ({}) column for storage {} is empty", is_deleted_column, storage);
}
else
{
if (version_column.empty() && !is_optional)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: Version column ({}) for storage {} is empty while is_deleted ({}) is not.",
version_column, storage, is_deleted_column);

bool miss_is_deleted_column = true;
for (const auto & column : columns)
{
if (column.name == is_deleted_column)
{
if (!typeid_cast<const DataTypeUInt8 *>(column.type.get()))
throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "is_deleted column ({}) for storage {} must have type UInt8. Provided column of type {}.",
is_deleted_column, storage, column.type->getName());
miss_is_deleted_column = false;
break;
}
}

if (miss_is_deleted_column)
throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "is_deleted column {} does not exist in table declaration.", is_deleted_column);
}
};


if (mode == MergingParams::Collapsing)
check_sign_column(false, "CollapsingMergeTree");

@@ -990,7 +951,6 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat

if (mode == MergingParams::Replacing)
{
check_is_deleted_column(true, "ReplacingMergeTree");
check_version_column(true, "ReplacingMergeTree");
}

@@ -349,9 +349,6 @@ public:
/// For Collapsing and VersionedCollapsing mode.
String sign_column;

/// For Replacing mode. Can be empty for Replacing.
String is_deleted_column;

/// For Summing mode. If empty - columns_to_sum is determined automatically.
Names columns_to_sum;

@@ -676,7 +676,6 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart(
ReservationSharedPtr space_reservation,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
const MergeTreeData::MergingParams & merging_params,
const MergeTreeTransactionPtr & txn,
bool need_prefix,
@@ -693,7 +692,6 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart(
space_reservation,
deduplicate,
deduplicate_by_columns,
cleanup,
merging_params,
need_prefix,
parent_part,

@@ -165,7 +165,6 @@ public:
ReservationSharedPtr space_reservation,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
const MergeTreeData::MergingParams & merging_params,
const MergeTreeTransactionPtr & txn,
bool need_prefix = true,

@@ -325,7 +325,7 @@ Block MergeTreeDataWriter::mergeBlock(
return nullptr;
case MergeTreeData::MergingParams::Replacing:
return std::make_shared<ReplacingSortedAlgorithm>(
block, 1, sort_description, merging_params.is_deleted_column, merging_params.version_column, block_size + 1, /*block_size_bytes=*/0);
block, 1, sort_description, merging_params.version_column, block_size + 1, /*block_size_bytes=*/0);
case MergeTreeData::MergingParams::Collapsing:
return std::make_shared<CollapsingSortedAlgorithm>(
block, 1, sort_description, merging_params.sign_column,

@@ -192,7 +192,6 @@ struct Settings;
M(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \
M(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \
M(Bool, allow_experimental_block_number_column, false, "Enable persisting column _block_number for each row.", 0) \
M(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \
\
/** Compress marks and primary key. */ \
M(Bool, compress_marks, true, "Marks support compression, reduce mark file size and speed up network transmission.", 0) \
@@ -233,7 +232,7 @@ struct Settings;
MAKE_OBSOLETE_MERGE_TREE_SETTING(M, Seconds, replicated_fetches_http_send_timeout, 0) \
MAKE_OBSOLETE_MERGE_TREE_SETTING(M, Seconds, replicated_fetches_http_receive_timeout, 0) \
MAKE_OBSOLETE_MERGE_TREE_SETTING(M, UInt64, replicated_max_parallel_fetches_for_host, DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT) \
MAKE_OBSOLETE_MERGE_TREE_SETTING(M, CleanDeletedRows, clean_deleted_rows, CleanDeletedRows::Never) \
MAKE_OBSOLETE_MERGE_TREE_SETTING(M, String, clean_deleted_rows, "") \

/// Settings that should not change after the creation of a table.
/// NOLINTNEXTLINE
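The MergeTreeSettings hunk above keeps `clean_deleted_rows` only as an obsolete (ignored) setting. A minimal SQL sketch of the resulting user-facing behaviour, assuming the usual semantics of obsolete MergeTree settings; the table name is illustrative:

-- Hypothetical table: the obsolete setting is still accepted so that old DDL keeps working,
-- but it no longer affects merges in any way.
CREATE TABLE compat_clean_deleted_rows (uid String, version UInt32)
ENGINE = ReplacingMergeTree(version)
ORDER BY uid
SETTINGS clean_deleted_rows = 'Always';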
@@ -1057,7 +1057,6 @@ public:
ctx->space_reservation,
false, // TODO Do we need deduplicate for projections
{},
false, // no cleanup
projection_merging_params,
NO_TRANSACTION_PTR,
/* need_prefix */ true,

@@ -96,9 +96,6 @@ void ReplicatedMergeTreeLogEntryData::writeText(WriteBuffer & out) const
}
}

if (cleanup)
out << "\ncleanup: " << cleanup;

break;

case DROP_RANGE:
@@ -273,7 +270,11 @@ void ReplicatedMergeTreeLogEntryData::readText(ReadBuffer & in, MergeTreeDataFor
deduplicate_by_columns = std::move(new_deduplicate_by_columns);
}
else if (checkString("cleanup: ", in))
{
/// Obsolete option, does nothing.
bool cleanup = false;
in >> cleanup;
}
else
trailing_newline_found = true;
}

@@ -98,7 +98,6 @@ struct ReplicatedMergeTreeLogEntryData
Strings source_parts;
bool deduplicate = false; /// Do deduplicate on merge
Strings deduplicate_by_columns = {}; // Which columns should be checked for duplicates, empty means 'all' (default).
bool cleanup = false;
MergeType merge_type = MergeType::Regular;
String column_name;
String index_name;

@@ -52,7 +52,6 @@ ReplicatedMergeTreeTableMetadata::ReplicatedMergeTreeTableMetadata(const MergeTr
index_granularity = data_settings->index_granularity;
merging_params_mode = static_cast<int>(data.merging_params.mode);
sign_column = data.merging_params.sign_column;
is_deleted_column = data.merging_params.is_deleted_column;
columns_to_sum = fmt::format("{}", fmt::join(data.merging_params.columns_to_sum.begin(), data.merging_params.columns_to_sum.end(), ","));
version_column = data.merging_params.version_column;
if (data.merging_params.mode == MergeTreeData::MergingParams::Graphite)
@@ -157,8 +156,6 @@ void ReplicatedMergeTreeTableMetadata::write(WriteBuffer & out) const
out << "merge parameters format version: " << merge_params_version << "\n";
if (!version_column.empty())
out << "version column: " << version_column << "\n";
if (!is_deleted_column.empty())
out << "is_deleted column: " << is_deleted_column << "\n";
if (!columns_to_sum.empty())
out << "columns to sum: " << columns_to_sum << "\n";
if (!graphite_params_hash.empty())
@@ -224,9 +221,6 @@ void ReplicatedMergeTreeTableMetadata::read(ReadBuffer & in)
if (checkString("version column: ", in))
in >> version_column >> "\n";

if (checkString("is_deleted column: ", in))
in >> is_deleted_column >> "\n";

if (checkString("columns to sum: ", in))
in >> columns_to_sum >> "\n";

@@ -279,10 +273,6 @@ void ReplicatedMergeTreeTableMetadata::checkImmutableFieldsEquals(const Replicat
throw Exception(ErrorCodes::METADATA_MISMATCH, "Existing table metadata in ZooKeeper differs in version column. "
"Stored in ZooKeeper: {}, local: {}", from_zk.version_column, version_column);

if (is_deleted_column != from_zk.is_deleted_column)
throw Exception(ErrorCodes::METADATA_MISMATCH, "Existing table metadata in ZooKeeper differs in is_deleted column. "
"Stored in ZooKeeper: {}, local: {}", from_zk.is_deleted_column, is_deleted_column);

if (columns_to_sum != from_zk.columns_to_sum)
throw Exception(ErrorCodes::METADATA_MISMATCH, "Existing table metadata in ZooKeeper differs in sum columns. "
"Stored in ZooKeeper: {}, local: {}", from_zk.columns_to_sum, columns_to_sum);

@@ -29,7 +29,6 @@ struct ReplicatedMergeTreeTableMetadata
int merge_params_version = REPLICATED_MERGE_TREE_METADATA_WITH_ALL_MERGE_PARAMETERS;
String sign_column;
String version_column;
String is_deleted_column;
String columns_to_sum;
String graphite_params_hash;
String primary_key;

@@ -138,7 +138,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
* CollapsingMergeTree(date, [sample_key], primary_key, index_granularity, sign)
* SummingMergeTree(date, [sample_key], primary_key, index_granularity, [columns_to_sum])
* AggregatingMergeTree(date, [sample_key], primary_key, index_granularity)
* ReplacingMergeTree(date, [sample_key], primary_key, index_granularity, [version_column [, is_deleted_column]])
* ReplacingMergeTree(date, [sample_key], primary_key, index_granularity, [version_column])
* GraphiteMergeTree(date, [sample_key], primary_key, index_granularity, 'config_element')
*
* Alternatively, you can specify:
@@ -441,11 +441,11 @@ static StoragePtr create(const StorageFactory::Arguments & args)
}
else if (merging_params.mode == MergeTreeData::MergingParams::Replacing)
{
// if there is args and number of optional parameter is higher than 1
// is_deleted is not allowed with the 'allow_deprecated_syntax_for_merge_tree' settings
/// Due to a misfortune, there could be an extra obsolete parameter.
/// We ignore it for backward compatibility.
if (arg_cnt - arg_num == 2 && !engine_args[arg_cnt - 1]->as<ASTLiteral>() && is_extended_storage_def)
{
if (!tryGetIdentifierNameInto(engine_args[arg_cnt - 1], merging_params.is_deleted_column))
if (!tryGetIdentifierName(engine_args[arg_cnt - 1]))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "is_deleted column name must be an identifier {}", verbose_help_message);
--arg_cnt;
}
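The engine-registration hunk above switches to parsing, and then discarding, a trailing identifier argument of ReplacingMergeTree. A short sketch of the legacy syntax that therefore keeps working, assuming the extended storage definition taken in the new code path; table and column names are illustrative:

-- Hypothetical legacy DDL: the second engine argument is parsed as an identifier
-- and then ignored for backward compatibility.
CREATE TABLE legacy_replacing (uid String, version UInt32, is_deleted UInt8)
ENGINE = ReplacingMergeTree(version, is_deleted)
ORDER BY uid;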
@@ -321,7 +321,6 @@ bool StorageEmbeddedRocksDB::optimize(
bool final,
bool deduplicate,
const Names & /* deduplicate_by_columns */,
bool cleanup,
ContextPtr /*context*/)
{
if (partition)
@@ -333,9 +332,6 @@ bool StorageEmbeddedRocksDB::optimize(
if (deduplicate)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE cannot be specified when optimizing table of type EmbeddedRocksDB");

if (cleanup)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CLEANUP cannot be specified when optimizing table of type EmbeddedRocksDB");

std::shared_lock lock(rocksdb_ptr_mx);
rocksdb::CompactRangeOptions compact_options;
auto status = rocksdb_ptr->CompactRange(compact_options, nullptr, nullptr);

@@ -65,7 +65,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override;

bool supportsParallelInsert() const override { return true; }

@@ -685,7 +685,7 @@ void StorageBuffer::flushAndPrepareForShutdown()

try
{
optimize(nullptr /*query*/, getInMemoryMetadataPtr(), {} /*partition*/, false /*final*/, false /*deduplicate*/, {}, false /*cleanup*/, getContext());
optimize(nullptr /*query*/, getInMemoryMetadataPtr(), {} /*partition*/, false /*final*/, false /*deduplicate*/, {}, getContext());
}
catch (...)
{
@@ -711,7 +711,6 @@ bool StorageBuffer::optimize(
bool final,
bool deduplicate,
const Names & /* deduplicate_by_columns */,
bool cleanup,
ContextPtr /*context*/)
{
if (partition)
@@ -723,9 +722,6 @@ bool StorageBuffer::optimize(
if (deduplicate)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE cannot be specified when optimizing table of type Buffer");

if (cleanup)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CLEANUP cannot be specified when optimizing table of type Buffer");

flushAllBuffers(false);
return true;
}
@@ -1058,7 +1054,7 @@ void StorageBuffer::alter(const AlterCommands & params, ContextPtr local_context
auto metadata_snapshot = getInMemoryMetadataPtr();

/// Flush buffers to the storage because BufferSource skips buffers with old metadata_version.
optimize({} /*query*/, metadata_snapshot, {} /*partition_id*/, false /*final*/, false /*deduplicate*/, {}, false /*cleanup*/, local_context);
optimize({} /*query*/, metadata_snapshot, {} /*partition_id*/, false /*final*/, false /*deduplicate*/, {}, local_context);

StorageInMemoryMetadata new_metadata = *metadata_snapshot;
params.apply(new_metadata, local_context);

@@ -100,7 +100,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override;

bool supportsSampling() const override { return true; }

@@ -278,13 +278,12 @@ bool StorageMaterializedView::optimize(
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr local_context)
{
checkStatementCanBeForwarded();
auto storage_ptr = getTargetTable();
auto metadata_snapshot = storage_ptr->getInMemoryMetadataPtr();
return storage_ptr->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context);
return storage_ptr->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, local_context);
}

std::tuple<ContextMutablePtr, std::shared_ptr<ASTInsertQuery>> StorageMaterializedView::prepareRefresh() const

@@ -48,7 +48,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override;

void alter(const AlterCommands & params, ContextPtr context, AlterLockHolder & table_lock_holder) override;

@@ -62,7 +62,6 @@ namespace ErrorCodes
extern const int UNKNOWN_POLICY;
extern const int NO_SUCH_DATA_PART;
extern const int ABORTED;
extern const int SUPPORT_IS_DISABLED;
}

namespace ActionLocks
@@ -1096,7 +1095,6 @@ bool StorageMergeTree::merge(
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
const MergeTreeTransactionPtr & txn,
String & out_disable_reason,
bool optimize_skip_merged_partitions)
@@ -1136,7 +1134,7 @@ bool StorageMergeTree::merge(
/// Copying a vector of columns `deduplicate by columns.
IExecutableTask::TaskResultCallback f = [](bool) {};
auto task = std::make_shared<MergePlainMergeTreeTask>(
*this, metadata_snapshot, deduplicate, deduplicate_by_columns, cleanup, merge_mutate_entry, table_lock_holder, f);
*this, metadata_snapshot, deduplicate, deduplicate_by_columns, merge_mutate_entry, table_lock_holder, f);

task->setCurrentTransaction(MergeTreeTransactionHolder{}, MergeTreeTransactionPtr{txn});

@@ -1374,7 +1372,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign

if (merge_entry)
{
auto task = std::make_shared<MergePlainMergeTreeTask>(*this, metadata_snapshot, /* deduplicate */ false, Names{}, /* cleanup */ false, merge_entry, shared_lock, common_assignee_trigger);
auto task = std::make_shared<MergePlainMergeTreeTask>(*this, metadata_snapshot, /* deduplicate */ false, Names{}, merge_entry, shared_lock, common_assignee_trigger);
task->setCurrentTransaction(std::move(transaction_for_merge), std::move(txn));
bool scheduled = assignee.scheduleMergeMutateTask(task);
/// The problem that we already booked a slot for TTL merge, but a merge list entry will be created only in a prepare method
@@ -1508,7 +1506,6 @@ bool StorageMergeTree::optimize(
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr local_context)
{
if (deduplicate)
@@ -1524,16 +1521,6 @@ bool StorageMergeTree::optimize(
String disable_reason;
if (!partition && final)
{
if (cleanup && this->merging_params.mode != MergingParams::Mode::Replacing)
{
constexpr const char * message = "Cannot OPTIMIZE with CLEANUP table: {}";
disable_reason = "only ReplacingMergeTree can be CLEANUP";
throw Exception(ErrorCodes::CANNOT_ASSIGN_OPTIMIZE, message, disable_reason);
}

if (cleanup && !getSettings()->allow_experimental_replacing_merge_with_cleanup)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental merges with CLEANUP are not allowed");

DataPartsVector data_parts = getVisibleDataPartsVector(local_context);
std::unordered_set<String> partition_ids;

@@ -1548,7 +1535,6 @@ bool StorageMergeTree::optimize(
true,
deduplicate,
deduplicate_by_columns,
cleanup,
txn,
disable_reason,
local_context->getSettingsRef().optimize_skip_merged_partitions))
@@ -1576,7 +1562,6 @@ bool StorageMergeTree::optimize(
final,
deduplicate,
deduplicate_by_columns,
cleanup,
txn,
disable_reason,
local_context->getSettingsRef().optimize_skip_merged_partitions))

@@ -81,7 +81,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override;

void mutate(const MutationCommands & commands, ContextPtr context) override;
@@ -170,14 +169,13 @@ private:
* Returns true if merge is finished successfully.
*/
bool merge(
bool aggressive,
const String & partition_id,
bool final, bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
const MergeTreeTransactionPtr & txn,
String & out_disable_reason,
bool optimize_skip_merged_partitions = false);
bool aggressive,
const String & partition_id,
bool final, bool deduplicate,
const Names & deduplicate_by_columns,
const MergeTreeTransactionPtr & txn,
String & out_disable_reason,
bool optimize_skip_merged_partitions = false);

void renameAndCommitEmptyParts(MutableDataPartsVector & new_parts, Transaction & transaction);

@@ -121,16 +121,15 @@ public:
}

bool optimize(
const ASTPtr & query,
const StorageMetadataPtr & metadata_snapshot,
const ASTPtr & partition,
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override
const ASTPtr & query,
const StorageMetadataPtr & metadata_snapshot,
const ASTPtr & partition,
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
ContextPtr context) override
{
return getNested()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, context);
return getNested()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, context);
}

void mutate(const MutationCommands & commands, ContextPtr context) override { getNested()->mutate(commands, context); }

@@ -3730,12 +3730,10 @@ void StorageReplicatedMergeTree::mergeSelectingTask()
future_merged_part->part_format,
deduplicate,
deduplicate_by_columns,
/*cleanup*/ false,
nullptr,
merge_pred->getVersion(),
future_merged_part->merge_type);


if (create_result == CreateMergeEntryResult::Ok)
return AttemptStatus::EntryCreated;
if (create_result == CreateMergeEntryResult::LogUpdated)
@@ -3852,7 +3850,6 @@ StorageReplicatedMergeTree::CreateMergeEntryResult StorageReplicatedMergeTree::c
const MergeTreeDataPartFormat & merged_part_format,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ReplicatedMergeTreeLogEntryData * out_log_entry,
int32_t log_version,
MergeType merge_type)
@@ -3892,7 +3889,6 @@ StorageReplicatedMergeTree::CreateMergeEntryResult StorageReplicatedMergeTree::c
entry.merge_type = merge_type;
entry.deduplicate = deduplicate;
entry.deduplicate_by_columns = deduplicate_by_columns;
entry.cleanup = cleanup;
entry.create_time = time(nullptr);

for (const auto & part : parts)
@@ -5627,7 +5623,6 @@ bool StorageReplicatedMergeTree::optimize(
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr query_context)
{
/// NOTE: exclusive lock cannot be used here, since this may lead to deadlock (see comments below),
@@ -5639,13 +5634,6 @@ bool StorageReplicatedMergeTree::optimize(
if (!is_leader)
throw Exception(ErrorCodes::NOT_A_LEADER, "OPTIMIZE cannot be done on this replica because it is not a leader");

if (cleanup)
{
if (!getSettings()->allow_experimental_replacing_merge_with_cleanup)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental merges with CLEANUP are not allowed");
LOG_DEBUG(log, "Cleanup the ReplicatedMergeTree.");
}

auto handle_noop = [&]<typename... Args>(FormatStringHelper<Args...> fmt_string, Args && ...args)
{
PreformattedMessage message = fmt_string.format(std::forward<Args>(args)...);
@@ -5724,7 +5712,6 @@ bool StorageReplicatedMergeTree::optimize(
future_merged_part->uuid,
future_merged_part->part_format,
deduplicate, deduplicate_by_columns,
cleanup,
&merge_entry, can_merge.getVersion(),
future_merged_part->merge_type);

@@ -5749,13 +5736,6 @@ bool StorageReplicatedMergeTree::optimize(
bool assigned = false;
if (!partition && final)
{
if (cleanup && this->merging_params.mode != MergingParams::Mode::Replacing)
{
constexpr const char * message = "Cannot OPTIMIZE with CLEANUP table: {}";
String disable_reason = "only ReplacingMergeTree can be CLEANUP";
throw Exception(ErrorCodes::CANNOT_ASSIGN_OPTIMIZE, message, disable_reason);
}

DataPartsVector data_parts = getVisibleDataPartsVector(query_context);
std::unordered_set<String> partition_ids;


@@ -178,7 +178,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr query_context) override;

void alter(const AlterCommands & commands, ContextPtr query_context, AlterLockHolder & table_lock_holder) override;
@@ -746,7 +745,6 @@ private:
const MergeTreeDataPartFormat & merged_part_format,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ReplicatedMergeTreeLogEntryData * out_log_entry,
int32_t log_version,
MergeType merge_type);

@@ -435,12 +435,11 @@ bool StorageWindowView::optimize(
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr local_context)
{
auto storage_ptr = getInnerTable();
auto metadata_snapshot = storage_ptr->getInMemoryMetadataPtr();
return getInnerTable()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context);
return getInnerTable()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, local_context);
}

void StorageWindowView::alter(

@@ -134,7 +134,6 @@ public:
bool final,
bool deduplicate,
const Names & deduplicate_by_columns,
bool cleanup,
ContextPtr context) override;

void alter(const AlterCommands & params, ContextPtr context, AlterLockHolder & table_lock_holder) override;

@@ -3,5 +3,7 @@
2018-01-01 2 2
2018-01-01 2 2
== (Replicas) Test optimize ==
d1 2 1
d2 1 0
d3 2 1
d4 1 0

@@ -3,28 +3,28 @@ set optimize_on_insert = 0;
drop table if exists tab_00577;
create table tab_00577 (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1,
vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0,
min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1;
min_bytes_for_wide_part = 0;
insert into tab_00577 values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1);
insert into tab_00577 values ('2018-01-01', 0, 0);
select * from tab_00577 order by version;
OPTIMIZE TABLE tab_00577 FINAL CLEANUP;
OPTIMIZE TABLE tab_00577 FINAL;
select * from tab_00577;
drop table tab_00577;


DROP TABLE IF EXISTS testCleanupR1;
CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted UInt8)
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted)
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version)
ORDER BY uid SETTINGS enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0,
min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1;
min_bytes_for_wide_part = 0;
INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0);
INSERT INTO testCleanupR1 (*) VALUES ('d3', 2, 1);
INSERT INTO testCleanupR1 (*) VALUES ('d1', 2, 1);
SYSTEM SYNC REPLICA testCleanupR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet"

OPTIMIZE TABLE testCleanupR1 FINAL CLEANUP;
OPTIMIZE TABLE testCleanupR1 FINAL;

-- Only d3 to d5 remain
SELECT '== (Replicas) Test optimize ==';
SELECT * FROM testCleanupR1 order by uid;
DROP TABLE IF EXISTS testCleanupR1
DROP TABLE IF EXISTS testCleanupR1
@@ -1,121 +0,0 @@
== Test SELECT ... FINAL - no is_deleted ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
== Test SELECT ... FINAL - no is_deleted SETTINGS clean_deleted_rows=Always ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
== Test SELECT ... FINAL ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
== Insert backups ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
== Insert a second batch with overlaping data ==
d1 5 0
d2 3 0
d3 3 0
d4 3 0
d5 1 0
== Only last version remains after OPTIMIZE W/ CLEANUP ==
d1 5 0
d2 1 0
d3 1 0
d4 1 0
d5 1 0
d6 3 0
== OPTIMIZE W/ CLEANUP (remove d6) ==
d1 5 0
d2 1 0
d3 1 0
d4 1 0
d5 1 0
== Test of the SETTINGS clean_deleted_rows as Always ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
== Test of the SETTINGS clean_deleted_rows as Never ==
d1 5 0
d2 1 0
d3 1 0
d4 3 0
d5 1 0
d6 2 1
== (Replicas) Test optimize ==
d2 1 0
d4 1 0
== (Replicas) Test settings ==
c2 1 0
c4 1 0
no cleanup 1 d1 5 0
no cleanup 1 d2 1 0
no cleanup 1 d3 1 0
no cleanup 1 d4 3 0
no cleanup 1 d5 1 0
no cleanup 2 d1 5 0
no cleanup 2 d2 1 0
no cleanup 2 d3 1 0
no cleanup 2 d4 3 0
no cleanup 2 d5 1 0
no cleanup 2 d6 2 1
no cleanup 3 d1 5 0
no cleanup 3 d2 1 0
no cleanup 3 d3 1 0
no cleanup 3 d4 3 0
no cleanup 3 d5 1 0
no cleanup 4 d1 5 0
no cleanup 4 d2 1 0
no cleanup 4 d3 1 0
no cleanup 4 d4 3 0
no cleanup 4 d5 1 0
no cleanup 4 d6 2 1
== Check cleanup & settings for other merge trees ==
d1 1 1
d1 1 1
d1 1 1
d1 1 1 1
d1 1 1 1
@@ -1,174 +0,0 @@
-- Tags: zookeeper

-- Settings allow_deprecated_syntax_for_merge_tree prevent to enable the is_deleted column
set allow_deprecated_syntax_for_merge_tree=0;

-- Test the bahaviour without the is_deleted column
DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
SELECT '== Test SELECT ... FINAL - no is_deleted ==';
select * from test FINAL order by uid;
OPTIMIZE TABLE test FINAL CLEANUP;
select * from test order by uid;

DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
SELECT '== Test SELECT ... FINAL - no is_deleted SETTINGS clean_deleted_rows=Always ==';
select * from test FINAL order by uid;
OPTIMIZE TABLE test FINAL CLEANUP;
select * from test order by uid;

-- Test the new behaviour
DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
SELECT '== Test SELECT ... FINAL ==';
select * from test FINAL order by uid;
select * from test order by uid;

SELECT '== Insert backups ==';
INSERT INTO test (*) VALUES ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1);
select * from test FINAL order by uid;

SELECT '== Insert a second batch with overlaping data ==';
INSERT INTO test (*) VALUES ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 1), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0), ('d2', 2, 1), ('d2', 3, 0), ('d3', 2, 1), ('d3', 3, 0);
select * from test FINAL order by uid;

DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1;

-- Expect d6 to be version=3 is_deleted=false
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0);
-- Insert previous version of 'd6' but only v=3 is_deleted=false will remain
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 2, 1);
SELECT '== Only last version remains after OPTIMIZE W/ CLEANUP ==';
OPTIMIZE TABLE test FINAL CLEANUP;
select * from test order by uid;

-- insert d6 v=3 is_deleted=true (timestamp more recent so this version should be the one take into acount)
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 1);

SELECT '== OPTIMIZE W/ CLEANUP (remove d6) ==';
OPTIMIZE TABLE test FINAL CLEANUP;
-- No d6 anymore
select * from test order by uid;

DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;

SELECT '== Test of the SETTINGS clean_deleted_rows as Always ==';
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
-- Even if the setting is set to Always, the SELECT FINAL doesn't delete rows
select * from test FINAL order by uid;
select * from test order by uid;

OPTIMIZE TABLE test FINAL;
-- d6 has to be removed since we set clean_deleted_rows as 'Always'
select * from test where is_deleted=0 order by uid;

SELECT '== Test of the SETTINGS clean_deleted_rows as Never ==';
ALTER TABLE test MODIFY SETTING clean_deleted_rows='Never';
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
OPTIMIZE TABLE test FINAL;
-- d6 has NOT to be removed since we set clean_deleted_rows as 'Never'
select * from test order by uid;

DROP TABLE IF EXISTS testCleanupR1;

CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted UInt8)
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted)
ORDER BY uid settings allow_experimental_replacing_merge_with_cleanup=1;


INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0);
INSERT INTO testCleanupR1 (*) VALUES ('d3', 2, 1);
INSERT INTO testCleanupR1 (*) VALUES ('d1', 2, 1);
SYSTEM SYNC REPLICA testCleanupR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet"

OPTIMIZE TABLE testCleanupR1 FINAL CLEANUP;

-- Only d3 to d5 remain
SELECT '== (Replicas) Test optimize ==';
SELECT * FROM testCleanupR1 order by uid;

------------------------------

DROP TABLE IF EXISTS testSettingsR1;

CREATE TABLE testSettingsR1 (col1 String, version UInt32, is_deleted UInt8)
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_setting/', 'r1', version, is_deleted)
ORDER BY col1
SETTINGS clean_deleted_rows = 'Always', allow_experimental_replacing_merge_with_cleanup=1;

INSERT INTO testSettingsR1 (*) VALUES ('c1', 1, 1),('c2', 1, 0),('c3', 1, 1),('c4', 1, 0);
SYSTEM SYNC REPLICA testSettingsR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet"

OPTIMIZE TABLE testSettingsR1 FINAL;

-- Only d3 to d5 remain
SELECT '== (Replicas) Test settings ==';
SELECT * FROM testSettingsR1 where is_deleted=0 order by col1;


------------------------------
-- Check errors
DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1;

-- is_deleted == 0/1
INSERT INTO test (*) VALUES ('d1', 1, 2); -- { serverError INCORRECT_DATA }

DROP TABLE IF EXISTS test;
-- checkis_deleted type
CREATE TABLE test (uid String, version UInt32, is_deleted String) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); -- { serverError BAD_TYPE_OF_FIELD }

CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid);
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
select 'no cleanup 1', * from test FINAL order by uid;
OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED }
select 'no cleanup 2', * from test order by uid;
DROP TABLE test;

CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/no_cleanup/', 'r1', version, is_deleted) Order by (uid);
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0);
select 'no cleanup 3', * from test FINAL order by uid;
OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED }
select 'no cleanup 4', * from test order by uid;
DROP TABLE test;

-- is_deleted column for other mergeTrees - ErrorCodes::LOGICAL_ERROR)

-- Check clean_deleted_rows='Always' for other MergeTrees
SELECT '== Check cleanup & settings for other merge trees ==';
CREATE TABLE testMT (uid String, version UInt32, is_deleted UInt8) ENGINE = MergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO testMT (*) VALUES ('d1', 1, 1);
OPTIMIZE TABLE testMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
OPTIMIZE TABLE testMT FINAL;
SELECT * FROM testMT order by uid;

CREATE TABLE testSummingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = SummingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO testSummingMT (*) VALUES ('d1', 1, 1);
OPTIMIZE TABLE testSummingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
OPTIMIZE TABLE testSummingMT FINAL;
SELECT * FROM testSummingMT order by uid;

CREATE TABLE testAggregatingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = AggregatingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO testAggregatingMT (*) VALUES ('d1', 1, 1);
OPTIMIZE TABLE testAggregatingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
OPTIMIZE TABLE testAggregatingMT FINAL;
SELECT * FROM testAggregatingMT order by uid;

CREATE TABLE testCollapsingMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = CollapsingMergeTree(sign) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO testCollapsingMT (*) VALUES ('d1', 1, 1, 1);
OPTIMIZE TABLE testCollapsingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
OPTIMIZE TABLE testCollapsingMT FINAL;
SELECT * FROM testCollapsingMT order by uid;

CREATE TABLE testVersionedCMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1;
INSERT INTO testVersionedCMT (*) VALUES ('d1', 1, 1, 1);
OPTIMIZE TABLE testVersionedCMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
OPTIMIZE TABLE testVersionedCMT FINAL;
SELECT * FROM testVersionedCMT order by uid;
@@ -1,31 +0,0 @@
--- Based on https://github.com/ClickHouse/ClickHouse/issues/49685
--- Verify that ReplacingMergeTree properly handles _is_deleted:
--- SELECT FINAL should take `_is_deleted` into consideration when there is only one partition.
-- { echoOn }

DROP TABLE IF EXISTS t;
CREATE TABLE t
(
`account_id` UInt64,
`_is_deleted` UInt8,
`_version` UInt64
)
ENGINE = ReplacingMergeTree(_version, _is_deleted)
ORDER BY (account_id);
INSERT INTO t SELECT number, 0, 1 FROM numbers(1e3);
-- Mark the first 100 rows as deleted.
INSERT INTO t SELECT number, 1, 1 FROM numbers(1e2);
-- Put everything in one partition
OPTIMIZE TABLE t FINAL;
SELECT count() FROM t;
1000
SELECT count() FROM t FINAL;
900
-- Both should produce the same number of rows.
-- Previously, `do_not_merge_across_partitions_select_final = 1` showed more rows,
-- as if no rows were deleted.
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 1;
900
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 0;
900
DROP TABLE t;
@@ -1,32 +0,0 @@
--- Based on https://github.com/ClickHouse/ClickHouse/issues/49685
--- Verify that ReplacingMergeTree properly handles _is_deleted:
--- SELECT FINAL should take `_is_deleted` into consideration when there is only one partition.
-- { echoOn }

DROP TABLE IF EXISTS t;
CREATE TABLE t
(
`account_id` UInt64,
`_is_deleted` UInt8,
`_version` UInt64
)
ENGINE = ReplacingMergeTree(_version, _is_deleted)
ORDER BY (account_id);

INSERT INTO t SELECT number, 0, 1 FROM numbers(1e3);
-- Mark the first 100 rows as deleted.
INSERT INTO t SELECT number, 1, 1 FROM numbers(1e2);

-- Put everything in one partition
OPTIMIZE TABLE t FINAL;

SELECT count() FROM t;
SELECT count() FROM t FINAL;

-- Both should produce the same number of rows.
-- Previously, `do_not_merge_across_partitions_select_final = 1` showed more rows,
-- as if no rows were deleted.
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 1;
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 0;

DROP TABLE t;
@@ -1,13 +0,0 @@
== Only last version remains after OPTIMIZE W/ CLEANUP ==
d1 5 0
d2 1 0
d3 1 0
d4 1 0
d5 1 0
d6 3 0
== OPTIMIZE W/ CLEANUP (remove d6) ==
d1 5 0
d2 1 0
d3 1 0
d4 1 0
d5 1 0
@@ -1,24 +0,0 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1,
vertical_merge_algorithm_min_columns_to_activate = 0,
min_rows_for_wide_part = 1,
min_bytes_for_wide_part = 1,
allow_experimental_replacing_merge_with_cleanup=1;

-- Expect d6 to be version=3 is_deleted=false
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0);
-- Insert previous version of 'd6' but only v=3 is_deleted=false will remain
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 2, 1);
SELECT '== Only last version remains after OPTIMIZE W/ CLEANUP ==';
OPTIMIZE TABLE test FINAL CLEANUP;
select * from test order by uid;

-- insert d6 v=3 is_deleted=true (timestamp more recent so this version should be the one take into acount)
INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 1);

SELECT '== OPTIMIZE W/ CLEANUP (remove d6) ==';
OPTIMIZE TABLE test FINAL CLEANUP;
-- No d6 anymore
select * from test order by uid;

DROP TABLE IF EXISTS test;
@@ -17,26 +17,6 @@ CREATE TABLE t_r
ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r2')
ORDER BY id; -- { serverError METADATA_MISMATCH }

CREATE TABLE t2
(
`id` UInt64,
`val` String,
`legacy_ver` UInt64,
`deleted` UInt8
)
ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r1', legacy_ver)
ORDER BY id;

CREATE TABLE t2_r
(
`id` UInt64,
`val` String,
`legacy_ver` UInt64,
`deleted` UInt8
)
ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r2', legacy_ver, deleted)
ORDER BY id; -- { serverError METADATA_MISMATCH }

CREATE TABLE t3
(
`key` UInt64,
@@ -0,0 +1,7 @@
# There was a wrong, harmful feature, leading to bugs and data corruption.
# This feature is removed, but we take care to maintain compatibility on the syntax level, so now it works as a no-op.

DROP TABLE IF EXISTS t;
CREATE TABLE t (x UInt8, PRIMARY KEY x) ENGINE = ReplacingMergeTree;
OPTIMIZE TABLE t CLEANUP;
DROP TABLE t;
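The new test above pins down the compatibility behaviour: CLEANUP is still accepted syntactically but acts as a no-op. A rough usage sketch under that assumption; the table name is illustrative:

-- Hypothetical session: CLEANUP parses, but the result is the same as a plain OPTIMIZE.
CREATE TABLE cleanup_noop (x UInt8, PRIMARY KEY x) ENGINE = ReplacingMergeTree;
INSERT INTO cleanup_noop VALUES (1), (2);
OPTIMIZE TABLE cleanup_noop CLEANUP;
SELECT * FROM cleanup_noop ORDER BY x;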