Mirror of https://github.com/ClickHouse/ClickHouse.git
Merge branch 'master' into map_from_arrasy
Commit 3af1d0c3bb
@@ -489,7 +489,7 @@ class IColumn;
 M(Bool, optimize_trivial_count_query, true, "Process trivial 'SELECT count() FROM table' query from metadata.", 0) \
 M(Bool, optimize_respect_aliases, true, "If it is set to true, it will respect aliases in WHERE/GROUP BY/ORDER BY, that will help with partition pruning/secondary indexes/optimize_aggregation_in_order/optimize_read_in_order/optimize_trivial_count", 0) \
 M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \
-M(Bool, allow_experimental_lightweight_delete, false, "Enable lightweight DELETE mutations for mergetree tables. Work in progress", 0) \
+M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) \
 M(Bool, optimize_move_functions_out_of_any, false, "Move functions out of aggregate functions 'any', 'anyLast'.", 0) \
 M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \
 M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \
@@ -726,6 +726,7 @@ class IColumn;
 MAKE_OBSOLETE(M, Bool, allow_experimental_database_atomic, true) \
 MAKE_OBSOLETE(M, Bool, allow_experimental_bigint_types, true) \
 MAKE_OBSOLETE(M, Bool, allow_experimental_window_functions, true) \
+MAKE_OBSOLETE(M, Bool, allow_experimental_lightweight_delete, true) \
 MAKE_OBSOLETE(M, Milliseconds, async_insert_stale_timeout_ms, 0) \
 MAKE_OBSOLETE(M, HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT) \
 MAKE_OBSOLETE(M, Bool, database_replicated_ddl_output, true) \
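Taken together, these two Settings.h hunks flip the user-facing switch: lightweight DELETE is now controlled by enable_lightweight_delete (default true), while allow_experimental_lightweight_delete becomes an obsolete setting that is still accepted but no longer does anything. A minimal SQL sketch of the resulting behaviour, using a hypothetical MergeTree table t that is not part of this commit:

SET enable_lightweight_delete = 1;              -- the default; lightweight DELETE is allowed
DELETE FROM t WHERE id = 10;

SET allow_experimental_lightweight_delete = 0;  -- obsolete flag: accepted but ignored
DELETE FROM t WHERE id = 10;                    -- still succeeds

SET enable_lightweight_delete = 0;
DELETE FROM t WHERE id = 10;                    -- rejected with SUPPORT_IS_DISABLED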
@@ -78,10 +78,10 @@ BlockIO InterpreterDeleteQuery::execute()
 }
 else if (table->supportsLightweightDelete())
 {
-    if (!getContext()->getSettingsRef().allow_experimental_lightweight_delete)
+    if (!getContext()->getSettingsRef().enable_lightweight_delete)
         throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
-            "Lightweight delete mutate is experimental. "
-            "Set `allow_experimental_lightweight_delete` setting to enable it");
+            "Lightweight delete mutate is disabled. "
+            "Set `enable_lightweight_delete` setting to enable it");

     /// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query
     String alter_query =
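The unchanged code after this hunk builds the mutation that implements the lightweight DELETE, as the /// comment indicates. Roughly, a statement like the first query below is rewritten into the second; the table t and the predicate are placeholders, and the exact query string assembled by InterpreterDeleteQuery is not shown in this hunk:

DELETE FROM t WHERE id = 10;
-- is executed as approximately:
ALTER TABLE t UPDATE _row_exists = 0 WHERE id = 10;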
@@ -232,11 +232,7 @@ def test_delete_from_table(started_cluster, engine):
     dummy_node.query("INSERT INTO TABLE {} VALUES(1, 'bbbb');".format(name))
     dummy_node.query("INSERT INTO TABLE {} VALUES(2, 'bbbb');".format(name))

-    main_node.query(
-        "SET allow_experimental_lightweight_delete=1; DELETE FROM {} WHERE id=2;".format(
-            name
-        )
-    )
+    main_node.query("DELETE FROM {} WHERE id=2;".format(name))

     expected = "1\taaaa\n1\tbbbb"

@@ -19,7 +19,6 @@
     <settings>
         <max_threads>1</max_threads>
         <mutations_sync>1</mutations_sync>
-        <allow_experimental_lightweight_delete>1</allow_experimental_lightweight_delete>
     </settings>

     <!-- delete most of the rows -->
@@ -5,7 +5,6 @@ CREATE TABLE merge_table_standard_delete(id Int32, name String) ENGINE = MergeTr
 INSERT INTO merge_table_standard_delete select number, toString(number) from numbers(100);

 SET mutations_sync = 0;
-SET allow_experimental_lightweight_delete = 1;

 DELETE FROM merge_table_standard_delete WHERE id = 10;

@@ -108,3 +107,10 @@ DELETE FROM t_proj WHERE a < 100; -- { serverError BAD_ARGUMENTS }
 SELECT avg(a), avg(b), count() FROM t_proj;

 DROP TABLE t_proj;
+
+CREATE TABLE merge_table_standard_delete(id Int32, name String) ENGINE = MergeTree order by id settings min_bytes_for_wide_part=0;
+SET allow_experimental_lightweight_delete = false;
+DELETE FROM merge_table_standard_delete WHERE id = 10; -- allow_experimental_lightweight_delete=false is now ignored
+SET enable_lightweight_delete = false;
+DELETE FROM merge_table_standard_delete WHERE id = 10; -- { serverError SUPPORT_IS_DISABLED }
+DROP TABLE merge_table_standard_delete;
@@ -7,7 +7,6 @@ INSERT INTO merge_table_standard_delete select number, toString(number) from num
 SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY part_type ORDER BY part_type;

 SET mutations_sync = 0;
-SET allow_experimental_lightweight_delete = 1;

 DELETE FROM merge_table_standard_delete WHERE id = 10;
 SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY part_type ORDER BY part_type;
@@ -5,7 +5,6 @@ CREATE TABLE lwd_test (id UInt64 , value String) ENGINE MergeTree() ORDER BY id;
 INSERT INTO lwd_test SELECT number, randomString(10) FROM system.numbers LIMIT 1000000;

 SET mutations_sync = 0;
-SET allow_experimental_lightweight_delete = 1;

 SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active;
 SELECT 'Count', count() FROM lwd_test;
@@ -9,7 +9,6 @@ CREATE TABLE replicated_table_r2(id Int32, name String) ENGINE = ReplicatedMerge
 INSERT INTO replicated_table_r1 select number, toString(number) FROM numbers(100);

 SET mutations_sync = 0;
-SET allow_experimental_lightweight_delete = 1;

 DELETE FROM replicated_table_r1 WHERE id = 10;

@@ -32,7 +32,6 @@ ORDER BY name, column;


 SET mutations_sync = 0;
-SET allow_experimental_lightweight_delete = 1;

 -- delete some rows using LWD
 DELETE FROM lwd_test WHERE (id % 3) = 0;
@@ -1,6 +1,5 @@
 drop table if exists test;
 create table test (id Int32, key String) engine=MergeTree() order by tuple();
 insert into test select number, toString(number) from numbers(1000000);
-set allow_experimental_lightweight_delete=1;
 delete from test where id % 2 = 0 SETTINGS mutations_sync=0;
 select count() from test;
@@ -24,7 +24,6 @@ insert into url_na_log select 209, '2022-08-21' from numbers(10000);


 SET mutations_sync=2;
-SET allow_experimental_lightweight_delete=1;

 OPTIMIZE TABLE url_na_log FINAL;

@@ -126,7 +126,7 @@ class Tester:

 def main():
     # Set mutations to synchronous mode and enable lightweight DELETE's
-    url = os.environ['CLICKHOUSE_URL'] + '&allow_experimental_lightweight_delete=1&max_threads=1'
+    url = os.environ['CLICKHOUSE_URL'] + '&max_threads=1'

     default_index_granularity = 10;
     total_rows = 8 * default_index_granularity
@@ -43,7 +43,7 @@ SELECT intDiv(b, c) FROM test_filter PREWHERE c != 0 WHERE b%2 != 0;
 5
 9
 13
-SET mutations_sync = 2, allow_experimental_lightweight_delete = 1;
+SET mutations_sync = 2;
 -- Delete all rows where division by zero could occur
 DELETE FROM test_filter WHERE c = 0;
 -- Test that now division by zero doesn't occur without explicit condition
@@ -15,7 +15,7 @@ SELECT intDiv(b, c) FROM test_filter PREWHERE c != 0;
 SELECT intDiv(b, c) FROM test_filter PREWHERE c != 0 WHERE b%2 != 0;


-SET mutations_sync = 2, allow_experimental_lightweight_delete = 1;
+SET mutations_sync = 2;

 -- Delete all rows where division by zero could occur
 DELETE FROM test_filter WHERE c = 0;
@@ -4,7 +4,6 @@ CREATE TABLE table_02513 (n UInt64) ENGINE=MergeTree() ORDER BY tuple() SETTINGS

 INSERT INTO table_02513 SELECT number+11*13*1000 FROM numbers(20);

-SET allow_experimental_lightweight_delete=1;
 SET mutations_sync=2;
 SET max_threads=1;

@@ -7,7 +7,6 @@ CREATE MATERIALIZED VIEW kekv ENGINE = MergeTree ORDER BY tuple() AS SELECT * FR
 INSERT INTO kek VALUES (1);
 DELETE FROM kekv WHERE a = 1; -- { serverError BAD_ARGUMENTS}

-SET allow_experimental_lightweight_delete=1;
 DELETE FROM kekv WHERE a = 1; -- { serverError BAD_ARGUMENTS}

 DROP TABLE IF EXISTS kek;
@@ -11,7 +11,6 @@ INSERT INTO lwd_test_02521 SELECT number, randomString(10), now() FROM numbers(5
 OPTIMIZE TABLE lwd_test_02521 FINAL SETTINGS mutations_sync = 1;

 SET mutations_sync=1;
-SET allow_experimental_lightweight_delete = 1;

 -- { echoOn }
 SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active;
@@ -10,8 +10,6 @@ INSERT INTO t1_local VALUES('partition2', 1,2);
 INSERT INTO t1_local VALUES('partition1', 2,3);
 INSERT INTO t1_local VALUES('partition2', 2,4);

-SET allow_experimental_lightweight_delete=1;
-
 -- { echoOn }

 SELECT * FROM t1_local ORDER BY tc1, tc2;