-- Tags: no-fasttest
drop table if exists rmt;
drop table if exists rmt2;
-- Disable compact parts, because we need hardlinks in mutations.
create table rmt (n int, m int, k int) engine = ReplicatedMergeTree('/test/02432/{database}', '1') order by tuple()
    settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication = 1,
    max_part_removal_threads = 10, concurrent_part_removal_threshold = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1,
    max_replicated_merges_in_queue = 0, max_replicated_mutations_in_queue = 0, min_bytes_for_compact_part = 0, min_rows_for_compact_part = 0;
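-- Merges and mutations are disabled on this replica (max_replicated_merges_in_queue = 0, max_replicated_mutations_in_queue = 0),
-- so each of the inserts below should produce its own part.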
insert into rmt(n, m) values (1, 42);
insert into rmt(n, m) values (2, 42);
insert into rmt(n, m) values (3, 42);
insert into rmt(n, m) values (4, 42);
insert into rmt(n, m) values (5, 42);
insert into rmt(n, m) values (6, 42);
insert into rmt(n, m) values (7, 42);
insert into rmt(n, m) values (8, 42);
insert into rmt(n, m) values (9, 42);
insert into rmt(n, m) values (0, 42);

select count(), sum(n), sum(m) from rmt;
-- Add alters in between to avoid squashing of mutations
set replication_alter_partitions_sync = 0;
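-- With replication_alter_partitions_sync = 0 the ALTERs below return without waiting for the mutations to execute,
-- and since mutations are disabled on rmt, they accumulate in the replication queue.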
alter table rmt update n = n * 10 where 1;
alter table rmt modify column k UInt128;
alter table rmt update n = n + 1 where 1;
system sync replica rmt;
alter table rmt modify column k String;
alter table rmt update n = n * 10 where 1;

select count(), sum(n), sum(m) from rmt;
-- New table can assign merges/mutations and can remove old parts
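-- (rmt2 is a second replica of the same table: same ZooKeeper path, replica '2')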
create table rmt2 (n int, m int, k String) engine = ReplicatedMergeTree('/test/02432/{database}', '2') order by tuple()
    settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication = 1,
    max_part_removal_threads = 10, concurrent_part_removal_threshold = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1,
    min_bytes_for_compact_part = 0, min_rows_for_compact_part = 0, max_replicated_merges_in_queue = 1,
    old_parts_lifetime = 0;
alter table rmt2 modify column k Nullable(String);
alter table rmt2 update n = n + 1 where 1;
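-- Re-enable mutations on rmt and let outdated parts be removed immediately (old_parts_lifetime = 0),
-- so the queued mutations can execute and the superseded parts get cleaned up.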
alter table rmt modify setting old_parts_lifetime = 0, max_replicated_mutations_in_queue = 100 settings replication_alter_partitions_sync = 2;
-- Wait for mutations to finish
system sync replica rmt2;
alter table rmt2 update k = 'zero copy' where 1 settings mutations_sync = 2;
-- The test does not rely on this sleep; it only increases the probability of reproducing issues.
select sleep(3);

select count(), sum(n), sum(m) from rmt;
select count(), sum(n), sum(m) from rmt2;
-- Ensure there are at least 2 parts (in case no parts were removed before the drop)
insert into rmt(n) values (10);

drop table rmt;
drop table rmt2;

system flush logs;
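-- Check the logs: parts should have been removed from the filesystem concurrently, by more than one thread.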
select count() > 0 from system.text_log where yesterday() <= event_date and logger_name like '%' || currentDatabase() || '%' and message like '%Removing % parts from filesystem: % (concurrently)%';
select count() > 1, countDistinct(thread_id) > 1 from system.text_log where yesterday() <= event_date and logger_name like '%' || currentDatabase() || '%' and message like '%Removing % parts in blocks range%';