import os
import shutil
import time
import re
import pytest
import threading
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
from helpers.network import PartitionManager

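# test_recover_staled_replica() may run more than once within this module; the counter below
# tracks how many runs have happened so the expected number of tables moved into the
# recover_broken_* databases can be computed.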
test_recover_staled_replica_run = 1

cluster = ClickHouseCluster(__file__)

main_node = cluster.add_instance(
    "main_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
    stay_alive=True,
    macros={"shard": 1, "replica": 1},
)
dummy_node = cluster.add_instance(
    "dummy_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings2.xml"],
    with_zookeeper=True,
    stay_alive=True,
    macros={"shard": 1, "replica": 2},
)
competing_node = cluster.add_instance(
    "competing_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
    stay_alive=True,
    macros={"shard": 1, "replica": 3},
)
snapshotting_node = cluster.add_instance(
    "snapshotting_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
    macros={"shard": 2, "replica": 1},
)
snapshot_recovering_node = cluster.add_instance(
    "snapshot_recovering_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
)
all_nodes = [
    main_node,
    dummy_node,
    competing_node,
    snapshotting_node,
    snapshot_recovering_node,
]

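# Configured with configs/config2.xml and inconsistent user settings
# (configs/inconsistent_settings.xml); not included in all_nodes.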
bad_settings_node = cluster.add_instance(
    "bad_settings_node",
    main_configs=["configs/config2.xml"],
    user_configs=["configs/inconsistent_settings.xml"],
    with_zookeeper=True,
    macros={"shard": 1, "replica": 4},
)

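# SHOW CREATE TABLE output embeds the table UUID, which differs between runs and replicas, so
# assert_create_query() normalizes UUIDs before comparing statements across nodes.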
uuid_regex = re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")


def assert_create_query(nodes, table_name, expected):
    replace_uuid = lambda x: re.sub(uuid_regex, "uuid", x)
    query = "show create table {}".format(table_name)
    for node in nodes:
        assert_eq_with_retry(node, query, expected, get_result=replace_uuid)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_flatten_nested(started_cluster):
    main_node.query(
        "CREATE DATABASE create_replicated_table ENGINE = Replicated('/test/create_replicated_table', 'shard1', 'replica' || '1');"
    )
    dummy_node.query(
        "CREATE DATABASE create_replicated_table ENGINE = Replicated('/test/create_replicated_table', 'shard1', 'replica2');"
    )

    main_node.query(
        "CREATE TABLE create_replicated_table.replicated_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);"
    )

    main_node.query(
        "CREATE MATERIALIZED VIEW create_replicated_table.mv ENGINE=ReplicatedMergeTree ORDER BY tuple() AS select d, cast([(k, toString(i32))] as Nested(a UInt64, b String)) from create_replicated_table.replicated_table"
    )

    assert main_node.query(
        "show create create_replicated_table.mv"
    ) == dummy_node.query("show create create_replicated_table.mv")

    main_node.query("DROP DATABASE create_replicated_table SYNC")
    dummy_node.query("DROP DATABASE create_replicated_table SYNC")


def test_create_replicated_table(started_cluster):
    main_node.query(
        "CREATE DATABASE create_replicated_table ENGINE = Replicated('/test/create_replicated_table', 'shard1', 'replica' || '1');"
    )
    dummy_node.query(
        "CREATE DATABASE create_replicated_table ENGINE = Replicated('/test/create_replicated_table', 'shard1', 'replica2');"
    )
    assert (
        "Explicit zookeeper_path and replica_name are specified"
        in main_node.query_and_get_error(
            "CREATE TABLE create_replicated_table.replicated_table (d Date, k UInt64, i32 Int32) "
            "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);"
        )
    )

    assert (
        "Explicit zookeeper_path and replica_name are specified"
        in main_node.query_and_get_error(
            "CREATE TABLE create_replicated_table.replicated_table (d Date, k UInt64, i32 Int32) "
            "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);"
        )
    )

    assert (
        "This syntax for *MergeTree engine is deprecated"
        in main_node.query_and_get_error(
            "CREATE TABLE create_replicated_table.replicated_table (d Date, k UInt64, i32 Int32) "
            "ENGINE=ReplicatedMergeTree('/test/tmp/{shard}', '{replica}', d, k, 8192);"
        )
    )

    main_node.query(
        "CREATE TABLE create_replicated_table.replicated_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);"
    )

    expected = (
        "CREATE TABLE create_replicated_table.replicated_table\\n(\\n    `d` Date,\\n    `k` UInt64,\\n    `i32` Int32\\n)\\n"
        "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\n"
        "PARTITION BY toYYYYMM(d)\\nORDER BY k\\nSETTINGS index_granularity = 8192"
    )
    assert_create_query(
        [main_node, dummy_node], "create_replicated_table.replicated_table", expected
    )

    # assert without replacing uuid
    assert main_node.query(
        "show create create_replicated_table.replicated_table"
    ) == dummy_node.query("show create create_replicated_table.replicated_table")
    main_node.query("DROP DATABASE create_replicated_table SYNC")
    dummy_node.query("DROP DATABASE create_replicated_table SYNC")

@pytest.mark.parametrize ( " engine " , [ " MergeTree " , " ReplicatedMergeTree " ] )
2020-11-24 10:24:39 +00:00
def test_simple_alter_table ( started_cluster , engine ) :
2023-07-19 07:49:30 +00:00
database = f " test_simple_alter_table_ { engine } "
2022-03-22 16:39:58 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica1 ' ); "
2022-03-22 16:39:58 +00:00
)
dummy_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica2 ' ); "
2022-03-22 16:39:58 +00:00
)
2021-02-02 19:39:04 +00:00
# test_simple_alter_table
2023-07-19 07:49:30 +00:00
name = f " { database } .alter_test "
2022-03-22 16:39:58 +00:00
main_node . query (
" CREATE TABLE {} "
" (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
" ENGINE = {} PARTITION BY StartDate ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID); " . format (
name , engine
)
)
2020-11-24 10:24:39 +00:00
main_node . query ( " ALTER TABLE {} ADD COLUMN Added0 UInt32; " . format ( name ) )
main_node . query ( " ALTER TABLE {} ADD COLUMN Added2 UInt32; " . format ( name ) )
2022-03-22 16:39:58 +00:00
main_node . query (
" ALTER TABLE {} ADD COLUMN Added1 UInt32 AFTER Added0; " . format ( name )
)
main_node . query (
" ALTER TABLE {} ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2; " . format (
name
)
)
main_node . query (
" ALTER TABLE {} ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B; " . format (
name
)
)
main_node . query (
" ALTER TABLE {} ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1; " . format (
name
)
)
full_engine = (
engine
if not " Replicated " in engine
2022-04-13 14:51:59 +00:00
else engine + " ( \\ ' /clickhouse/tables/ {uuid} / {shard} \\ ' , \\ ' {replica} \\ ' ) "
2022-03-22 16:39:58 +00:00
)
expected = (
" CREATE TABLE {} \\ n( \\ n `CounterID` UInt32, \\ n `StartDate` Date, \\ n `UserID` UInt32, \\ n "
" `VisitID` UInt32, \\ n `NestedColumn.A` Array(UInt8), \\ n `NestedColumn.S` Array(String), \\ n "
" `ToDrop` UInt32, \\ n `Added0` UInt32, \\ n `Added1` UInt32, \\ n `Added2` UInt32, \\ n "
" `AddedNested1.A` Array(UInt32), \\ n `AddedNested1.B` Array(UInt64), \\ n `AddedNested1.C` Array(String), \\ n "
" `AddedNested2.A` Array(UInt32), \\ n `AddedNested2.B` Array(UInt64) \\ n) \\ n "
" ENGINE = {} \\ nPARTITION BY StartDate \\ nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) \\ n "
" SETTINGS index_granularity = 8192 " . format ( name , full_engine )
)
2020-10-26 15:12:16 +00:00
2020-11-24 10:24:39 +00:00
assert_create_query ( [ main_node , dummy_node ] , name , expected )
2020-05-24 17:12:24 +00:00
2021-02-02 19:39:04 +00:00
# test_create_replica_after_delay
2022-03-22 16:39:58 +00:00
competing_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE IF NOT EXISTS { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica3 ' ); "
2022-03-22 16:39:58 +00:00
)
2020-05-24 17:12:24 +00:00
2020-11-24 10:24:39 +00:00
main_node . query ( " ALTER TABLE {} ADD COLUMN Added3 UInt32; " . format ( name ) )
main_node . query ( " ALTER TABLE {} DROP COLUMN AddedNested1; " . format ( name ) )
main_node . query ( " ALTER TABLE {} RENAME COLUMN Added1 TO AddedNested1; " . format ( name ) )
2020-05-27 18:33:37 +00:00
2022-03-22 16:39:58 +00:00
full_engine = (
engine
if not " Replicated " in engine
2022-04-13 14:51:59 +00:00
else engine + " ( \\ ' /clickhouse/tables/ {uuid} / {shard} \\ ' , \\ ' {replica} \\ ' ) "
2022-03-22 16:39:58 +00:00
)
expected = (
" CREATE TABLE {} \\ n( \\ n `CounterID` UInt32, \\ n `StartDate` Date, \\ n `UserID` UInt32, \\ n "
" `VisitID` UInt32, \\ n `NestedColumn.A` Array(UInt8), \\ n `NestedColumn.S` Array(String), \\ n "
" `ToDrop` UInt32, \\ n `Added0` UInt32, \\ n `AddedNested1` UInt32, \\ n `Added2` UInt32, \\ n "
" `AddedNested2.A` Array(UInt32), \\ n `AddedNested2.B` Array(UInt64), \\ n `Added3` UInt32 \\ n) \\ n "
" ENGINE = {} \\ nPARTITION BY StartDate \\ nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) \\ n "
" SETTINGS index_granularity = 8192 " . format ( name , full_engine )
)
2020-05-24 17:12:24 +00:00
2020-11-24 10:24:39 +00:00
assert_create_query ( [ main_node , dummy_node , competing_node ] , name , expected )
2023-07-19 07:49:30 +00:00
main_node . query ( f " DROP DATABASE { database } SYNC " )
dummy_node . query ( f " DROP DATABASE { database } SYNC " )
competing_node . query ( f " DROP DATABASE { database } SYNC " )
2021-02-02 19:39:04 +00:00

@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_delete_from_table(started_cluster, engine):
    database = f"delete_from_table_{engine}"
    main_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica1');"
    )
    dummy_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard2', 'replica1');"
    )

    name = f"{database}.delete_test"
    main_node.query(
        "CREATE TABLE {} "
        "(id UInt64, value String) "
        "ENGINE = {} PARTITION BY id % 2 ORDER BY (id);".format(name, engine)
    )
    main_node.query("INSERT INTO TABLE {} VALUES(1, 'aaaa');".format(name))
    main_node.query("INSERT INTO TABLE {} VALUES(2, 'aaaa');".format(name))
    dummy_node.query("INSERT INTO TABLE {} VALUES(1, 'bbbb');".format(name))
    dummy_node.query("INSERT INTO TABLE {} VALUES(2, 'bbbb');".format(name))

    main_node.query("DELETE FROM {} WHERE id=2;".format(name))

    expected = "1\taaaa\n1\tbbbb"

    table_for_select = name
    if not "Replicated" in engine:
        table_for_select = f"cluster('{database}', {name})"
    for node in [main_node, dummy_node]:
        assert_eq_with_retry(
            node,
            "SELECT * FROM {} ORDER BY id, value;".format(table_for_select),
            expected,
        )

    main_node.query(f"DROP DATABASE {database} SYNC")
    dummy_node.query(f"DROP DATABASE {database} SYNC")


def get_table_uuid(database, name):
    return main_node.query(
        f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'"
    ).strip()


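# Prepares a frozen part from a plain MergeTree table on main_node; tests copy it into a
# table's detached/ directory and then ATTACH it.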
@pytest.fixture(scope="module", name="attachable_part")
def fixture_attachable_part(started_cluster):
    main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic")
    main_node.query(
        f"CREATE TABLE testdb_attach_atomic.test (CounterID UInt32) ENGINE = MergeTree ORDER BY (CounterID)"
    )
    main_node.query(f"INSERT INTO testdb_attach_atomic.test VALUES (123)")
    main_node.query(
        f"ALTER TABLE testdb_attach_atomic.test FREEZE WITH NAME 'test_attach'"
    )
    table_uuid = get_table_uuid("testdb_attach_atomic", "test")
    return os.path.join(
        main_node.path,
        f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0",
    )


@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_attach(started_cluster, attachable_part, engine):
    database = f"alter_attach_{engine}"
    main_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica1');"
    )
    dummy_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica2');"
    )

    main_node.query(
        f"CREATE TABLE {database}.alter_attach_test (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
    )
    table_uuid = get_table_uuid(database, "alter_attach_test")
    # Provide and attach a part to the main node
    shutil.copytree(
        attachable_part,
        os.path.join(
            main_node.path,
            f"database/store/{table_uuid[:3]}/{table_uuid}/detached/all_1_1_0",
        ),
    )
    main_node.query(f"ALTER TABLE {database}.alter_attach_test ATTACH PART 'all_1_1_0'")
    # On the main node, data is attached
    assert (
        main_node.query(f"SELECT CounterID FROM {database}.alter_attach_test")
        == "123\n"
    )
    # On the other node, data is replicated only if using a Replicated table engine
    if engine == "ReplicatedMergeTree":
        assert (
            dummy_node.query(f"SELECT CounterID FROM {database}.alter_attach_test")
            == "123\n"
        )
    else:
        assert (
            dummy_node.query(f"SELECT CounterID FROM {database}.alter_attach_test")
            == ""
        )
    main_node.query(f"DROP DATABASE {database} SYNC")
    dummy_node.query(f"DROP DATABASE {database} SYNC")


@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_drop_part(started_cluster, engine):
    database = f"alter_drop_part_{engine}"
    main_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica1');"
    )
    dummy_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica2');"
    )

    part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
    main_node.query(
        f"CREATE TABLE {database}.alter_drop_part (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
    )
    main_node.query(f"INSERT INTO {database}.alter_drop_part VALUES (123)")
    if engine == "MergeTree":
        dummy_node.query(f"INSERT INTO {database}.alter_drop_part VALUES (456)")
    else:
        main_node.query(f"SYSTEM SYNC REPLICA {database}.alter_drop_part PULL")
    main_node.query(f"ALTER TABLE {database}.alter_drop_part DROP PART '{part_name}'")
    assert main_node.query(f"SELECT CounterID FROM {database}.alter_drop_part") == ""
    if engine == "ReplicatedMergeTree":
        # The DROP operation is still replicated at the table engine level
        assert (
            dummy_node.query(f"SELECT CounterID FROM {database}.alter_drop_part") == ""
        )
    else:
        assert (
            dummy_node.query(f"SELECT CounterID FROM {database}.alter_drop_part")
            == "456\n"
        )
    main_node.query(f"DROP DATABASE {database} SYNC")
    dummy_node.query(f"DROP DATABASE {database} SYNC")

2021-09-26 16:47:24 +00:00
@pytest.mark.parametrize ( " engine " , [ " MergeTree " , " ReplicatedMergeTree " ] )
def test_alter_detach_part ( started_cluster , engine ) :
2023-07-19 07:49:30 +00:00
database = f " alter_detach_part_ { engine } "
2022-03-22 16:39:58 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica1 ' ); "
2022-03-22 16:39:58 +00:00
)
dummy_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica2 ' ); "
2022-03-22 16:39:58 +00:00
)
2021-11-11 08:12:54 +00:00
2021-09-26 16:47:24 +00:00
part_name = " all_0_0_0 " if engine == " ReplicatedMergeTree " else " all_1_1_0 "
2022-03-22 16:39:58 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE TABLE { database } .alter_detach (CounterID UInt32) ENGINE = { engine } ORDER BY (CounterID) "
2022-03-22 16:39:58 +00:00
)
2023-07-19 07:49:30 +00:00
main_node . query ( f " INSERT INTO { database } .alter_detach VALUES (123) " )
2021-09-26 16:47:24 +00:00
if engine == " MergeTree " :
2023-07-19 07:49:30 +00:00
dummy_node . query ( f " INSERT INTO { database } .alter_detach VALUES (456) " )
2024-06-12 14:56:09 +00:00
else :
main_node . query ( f " SYSTEM SYNC REPLICA { database } .alter_detach PULL " )
2023-07-19 07:49:30 +00:00
main_node . query ( f " ALTER TABLE { database } .alter_detach DETACH PART ' { part_name } ' " )
detached_parts_query = f " SELECT name FROM system.detached_parts WHERE database= ' { database } ' AND table= ' alter_detach ' "
2021-09-26 16:47:24 +00:00
assert main_node . query ( detached_parts_query ) == f " { part_name } \n "
if engine == " ReplicatedMergeTree " :
# The detach operation is still replicated at the table engine level
assert dummy_node . query ( detached_parts_query ) == f " { part_name } \n "
else :
assert dummy_node . query ( detached_parts_query ) == " "
2023-07-19 07:49:30 +00:00
main_node . query ( f " DROP DATABASE { database } SYNC " )
dummy_node . query ( f " DROP DATABASE { database } SYNC " )
2021-09-26 16:47:24 +00:00
2022-03-22 16:39:58 +00:00
2021-09-26 16:47:24 +00:00
@pytest.mark.parametrize ( " engine " , [ " MergeTree " , " ReplicatedMergeTree " ] )
def test_alter_drop_detached_part ( started_cluster , engine ) :
2023-07-19 07:49:30 +00:00
database = f " alter_drop_detached_part_ { engine } "
2022-03-22 16:39:58 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica1 ' ); "
2022-03-22 16:39:58 +00:00
)
dummy_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE DATABASE { database } ENGINE = Replicated( ' /test/ { database } ' , ' shard1 ' , ' replica2 ' ); "
2022-03-22 16:39:58 +00:00
)
2021-11-11 08:12:54 +00:00
2021-09-26 16:47:24 +00:00
part_name = " all_0_0_0 " if engine == " ReplicatedMergeTree " else " all_1_1_0 "
2022-03-22 16:39:58 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " CREATE TABLE { database } .alter_drop_detached (CounterID UInt32) ENGINE = { engine } ORDER BY (CounterID) "
2022-03-22 16:39:58 +00:00
)
2023-07-19 07:49:30 +00:00
main_node . query ( f " INSERT INTO { database } .alter_drop_detached VALUES (123) " )
2023-03-02 17:35:56 +00:00
main_node . query (
2023-07-19 07:49:30 +00:00
f " ALTER TABLE { database } .alter_drop_detached DETACH PART ' { part_name } ' "
2023-03-02 17:35:56 +00:00
)
2021-09-26 16:47:24 +00:00
if engine == " MergeTree " :
2023-07-19 07:49:30 +00:00
dummy_node . query ( f " INSERT INTO { database } .alter_drop_detached VALUES (456) " )
2023-03-02 17:35:56 +00:00
dummy_node . query (
2023-07-19 07:49:30 +00:00
f " ALTER TABLE { database } .alter_drop_detached DETACH PART ' { part_name } ' "
2023-03-02 17:35:56 +00:00
)
main_node . query (
2023-07-19 07:49:30 +00:00
f " ALTER TABLE { database } .alter_drop_detached DROP DETACHED PART ' { part_name } ' "
2023-03-02 17:35:56 +00:00
)
2023-07-19 07:49:30 +00:00
detached_parts_query = f " SELECT name FROM system.detached_parts WHERE database= ' { database } ' AND table= ' alter_drop_detached ' "
2021-09-26 16:47:24 +00:00
assert main_node . query ( detached_parts_query ) == " "
assert dummy_node . query ( detached_parts_query ) == f " { part_name } \n "
2023-07-19 07:49:30 +00:00
main_node . query ( f " DROP DATABASE { database } SYNC " )
dummy_node . query ( f " DROP DATABASE { database } SYNC " )
2021-11-11 08:12:54 +00:00
2021-09-26 16:47:24 +00:00

@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_drop_partition(started_cluster, engine):
    database = f"alter_drop_partition_{engine}"
    main_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica1');"
    )
    dummy_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard1', 'replica2');"
    )
    snapshotting_node.query(
        f"CREATE DATABASE {database} ENGINE = Replicated('/test/{database}', 'shard2', 'replica1');"
    )

    main_node.query(
        f"CREATE TABLE {database}.alter_drop (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
    )
    main_node.query(f"INSERT INTO {database}.alter_drop VALUES (123)")
    if engine == "MergeTree":
        dummy_node.query(f"INSERT INTO {database}.alter_drop VALUES (456)")
    snapshotting_node.query(f"INSERT INTO {database}.alter_drop VALUES (789)")
    main_node.query(
        f"ALTER TABLE {database}.alter_drop ON CLUSTER {database} DROP PARTITION ID 'all'",
        settings={"replication_alter_partitions_sync": 2},
    )
    assert (
        main_node.query(
            f"SELECT CounterID FROM clusterAllReplicas('{database}', {database}.alter_drop)"
        )
        == ""
    )
    assert dummy_node.query(f"SELECT CounterID FROM {database}.alter_drop") == ""
    main_node.query(f"DROP DATABASE {database}")
    dummy_node.query(f"DROP DATABASE {database}")
    snapshotting_node.query(f"DROP DATABASE {database}")


def test_alter_fetch(started_cluster):
    main_node.query(
        "CREATE DATABASE alter_fetch ENGINE = Replicated('/test/alter_fetch', 'shard1', 'replica1');"
    )
    dummy_node.query(
        "CREATE DATABASE alter_fetch ENGINE = Replicated('/test/alter_fetch', 'shard1', 'replica2');"
    )

    main_node.query(
        "CREATE TABLE alter_fetch.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)"
    )
    main_node.query(
        "CREATE TABLE alter_fetch.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)"
    )
    main_node.query("INSERT INTO alter_fetch.fetch_source VALUES (123)")
    table_uuid = get_table_uuid("alter_fetch", "fetch_source")
    main_node.query(
        f"ALTER TABLE alter_fetch.fetch_target FETCH PART 'all_0_0_0' FROM '/clickhouse/tables/{table_uuid}/{{shard}}'"
    )
    detached_parts_query = "SELECT name FROM system.detached_parts WHERE database='alter_fetch' AND table='fetch_target'"
    assert main_node.query(detached_parts_query) == "all_0_0_0\n"
    assert dummy_node.query(detached_parts_query) == ""

    main_node.query("DROP DATABASE alter_fetch SYNC")
    dummy_node.query("DROP DATABASE alter_fetch SYNC")


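# A single long test that chains several scenarios (marked by the inline comments below):
# concurrent ALTERs issued from different replicas, adding a replica after a delay, replica
# restart, snapshot recovery on a second shard, and dropping/recreating a replica.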
def test_alters_from_different_replicas(started_cluster):
    main_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard1', 'replica1');"
    )
    dummy_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard1', 'replica2');"
    )

    # test_alters_from_different_replicas
    competing_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard1', 'replica3');"
    )

    main_node.query(
        "CREATE TABLE alters_from_different_replicas.concurrent_test "
        "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
        "ENGINE = MergeTree PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);"
    )

    main_node.query(
        "CREATE TABLE alters_from_different_replicas.dist AS alters_from_different_replicas.concurrent_test ENGINE = Distributed(alters_from_different_replicas, alters_from_different_replicas, concurrent_test, CounterID)"
    )

    dummy_node.stop_clickhouse(kill=True)

    settings = {"distributed_ddl_task_timeout": 5}
    assert "is not finished on 1 of 3 hosts" in competing_node.query_and_get_error(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN Added0 UInt32;",
        settings=settings,
    )
    settings = {
        "distributed_ddl_task_timeout": 5,
        "distributed_ddl_output_mode": "null_status_on_timeout",
    }
    assert "shard1\treplica2\tQUEUED\t" in main_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN Added2 UInt32;",
        settings=settings,
    )
    settings = {
        "distributed_ddl_task_timeout": 5,
        "distributed_ddl_output_mode": "never_throw",
    }
    assert "shard1\treplica2\tQUEUED\t" in competing_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN Added1 UInt32 AFTER Added0;",
        settings=settings,
    )
    dummy_node.start_clickhouse()
    main_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;"
    )
    competing_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;"
    )
    main_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;"
    )

    expected = (
        "CREATE TABLE alters_from_different_replicas.concurrent_test\\n(\\n    `CounterID` UInt32,\\n    `StartDate` Date,\\n    `UserID` UInt32,\\n"
        "    `VisitID` UInt32,\\n    `NestedColumn.A` Array(UInt8),\\n    `NestedColumn.S` Array(String),\\n    `ToDrop` UInt32,\\n"
        "    `Added0` UInt32,\\n    `Added1` UInt32,\\n    `Added2` UInt32,\\n    `AddedNested1.A` Array(UInt32),\\n"
        "    `AddedNested1.B` Array(UInt64),\\n    `AddedNested1.C` Array(String),\\n    `AddedNested2.A` Array(UInt32),\\n"
        "    `AddedNested2.B` Array(UInt64)\\n)\\n"
        "ENGINE = MergeTree\\nPARTITION BY toYYYYMM(StartDate)\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\nSETTINGS index_granularity = 8192"
    )

    assert_create_query(
        [main_node, competing_node],
        "alters_from_different_replicas.concurrent_test",
        expected,
    )

    # test_create_replica_after_delay
    main_node.query("DROP TABLE alters_from_different_replicas.concurrent_test SYNC")
    main_node.query(
        "CREATE TABLE alters_from_different_replicas.concurrent_test "
        "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
        "ENGINE = ReplicatedMergeTree ORDER BY CounterID;"
    )

    expected = (
        "CREATE TABLE alters_from_different_replicas.concurrent_test\\n(\\n    `CounterID` UInt32,\\n    `StartDate` Date,\\n    `UserID` UInt32,\\n"
        "    `VisitID` UInt32,\\n    `NestedColumn.A` Array(UInt8),\\n    `NestedColumn.S` Array(String),\\n    `ToDrop` UInt32\\n)\\n"
        "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
    )

    assert_create_query(
        [main_node, competing_node],
        "alters_from_different_replicas.concurrent_test",
        expected,
    )

    main_node.query(
        "INSERT INTO alters_from_different_replicas.dist (CounterID, StartDate, UserID) SELECT number, addDays(toDate('2020-02-02'), number), intHash32(number) FROM numbers(10)"
    )

    # test_replica_restart
    main_node.restart_clickhouse()

    expected = (
        "CREATE TABLE alters_from_different_replicas.concurrent_test\\n(\\n    `CounterID` UInt32,\\n    `StartDate` Date,\\n    `UserID` UInt32,\\n"
        "    `VisitID` UInt32,\\n    `NestedColumn.A` Array(UInt8),\\n    `NestedColumn.S` Array(String),\\n    `ToDrop` UInt32\\n)\\n"
        "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
    )

    # test_snapshot_and_snapshot_recover
    snapshotting_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard2', 'replica1');"
    )
    snapshot_recovering_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard2', 'replica2');"
    )
    assert_create_query(
        all_nodes, "alters_from_different_replicas.concurrent_test", expected
    )

    main_node.query("SYSTEM FLUSH DISTRIBUTED alters_from_different_replicas.dist")
    main_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test UPDATE StartDate = addYears(StartDate, 1) WHERE 1"
    )
    res = main_node.query(
        "ALTER TABLE alters_from_different_replicas.concurrent_test DELETE WHERE UserID % 2"
    )
    assert (
        "shard1\treplica1\tOK" in res
        and "shard1\treplica2\tOK" in res
        and "shard1\treplica3\tOK" in res
    )
    assert "shard2\treplica1\tOK" in res and "shard2\treplica2\tOK" in res

    expected = (
        "1\t1\tmain_node\n"
        "1\t2\tdummy_node\n"
        "1\t3\tcompeting_node\n"
        "2\t1\tsnapshotting_node\n"
        "2\t2\tsnapshot_recovering_node\n"
    )
    assert (
        main_node.query(
            "SELECT shard_num, replica_num, host_name FROM system.clusters WHERE cluster='alters_from_different_replicas'"
        )
        == expected
    )

    # test_drop_and_create_replica
    main_node.query("DROP DATABASE alters_from_different_replicas SYNC")
    main_node.query(
        "CREATE DATABASE alters_from_different_replicas ENGINE = Replicated('/test/alters_from_different_replicas', 'shard1', 'replica1');"
    )

    expected = (
        "CREATE TABLE alters_from_different_replicas.concurrent_test\\n(\\n    `CounterID` UInt32,\\n    `StartDate` Date,\\n    `UserID` UInt32,\\n"
        "    `VisitID` UInt32,\\n    `NestedColumn.A` Array(UInt8),\\n    `NestedColumn.S` Array(String),\\n    `ToDrop` UInt32\\n)\\n"
        "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
    )

    assert_create_query(
        [main_node, competing_node],
        "alters_from_different_replicas.concurrent_test",
        expected,
    )
    assert_create_query(
        all_nodes, "alters_from_different_replicas.concurrent_test", expected
    )

    for node in all_nodes:
        node.query("SYSTEM SYNC REPLICA alters_from_different_replicas.concurrent_test")

    expected = (
        "0\t2021-02-02\t4249604106\n"
        "1\t2021-02-03\t1343103100\n"
        "4\t2021-02-06\t3902320246\n"
        "7\t2021-02-09\t3844986530\n"
        "9\t2021-02-11\t1241149650\n"
    )

    assert_eq_with_retry(
        dummy_node,
        "SELECT CounterID, StartDate, UserID FROM alters_from_different_replicas.dist ORDER BY CounterID",
        expected,
    )
    main_node.query("DROP DATABASE alters_from_different_replicas SYNC")
    dummy_node.query("DROP DATABASE alters_from_different_replicas SYNC")
    competing_node.query("DROP DATABASE alters_from_different_replicas SYNC")
    snapshotting_node.query("DROP DATABASE alters_from_different_replicas SYNC")
    snapshot_recovering_node.query("DROP DATABASE alters_from_different_replicas SYNC")


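# Creates a representative mix of objects (Memory and MergeTree tables, ReplicatedMergeTree
# tables, materialized views and dictionaries) in the given database; used by the
# staled-replica recovery test below.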
def create_some_tables(db):
    settings = {
        "distributed_ddl_task_timeout": 0,
        "allow_experimental_object_type": 1,
        "allow_suspicious_codecs": 1,
    }
    main_node.query(f"CREATE TABLE {db}.t1 (n int) ENGINE=Memory", settings=settings)
    dummy_node.query(
        f"CREATE TABLE {db}.t2 (s String) ENGINE=Memory", settings=settings
    )
    main_node.query(
        f"CREATE TABLE {db}.mt1 (n int) ENGINE=MergeTree order by n",
        settings=settings,
    )
    dummy_node.query(
        f"CREATE TABLE {db}.mt2 (n int) ENGINE=MergeTree order by n",
        settings=settings,
    )
    main_node.query(
        f"CREATE TABLE {db}.rmt1 (n int) ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    dummy_node.query(
        f"CREATE TABLE {db}.rmt2 (n int CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12))) ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    main_node.query(
        f"CREATE TABLE {db}.rmt3 (n int, json Object('json') materialized '') ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    dummy_node.query(
        f"CREATE TABLE {db}.rmt5 (n int) ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    main_node.query(
        f"CREATE MATERIALIZED VIEW {db}.mv1 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt1",
        settings=settings,
    )
    dummy_node.query(
        f"CREATE MATERIALIZED VIEW {db}.mv2 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt2",
        settings=settings,
    )
    main_node.query(
        f"CREATE DICTIONARY {db}.d1 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
        "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "
        "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())"
    )
    dummy_node.query(
        f"CREATE DICTIONARY {db}.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
        "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt2' PASSWORD '' DB 'recover')) "
        "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())"
    )

2022-12-01 12:50:56 +00:00
# These tables are used to check that DatabaseReplicated correctly renames all the tables in case when it restores from the lost state
def create_table_for_exchanges ( db ) :
settings = { " distributed_ddl_task_timeout " : 0 }
for table in [ " a1 " , " a2 " , " a3 " , " a4 " , " a5 " , " a6 " ] :
main_node . query (
f " CREATE TABLE { db } . { table } (s String) ENGINE=ReplicatedMergeTree order by s " ,
settings = settings ,
)
2022-07-29 16:33:16 +00:00
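# Scenario: cut dummy_node off from ZooKeeper, run more DDL on main_node than logs_to_keep (10)
# allows, then verify that dummy_node recovers its replica from a snapshot and moves its
# outdated local tables aside into the recover_broken_* databases.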
def test_recover_staled_replica(started_cluster):
    main_node.query(
        "CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');"
    )
    started_cluster.get_kazoo_client("zoo1").set(
        "/clickhouse/databases/recover/logs_to_keep", b"10"
    )
    dummy_node.query(
        "CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica2');"
    )

    settings = {"distributed_ddl_task_timeout": 0}
    create_some_tables("recover")
    create_table_for_exchanges("recover")

    for table in ["t1", "t2", "mt1", "mt2", "rmt1", "rmt2", "rmt3", "rmt5"]:
        main_node.query(f"INSERT INTO recover.{table} VALUES (42)")
    for table in ["t1", "t2", "mt1", "mt2"]:
        dummy_node.query(f"INSERT INTO recover.{table} VALUES (42)")
    for i, table in enumerate(["a1", "a2", "a3", "a4", "a5", "a6"]):
        main_node.query(f"INSERT INTO recover.{table} VALUES ('{str(i + 1) * 10}')")

    for table in ["rmt1", "rmt2", "rmt3", "rmt5"]:
        main_node.query(f"SYSTEM SYNC REPLICA recover.{table}")
    for table in ["a1", "a2", "a3", "a4", "a5", "a6"]:
        main_node.query(f"SYSTEM SYNC REPLICA recover.{table}")

    with PartitionManager() as pm:
        pm.drop_instance_zk_connections(dummy_node)
        dummy_node.query_and_get_error("RENAME TABLE recover.t1 TO recover.m1")

        main_node.query_with_retry(
            "RENAME TABLE recover.t1 TO recover.m1", settings=settings
        )
        main_node.query_with_retry(
            "ALTER TABLE recover.mt1 ADD COLUMN m int", settings=settings
        )
        main_node.query_with_retry(
            "ALTER TABLE recover.rmt1 ADD COLUMN m int", settings=settings
        )
        main_node.query_with_retry(
            "RENAME TABLE recover.rmt3 TO recover.rmt4", settings=settings
        )
        main_node.query_with_retry("DROP TABLE recover.rmt5", settings=settings)
        main_node.query_with_retry("DROP DICTIONARY recover.d2", settings=settings)
        main_node.query_with_retry(
            "CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
            "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "
            "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());",
            settings=settings,
        )
        inner_table = (
            ".inner_id."
            + dummy_node.query_with_retry(
                "SELECT uuid FROM system.tables WHERE database='recover' AND name='mv1'"
            ).strip()
        )
        main_node.query_with_retry(
            f"ALTER TABLE recover.`{inner_table}` MODIFY COLUMN n int DEFAULT 42",
            settings=settings,
        )
        main_node.query_with_retry(
            "ALTER TABLE recover.mv1 MODIFY QUERY SELECT m as n FROM recover.rmt1",
            settings=settings,
        )
        main_node.query_with_retry(
            "RENAME TABLE recover.mv2 TO recover.mv3",
            settings=settings,
        )
        main_node.query_with_retry(
            "CREATE TABLE recover.tmp AS recover.m1", settings=settings
        )
        main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings)
        main_node.query_with_retry(
            "CREATE TABLE recover.tmp AS recover.m1", settings=settings
        )
        main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings)
        main_node.query_with_retry(
            "CREATE TABLE recover.tmp AS recover.m1", settings=settings
        )

        main_node.query("EXCHANGE TABLES recover.a1 AND recover.a2", settings=settings)
        main_node.query("EXCHANGE TABLES recover.a3 AND recover.a4", settings=settings)
        main_node.query("EXCHANGE TABLES recover.a5 AND recover.a4", settings=settings)
        main_node.query("EXCHANGE TABLES recover.a6 AND recover.a3", settings=settings)
        main_node.query("RENAME TABLE recover.a6 TO recover.a7", settings=settings)
        main_node.query("RENAME TABLE recover.a1 TO recover.a8", settings=settings)

    assert (
        main_node.query(
            "SELECT name FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' ORDER BY name"
        )
        == "a2\na3\na4\na5\na7\na8\nd1\nd2\nm1\nmt1\nmt2\nmv1\nmv3\nrmt1\nrmt2\nrmt4\nt2\ntmp\n"
    )
    query = (
        "SELECT name, uuid, create_table_query FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' "
        "ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1"
    )
    expected = main_node.query(query)
    assert_eq_with_retry(dummy_node, query, expected)
    assert (
        main_node.query(
            "SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'"
        )
        == "2\n"
    )
    assert (
        dummy_node.query(
            "SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'"
        )
        == "2\n"
    )

    # Check that Database Replicated renamed all the tables correctly
    for i, table in enumerate(["a2", "a8", "a5", "a7", "a4", "a3"]):
        assert (
            dummy_node.query(f"SELECT * FROM recover.{table}") == f"{str(i + 1) * 10}\n"
        )

    for table in [
        "m1",
        "t2",
        "mt1",
        "mt2",
        "rmt1",
        "rmt2",
        "rmt4",
        "d1",
        "d2",
        "mv1",
        "mv3",
    ]:
        assert main_node.query(f"SELECT (*,).1 FROM recover.{table}") == "42\n"
    for table in ["t2", "rmt1", "rmt2", "rmt4", "d1", "d2", "mt2", "mv1", "mv3"]:
        assert (
            dummy_node.query(f"SELECT '{table}', (*,).1 FROM recover.{table}")
            == f"{table}\t42\n"
        )
    for table in ["m1", "mt1"]:
        assert dummy_node.query(f"SELECT count() FROM recover.{table}") == "0\n"

    global test_recover_staled_replica_run
    assert (
        dummy_node.query(
            "SELECT count() FROM system.tables WHERE database='recover_broken_tables'"
        )
        == f"{test_recover_staled_replica_run}\n"
    )
    assert (
        dummy_node.query(
            "SELECT count() FROM system.tables WHERE database='recover_broken_replicated_tables'"
        )
        == f"{test_recover_staled_replica_run}\n"
    )
    test_recover_staled_replica_run += 1

    print(dummy_node.query("SHOW DATABASES"))
    print(dummy_node.query("SHOW TABLES FROM recover_broken_tables"))
    print(dummy_node.query("SHOW TABLES FROM recover_broken_replicated_tables"))

    table = dummy_node.query(
        "SHOW TABLES FROM recover_broken_tables LIKE 'mt1_41_%' LIMIT 1"
    ).strip()
    assert (
        dummy_node.query(f"SELECT (*,).1 FROM recover_broken_tables.{table}") == "42\n"
    )
    table = dummy_node.query(
        "SHOW TABLES FROM recover_broken_replicated_tables LIKE 'rmt5_41_%' LIMIT 1"
    ).strip()
    assert (
        dummy_node.query(f"SELECT (*,).1 FROM recover_broken_replicated_tables.{table}")
        == "42\n"
    )

    expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables"
    assert_logs_contain(dummy_node, expected)

    dummy_node.query("DROP TABLE recover.tmp")
    assert_eq_with_retry(
        main_node,
        "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'",
        "0\n",
    )
    main_node.query("DROP DATABASE recover SYNC")
    dummy_node.query("DROP DATABASE recover SYNC")


def test_recover_staled_replica_many_mvs(started_cluster):
    main_node.query("DROP DATABASE IF EXISTS recover_mvs")
    dummy_node.query("DROP DATABASE IF EXISTS recover_mvs")

    main_node.query_with_retry(
        "CREATE DATABASE IF NOT EXISTS recover_mvs ENGINE = Replicated('/clickhouse/databases/recover_mvs', 'shard1', 'replica1');"
    )
    started_cluster.get_kazoo_client("zoo1").set(
        "/clickhouse/databases/recover_mvs/logs_to_keep", b"10"
    )
    dummy_node.query_with_retry(
        "CREATE DATABASE IF NOT EXISTS recover_mvs ENGINE = Replicated('/clickhouse/databases/recover_mvs', 'shard1', 'replica2');"
    )

    settings = {"distributed_ddl_task_timeout": 0}

    with PartitionManager() as pm:
        pm.drop_instance_zk_connections(dummy_node)
        dummy_node.query_and_get_error("RENAME TABLE recover_mvs.t1 TO recover_mvs.m1")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query(
                f"CREATE TABLE recover_mvs.rmt{identifier} (n int) ENGINE=ReplicatedMergeTree ORDER BY n",
                settings=settings,
            )

        print("Created tables")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query(
                f"CREATE TABLE recover_mvs.mv_inner{identifier} (n int) ENGINE=ReplicatedMergeTree ORDER BY n",
                settings=settings,
            )

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE MATERIALIZED VIEW recover_mvs.mv{identifier}
                TO recover_mvs.mv_inner{identifier}
                AS SELECT * FROM recover_mvs.rmt{identifier}""",
                settings=settings,
            )

        print("Created MVs")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE VIEW recover_mvs.view_from_mv{identifier}
                AS SELECT * FROM recover_mvs.mv{identifier}""",
                settings=settings,
            )

        print("Created Views on top of MVs")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE MATERIALIZED VIEW recover_mvs.cascade_mv{identifier}
                ENGINE = MergeTree() ORDER BY tuple()
                POPULATE AS SELECT * FROM recover_mvs.mv_inner{identifier};""",
                settings=settings,
            )

        print("Created cascade MVs")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE VIEW recover_mvs.view_from_cascade_mv{identifier}
                AS SELECT * FROM recover_mvs.cascade_mv{identifier}""",
                settings=settings,
            )

        print("Created Views on top of cascade MVs")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE MATERIALIZED VIEW recover_mvs.double_cascade_mv{identifier}
                ENGINE = MergeTree() ORDER BY tuple()
                POPULATE AS SELECT * FROM recover_mvs.`.inner_id.{get_table_uuid("recover_mvs", f"cascade_mv{identifier}")}`""",
                settings=settings,
            )

        print("Created double cascade MVs")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE VIEW recover_mvs.view_from_double_cascade_mv{identifier}
                AS SELECT * FROM recover_mvs.double_cascade_mv{identifier}""",
                settings=settings,
            )

        print("Created Views on top of double cascade MVs")

        # This weird table name actually makes sense because it starts with the letter `a`
        # and may break some internal sorting
        main_node.query_with_retry(
            """
            CREATE VIEW recover_mvs.anime
            AS
            SELECT n
            FROM
            (
                SELECT *
                FROM
                (
                    SELECT *
                    FROM
                    (
                        SELECT *
                        FROM recover_mvs.mv_inner1 AS q1
                        INNER JOIN recover_mvs.mv_inner2 AS q2 ON q1.n = q2.n
                    ) AS new_table_1
                    INNER JOIN recover_mvs.mv_inner3 AS q3 ON new_table_1.n = q3.n
                ) AS new_table_2
                INNER JOIN recover_mvs.mv_inner4 AS q4 ON new_table_2.n = q4.n
            )
            """,
            settings=settings,
        )

        print("Created final boss")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE DICTIONARY recover_mvs.`11111d{identifier}` (n UInt64)
                PRIMARY KEY n
                SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'double_cascade_mv{identifier}' DB 'recover_mvs'))
                LAYOUT(FLAT()) LIFETIME(1)""",
                settings=settings,
            )

        print("Created dictionaries")

        for identifier in ["1", "2", "3", "4"]:
            main_node.query_with_retry(
                f"""CREATE VIEW recover_mvs.`00000vd{identifier}`
                AS SELECT * FROM recover_mvs.`11111d{identifier}`""",
                settings=settings,
            )

        print("Created Views on top of dictionaries")

    dummy_node.query("SYSTEM SYNC DATABASE REPLICA recover_mvs")
    query = "SELECT name FROM system.tables WHERE database='recover_mvs' ORDER BY name"
    assert main_node.query(query) == dummy_node.query(query)

    main_node.query("DROP DATABASE IF EXISTS recover_mvs")
    dummy_node.query("DROP DATABASE IF EXISTS recover_mvs")


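# Verifies that a server with a Replicated database still starts up and serves existing data
# when ZooKeeper is unreachable, and that DDL succeeds again once the connection is restored.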
def test_startup_without_zk ( started_cluster ) :
with PartitionManager ( ) as pm :
pm . drop_instance_zk_connections ( main_node )
err = main_node . query_and_get_error (
" CREATE DATABASE startup ENGINE = Replicated( ' /clickhouse/databases/startup ' , ' shard1 ' , ' replica1 ' ); "
)
assert " ZooKeeper " in err or " Coordination::Exception " in err
main_node . query (
" CREATE DATABASE startup ENGINE = Replicated( ' /clickhouse/databases/startup ' , ' shard1 ' , ' replica1 ' ); "
)
main_node . query (
" CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n "
)
main_node . query ( " INSERT INTO startup.rmt VALUES (42) " )
with PartitionManager ( ) as pm :
pm . drop_instance_zk_connections ( main_node )
main_node . restart_clickhouse ( stop_start_wait_sec = 60 )
assert main_node . query ( " SELECT (*,).1 FROM startup.rmt " ) == " 42 \n "
    # we need to wait until the table is no longer read-only
main_node . query_with_retry ( " INSERT INTO startup.rmt VALUES(42) " )
main_node . query_with_retry ( " CREATE TABLE startup.m (n int) ENGINE=Memory " )
main_node . query ( " EXCHANGE TABLES startup.rmt AND startup.m " )
assert main_node . query ( " SELECT (*,).1 FROM startup.m " ) == " 42 \n "
main_node . query ( " DROP DATABASE startup SYNC " )
def test_server_uuid ( started_cluster ) :
uuid1 = main_node . query ( " select serverUUID() " )
uuid2 = dummy_node . query ( " select serverUUID() " )
assert uuid1 != uuid2
main_node . restart_clickhouse ( )
uuid1_after_restart = main_node . query ( " select serverUUID() " )
assert uuid1 == uuid1_after_restart
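# Create many tables while dummy_node is disconnected from ZooKeeper, then check that
# SYSTEM SYNC DATABASE REPLICA catches the replica up and both replicas reach the same log_ptr.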
def test_sync_replica ( started_cluster ) :
main_node . query (
" CREATE DATABASE test_sync_database ENGINE = Replicated( ' /test/sync_replica ' , ' shard1 ' , ' replica1 ' ); "
)
dummy_node . query (
" CREATE DATABASE test_sync_database ENGINE = Replicated( ' /test/sync_replica ' , ' shard1 ' , ' replica2 ' ); "
)
number_of_tables = 1000
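    # distributed_ddl_task_timeout=0 means async mode: don't wait for the (currently
    # disconnected) replica to execute the DDL entries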
settings = { " distributed_ddl_task_timeout " : 0 }
with PartitionManager ( ) as pm :
pm . drop_instance_zk_connections ( dummy_node )
for i in range ( number_of_tables ) :
main_node . query (
" CREATE TABLE test_sync_database.table_ {} (n int) ENGINE=MergeTree order by n " . format (
i
) ,
settings = settings ,
)
# wait for host to reconnect
dummy_node . query_with_retry ( " SELECT * FROM system.zookeeper WHERE path= ' / ' " )
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA test_sync_database " )
assert " 2 \n " == main_node . query (
" SELECT sum(is_active) FROM system.clusters WHERE cluster= ' test_sync_database ' "
)
assert dummy_node . query (
" SELECT count() FROM system.tables where database= ' test_sync_database ' "
) . strip ( ) == str ( number_of_tables )
assert main_node . query (
" SELECT count() FROM system.tables where database= ' test_sync_database ' "
) . strip ( ) == str ( number_of_tables )
engine_settings = { " default_table_engine " : " ReplicatedMergeTree " }
dummy_node . query (
" CREATE TABLE test_sync_database.table (n int, primary key n) partition by n " ,
settings = engine_settings ,
)
main_node . query ( " INSERT INTO test_sync_database.table SELECT * FROM numbers(10) " )
dummy_node . query ( " TRUNCATE TABLE test_sync_database.table " , settings = settings )
dummy_node . query (
" ALTER TABLE test_sync_database.table ADD COLUMN m int " , settings = settings
)
main_node . query (
" SYSTEM SYNC DATABASE REPLICA ON CLUSTER test_sync_database test_sync_database "
)
lp1 = main_node . query (
" select value from system.zookeeper where path= ' /test/sync_replica/replicas/shard1|replica1 ' and name= ' log_ptr ' "
)
lp2 = main_node . query (
" select value from system.zookeeper where path= ' /test/sync_replica/replicas/shard1|replica2 ' and name= ' log_ptr ' "
)
max_lp = main_node . query (
" select value from system.zookeeper where path= ' /test/sync_replica/ ' and name= ' max_log_ptr ' "
)
assert lp1 == max_lp
assert lp2 == max_lp
main_node . query ( " DROP DATABASE test_sync_database SYNC " )
dummy_node . query ( " DROP DATABASE test_sync_database SYNC " )
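# With database_replicated_enforce_synchronous_settings=1, per-query synchronization settings
# (mutations_sync, SYNC drops) must be honored on every replica of the database, not only on the initiator.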
def test_force_synchronous_settings ( started_cluster ) :
main_node . query (
" CREATE DATABASE test_force_synchronous_settings ENGINE = Replicated( ' /clickhouse/databases/test2 ' , ' shard1 ' , ' replica1 ' ); "
)
dummy_node . query (
" CREATE DATABASE test_force_synchronous_settings ENGINE = Replicated( ' /clickhouse/databases/test2 ' , ' shard1 ' , ' replica2 ' ); "
)
snapshotting_node . query (
" CREATE DATABASE test_force_synchronous_settings ENGINE = Replicated( ' /clickhouse/databases/test2 ' , ' shard2 ' , ' replica1 ' ); "
)
main_node . query (
" CREATE TABLE test_force_synchronous_settings.t (n int) ENGINE=ReplicatedMergeTree( ' /test/same/path/ {shard} ' , ' {replica} ' ) ORDER BY tuple() "
)
main_node . query (
" INSERT INTO test_force_synchronous_settings.t SELECT * FROM numbers(10) "
)
snapshotting_node . query (
" INSERT INTO test_force_synchronous_settings.t SELECT * FROM numbers(10) "
)
snapshotting_node . query (
" SYSTEM SYNC DATABASE REPLICA test_force_synchronous_settings "
)
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA test_force_synchronous_settings " )
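    # Pause merges on snapshotting_node so the mutation cannot finish there right away;
    # a background thread re-enables merges after 5 seconds, so the ALTER below succeeds
    # only if it really waits for that replica (mutations_sync=2 + enforced settings).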
snapshotting_node . query ( " SYSTEM STOP MERGES test_force_synchronous_settings.t " )
def start_merges_func ( ) :
time . sleep ( 5 )
snapshotting_node . query ( " SYSTEM START MERGES test_force_synchronous_settings.t " )
start_merges_thread = threading . Thread ( target = start_merges_func )
start_merges_thread . start ( )
settings = {
" mutations_sync " : 2 ,
" database_replicated_enforce_synchronous_settings " : 1 ,
}
main_node . query (
" ALTER TABLE test_force_synchronous_settings.t UPDATE n = n * 10 WHERE 1 " ,
settings = settings ,
)
assert " 10 \t 450 \n " == snapshotting_node . query (
" SELECT count(), sum(n) FROM test_force_synchronous_settings.t "
)
start_merges_thread . join ( )
def select_func ( ) :
dummy_node . query (
" SELECT sleepEachRow(1) FROM test_force_synchronous_settings.t SETTINGS function_sleep_max_microseconds_per_block = 0 "
)
select_thread = threading . Thread ( target = select_func )
select_thread . start ( )
settings = { " database_replicated_enforce_synchronous_settings " : 1 }
snapshotting_node . query (
" DROP TABLE test_force_synchronous_settings.t SYNC " , settings = settings
)
main_node . query (
" CREATE TABLE test_force_synchronous_settings.t (n String) ENGINE=ReplicatedMergeTree( ' /test/same/path/ {shard} ' , ' {replica} ' ) ORDER BY tuple() "
)
select_thread . join ( )
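# Corrupt local table metadata on dummy_node in several different ways and check that the
# replica notices the digest mismatch and recovers the database to match main_node.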
def test_recover_digest_mismatch ( started_cluster ) :
main_node . query ( " DROP DATABASE IF EXISTS recover_digest_mismatch " )
dummy_node . query ( " DROP DATABASE IF EXISTS recover_digest_mismatch " )
main_node . query (
" CREATE DATABASE recover_digest_mismatch ENGINE = Replicated( ' /clickhouse/databases/recover_digest_mismatch ' , ' shard1 ' , ' replica1 ' ); "
)
dummy_node . query (
" CREATE DATABASE recover_digest_mismatch ENGINE = Replicated( ' /clickhouse/databases/recover_digest_mismatch ' , ' shard1 ' , ' replica2 ' ); "
)
create_some_tables ( " recover_digest_mismatch " )
main_node . query ( " SYSTEM SYNC DATABASE REPLICA recover_digest_mismatch " )
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA recover_digest_mismatch " )
ways_to_corrupt_metadata = [
" mv /var/lib/clickhouse/metadata/recover_digest_mismatch/t1.sql /var/lib/clickhouse/metadata/recover_digest_mismatch/m1.sql " ,
" sed --follow-symlinks -i ' s/Int32/String/ ' /var/lib/clickhouse/metadata/recover_digest_mismatch/mv1.sql " ,
" rm -f /var/lib/clickhouse/metadata/recover_digest_mismatch/d1.sql " ,
" rm -rf /var/lib/clickhouse/metadata/recover_digest_mismatch/ " , # Will trigger "Directory already exists"
" rm -rf /var/lib/clickhouse/store " ,
]
for command in ways_to_corrupt_metadata :
print ( f " Corrupting data using ` { command } ` " )
need_remove_is_active_node = " rm -rf " in command
dummy_node . stop_clickhouse ( kill = not need_remove_is_active_node )
dummy_node . exec_in_container ( [ " bash " , " -c " , command ] )
query = (
" SELECT name, uuid, create_table_query FROM system.tables WHERE database= ' recover_digest_mismatch ' AND name NOT LIKE ' .inner_id. % ' "
" ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1 "
)
expected = main_node . query ( query )
if need_remove_is_active_node :
2022-07-29 16:33:16 +00:00
# NOTE Otherwise it fails to recreate ReplicatedMergeTree table due to "Replica already exists"
main_node . query (
" SYSTEM DROP REPLICA ' 2 ' FROM DATABASE recover_digest_mismatch "
)
        # There is a race condition between deleting the active node and creating it on server startup,
        # so we start the server only after all table replicas have been deleted from Keeper
dummy_node . start_clickhouse ( )
assert_eq_with_retry ( dummy_node , query , expected )
main_node . query ( " DROP DATABASE IF EXISTS recover_digest_mismatch " )
dummy_node . query ( " DROP DATABASE IF EXISTS recover_digest_mismatch " )
print ( " Everything Okay " )
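# ALTERs executed while other replicas are detached (or have damaged local metadata) must be
# applied correctly after ATTACH/restart, and the table structure must converge on all replicas.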
def test_replicated_table_structure_alter ( started_cluster ) :
main_node . query ( " DROP DATABASE IF EXISTS table_structure " )
dummy_node . query ( " DROP DATABASE IF EXISTS table_structure " )
main_node . query (
" CREATE DATABASE table_structure ENGINE = Replicated( ' /clickhouse/databases/table_structure ' , ' shard1 ' , ' replica1 ' ); "
)
dummy_node . query (
" CREATE DATABASE table_structure ENGINE = Replicated( ' /clickhouse/databases/table_structure ' , ' shard1 ' , ' replica2 ' ); "
)
competing_node . query (
" CREATE DATABASE table_structure ENGINE = Replicated( ' /clickhouse/databases/table_structure ' , ' shard1 ' , ' replica3 ' ); "
)
competing_node . query ( " CREATE TABLE table_structure.mem (n int) ENGINE=Memory " )
dummy_node . query ( " DETACH DATABASE table_structure " )
settings = { " distributed_ddl_task_timeout " : 0 }
main_node . query (
" CREATE TABLE table_structure.rmt (n int, v UInt64) ENGINE=ReplicatedReplacingMergeTree(v) ORDER BY n " ,
settings = settings ,
)
competing_node . query ( " SYSTEM SYNC DATABASE REPLICA table_structure " )
competing_node . query ( " DETACH DATABASE table_structure " )
main_node . query (
" ALTER TABLE table_structure.rmt ADD COLUMN m int " , settings = settings
)
main_node . query (
" ALTER TABLE table_structure.rmt COMMENT COLUMN v ' version ' " , settings = settings
)
main_node . query ( " INSERT INTO table_structure.rmt VALUES (1, 2, 3) " )
command = " rm -f /var/lib/clickhouse/metadata/table_structure/mem.sql "
competing_node . exec_in_container ( [ " bash " , " -c " , command ] )
competing_node . restart_clickhouse ( kill = True )
dummy_node . query ( " ATTACH DATABASE table_structure " )
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA table_structure " )
dummy_node . query ( " SYSTEM SYNC REPLICA table_structure.rmt " )
assert " 1 \t 2 \t 3 \n " == dummy_node . query ( " SELECT * FROM table_structure.rmt " )
competing_node . query ( " SYSTEM SYNC DATABASE REPLICA table_structure " )
competing_node . query ( " SYSTEM SYNC REPLICA table_structure.rmt " )
# time.sleep(600)
assert " mem " in competing_node . query ( " SHOW TABLES FROM table_structure " )
assert " 1 \t 2 \t 3 \n " == competing_node . query ( " SELECT * FROM table_structure.rmt " )
main_node . query ( " ALTER TABLE table_structure.rmt ADD COLUMN k int " )
main_node . query ( " INSERT INTO table_structure.rmt VALUES (1, 2, 3, 4) " )
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA table_structure " )
dummy_node . query ( " SYSTEM SYNC REPLICA table_structure.rmt " )
assert " 1 \t 2 \t 3 \t 0 \n 1 \t 2 \t 3 \t 4 \n " == dummy_node . query (
" SELECT * FROM table_structure.rmt ORDER BY k "
)
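# ALTERs that only modify comments must not leave the replicated table read-only after a restart.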
def test_modify_comment ( started_cluster ) :
main_node . query (
" CREATE DATABASE modify_comment_db ENGINE = Replicated( ' /test/modify_comment ' , ' shard1 ' , ' replica ' || ' 1 ' ); "
)
dummy_node . query (
" CREATE DATABASE modify_comment_db ENGINE = Replicated( ' /test/modify_comment ' , ' shard1 ' , ' replica ' || ' 2 ' ); "
)
main_node . query (
" CREATE TABLE modify_comment_db.modify_comment_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d); "
)
def restart_verify_not_readonly ( ) :
main_node . restart_clickhouse ( )
assert (
main_node . query (
" SELECT is_readonly FROM system.replicas WHERE table = ' modify_comment_table ' "
)
== " 0 \n "
)
dummy_node . restart_clickhouse ( )
assert (
dummy_node . query (
" SELECT is_readonly FROM system.replicas WHERE table = ' modify_comment_table ' "
)
== " 0 \n "
)
main_node . query (
" ALTER TABLE modify_comment_db.modify_comment_table COMMENT COLUMN d ' Some comment ' "
)
restart_verify_not_readonly ( )
main_node . query (
" ALTER TABLE modify_comment_db.modify_comment_table MODIFY COMMENT ' Some error comment ' "
)
restart_verify_not_readonly ( )
main_node . query ( " DROP DATABASE modify_comment_db SYNC " )
dummy_node . query ( " DROP DATABASE modify_comment_db SYNC " )
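# Replace the engine name in a local .sql metadata file with a non-existent one: the server must
# fail to start without LOGICAL_ERRORs, and must start normally once the metadata is fixed back.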
def test_table_metadata_corruption ( started_cluster ) :
main_node . query ( " DROP DATABASE IF EXISTS table_metadata_corruption " )
dummy_node . query ( " DROP DATABASE IF EXISTS table_metadata_corruption " )
main_node . query (
" CREATE DATABASE table_metadata_corruption ENGINE = Replicated( ' /clickhouse/databases/table_metadata_corruption ' , ' shard1 ' , ' replica1 ' ); "
)
dummy_node . query (
" CREATE DATABASE table_metadata_corruption ENGINE = Replicated( ' /clickhouse/databases/table_metadata_corruption ' , ' shard1 ' , ' replica2 ' ); "
)
create_some_tables ( " table_metadata_corruption " )
main_node . query ( " SYSTEM SYNC DATABASE REPLICA table_metadata_corruption " )
dummy_node . query ( " SYSTEM SYNC DATABASE REPLICA table_metadata_corruption " )
# Server should handle this by throwing an exception during table loading, which should lead to server shutdown
corrupt = " sed --follow-symlinks -i ' s/ReplicatedMergeTree/CorruptedMergeTree/ ' /var/lib/clickhouse/metadata/table_metadata_corruption/rmt1.sql "
print ( f " Corrupting metadata using ` { corrupt } ` " )
dummy_node . stop_clickhouse ( kill = True )
dummy_node . exec_in_container ( [ " bash " , " -c " , corrupt ] )
query = (
" SELECT name, uuid, create_table_query FROM system.tables WHERE database= ' table_metadata_corruption ' AND name NOT LIKE ' .inner_id. % ' "
" ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1 "
)
expected = main_node . query ( query )
    # We expect the ClickHouse server to shut down without LOGICAL_ERRORs or deadlocks
dummy_node . start_clickhouse ( expected_to_fail = True )
assert not dummy_node . contains_in_log ( " LOGICAL_ERROR " )
fix_corrupt = " sed --follow-symlinks -i ' s/CorruptedMergeTree/ReplicatedMergeTree/ ' /var/lib/clickhouse/metadata/table_metadata_corruption/rmt1.sql "
print ( f " Fix corrupted metadata using ` { fix_corrupt } ` " )
dummy_node . exec_in_container ( [ " bash " , " -c " , fix_corrupt ] )
dummy_node . start_clickhouse ( )
assert_eq_with_retry ( dummy_node , query , expected )
main_node . query ( " DROP DATABASE IF EXISTS table_metadata_corruption " )
dummy_node . query ( " DROP DATABASE IF EXISTS table_metadata_corruption " )
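# With max_retries_before_automatic_recovery=3, a replica that keeps failing to apply a DDL entry
# (here because of inconsistent settings) must automatically recover from a snapshot of a healthy replica.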
def test_auto_recovery ( started_cluster ) :
dummy_node . query ( " DROP DATABASE IF EXISTS auto_recovery " )
bad_settings_node . query ( " DROP DATABASE IF EXISTS auto_recovery " )
dummy_node . query (
" CREATE DATABASE auto_recovery ENGINE = Replicated( ' /clickhouse/databases/auto_recovery ' , ' shard1 ' , ' replica1 ' ); "
)
bad_settings_node . query (
" CREATE DATABASE auto_recovery ENGINE = Replicated( ' /clickhouse/databases/auto_recovery ' , ' shard1 ' , ' replica2 ' ) SETTINGS max_retries_before_automatic_recovery=3; "
)
dummy_node . query (
" CREATE TABLE auto_recovery.t1 (n int) ENGINE=ReplicatedMergeTree ORDER BY n "
)
dummy_node . query ( " INSERT INTO auto_recovery.t1 SELECT 42 " )
    # dummy_node has <throw_on_unsupported_query_inside_transaction>0</throw_on_unsupported_query_inside_transaction> (the default is 1),
    # so it considers the setting changed and writes it to the DDL entry.
    # bad_settings_node has implicit_transaction=1, so the entry will fail there and the replica will recover from a snapshot.
dummy_node . query (
" CREATE TABLE auto_recovery.t2 (n int) ENGINE=ReplicatedMergeTree ORDER BY tuple() " ,
settings = {
" throw_on_unsupported_query_inside_transaction " : 1 ,
" distributed_ddl_task_timeout " : 0 ,
} ,
)
dummy_node . query ( " INSERT INTO auto_recovery.t2 SELECT 137 " )
dummy_node . query (
" EXCHANGE TABLES auto_recovery.t1 AND auto_recovery.t2 " ,
settings = { " distributed_ddl_task_timeout " : 0 } ,
)
bad_settings_node . query (
" SYSTEM SYNC DATABASE REPLICA auto_recovery " , settings = { " receive_timeout " : 60 }
)
assert bad_settings_node . contains_in_log (
" Unexpected error (3 times in a row), will try to restart main thread "
)
assert bad_settings_node . contains_in_log ( " Cannot begin an implicit transaction " )
bad_settings_node . query ( " SYSTEM SYNC REPLICA auto_recovery.t1 " )
bad_settings_node . query ( " SYSTEM SYNC REPLICA auto_recovery.t2 " )
assert " 42 \n " == dummy_node . query ( " SELECT * FROM auto_recovery.t2 " )
assert " 137 \n " == dummy_node . query ( " SELECT * FROM auto_recovery.t1 " )
assert " 42 \n " == bad_settings_node . query ( " SELECT * FROM auto_recovery.t2 " )
assert " 137 \n " == bad_settings_node . query ( " SELECT * FROM auto_recovery.t1 " )
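# Each replica sees only the replicas of its own replica group in the `db_cluster` cluster,
# while the `all_groups.db_cluster` cluster lists every replica of the database.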
def test_all_groups_cluster ( started_cluster ) :
dummy_node . query ( " DROP DATABASE IF EXISTS db_cluster " )
bad_settings_node . query ( " DROP DATABASE IF EXISTS db_cluster " )
dummy_node . query (
" CREATE DATABASE db_cluster ENGINE = Replicated( ' /clickhouse/databases/all_groups_cluster ' , ' shard1 ' , ' replica1 ' ); "
)
bad_settings_node . query (
" CREATE DATABASE db_cluster ENGINE = Replicated( ' /clickhouse/databases/all_groups_cluster ' , ' shard1 ' , ' replica2 ' ); "
)
assert " dummy_node \n " == dummy_node . query (
" select host_name from system.clusters where name= ' db_cluster ' order by host_name "
)
assert " bad_settings_node \n " == bad_settings_node . query (
" select host_name from system.clusters where name= ' db_cluster ' order by host_name "
)
assert " bad_settings_node \n dummy_node \n " == bad_settings_node . query (
" select host_name from system.clusters where name= ' all_groups.db_cluster ' order by host_name "
)