Commit e4c8c4cecf
* Add zookeeper name in endpoint id

When we migrate a replicated table from one ZooKeeper cluster to another (we migrate because the load on the ZooKeeper cluster is too high), we create a new table with the same zpath, but this fails and the old table gets into trouble.

Here is some information:

1. Old table:
CREATE TABLE a1 (`id` UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/default/a1/{shard}', '{replica}') ORDER BY (id);

2. New table:
CREATE TABLE a2 (`id` UInt64) ENGINE = ReplicatedMergeTree('aux1:/clickhouse/tables/default/a1/{shard}', '{replica}') ORDER BY (id);

3. Error messages:
<Error> executeQuery: Code: 220. DB::Exception: Duplicate interserver IO endpoint: DataPartsExchange:/clickhouse/tables/default/a1/01/replicas/02. (DUPLICATE_INTERSERVER_IO_ENDPOINT)
<Error> InterserverIOHTTPHandler: Code: 221. DB::Exception: No interserver IO endpoint named DataPartsExchange:/clickhouse/tables/default/a1/01/replicas/02. (NO_SUCH_INTERSERVER_IO_ENDPOINT)

* Revert "Add zookeeper name in endpoint id"

This reverts commit 9deb75b249619b7abdd38e3949ca8b3a76c9df8e.

* Add zookeeper name in endpoint id

When we migrate a replicated table from one ZooKeeper cluster to another (we migrate because the load on the ZooKeeper cluster is too high), we create a new table with the same zpath, but this fails and the old table gets into trouble.

* Fix incompatibility with a new setting

* Add a test, fix other issues

* Update 02442_auxiliary_zookeeper_endpoint_id.sql

* Update 02735_system_zookeeper_connection.reference

* Update 02735_system_zookeeper_connection.sql

* Update run.sh

* Remove the 'no-fasttest' tag

* Update 02442_auxiliary_zookeeper_endpoint_id.sql

---------

Co-authored-by: Alexander Tokmakov <tavplubix@clickhouse.com>
Co-authored-by: Alexander Tokmakov <tavplubix@gmail.com>
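A quick way to check the effect of this change (a sketch, assuming the system.zookeeper_connection table exercised by the updated 02735_system_zookeeper_connection test, with its usual name/host/port columns): once an auxiliary cluster is configured, its connection should be listed next to the default one, and creating the new table with the 'aux1:' prefix should no longer fail with DUPLICATE_INTERSERVER_IO_ENDPOINT.

SELECT name, host, port          -- list (Zoo)Keeper connections; the auxiliary
FROM system.zookeeper_connection -- cluster should appear alongside 'default'
ORDER BY name;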
101 lines · 3.3 KiB · XML
<clickhouse>
    <zookeeper>
        <node index="1">
            <host>localhost</host>
            <port>9181</port>
        </node>
        <node index="2">
            <host>localhost</host>
            <port>19181</port>
        </node>
        <node index="3">
            <host>localhost</host>
            <port>29181</port>
        </node>
    </zookeeper>

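    <!--
        Note (an assumption about this test setup, not stated in the file): the section
        above is the default (Zoo)Keeper connection; the three nodes are local Keeper
        instances, the first of which is the embedded <keeper_server> defined below.
    -->
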
    <auxiliary_zookeepers>
        <zookeeper2>
            <node index="1">
                <host>localhost</host>
                <port>9181</port>
            </node>
            <node index="2">
                <host>localhost</host>
                <port>19181</port>
            </node>
            <node index="3">
                <host>localhost</host>
                <port>29181</port>
            </node>
            <root>/test/chroot/auxiliary_zookeeper2</root>
        </zookeeper2>
    </auxiliary_zookeepers>

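    <!--
        Usage sketch (assumes the auxiliary cluster name zookeeper2 defined above): a
        replicated table can be pinned to this cluster by prefixing its Keeper path with
        the cluster name, the same mechanism as the 'aux1:' prefix in the commit message:

            CREATE TABLE a2 (`id` UInt64)
            ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/tables/default/a2/{shard}', '{replica}')
            ORDER BY (id);

        Paths for zookeeper2 are resolved under the <root> chroot configured above.
    -->
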
    <keeper_server>
        <tcp_port>9181</tcp_port>
        <server_id>1</server_id>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <heart_beat_interval_ms>1000</heart_beat_interval_ms>
            <election_timeout_lower_bound_ms>4000</election_timeout_lower_bound_ms>
            <election_timeout_upper_bound_ms>5000</election_timeout_upper_bound_ms>
            <raft_logs_level>information</raft_logs_level>
            <force_sync>false</force_sync>
            <!-- we want all logs for complex problems investigation -->
            <reserved_log_items>1000000000000000</reserved_log_items>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>localhost</hostname>
                <port>9234</port>
                <can_become_leader>true</can_become_leader>
                <priority>3</priority>
            </server>
            <server>
                <id>2</id>
                <hostname>localhost</hostname>
                <port>9235</port>
                <can_become_leader>true</can_become_leader>
                <start_as_follower>true</start_as_follower>
                <priority>2</priority>
            </server>
            <server>
                <id>3</id>
                <hostname>localhost</hostname>
                <port>9236</port>
                <can_become_leader>true</can_become_leader>
                <start_as_follower>true</start_as_follower>
                <priority>1</priority>
            </server>
        </raft_configuration>
    </keeper_server>

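    <!--
        Sketch (an assumption, not documented in this file): the embedded Keeper above
        serves clients on tcp_port 9181, the same port the first node of the <zookeeper>
        section connects to. Once the server is running, the coordination tree can be
        inspected from SQL, for example:

            SELECT name, path FROM system.zookeeper WHERE path = '/';
    -->
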
    <remote_servers>
        <test_cluster_database_replicated>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>localhost</host>
                    <port>19000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>29000</port>
                </replica>
            </shard>
        </test_cluster_database_replicated>
    </remote_servers>

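    <!--
        Sketch (an assumption, not documented in this file): the two-shard test cluster
        above can be inspected from SQL, for example:

            SELECT cluster, shard_num, replica_num, host_name, port
            FROM system.clusters
            WHERE cluster = 'test_cluster_database_replicated';
    -->
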
    <_functional_tests_helper_database_replicated_replace_args_macros>1</_functional_tests_helper_database_replicated_replace_args_macros>
</clickhouse>