diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md
index 5a6ec54723b..e2cc607f6d8 100644
--- a/docs/en/operations/system-tables/replicas.md
+++ b/docs/en/operations/system-tables/replicas.md
@@ -82,6 +82,7 @@ The next 4 columns have a non-zero value only where there is an active session w
 - `absolute_delay` (`UInt64`) - How big lag in seconds the current replica has.
 - `total_replicas` (`UInt8`) - The total number of known replicas of this table.
 - `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas).
+- `replica_is_active` ([Map(String, UInt8)](../../sql-reference/data-types/map.md)) — Map between replica name and is replica active.
 
 If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row. If you do not request the last 4 columns (log_max_index, log_pointer, total_replicas, active_replicas), the table works quickly.
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 8b6267943bd..08161f8ef46 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -5575,8 +5575,11 @@ void StorageReplicatedMergeTree::getStatus(Status & res, bool with_zk_fields)
         res.total_replicas = all_replicas.size();
 
         for (const String & replica : all_replicas)
-            if (zookeeper->exists(fs::path(zookeeper_path) / "replicas" / replica / "is_active"))
-                ++res.active_replicas;
+        {
+            bool is_replica_active = zookeeper->exists(fs::path(zookeeper_path) / "replicas" / replica / "is_active");
+            res.active_replicas += static_cast<UInt8>(is_replica_active);
+            res.replica_is_active.emplace(replica, is_replica_active);
+        }
     }
     catch (const Coordination::Exception &)
     {
diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h
index 0f9d71bd5a5..3d2727d7bb9 100644
--- a/src/Storages/StorageReplicatedMergeTree.h
+++ b/src/Storages/StorageReplicatedMergeTree.h
@@ -176,6 +176,7 @@ public:
         UInt8 active_replicas;
         /// If the error has happened fetching the info from ZooKeeper, this field will be set.
         String zookeeper_exception;
+        std::unordered_map<String, bool> replica_is_active;
     };
 
     /// Get the status of the table. If with_zk_fields = false - do not fill in the fields that require queries to ZK.
diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp
index fc33c6b421b..5c22d3c2fae 100644
--- a/src/Storages/System/StorageSystemReplicas.cpp
+++ b/src/Storages/System/StorageSystemReplicas.cpp
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include <DataTypes/DataTypeMap.h>
 #include
 #include
 #include
@@ -51,6 +52,7 @@ StorageSystemReplicas::StorageSystemReplicas(const StorageID & table_id_)
         { "total_replicas", std::make_shared<DataTypeUInt8>() },
         { "active_replicas", std::make_shared<DataTypeUInt8>() },
         { "zookeeper_exception", std::make_shared<DataTypeString>() },
+        { "replica_is_active", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt8>()) }
     }));
     setInMemoryMetadata(storage_metadata);
 }
@@ -101,7 +103,8 @@ Pipe StorageSystemReplicas::read(
             || column_name == "log_pointer"
             || column_name == "total_replicas"
             || column_name == "active_replicas"
-            || column_name == "zookeeper_exception")
+            || column_name == "zookeeper_exception"
+            || column_name == "replica_is_active")
         {
             with_zk_fields = true;
             break;
@@ -184,6 +187,18 @@ Pipe StorageSystemReplicas::read(
         res_columns[col_num++]->insert(status.total_replicas);
         res_columns[col_num++]->insert(status.active_replicas);
         res_columns[col_num++]->insert(status.zookeeper_exception);
+
+        Map replica_is_active_values;
+        for (const auto & [name, is_active] : status.replica_is_active)
+        {
+            Tuple is_replica_active_value;
+            is_replica_active_value.emplace_back(name);
+            is_replica_active_value.emplace_back(is_active);
+
+            replica_is_active_values.emplace_back(std::move(is_replica_active_value));
+        }
+
+        res_columns[col_num++]->insert(std::move(replica_is_active_values));
     }
 
     Block header = metadata_snapshot->getSampleBlock();
diff --git a/tests/integration/test_replica_is_active/__init__.py b/tests/integration/test_replica_is_active/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/integration/test_replica_is_active/test.py b/tests/integration/test_replica_is_active/test.py
new file mode 100644
index 00000000000..14046ea7f7d
--- /dev/null
+++ b/tests/integration/test_replica_is_active/test.py
@@ -0,0 +1,41 @@
+import pytest
+from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node1 = cluster.add_instance('node1', with_zookeeper=True)
+node2 = cluster.add_instance('node2', with_zookeeper=True)
+node3 = cluster.add_instance('node3', with_zookeeper=True)
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+
+        for i, node in enumerate((node1, node2, node3)):
+            node_name = 'node' + str(i + 1)
+            node.query(
+                '''
+                CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
+                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table', '{}')
+                PARTITION BY date ORDER BY id
+                '''.format(node_name)
+            )
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def test_replica_is_active(start_cluster):
+    query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'")
+    assert query_result == '{\'node1\':1,\'node2\':1,\'node3\':1}\n'
+
+    node3.stop()
+    query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'")
+    assert query_result == '{\'node1\':1,\'node2\':1,\'node3\':0}\n'
+
+    node2.stop()
+    query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'")
+    assert query_result == '{\'node1\':1,\'node2\':0,\'node3\':0}\n'
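
For reference, a minimal usage sketch of the new column, taken from the integration test above (the table and replica names belong to the test, not to the feature itself):

    SELECT replica_is_active
    FROM system.replicas
    WHERE table = 'test_table'

    -- one Map(String, UInt8) per matching replicated table, e.g. {'node1':1,'node2':1,'node3':0};
    -- a value of 0 means that replica currently has no active session in ZooKeeper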