Reduce the availability zone request timeout and call setTimeout() earlier, before the request is sent.

Signed-off-by: Jianfei Hu <hujianfei258@gmail.com>
Jianfei Hu 2023-11-14 02:28:09 +00:00
parent 554d907189
commit 9df2775f08
5 changed files with 18 additions and 14 deletions


@@ -39,8 +39,9 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config,
     if (config.hasProperty("keeper_server.availability_zone"))
     {
-        auto keeper_az = config.getString("keeper_server.availability_zone.value");
-        if (config.getBool("keeper_server.availability_zone.enable_auto_detection_on_cloud", false))
+        auto keeper_az = config.getString("keeper_server.availability_zone.value", "");
+        const auto auto_detect_for_cloud = config.getBool("keeper_server.availability_zone.enable_auto_detection_on_cloud", false);
+        if (keeper_az.empty() && auto_detect_for_cloud)
         {
             try
             {
@@ -54,7 +55,7 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config,
         if (!keeper_az.empty())
         {
            system_nodes_with_data[keeper_availability_zone_path] = keeper_az;
-           LOG_INFO(&Poco::Logger::get("KeeperContext"), "Initialize the KeeperContext with availability zone: '{}'.'. ", keeper_az);
+           LOG_INFO(&Poco::Logger::get("KeeperContext"), "Initialize the KeeperContext with availability zone: '{}'", keeper_az);
         }
     }
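The hunk above makes the precedence explicit: a configured <value> always wins, and cloud auto-detection only runs when no value is set. Below is a minimal standalone sketch of that precedence logic; resolveAvailabilityZone and the injected probe are hypothetical stand-ins for the AWS/GCP metadata calls, not ClickHouse code.

#include <functional>
#include <iostream>
#include <optional>
#include <string>

// Sketch only: resolveAvailabilityZone is a hypothetical helper, not part of
// ClickHouse. The probe is injected so the example stays self-contained.
std::optional<std::string> resolveAvailabilityZone(
    const std::string & configured_value,
    bool auto_detect_on_cloud,
    const std::function<std::optional<std::string>()> & probe_cloud)
{
    if (!configured_value.empty())
        return configured_value;        // explicit <value> always wins
    if (auto_detect_on_cloud)
        return probe_cloud();           // probed only when <value> is empty
    return std::nullopt;
}

int main()
{
    auto fake_probe = [] { return std::optional<std::string>{"us-east-1a"}; };
    // Keeper2-style config: value set and auto-detection enabled -> value wins.
    std::cout << resolveAvailabilityZone("az-zoo2", true, fake_probe).value() << '\n';
    // No value, auto-detection enabled -> the probe result is used.
    std::cout << resolveAvailabilityZone("", true, fake_probe).value() << '\n';
}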


@@ -65,7 +65,7 @@ bool areCredentialsEmptyOrExpired(const Aws::Auth::AWSCredentials & credentials,
 }
 const char SSO_CREDENTIALS_PROVIDER_LOG_TAG[] = "SSOCredentialsProvider";
-const int AVAILABILITY_ZONE_REQUEST_TIMEOUT_SECONDS = 5;
+const int AVAILABILITY_ZONE_REQUEST_TIMEOUT_SECONDS = 3;
 }
@@ -241,11 +241,11 @@ String AWSEC2MetadataClient::getAvailabilityZoneOrException()
 {
     Poco::URI uri(getAWSMetadataEndpoint() + EC2_AVAILABILITY_ZONE_RESOURCE);
     Poco::Net::HTTPClientSession session(uri.getHost(), uri.getPort());
+    session.setTimeout(Poco::Timespan(AVAILABILITY_ZONE_REQUEST_TIMEOUT_SECONDS, 0));
     Poco::Net::HTTPResponse response;
     Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, uri.getPath());
     session.sendRequest(request);
-    session.setTimeout(Poco::Timespan(AVAILABILITY_ZONE_REQUEST_TIMEOUT_SECONDS, 0));
     std::istream & rs = session.receiveResponse(response);
     if (response.getStatus() != Poco::Net::HTTPResponse::HTTP_OK)
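Why setTimeout() moves up: Poco's session timeout only governs socket operations performed after it is set, so setting it after sendRequest() leaves the connect and send phases on the default timeout and bounds only the response read. A minimal sketch of the corrected ordering with plain Poco against an illustrative URL (this is not ClickHouse's metadata client):

#include <iostream>
#include <string>
#include <Poco/URI.h>
#include <Poco/Timespan.h>
#include <Poco/StreamCopier.h>
#include <Poco/Net/HTTPClientSession.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPResponse.h>

int main()
{
    // Illustrative endpoint; the real code targets the EC2 metadata service.
    Poco::URI uri("http://example.com/");
    Poco::Net::HTTPClientSession session(uri.getHost(), uri.getPort());

    // Set the timeout before any I/O so it bounds connect, send and receive.
    // Calling it only after sendRequest() would bound just the receive phase.
    session.setTimeout(Poco::Timespan(3, 0));  // 3 seconds, 0 microseconds

    Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, "/");
    Poco::Net::HTTPResponse response;
    session.sendRequest(request);
    std::istream & body_stream = session.receiveResponse(response);

    std::string body;
    Poco::StreamCopier::copyToString(body_stream, body);
    std::cout << response.getStatus() << ' ' << body.size() << " bytes\n";
}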
@@ -287,17 +287,17 @@ String getRunningAvailabilityZoneImpl()
         auto aws_az = AWSEC2MetadataClient::getAvailabilityZoneOrException();
         return aws_az;
     }
-    catch (const DB::Exception & aws_ex)
+    catch (const std::exception & aws_ex)
     {
         try
         {
             auto gcp_zone = getGCPAvailabilityZoneOrException();
             return gcp_zone;
         }
-        catch (const DB::Exception & gcp_ex)
+        catch (const std::exception & gcp_ex)
         {
             throw DB::Exception(ErrorCodes::UNSUPPORTED_METHOD,
-                "Failed to find the availability zone, tried AWS and GCP. AWS Error: {}\nGCP Error: {}", aws_ex.displayText(), gcp_ex.displayText());
+                "Failed to find the availability zone, tried AWS and GCP. AWS Error: {}\nGCP Error: {}", aws_ex.what(), gcp_ex.what());
         }
     }
 }
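The widened catch clauses matter because the probes can throw more than DB::Exception: Poco and standard-library errors surface here too, and since DB::Exception derives from Poco::Exception, which derives from std::exception, catch (const std::exception &) covers all of them. A compact sketch of the same AWS-then-GCP fallback shape using only standard exceptions; getAwsZone and getGcpZone are stand-ins for the real probes:

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-ins for the real metadata probes; either can throw exception types
// that are not DB::Exception (Poco::Exception, std::runtime_error, ...).
std::string getAwsZone() { throw std::runtime_error("connect timed out"); }
std::string getGcpZone() { throw std::runtime_error("404 Not Found"); }

std::string getZoneWithFallback()
{
    try
    {
        return getAwsZone();
    }
    catch (const std::exception & aws_ex)  // broad catch: DB::, Poco:: and std:: alike
    {
        try
        {
            return getGcpZone();  // fall back to GCP before giving up
        }
        catch (const std::exception & gcp_ex)
        {
            throw std::runtime_error(
                std::string("Failed to find the availability zone, tried AWS and GCP. AWS Error: ")
                + aws_ex.what() + "\nGCP Error: " + gcp_ex.what());
        }
    }
}

int main()
{
    try
    {
        getZoneWithFallback();
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
}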


@@ -14,6 +14,7 @@
         <server_id>2</server_id>
         <availability_zone>
             <value>az-zoo2</value>
+            <enable_auto_detection_on_cloud>1</enable_auto_detection_on_cloud>
         </availability_zone>
         <coordination_settings>


@@ -27,10 +27,12 @@ def test_get_availability_zone():
     with KeeperClient.from_cluster(cluster, "zoo1") as client1:
         assert client1.get("/keeper/availability_zone") == "az-zoo1"

+    # Keeper2 sets enable_auto_detection_on_cloud to true, but it is ignored and <value>az-zoo2</value> is used.
     with KeeperClient.from_cluster(cluster, "zoo2") as client2:
         assert client2.get("/keeper/availability_zone") == "az-zoo2"
+        assert "availability_zone" in client2.ls("/keeper")

     # keeper3 is not configured with an availability_zone value.
     with KeeperClient.from_cluster(cluster, "zoo3") as client3:
         with pytest.raises(Exception):
             client3.get("/keeper/availability_zone")


@@ -183,8 +183,8 @@ def test_cmd_mntr(started_cluster):
         # contains:
         #   10 nodes created by test
         #   3 nodes created by clickhouse "/clickhouse/task_queue/ddl"
-        #   1 root node, 4 keeper system nodes
-        assert int(result["zk_znode_count"]) == 15
+        #   1 root node, 3 keeper system nodes
+        assert int(result["zk_znode_count"]) == 14
         assert int(result["zk_watch_count"]) == 2
         assert int(result["zk_ephemerals_count"]) == 2
         assert int(result["zk_approximate_data_size"]) > 0
@@ -333,7 +333,7 @@ def test_cmd_srvr(started_cluster):
         assert int(result["Connections"]) == 1
         assert int(result["Zxid"], 16) > 10
         assert result["Mode"] == "leader"
-        assert result["Node count"] == "15"
+        assert result["Node count"] == "14"
     finally:
         destroy_zk_client(zk)
@@ -373,7 +373,7 @@ def test_cmd_stat(started_cluster):
         assert int(result["Connections"]) == 1
         assert int(result["Zxid"], 16) >= 10
         assert result["Mode"] == "leader"
-        assert result["Node count"] == "15"
+        assert result["Node count"] == "14"
         # filter connection statistics
         cons = [n for n in data.split("\n") if "=" in n]