Fix tests

Antonio Andelic 2023-03-23 12:58:39 +00:00
parent 9ea8dc4e98
commit d6cbc5d05b
32 changed files with 204 additions and 182 deletions

View File

@@ -1308,7 +1308,7 @@ try
{
/// We do not load ZooKeeper configuration on the first config loading
/// because TestKeeper server is not started yet.
if (zkutil::hasZooKeeperConfig(config))
if (zkutil::hasZooKeeperConfig(*config))
global_context->reloadZooKeeperIfChanged(config);
global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config);
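
For context: zkutil::hasZooKeeperConfig takes a const Poco::Util::AbstractConfiguration &, while the server keeps the freshly loaded config behind a smart pointer, hence the added dereference at the call site. A minimal sketch of the situation, assuming ConfigurationPtr is Poco::AutoPtr<Poco::Util::AbstractConfiguration> (the alias ClickHouse uses); the caller name is hypothetical:

#include <Poco/AutoPtr.h>
#include <Poco/Util/AbstractConfiguration.h>

using ConfigurationPtr = Poco::AutoPtr<Poco::Util::AbstractConfiguration>;

namespace zkutil
{
    bool hasZooKeeperConfig(const Poco::Util::AbstractConfiguration & config);
}

void onConfigReload(const ConfigurationPtr & config)  /// hypothetical caller
{
    /// AutoPtr converts implicitly to a raw pointer, not to a const reference,
    /// so the pointer must be dereferenced explicitly at the call site.
    if (zkutil::hasZooKeeperConfig(*config))
    {
        /// reload the (auxiliary) ZooKeeper clients here
    }
}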

View File

@@ -1342,12 +1342,12 @@ void validateZooKeeperConfig(const Poco::Util::AbstractConfiguration & config)
throw DB::Exception(DB::ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both ZooKeeper and Keeper are specified");
}
bool hasZooKeeperConfig(const Poco::Util::AbstractConfiguration & config, bool allow_keeper_server)
bool hasZooKeeperConfig(const Poco::Util::AbstractConfiguration & config)
{
return config.has("zookeeper") || config.has("keeper") || (allow_keeper_server && config.has("keeper_server"));
return config.has("zookeeper") || config.has("keeper") || (config.has("keeper_server") && config.getBool("keeper_server.use_cluster", true));
}
String getZooKeeperConfigName(const Poco::Util::AbstractConfiguration & config, bool allow_keeper_server)
String getZooKeeperConfigName(const Poco::Util::AbstractConfiguration & config)
{
if (config.has("zookeeper"))
return "zookeeper";
@@ -1355,7 +1355,7 @@ String getZooKeeperConfigName(const Poco::Util::AbstractConfiguration & config,
if (config.has("keeper"))
return "keeper";
if (allow_keeper_server && config.has("keeper_server"))
if (config.has("keeper_server") && config.getBool("keeper_server.use_cluster", true))
return "keeper_server";
throw DB::Exception(DB::ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config");
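
The allow_keeper_server flag is replaced by a config-driven gate: an embedded keeper_server section now counts as a ZooKeeper configuration only when keeper_server.use_cluster is true, and getBool(key, true) returns the default when the key is absent. A hedged, self-contained demonstration of that default-true behavior, using Poco's XMLConfiguration purely for illustration:

#include <Poco/AutoPtr.h>
#include <Poco/Util/XMLConfiguration.h>
#include <iostream>
#include <sstream>

int main()
{
    /// No use_cluster key: the default (true) applies, so the section counts.
    std::istringstream xml1("<clickhouse><keeper_server><tcp_port>9181</tcp_port></keeper_server></clickhouse>");
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config1(new Poco::Util::XMLConfiguration(xml1));
    std::cout << (config1->has("keeper_server") && config1->getBool("keeper_server.use_cluster", true)) << '\n';  /// 1

    /// Explicit opt-out, as the standalone Keeper test configs below do.
    std::istringstream xml2("<clickhouse><keeper_server><use_cluster>false</use_cluster></keeper_server></clickhouse>");
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config2(new Poco::Util::XMLConfiguration(xml2));
    std::cout << (config2->has("keeper_server") && config2->getBool("keeper_server.use_cluster", true)) << '\n';  /// 0
}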

View File

@@ -669,8 +669,8 @@ String getSequentialNodeName(const String & prefix, UInt64 number);
void validateZooKeeperConfig(const Poco::Util::AbstractConfiguration & config);
bool hasZooKeeperConfig(const Poco::Util::AbstractConfiguration & config, bool allow_keeper_server = true);
bool hasZooKeeperConfig(const Poco::Util::AbstractConfiguration & config);
String getZooKeeperConfigName(const Poco::Util::AbstractConfiguration & config, bool allow_keeper_server = true);
String getZooKeeperConfigName(const Poco::Util::AbstractConfiguration & config);
}

View File

@@ -19,8 +19,8 @@ namespace zkutil
ZooKeeperArgs::ZooKeeperArgs(const Poco::Util::AbstractConfiguration & config, const String & config_name)
{
if (endsWith(config_name, "keeper_server"))
initFromKeeperServerSection(config, config_name);
if (config_name == "keeper_server")
initFromKeeperServerSection(config);
else
initFromKeeperSection(config, config_name);
@@ -52,49 +52,79 @@ ZooKeeperArgs::ZooKeeperArgs(const String & hosts_string)
splitInto<','>(hosts, hosts_string);
}
void ZooKeeperArgs::initFromKeeperServerSection(const Poco::Util::AbstractConfiguration & config, const std::string & config_name)
void ZooKeeperArgs::initFromKeeperServerSection(const Poco::Util::AbstractConfiguration & config)
{
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_name, keys);
static constexpr std::string_view config_name = "keeper_server";
bool secure = false;
String tcp_port;
String tcp_port_secure;
for (const auto & key : keys)
if (auto key = std::string{config_name} + ".tcp_port_secure";
config.has(key))
{
if (key == "tcp_port_secure")
{
secure = true;
tcp_port_secure = config.getString(config_name + "." + key);
}
else if (key == "tcp_port")
{
tcp_port = config.getString(config_name + "." + key);
}
else if (key == "coordination_settings")
{
if (config.has(config_name + "." + key + ".operation_timeout_ms"))
operation_timeout_ms = config.getInt(config_name + "." + key + ".operation_timeout_ms");
if (config.has(config_name + "." + key + ".session_timeout_ms"))
session_timeout_ms = config.getInt(config_name + "." + key + ".session_timeout_ms");
}
auto tcp_port_secure = config.getString(key);
if (tcp_port_secure.empty())
throw KeeperException("Empty tcp_port_secure in config file", Coordination::Error::ZBADARGUMENTS);
}
if (secure && tcp_port_secure.empty())
throw KeeperException("No tcp_port_secure in config file", Coordination::Error::ZBADARGUMENTS);
if (!secure && tcp_port.empty())
throw KeeperException("No tcp_port in config file", Coordination::Error::ZBADARGUMENTS);
bool secure{false};
std::string tcp_port;
if (auto tcp_port_secure_key = std::string{config_name} + ".tcp_port_secure";
config.has(tcp_port_secure_key))
{
secure = true;
tcp_port = config.getString(tcp_port_secure_key);
}
else if (auto tcp_port_key = std::string{config_name} + ".tcp_port";
config.has(tcp_port_key))
{
tcp_port = config.getString(tcp_port_key);
}
config.keys(config_name + ".raft_configuration", keys);
if (tcp_port.empty())
throw KeeperException("No tcp_port or tcp_port_secure in config file", Coordination::Error::ZBADARGUMENTS);
if (auto coordination_key = std::string{config_name} + ".coordination_settings";
config.has(coordination_key))
{
if (auto operation_timeout_key = coordination_key + ".operation_timeout_ms";
config.has(operation_timeout_key))
operation_timeout_ms = config.getInt(operation_timeout_key);
if (auto session_timeout_key = coordination_key + ".session_timeout_ms";
config.has(session_timeout_key))
session_timeout_ms = config.getInt(session_timeout_key);
}
Poco::Util::AbstractConfiguration::Keys keys;
std::string raft_configuration_key = std::string{config_name} + ".raft_configuration";
config.keys(raft_configuration_key, keys);
for (const auto & key : keys)
{
if (startsWith(key, "server"))
{
hosts.push_back(
(secure ? "secure://" : "") + config.getString(config_name + ".raft_configuration." + key + ".hostname") + ":"
+ (secure ? tcp_port_secure : tcp_port));
(secure ? "secure://" : "") + config.getString(raft_configuration_key + "." + key + ".hostname") + ":" + tcp_port);
}
static constexpr std::array load_balancing_keys
{
".zookeeper_load_balancing",
".keeper_load_balancing"
};
for (const auto * load_balancing_key : load_balancing_keys)
{
if (auto load_balancing_config = std::string{config_name} + load_balancing_key;
config.has(load_balancing_config))
{
String load_balancing_str = config.getString(load_balancing_config);
/// Use magic_enum to avoid dependency from dbms (`SettingFieldLoadBalancingTraits::fromString(...)`)
auto load_balancing = magic_enum::enum_cast<DB::LoadBalancing>(Poco::toUpper(load_balancing_str));
if (!load_balancing)
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Unknown load balancing: {}", load_balancing_str);
get_priority_load_balancing.load_balancing = *load_balancing;
break;
}
}
}
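
The rewritten initFromKeeperServerSection replaces the old key loop with C++17 if-statements-with-initializer, scoping each probed key to its own branch. A minimal sketch of the idiom; the Config type here is a hypothetical stand-in for Poco's AbstractConfiguration:

#include <iostream>
#include <map>
#include <string>

/// Hypothetical stand-in for Poco::Util::AbstractConfiguration.
struct Config
{
    std::map<std::string, std::string> values;
    bool has(const std::string & key) const { return values.count(key) > 0; }
    std::string getString(const std::string & key) const { return values.at(key); }
};

int main()
{
    Config config{{{"keeper_server.tcp_port", "9181"}}};

    bool secure = false;
    std::string tcp_port;

    /// The initializer's variable is visible only within its if/else chain,
    /// so each key name lives exactly where it is checked.
    if (auto secure_key = std::string{"keeper_server.tcp_port_secure"}; config.has(secure_key))
    {
        secure = true;
        tcp_port = config.getString(secure_key);
    }
    else if (auto plain_key = std::string{"keeper_server.tcp_port"}; config.has(plain_key))
    {
        tcp_port = config.getString(plain_key);
    }

    std::cout << (secure ? "secure://" : "") << "host:" << tcp_port << '\n';  /// host:9181
}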
void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguration & config, const std::string & config_name)
@@ -144,7 +174,7 @@ void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguratio
{
implementation = config.getString(config_name + "." + key);
}
else if (key == "zookeeper_load_balancing")
else if (key == "zookeeper_load_balancing" || key == "keeper_load_balancing")
{
String load_balancing_str = config.getString(config_name + "." + key);
/// Use magic_enum to avoid dependency from dbms (`SettingFieldLoadBalancingTraits::fromString(...)`)
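
keeper_load_balancing is now accepted as an alias for zookeeper_load_balancing in both code paths, and the value is parsed with magic_enum::enum_cast, which maps a string to an enumerator reflectively and yields an empty std::optional on no match. A standalone sketch; the enumerator list is an abbreviated stand-in for DB::LoadBalancing:

#include <magic_enum.hpp>
#include <iostream>
#include <optional>

/// Abbreviated stand-in for DB::LoadBalancing (for illustration only).
enum class LoadBalancing { RANDOM, NEAREST_HOSTNAME, IN_ORDER, FIRST_OR_RANDOM, ROUND_ROBIN };

int main()
{
    /// The diff uppercases the config value first (Poco::toUpper), because
    /// enum_cast matches enumerator names exactly by default.
    std::optional<LoadBalancing> ok = magic_enum::enum_cast<LoadBalancing>("RANDOM");
    std::optional<LoadBalancing> bad = magic_enum::enum_cast<LoadBalancing>("nearest-hostname");

    std::cout << ok.has_value() << ' ' << bad.has_value() << '\n';  /// 1 0
    /// The empty optional is what triggers the BAD_ARGUMENTS exception above.
}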

View File

@@ -34,7 +34,7 @@ struct ZooKeeperArgs
DB::GetPriorityForLoadBalancing get_priority_load_balancing;
private:
void initFromKeeperServerSection(const Poco::Util::AbstractConfiguration & config, const std::string & config_name);
void initFromKeeperServerSection(const Poco::Util::AbstractConfiguration & config);
void initFromKeeperSection(const Poco::Util::AbstractConfiguration & config, const std::string & config_name);
};

View File

@@ -2624,7 +2624,7 @@ void Context::reloadAuxiliaryZooKeepersConfigIfChanged(const ConfigurationPtr &
bool Context::hasZooKeeper() const
{
return getConfigRef().has("zookeeper") || getConfigRef().has("keeper") || getConfigRef().has("keeper_server");
return zkutil::hasZooKeeperConfig(getConfigRef());
}
bool Context::hasAuxiliaryZooKeeper(const String & name) const

View File

@@ -1,5 +1,5 @@
<clickhouse>
<keeper>
<zookeeper>
<node index="1">
<host>zoo1</host>
<port>2281</port>
@@ -13,5 +13,5 @@
<port>2281</port>
</node>
<session_timeout_ms>15000</session_timeout_ms>
</keeper>
</zookeeper>
</clickhouse>

View File

@@ -28,6 +28,14 @@
<start_as_follower>true</start_as_follower>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>3</priority>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@@ -28,6 +28,14 @@
<start_as_follower>true</start_as_follower>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>3</priority>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@@ -0,0 +1,41 @@
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>3</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<snapshot_distance>75</snapshot_distance>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>3</priority>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@@ -1,16 +1,16 @@
<clickhouse>
<keeper>
<node index="1">
<host>zoo1</host>
<port>2181</port>
<host>node1</host>
<port>9181</port>
</node>
<node index="2">
<host>zoo2</host>
<port>2181</port>
<host>node2</host>
<port>9181</port>
</node>
<node index="3">
<host>zoo3</host>
<port>2181</port>
<host>node3</host>
<port>9181</port>
</node>
<session_timeout_ms>3000</session_timeout_ms>
</keeper>

View File

@@ -6,12 +6,14 @@
<host>node1</host>
<port>9000</port>
</replica>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
</remote_servers>

View File

@@ -1,16 +1,16 @@
<clickhouse>
<zookeeper>
<node index="1">
<host>zoo1</host>
<port>2181</port>
<host>node1</host>
<port>9181</port>
</node>
<node index="2">
<host>zoo2</host>
<port>2181</port>
<host>node2</host>
<port>9181</port>
</node>
<node index="3">
<host>zoo3</host>
<port>2181</port>
<host>node3</host>
<port>9181</port>
</node>
<session_timeout_ms>3000</session_timeout_ms>
</zookeeper>

View File

@@ -1,18 +0,0 @@
<clickhouse>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
</remote_servers>
</clickhouse>

View File

@@ -1,12 +0,0 @@
<clickhouse>
<keeper>
<node index="1">
<host>node1</host>
<port>9181</port>
</node>
<node index="2">
<host>node2</host>
<port>9181</port>
</node>
</keeper>
</clickhouse>

View File

@@ -8,18 +8,30 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
main_configs=["configs/remote_servers.xml", "configs/keeper_config.xml"],
main_configs=[
"configs/remote_servers.xml",
"configs/keeper_config.xml",
"configs/enable_keeper1.xml",
],
macros={"replica": "node1"},
)
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
main_configs=["configs/remote_servers.xml", "configs/zookeeper_config.xml"],
main_configs=[
"configs/remote_servers.xml",
"configs/zookeeper_config.xml",
"configs/enable_keeper2.xml",
],
macros={"replica": "node2"},
)
node3 = cluster.add_instance(
"node3",
main_configs=["configs/remote_servers.xml", "configs/enable_keeper3.xml"],
macros={"replica": "node3"},
)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
@@ -45,10 +57,9 @@ def test_create_insert(started_cluster):
node1.query("INSERT INTO tbl VALUES (1, 'str1')")
node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication
node2.query("INSERT INTO tbl VALUES (2, 'str2')")
node3.query("INSERT INTO tbl VALUES (2, 'str2')")
expected = [[1, "str1"], [2, "str2"]]
assert node1.query("SELECT * FROM tbl ORDER BY id") == TSV(expected)
assert node2.query("SELECT * FROM tbl ORDER BY id") == TSV(expected)
assert node1.query("CHECK TABLE tbl") == "1\n"
assert node2.query("CHECK TABLE tbl") == "1\n"
for node in [node1, node2, node3]:
expected = [[1, "str1"], [2, "str2"]]
assert node.query("SELECT * FROM tbl ORDER BY id") == TSV(expected)
assert node.query("CHECK TABLE tbl") == "1\n"

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
main_configs=[
"configs_keeper_server/remote_servers.xml",
"configs_keeper_server/enable_keeper1.xml",
"configs_keeper_server/use_keeper.xml",
],
macros={"replica": "node1"},
)
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
main_configs=[
"configs_keeper_server/remote_servers.xml",
"configs_keeper_server/enable_keeper2.xml",
],
macros={"replica": "node2"},
)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_create_insert(started_cluster):
node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'test_cluster' NO DELAY")
node1.query(
"""
CREATE TABLE tbl ON CLUSTER 'test_cluster' (
id Int64,
str String
) ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl/', '{replica}')
ORDER BY id
"""
)
node1.query("INSERT INTO tbl VALUES (1, 'str1')")
node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication
node2.query("INSERT INTO tbl VALUES (2, 'str2')")
expected = [[1, "str1"], [2, "str2"]]
assert node1.query("SELECT * FROM tbl ORDER BY id") == TSV(expected)
assert node2.query("SELECT * FROM tbl ORDER BY id") == TSV(expected)
assert node1.query("CHECK TABLE tbl") == "1\n"
assert node2.query("CHECK TABLE tbl") == "1\n"

View File

@@ -1,5 +1,7 @@
<yandex>
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
@@ -39,4 +41,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,5 +1,7 @@
<yandex>
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>2</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
@@ -39,4 +41,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,5 +1,7 @@
<yandex>
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>3</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
@@ -39,4 +41,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,4 +1,4 @@
<yandex>
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
@@ -21,4 +21,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,4 +1,4 @@
<yandex>
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>3</server_id>
@@ -21,4 +21,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,4 +1,4 @@
<yandex>
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>2</server_id>
@@ -20,4 +20,4 @@
</server>
</raft_configuration>
</keeper_server>
</yandex>
</clickhouse>

View File

@@ -1,14 +1,7 @@
import socket
import pytest
from helpers.cluster import ClickHouseCluster
import helpers.keeper_utils as keeper_utils
import random
import string
import os
import time
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
from io import StringIO
import csv
import re
@@ -23,7 +16,7 @@ node3 = cluster.add_instance(
"node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True
)
from kazoo.client import KazooClient, KazooState
from kazoo.client import KazooClient
def wait_nodes():

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>2</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>3</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>2</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>

View File

@@ -1,5 +1,7 @@
<clickhouse>
<keeper_server>
<use_cluster>false</use_cluster>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>

View File

@@ -1,5 +1,5 @@
<clickhouse>
<keeper>
<zookeeper>
<node index="1">
<host>zoo1</host>
<port>2181</port>
@@ -14,5 +14,5 @@
</node>
<session_timeout_ms>3000</session_timeout_ms>
<root>/root_a</root>
</keeper>
</zookeeper>
</clickhouse>