This commit is contained in:
kssenii 2021-04-01 12:09:44 +00:00
parent 2c70183652
commit 8300a74f5a
2 changed files with 6 additions and 5 deletions


@@ -46,6 +46,7 @@ StorageExternalDistributed::StorageExternalDistributed(
size_t max_addresses = context.getSettingsRef().storage_external_distributed_max_addresses;
std::vector<String> shards_descriptions = parseRemoteDescription(cluster_description, 0, cluster_description.size(), ',', max_addresses);
+ std::vector<std::pair<std::string, UInt16>> addresses;
/// For each shard, pass the replicas description into the storage; replicas are managed by the storage's PoolWithFailover.
for (const auto & shard_description : shards_descriptions)
@@ -57,7 +58,7 @@ StorageExternalDistributed::StorageExternalDistributed(
#if USE_MYSQL
case ExternalStorageEngine::MySQL:
{
- auto addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 3306);
+ addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 3306);
mysqlxx::PoolWithFailover pool(
remote_database,
@@ -80,7 +81,7 @@ StorageExternalDistributed::StorageExternalDistributed(
case ExternalStorageEngine::PostgreSQL:
{
- auto addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 5432);
+ addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 5432);
postgres::PoolWithFailover pool(
remote_database,

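What the C++ hunks above amount to: each engine branch previously declared its own local vector with `auto addresses = ...`, while the commit hoists a single `std::vector<std::pair<std::string, UInt16>> addresses` declaration out of the per-shard branches and has every branch assign into it, using the engine-specific default port (3306 for MySQL, 5432 for PostgreSQL). Below is a minimal, self-contained sketch of that pattern, not ClickHouse code: `parseAddresses` and the `'|'`-separated replica format are stand-ins for `parseRemoteDescriptionForExternalDatabase` and its input.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

enum class Engine { MySQL, PostgreSQL };

/// Stand-in for parseRemoteDescriptionForExternalDatabase: splits a shard
/// description of the form "host[:port]|host[:port]|..." and falls back to
/// the engine's default port when no port is given.
static std::vector<std::pair<std::string, uint16_t>>
parseAddresses(const std::string & shard_description, uint16_t default_port)
{
    std::vector<std::pair<std::string, uint16_t>> result;
    std::istringstream in(shard_description);
    std::string replica;
    while (std::getline(in, replica, '|'))
    {
        auto colon = replica.rfind(':');
        if (colon == std::string::npos)
            result.emplace_back(replica, default_port);
        else
            result.emplace_back(replica.substr(0, colon),
                                static_cast<uint16_t>(std::stoi(replica.substr(colon + 1))));
    }
    return result;
}

int main()
{
    Engine engine = Engine::PostgreSQL;
    std::string shard_description = "host1|host2:5441";

    /// Declared once, outside the branches -- each branch only assigns to it,
    /// mirroring the change in the diff above.
    std::vector<std::pair<std::string, uint16_t>> addresses;

    switch (engine)
    {
        case Engine::MySQL:
            addresses = parseAddresses(shard_description, 3306);
            break;
        case Engine::PostgreSQL:
            addresses = parseAddresses(shard_description, 5432);
            break;
    }

    for (const auto & [host, port] : addresses)
        std::cout << host << ':' << port << '\n';   // prints host1:5432 and host2:5441
}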

@@ -9,7 +9,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=[
'configs/config.xml',
'configs/dictionaries/postgres_dict.xml',
- 'configs/log_conf.xml'], with_postgres=True)
+ 'configs/log_conf.xml'], with_postgres=True, with_postgres_cluster=True)
postgres_dict_table_template = """
CREATE TABLE IF NOT EXISTS {} (
@@ -62,7 +62,7 @@ def started_cluster():
print("postgres1 connected")
create_postgres_db(postgres_conn, 'clickhouse')
- postgres_conn = get_postgres_conn(port=5441)
+ postgres_conn = get_postgres_conn(port=5421)
print("postgres2 connected")
create_postgres_db(postgres_conn, 'clickhouse')
@@ -131,7 +131,7 @@ def test_invalidate_query(started_cluster):
def test_dictionary_with_replicas(started_cluster):
conn1 = get_postgres_conn(port=5432, database=True)
cursor1 = conn1.cursor()
- conn2 = get_postgres_conn(port=5441, database=True)
+ conn2 = get_postgres_conn(port=5421, database=True)
cursor2 = conn2.cursor()
create_postgres_table(cursor1, 'test1')