From 8300a74f5a456e96d54b66953ae09da5a0822df6 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Thu, 1 Apr 2021 12:09:44 +0000
Subject: [PATCH] Fix

---
 src/Storages/StorageExternalDistributed.cpp             | 5 +++--
 tests/integration/test_dictionaries_postgresql/test.py  | 6 +++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp
index e676b9ea68c..0f84724b343 100644
--- a/src/Storages/StorageExternalDistributed.cpp
+++ b/src/Storages/StorageExternalDistributed.cpp
@@ -46,6 +46,7 @@ StorageExternalDistributed::StorageExternalDistributed(
     size_t max_addresses = context.getSettingsRef().storage_external_distributed_max_addresses;
     std::vector<String> shards_descriptions = parseRemoteDescription(cluster_description, 0, cluster_description.size(), ',', max_addresses);
+    std::vector<std::pair<String, UInt16>> addresses;
 
     /// For each shard pass replicas description into storage, replicas are managed by storage's PoolWithFailover.
     for (const auto & shard_description : shards_descriptions)
@@ -57,7 +58,7 @@ StorageExternalDistributed::StorageExternalDistributed(
 #if USE_MYSQL
             case ExternalStorageEngine::MySQL:
             {
-                auto addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 3306);
+                addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 3306);
 
                 mysqlxx::PoolWithFailover pool(
                     remote_database,
@@ -80,7 +81,7 @@
             case ExternalStorageEngine::PostgreSQL:
             {
-                auto addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 5432);
+                addresses = parseRemoteDescriptionForExternalDatabase(shard_description, max_addresses, 5432);
 
                 postgres::PoolWithFailover pool(
                     remote_database,
diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py
index 10d9f4213e1..5b3b5a5aa45 100644
--- a/tests/integration/test_dictionaries_postgresql/test.py
+++ b/tests/integration/test_dictionaries_postgresql/test.py
@@ -9,7 +9,7 @@ cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance('node1', main_configs=[
         'configs/config.xml',
         'configs/dictionaries/postgres_dict.xml',
-        'configs/log_conf.xml'], with_postgres=True)
+        'configs/log_conf.xml'], with_postgres=True, with_postgres_cluster=True)
 
 postgres_dict_table_template = """
     CREATE TABLE IF NOT EXISTS {} (
@@ -62,7 +62,7 @@ def started_cluster():
         print("postgres1 connected")
         create_postgres_db(postgres_conn, 'clickhouse')
 
-        postgres_conn = get_postgres_conn(port=5441)
+        postgres_conn = get_postgres_conn(port=5421)
         print("postgres2 connected")
         create_postgres_db(postgres_conn, 'clickhouse')
 
@@ -131,7 +131,7 @@ def test_invalidate_query(started_cluster):
 def test_dictionary_with_replicas(started_cluster):
     conn1 = get_postgres_conn(port=5432, database=True)
     cursor1 = conn1.cursor()
-    conn2 = get_postgres_conn(port=5441, database=True)
+    conn2 = get_postgres_conn(port=5421, database=True)
     cursor2 = conn2.cursor()
 
     create_postgres_table(cursor1, 'test1')