import pytest
import time
import psycopg2
import os.path as p
import random
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.test_tools import TSV
from random import randrange
import threading

from helpers.postgres_utility import get_postgres_conn
from helpers.postgres_utility import PostgresManager

from helpers.postgres_utility import create_replication_slot, drop_replication_slot
from helpers.postgres_utility import create_postgres_schema, drop_postgres_schema
from helpers.postgres_utility import create_postgres_table, drop_postgres_table
from helpers.postgres_utility import (
    create_postgres_table_with_schema,
    drop_postgres_table_with_schema,
)

from helpers.postgres_utility import check_tables_are_synchronized
from helpers.postgres_utility import check_several_tables_are_synchronized
from helpers.postgres_utility import assert_nested_table_is_created
from helpers.postgres_utility import assert_number_of_columns

from helpers.postgres_utility import (
    postgres_table_template,
    postgres_table_template_2,
    postgres_table_template_3,
    postgres_table_template_4,
    postgres_table_template_5,
)
from helpers.postgres_utility import queries

cluster = ClickHouseCluster(__file__)

instance = cluster.add_instance(
    "instance",
    main_configs=["configs/log_conf.xml"],
    user_configs=["configs/users.xml"],
    with_postgres=True,
    stay_alive=True,
)

instance2 = cluster.add_instance(
    "instance2",
    main_configs=["configs/log_conf.xml", "configs/merge_tree_too_many_parts.xml"],
    user_configs=["configs/users.xml"],
    with_postgres=True,
    stay_alive=True,
)


pg_manager = PostgresManager()
pg_manager2 = PostgresManager()
pg_manager_instance2 = PostgresManager()


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        pg_manager.init(
            instance,
            cluster.postgres_ip,
            cluster.postgres_port,
            default_database="postgres_database",
        )
        pg_manager_instance2.init(
            instance2,
            cluster.postgres_ip,
            cluster.postgres_port,
            default_database="postgres_database",
            postgres_db_exists=True,
        )
        pg_manager2.init(
            instance2, cluster.postgres_ip, cluster.postgres_port, "postgres_database2"
        )

        yield cluster

    finally:
        cluster.shutdown()


@pytest.fixture(autouse=True)
def setup_teardown():
    print("PostgreSQL is available - running test")
    yield  # run test
    pg_manager.restart()
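

# New PostgreSQL tables created after the MaterializedPostgreSQL database exists are not
# replicated automatically: they have to be added with ATTACH TABLE. This test checks the
# attach flow (including after a ClickHouse restart), the resulting
# materialized_postgresql_tables_list, and that the list cannot be changed via ALTER DATABASE.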
def test_add_new_table_to_replication(started_cluster):
    NUM_TABLES = 5

    pg_manager.create_and_fill_postgres_tables(NUM_TABLES, 10000)

    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip, port=started_cluster.postgres_port
    )
    check_several_tables_are_synchronized(instance, NUM_TABLES)

    result = instance.query("SHOW TABLES FROM test_database")
    assert (
        result
        == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n"
    )

    table_name = "postgresql_replica_5"
    pg_manager.create_and_fill_postgres_table(table_name)

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )  # Check without ip
    assert result[-51:] == "\\'postgres_database\\', \\'postgres\\', \\'[HIDDEN]\\')\n"

    result = instance.query_and_get_error(
        "ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables_list='tabl1'"
    )
    assert (
        "Changing setting `materialized_postgresql_tables_list` is not allowed"
        in result
    )

    result = instance.query_and_get_error(
        "ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables='tabl1'"
    )
    assert "Database engine MaterializedPostgreSQL does not support setting" in result

    instance.query(f"ATTACH TABLE test_database.{table_name}")

    result = instance.query("SHOW TABLES FROM test_database")
    assert (
        result
        == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\n"
    )

    check_tables_are_synchronized(instance, table_name)
    instance.query(
        f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)"
    )
    check_tables_are_synchronized(instance, table_name)

    result = instance.query_and_get_error(f"ATTACH TABLE test_database.{table_name}")
    assert "Table test_database.postgresql_replica_5 already exists" in result

    result = instance.query_and_get_error("ATTACH TABLE test_database.unknown_table")
    assert "PostgreSQL table unknown_table does not exist" in result

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert (
        result[-180:]
        == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5\\'\n"
    )

    table_name = "postgresql_replica_6"
    pg_manager.create_postgres_table(table_name)
    instance.query(
        "INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(
            table_name
        )
    )
    instance.query(f"ATTACH TABLE test_database.{table_name}")

    instance.restart_clickhouse()

    table_name = "postgresql_replica_7"
    pg_manager.create_postgres_table(table_name)
    instance.query(
        "INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(
            table_name
        )
    )
    instance.query(f"ATTACH TABLE test_database.{table_name}")

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert (
        result[-222:]
        == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5,postgresql_replica_6,postgresql_replica_7\\'\n"
    )

    instance.query(
        f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)"
    )

    result = instance.query("SHOW TABLES FROM test_database")
    assert (
        result
        == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\npostgresql_replica_6\npostgresql_replica_7\n"
    )
    check_several_tables_are_synchronized(instance, NUM_TABLES + 3)
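

# DETACH TABLE ... PERMANENTLY removes a table from replication and from
# materialized_postgresql_tables_list; ATTACH TABLE brings it back. Detaching a table
# that was already dropped in PostgreSQL must also work.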
def test_remove_table_from_replication(started_cluster):
    NUM_TABLES = 5
    pg_manager.create_and_fill_postgres_tables(NUM_TABLES, 10000)
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip, port=started_cluster.postgres_port
    )
    check_several_tables_are_synchronized(instance, NUM_TABLES)

    result = instance.query("SHOW TABLES FROM test_database")
    assert (
        result
        == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n"
    )

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert result[-51:] == "\\'postgres_database\\', \\'postgres\\', \\'[HIDDEN]\\')\n"

    table_name = "postgresql_replica_4"
    instance.query(f"DETACH TABLE test_database.{table_name} PERMANENTLY")
    result = instance.query_and_get_error(f"SELECT * FROM test_database.{table_name}")
    assert "UNKNOWN_TABLE" in result

    result = instance.query("SHOW TABLES FROM test_database")
    assert (
        result
        == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\n"
    )

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert (
        result[-138:]
        == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3\\'\n"
    )

    instance.query(f"ATTACH TABLE test_database.{table_name}")
    check_tables_are_synchronized(instance, table_name)
    check_several_tables_are_synchronized(instance, NUM_TABLES)
    instance.query(
        f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)"
    )
    check_tables_are_synchronized(instance, table_name)

    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert (
        result[-159:]
        == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n"
    )

    table_name = "postgresql_replica_1"
    instance.query(f"DETACH TABLE test_database.{table_name} PERMANENTLY")
    result = instance.query("SHOW CREATE DATABASE test_database")
    assert (
        result[:63]
        == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL("
    )
    assert (
        result[-138:]
        == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n"
    )

    pg_manager.execute("drop table if exists postgresql_replica_0;")

    # Removing a table that no longer exists in PostgreSQL from replication must be ok.
    instance.query("DETACH TABLE test_database.postgresql_replica_0 PERMANENTLY")
    assert instance.contains_in_log(
        "from publication, because table does not exist in PostgreSQL"
    )
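

# The MaterializedPostgreSQL database can be created from a predefined connection
# configuration (`postgres1`) instead of explicit host/port/credentials.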
def test_predefined_connection_configuration(started_cluster):
    pg_manager.execute("DROP TABLE IF EXISTS test_table")
    pg_manager.execute(
        "CREATE TABLE test_table (key integer PRIMARY KEY, value integer)"
    )
    pg_manager.execute("INSERT INTO test_table SELECT 1, 2")
    instance.query(
        "CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1) SETTINGS materialized_postgresql_tables_list='test_table'"
    )
    check_tables_are_synchronized(instance, "test_table")
    pg_manager.drop_materialized_db()


insert_counter = 0
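

# Replication of tables that live in a single non-default PostgreSQL schema, selected via
# the materialized_postgresql_schema setting. An ALTER TABLE on the source side makes the
# table skipped from the replication stream until it is detached and re-attached.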
def test_database_with_single_non_default_schema(started_cluster):
    cursor = pg_manager.get_db_cursor()
    NUM_TABLES = 5
    schema_name = "test_schema"
    materialized_db = "test_database"
    clickhouse_postgres_db = "postgres_database_with_schema"
    global insert_counter
    insert_counter = 0

    def insert_into_tables():
        global insert_counter
        clickhouse_postgres_db = "postgres_database_with_schema"
        for i in range(NUM_TABLES):
            table_name = f"postgresql_replica_{i}"
            instance.query(
                f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)"
            )
        insert_counter += 1

    def assert_show_tables(expected):
        result = instance.query("SHOW TABLES FROM test_database")
        assert result == expected
        print("assert show tables Ok")

    def check_all_tables_are_synchronized():
        for i in range(NUM_TABLES):
            print("checking table", i)
            check_tables_are_synchronized(
                instance,
                f"postgresql_replica_{i}",
                postgres_database=clickhouse_postgres_db,
            )
        print("synchronization Ok")

    create_postgres_schema(cursor, schema_name)
    pg_manager.create_clickhouse_postgres_db(
        database_name=clickhouse_postgres_db,
        schema_name=schema_name,
        postgres_database="postgres_database",
    )

    for i in range(NUM_TABLES):
        create_postgres_table_with_schema(
            cursor, schema_name, f"postgresql_replica_{i}"
        )

    insert_into_tables()
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[
            f"materialized_postgresql_schema = '{schema_name}'",
        ],
    )

    insert_into_tables()
    check_all_tables_are_synchronized()
    assert_show_tables(
        "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n"
    )

    instance.restart_clickhouse()
    check_all_tables_are_synchronized()
    assert_show_tables(
        "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n"
    )

    insert_into_tables()
    check_all_tables_are_synchronized()

    altered_table = random.randint(0, NUM_TABLES - 1)
    pg_manager.execute(
        "ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format(
            altered_table
        )
    )

    instance.query(
        f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)"
    )

    assert instance.wait_for_log_line(
        f"Table postgresql_replica_{altered_table} is skipped from replication stream"
    )

    instance.query(
        f"DETACH TABLE test_database.postgresql_replica_{altered_table} PERMANENTLY"
    )
    assert not instance.contains_in_log(
        "from publication, because table does not exist in PostgreSQL"
    )

    instance.query(f"ATTACH TABLE test_database.postgresql_replica_{altered_table}")
    check_tables_are_synchronized(
        instance,
        f"postgresql_replica_{altered_table}",
        postgres_database=clickhouse_postgres_db,
    )
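

# Same non-default schema scenario, but the tables are enumerated explicitly as
# 'schema.table' in materialized_postgresql_tables_list together with
# materialized_postgresql_tables_list_with_schema=1, so materialized tables are named
# `test_schema.postgresql_replica_N`.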
def test_database_with_multiple_non_default_schemas_1(started_cluster):
    cursor = pg_manager.get_db_cursor()

    NUM_TABLES = 5
    schema_name = "test_schema"
    clickhouse_postgres_db = "postgres_database_with_schema"
    materialized_db = "test_database"
    publication_tables = ""
    global insert_counter
    insert_counter = 0

    def insert_into_tables():
        global insert_counter
        clickhouse_postgres_db = "postgres_database_with_schema"
        for i in range(NUM_TABLES):
            table_name = f"postgresql_replica_{i}"
            instance.query(
                f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)"
            )
        insert_counter += 1

    def assert_show_tables(expected):
        result = instance.query("SHOW TABLES FROM test_database")
        assert result == expected
        print("assert show tables Ok")

    def check_all_tables_are_synchronized():
        for i in range(NUM_TABLES):
            print("checking table", i)
            check_tables_are_synchronized(
                instance,
                "postgresql_replica_{}".format(i),
                schema_name=schema_name,
                postgres_database=clickhouse_postgres_db,
            )
        print("synchronization Ok")

    create_postgres_schema(cursor, schema_name)
    pg_manager.create_clickhouse_postgres_db(
        database_name=clickhouse_postgres_db,
        schema_name=schema_name,
        postgres_database="postgres_database",
    )

    for i in range(NUM_TABLES):
        table_name = "postgresql_replica_{}".format(i)
        create_postgres_table_with_schema(cursor, schema_name, table_name)
        if publication_tables != "":
            publication_tables += ","
        publication_tables += schema_name + "." + table_name

    insert_into_tables()
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[
            f"materialized_postgresql_tables_list = '{publication_tables}'",
            "materialized_postgresql_tables_list_with_schema=1",
        ],
    )

    check_all_tables_are_synchronized()
    assert_show_tables(
        "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n"
    )

    instance.restart_clickhouse()
    check_all_tables_are_synchronized()
    assert_show_tables(
        "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n"
    )

    insert_into_tables()
    check_all_tables_are_synchronized()

    altered_table = random.randint(0, NUM_TABLES - 1)
    pg_manager.execute(
        "ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format(
            altered_table
        )
    )

    instance.query(
        f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)"
    )

    assert instance.wait_for_log_line(
        f"Table test_schema.postgresql_replica_{altered_table} is skipped from replication stream"
    )

    altered_materialized_table = (
        f"{materialized_db}.`test_schema.postgresql_replica_{altered_table}`"
    )
    instance.query(f"DETACH TABLE {altered_materialized_table} PERMANENTLY")
    assert not instance.contains_in_log(
        "from publication, because table does not exist in PostgreSQL"
    )

    instance.query(f"ATTACH TABLE {altered_materialized_table}")
    assert_show_tables(
        "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n"
    )
    check_tables_are_synchronized(
        instance,
        f"postgresql_replica_{altered_table}",
        schema_name=schema_name,
        postgres_database=clickhouse_postgres_db,
    )
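

# Replication of tables from several schemas at once via materialized_postgresql_schema_list;
# materialized tables are named `schemaN.postgresql_replica_M`, and an ALTER on one source
# table is again handled by detaching and re-attaching only the affected table.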
def test_database_with_multiple_non_default_schemas_2(started_cluster):
    cursor = pg_manager.get_db_cursor()
    NUM_TABLES = 2
    schemas_num = 2
    schema_list = "schema0, schema1"
    materialized_db = "test_database"
    global insert_counter
    insert_counter = 0

    def check_all_tables_are_synchronized():
        for i in range(schemas_num):
            schema_name = f"schema{i}"
            clickhouse_postgres_db = f"clickhouse_postgres_db{i}"
            for ti in range(NUM_TABLES):
                table_name = f"postgresql_replica_{ti}"
                print(f"checking table {schema_name}.{table_name}")
                check_tables_are_synchronized(
                    instance,
                    f"{table_name}",
                    schema_name=schema_name,
                    postgres_database=clickhouse_postgres_db,
                )
        print("synchronized Ok")

    def insert_into_tables():
        global insert_counter
        for i in range(schemas_num):
            clickhouse_postgres_db = f"clickhouse_postgres_db{i}"
            for ti in range(NUM_TABLES):
                table_name = f"postgresql_replica_{ti}"
                instance.query(
                    f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)"
                )
        insert_counter += 1

    def assert_show_tables(expected):
        result = instance.query("SHOW TABLES FROM test_database")
        assert result == expected
        print("assert show tables Ok")

    for i in range(schemas_num):
        schema_name = f"schema{i}"
        clickhouse_postgres_db = f"clickhouse_postgres_db{i}"
        create_postgres_schema(cursor, schema_name)
        pg_manager.create_clickhouse_postgres_db(
            database_name=clickhouse_postgres_db,
            schema_name=schema_name,
            postgres_database="postgres_database",
        )
        for ti in range(NUM_TABLES):
            table_name = f"postgresql_replica_{ti}"
            create_postgres_table_with_schema(cursor, schema_name, table_name)

    insert_into_tables()
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[
            f"materialized_postgresql_schema_list = '{schema_list}'",
        ],
    )

    check_all_tables_are_synchronized()
    insert_into_tables()
    assert_show_tables(
        "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n"
    )

    instance.restart_clickhouse()
    assert_show_tables(
        "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n"
    )
    check_all_tables_are_synchronized()
    insert_into_tables()
    check_all_tables_are_synchronized()

    print("ALTER")
    altered_schema = random.randint(0, schemas_num - 1)
    altered_table = random.randint(0, NUM_TABLES - 1)
    clickhouse_postgres_db = f"clickhouse_postgres_db{altered_schema}"
    pg_manager.execute(
        f"ALTER TABLE schema{altered_schema}.postgresql_replica_{altered_table} ADD COLUMN value2 integer"
    )

    instance.query(
        f"INSERT INTO clickhouse_postgres_db{altered_schema}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(1000 * {insert_counter}, 1000)"
    )

    assert instance.wait_for_log_line(
        f"Table schema{altered_schema}.postgresql_replica_{altered_table} is skipped from replication stream"
    )

    altered_materialized_table = (
        f"{materialized_db}.`schema{altered_schema}.postgresql_replica_{altered_table}`"
    )
    instance.query(f"DETACH TABLE {altered_materialized_table} PERMANENTLY")
    assert not instance.contains_in_log(
        "from publication, because table does not exist in PostgreSQL"
    )

    instance.query(f"ATTACH TABLE {altered_materialized_table}")
    assert_show_tables(
        "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n"
    )
    check_tables_are_synchronized(
        instance,
        f"postgresql_replica_{altered_table}",
        schema_name=f"schema{altered_schema}",
        postgres_database=clickhouse_postgres_db,
    )
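

# TABLE OVERRIDE on the MaterializedPostgreSQL database changes the column types and adds
# PARTITION BY to the nested ReplacingMergeTree table; the data must still match the source.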
def test_table_override(started_cluster):
    table_name = "table_override"
    materialized_database = "test_database"
    pg_manager.create_postgres_table(table_name, template=postgres_table_template_5)
    instance.query(
        f"create table {table_name}(key Int32, value UUID) engine = PostgreSQL (postgres1, table={table_name})"
    )
    instance.query(
        f"insert into {table_name} select number, generateUUIDv4() from numbers(10)"
    )
    table_overrides = f" TABLE OVERRIDE {table_name} (COLUMNS (key Int32, value UUID) PARTITION BY key)"
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[f"materialized_postgresql_tables_list = '{table_name}'"],
        table_overrides=table_overrides,
    )
    assert_nested_table_is_created(instance, table_name, materialized_database)
    result = instance.query(f"show create table {materialized_database}.{table_name}")
    print(result)
    expected = "CREATE TABLE test_database.table_override\\n(\\n    `key` Int32,\\n    `value` UUID,\\n    `_sign` Int8() MATERIALIZED 1,\\n    `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)"
    assert result.strip() == expected
    time.sleep(5)
    query = f"select * from {materialized_database}.{table_name} order by key"
    expected = instance.query(f"select * from {table_name} order by key")
    instance.query(f"drop table {table_name} sync")
    assert_eq_with_retry(instance, query, expected)
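

# A ClickHouse materialized view built on top of a replicated table keeps receiving the
# rows that arrive through replication after the view is created.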
def test_materialized_view(started_cluster):
    pg_manager.execute("DROP TABLE IF EXISTS test_table")
    pg_manager.execute(
        "CREATE TABLE test_table (key integer PRIMARY KEY, value integer)"
    )
    pg_manager.execute("INSERT INTO test_table SELECT 1, 2")
    instance.query("DROP DATABASE IF EXISTS test_database")
    instance.query(
        "CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1) SETTINGS materialized_postgresql_tables_list='test_table'"
    )
    check_tables_are_synchronized(instance, "test_table")
    instance.query("DROP TABLE IF EXISTS mv")
    instance.query(
        "CREATE MATERIALIZED VIEW mv ENGINE=MergeTree ORDER BY tuple() POPULATE AS SELECT * FROM test_database.test_table"
    )
    assert "1\t2" == instance.query("SELECT * FROM mv").strip()
    pg_manager.execute("INSERT INTO test_table SELECT 3, 4")
    check_tables_are_synchronized(instance, "test_table")
    assert "1\t2\n3\t4" == instance.query("SELECT * FROM mv ORDER BY 1, 2").strip()
    pg_manager.drop_materialized_db()
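

# instance2 runs with a very low parts limit (configs/merge_tree_too_many_parts.xml). With
# merges stopped, single-row inserts arriving through replication should eventually hit
# "DB::Exception: Too many parts"; replication must catch up once merges are started again.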
def test_too_many_parts(started_cluster):
    table = "test_table"
    pg_manager2.create_and_fill_postgres_table(table)
    pg_manager2.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[
            "materialized_postgresql_tables_list = 'test_table', materialized_postgresql_backoff_min_ms = 100, materialized_postgresql_backoff_max_ms = 100"
        ],
    )
    check_tables_are_synchronized(
        instance2, "test_table", postgres_database=pg_manager2.get_default_database()
    )
    assert (
        "50" == instance2.query("SELECT count() FROM test_database.test_table").strip()
    )

    instance2.query("SYSTEM STOP MERGES")
    num = 50
    for i in range(10):
        instance2.query(
            f"""
            INSERT INTO {pg_manager2.get_default_database()}.test_table SELECT {num}, {num};
            """
        )
        num = num + 1
        for i in range(30):
            if num == int(
                instance2.query("SELECT count() FROM test_database.test_table")
            ) or instance2.contains_in_log("DB::Exception: Too many parts"):
                break
            time.sleep(1)
            print(f"wait sync try {i}")

        instance2.query("SYSTEM FLUSH LOGS")

        if instance2.contains_in_log("DB::Exception: Too many parts"):
            break

        assert num == int(
            instance2.query("SELECT count() FROM test_database.test_table")
        ) or num - 1 == int(
            instance2.query("SELECT count() FROM test_database.test_table")
        )

    assert instance2.contains_in_log("DB::Exception: Too many parts")
    print(num)
    assert num == int(
        instance2.query("SELECT count() FROM test_database.test_table")
    ) or num - 1 == int(instance2.query("SELECT count() FROM test_database.test_table"))

    instance2.query("SYSTEM START MERGES")
    check_tables_are_synchronized(
        instance2, "test_table", postgres_database=pg_manager2.get_default_database()
    )
    # assert "200" == instance.query("SELECT count FROM test_database.test_table").strip()
    pg_manager2.drop_materialized_db()
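

# Replicate a row with a large (~30 KB) random text value, big enough to be stored
# out-of-line (TOASTed) by PostgreSQL, and check it arrives intact.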
def test_toast(started_cluster):
    table = "test_toast"
    pg_manager.create_postgres_table(
        table,
        "",
        """CREATE TABLE "{}" (id integer PRIMARY KEY, txt text, other text)""",
    )
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[
            f"materialized_postgresql_tables_list = '{table}'",
            "materialized_postgresql_backoff_min_ms = 100",
            "materialized_postgresql_backoff_max_ms = 100",
        ],
    )
    pg_manager.execute(
        f"""\
        INSERT INTO {table} (id, txt)\
        VALUES (1, (SELECT array_to_string(ARRAY(SELECT chr((100 + round(random() * 25))::integer) FROM generate_series(1, 30000) as t(i)), '')))
        """
    )
    check_tables_are_synchronized(
        instance,
        table,
        postgres_database=pg_manager.get_default_database(),
        order_by="id",
    )
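

# Two ClickHouse instances replicate the same PostgreSQL table at the same time, each with
# materialized_postgresql_use_unique_replication_consumer_identifier = 1, and both must
# stay in sync with the source.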
def test_replica_consumer(started_cluster):
    table = "test_replica_consumer"

    pg_manager_instance2.restart()

    pg_manager.create_postgres_table(table)
    instance.query(
        f"INSERT INTO postgres_database.{table} SELECT number, number from numbers(0, 50)"
    )

    for pm in [pg_manager, pg_manager_instance2]:
        pm.create_materialized_db(
            ip=started_cluster.postgres_ip,
            port=started_cluster.postgres_port,
            settings=[
                f"materialized_postgresql_tables_list = '{table}'",
                "materialized_postgresql_backoff_min_ms = 100",
                "materialized_postgresql_backoff_max_ms = 100",
                "materialized_postgresql_use_unique_replication_consumer_identifier = 1",
            ],
        )

    check_tables_are_synchronized(
        instance, table, postgres_database=pg_manager.get_default_database()
    )
    check_tables_are_synchronized(
        instance2, table, postgres_database=pg_manager_instance2.get_default_database()
    )

    assert 50 == int(instance.query(f"SELECT count() FROM test_database.{table}"))
    assert 50 == int(instance2.query(f"SELECT count() FROM test_database.{table}"))

    instance.query(
        f"INSERT INTO postgres_database.{table} SELECT number, number from numbers(1000, 1000)"
    )

    check_tables_are_synchronized(
        instance, table, postgres_database=pg_manager.get_default_database()
    )
    check_tables_are_synchronized(
        instance2, table, postgres_database=pg_manager_instance2.get_default_database()
    )

    assert 1050 == int(instance.query(f"SELECT count() FROM test_database.{table}"))
    assert 1050 == int(instance2.query(f"SELECT count() FROM test_database.{table}"))

    for pm in [pg_manager, pg_manager_instance2]:
        pm.drop_materialized_db()
    pg_manager_instance2.clear()


if __name__ == "__main__":
    cluster.start()
    input("Cluster created, press any key to destroy...")
    cluster.shutdown()