Merge pull request #11002 from ClickHouse/zookeeper_in_tmpfs_intergration_tests

Add ability to run zookeeper in integration tests with tmpfs
alesapin 2020-05-20 11:30:13 +03:00 committed by GitHub
commit 2673c985bd
17 changed files with 78 additions and 36 deletions
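
In practice, the change keeps ZooKeeper's /data and /datalog on tmpfs inside the zoo1..zoo3 containers by default, and a test can opt back into on-disk storage through the new zookeeper_use_tmpfs flag on add_instance. A minimal sketch of how a test would use it (the instance name here is a placeholder, not part of this PR):

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

# Default (zookeeper_use_tmpfs=True): ZooKeeper state lives on tmpfs and
# disappears with the containers. Passing False makes the helper bind-mount
# <instances_dir>/zkdata{1..3} and <instances_dir>/zklog{1..3} instead and
# remove them again at shutdown.
node = cluster.add_instance('node', with_zookeeper=True, zookeeper_use_tmpfs=False)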

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     hdfs1:
         image: sequenceiq/hadoop-docker:2.7.0

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     kafka_zookeeper:

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     minio1:

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     mongo1:
         image: mongo:3.6

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     mysql1:
         image: mysql:5.7

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 networks:
     default:
         driver: bridge

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     postgres1:
         image: postgres

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     redis1:
         image: redis

View File

@@ -1,25 +1,47 @@
-version: '2.2'
+version: '2.3'
 services:
     zoo1:
         image: zookeeper:3.4.12
         restart: always
         environment:
             ZOO_TICK_TIME: 500
-            ZOO_MY_ID: 1
             ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
+            ZOO_MY_ID: 1
+            JVMFLAGS: -Dzookeeper.forceSync=no
+        volumes:
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA1:-}
+              target: /data
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA_LOG1:-}
+              target: /datalog
     zoo2:
         image: zookeeper:3.4.12
         restart: always
         environment:
             ZOO_TICK_TIME: 500
-            ZOO_MY_ID: 2
             ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
+            ZOO_MY_ID: 2
+            JVMFLAGS: -Dzookeeper.forceSync=no
+        volumes:
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA2:-}
+              target: /data
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA_LOG2:-}
+              target: /datalog
     zoo3:
         image: zookeeper:3.4.12
         restart: always
         environment:
             ZOO_TICK_TIME: 500
-            ZOO_MY_ID: 3
             ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
+            ZOO_MY_ID: 3
+            JVMFLAGS: -Dzookeeper.forceSync=no
+        volumes:
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA3:-}
+              target: /data
+            - type: ${ZK_FS:-tmpfs}
+              source: ${ZK_DATA_LOG3:-}
+              target: /datalog
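
For reference, the ${ZK_FS:-tmpfs} and ${ZK_DATA*:-} placeholders rely on Compose variable substitution with defaults: with nothing exported, every volume is a tmpfs mount and the empty source values go unused; exporting ZK_FS=bind plus the six data/log paths turns them into bind mounts of host directories. The bump to version '2.3' across these compose files is presumably what permits the long-form volumes syntax used here. Roughly, the helper in the next file ends up invoking docker-compose with an environment like this sketch, where the concrete paths are only illustrative:

import os
import subprocess

env = os.environ.copy()
env['ZK_FS'] = 'bind'  # switch the volume type from the tmpfs default to bind
for i in range(1, 4):
    # Illustrative locations; the real helper derives them from its instances directory.
    data_dir = '/tmp/zk/zkdata%d' % i
    log_dir = '/tmp/zk/zklog%d' % i
    os.makedirs(data_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    env['ZK_DATA%d' % i] = data_dir
    env['ZK_DATA_LOG%d' % i] = log_dir
# docker-compose resolves the ${...} placeholders from this environment.
subprocess.check_call(['docker-compose', '--file', 'docker_compose_zookeeper.yml', 'up', '-d'], env=env)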

View File

@@ -133,6 +133,8 @@ class ClickHouseCluster:
         self.schema_registry_host = "schema-registry"
         self.schema_registry_port = 8081

+        self.zookeeper_use_tmpfs = True
+
         self.docker_client = None
         self.is_up = False
@@ -148,7 +150,7 @@
                      with_redis=False, with_minio=False,
                      hostname=None, env_variables=None, image="yandex/clickhouse-integration-test",
                      stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None,
-                     zookeeper_docker_compose_path=None):
+                     zookeeper_docker_compose_path=None, zookeeper_use_tmpfs=True):
         """Add an instance to the cluster.

         name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
@@ -187,6 +189,7 @@
                 zookeeper_docker_compose_path = p.join(DOCKER_COMPOSE_DIR, 'docker_compose_zookeeper.yml')

             self.with_zookeeper = True
+            self.zookeeper_use_tmpfs = zookeeper_use_tmpfs
             self.base_cmd.extend(['--file', zookeeper_docker_compose_path])
             self.base_zookeeper_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
                                        self.project_name, '--file', zookeeper_docker_compose_path]
@@ -464,7 +467,19 @@
         common_opts = ['up', '-d', '--force-recreate']

         if self.with_zookeeper and self.base_zookeeper_cmd:
-            subprocess_check_call(self.base_zookeeper_cmd + common_opts)
+            env = os.environ.copy()
+            if not self.zookeeper_use_tmpfs:
+                env['ZK_FS'] = 'bind'
+                for i in range(1, 4):
+                    zk_data_path = self.instances_dir + '/zkdata' + str(i)
+                    zk_log_data_path = self.instances_dir + '/zklog' + str(i)
+                    if not os.path.exists(zk_data_path):
+                        os.mkdir(zk_data_path)
+                    if not os.path.exists(zk_log_data_path):
+                        os.mkdir(zk_log_data_path)
+                    env['ZK_DATA' + str(i)] = zk_data_path
+                    env['ZK_DATA_LOG' + str(i)] = zk_log_data_path
+            subprocess.check_call(self.base_zookeeper_cmd + common_opts, env=env)
             for command in self.pre_zookeeper_commands:
                 self.run_kazoo_commands_with_retries(command, repeats=5)
             self.wait_zookeeper_to_start(120)
@@ -547,6 +562,15 @@
             instance.ip_address = None
             instance.client = None

+        if not self.zookeeper_use_tmpfs:
+            for i in range(1, 4):
+                zk_data_path = self.instances_dir + '/zkdata' + str(i)
+                zk_log_data_path = self.instances_dir + '/zklog' + str(i)
+                if os.path.exists(zk_data_path):
+                    shutil.rmtree(zk_data_path)
+                if os.path.exists(zk_log_data_path):
+                    shutil.rmtree(zk_log_data_path)
+
         if sanitizer_assert_instance is not None:
             raise Exception("Sanitizer assert found in {} for instance {}".format(self.docker_logs_path, sanitizer_assert_instance))
@@ -586,7 +610,7 @@ CLICKHOUSE_START_COMMAND = "clickhouse server --config-file=/etc/clickhouse-serv
 CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "{} --daemon; tail -f /dev/null"'.format(CLICKHOUSE_START_COMMAND)

 DOCKER_COMPOSE_TEMPLATE = '''
-version: '2.2'
+version: '2.3'
 services:
     {name}:
         image: {image}

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     golang1:
         build:

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     java1:
         build:

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     mysql1:
         image: mysql:5.7

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     mysqljs1:
         build:

View File

@@ -1,4 +1,4 @@
-version: '2.2'
+version: '2.3'
 services:
     php1:
         build:

View File

@@ -299,7 +299,6 @@ def test_rename_with_parallel_insert(started_cluster):
         drop_table(nodes, table_name)


-@pytest.mark.skip(reason="For unknown reason one of these tests kill Zookeeper")
 def test_rename_with_parallel_merges(started_cluster):
     table_name = "test_rename_with_parallel_merges"
     drop_table(nodes, table_name)
@@ -337,7 +336,6 @@ def test_rename_with_parallel_merges(started_cluster):
         drop_table(nodes, table_name)


-@pytest.mark.skip(reason="For unknown reason one of these tests kill Zookeeper")
 def test_rename_with_parallel_slow_insert(started_cluster):
     table_name = "test_rename_with_parallel_slow_insert"
     drop_table(nodes, table_name)
@@ -499,7 +497,6 @@ def test_rename_with_parallel_ttl_delete(started_cluster):
         drop_table(nodes, table_name)


-@pytest.mark.skip(reason="For unknown reason one of these tests kill Zookeeper")
 def test_rename_distributed(started_cluster):
     table_name = 'test_rename_distributed'
     try:
@@ -516,7 +513,6 @@ def test_rename_distributed(started_cluster):
         drop_distributed_table(node1, table_name)


-@pytest.mark.skip(reason="For unknown reason one of these tests kill Zookeeper")
 def test_rename_distributed_parallel_insert_and_select(started_cluster):
     table_name = 'test_rename_distributed_parallel_insert_and_select'
     try:

View File

@@ -12,8 +12,8 @@ def test_chroot_with_same_root():
     cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
     cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

-    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
-    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)
     nodes = [node1, node2]

     def create_zk_root(zk):
@@ -51,8 +51,8 @@ def test_chroot_with_different_root():
     cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
     cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_b.xml')

-    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
-    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)
     nodes = [node1, node2]

     def create_zk_roots(zk):
@@ -69,7 +69,7 @@ def test_chroot_with_different_root():
     for i, node in enumerate(nodes):
         node.query('''
-        CREATE TABLE simple (date Date, id UInt32)
+        CREATE TABLE simple (date Date, id UInt32)
         ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
         '''.format(replica=node.name))

     for j in range(2): # Second insert to test deduplication
@@ -90,8 +90,8 @@ def test_identity():
     cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
     cluster_2 = ClickHouseCluster(__file__)

-    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
-    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True, zookeeper_use_tmpfs=False)

     try:
         cluster_1.start()
@@ -138,7 +138,7 @@ def test_secure_connection():
     docker_compose = NamedTemporaryFile(delete=False)

     docker_compose.write(
-        "version: '2.2'\nservices:\n" +
+        "version: '2.3'\nservices:\n" +
         TEMPLATE.format(zoo_id=1, configs_dir=configs_dir, helpers_dir=helpers_dir) +
         TEMPLATE.format(zoo_id=2, configs_dir=configs_dir, helpers_dir=helpers_dir) +
         TEMPLATE.format(zoo_id=3, configs_dir=configs_dir, helpers_dir=helpers_dir)
@@ -146,9 +146,9 @@ def test_secure_connection():
     docker_compose.close()

     node1 = cluster.add_instance('node1', config_dir='configs_secure', with_zookeeper=True,
-                                 zookeeper_docker_compose_path=docker_compose.name)
+                                 zookeeper_docker_compose_path=docker_compose.name, zookeeper_use_tmpfs=False)
     node2 = cluster.add_instance('node2', config_dir='configs_secure', with_zookeeper=True,
-                                 zookeeper_docker_compose_path=docker_compose.name)
+                                 zookeeper_docker_compose_path=docker_compose.name, zookeeper_use_tmpfs=False)

     try:
         cluster.start()