Merge branch 'feature_multiple_disks' of https://github.com/filimonov/ClickHouse into filimonov-feature_multiple_disks

Igr Mineev 2019-07-16 14:23:33 +03:00
commit d6b1035301
14 changed files with 281 additions and 411 deletions


@@ -114,7 +114,7 @@ class ClickHouseCluster:
cmd += " client"
return cmd
def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False, hostname=None, env_variables={}, image="yandex/clickhouse-integration-test", stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False):
def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False, hostname=None, env_variables={}, image="yandex/clickhouse-integration-test", stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]):
"""Add an instance to the cluster.
name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
@@ -135,7 +135,7 @@ class ClickHouseCluster:
self.zookeeper_config_path, with_mysql, with_kafka, with_mongo, self.base_configs_dir, self.server_bin_path,
self.odbc_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers, hostname=hostname,
env_variables=env_variables, image=image, stay_alive=stay_alive, ipv4_address=ipv4_address, ipv6_address=ipv6_address,
with_installed_binary=with_installed_binary)
with_installed_binary=with_installed_binary, tmpfs=tmpfs)
self.instances[name] = instance
if ipv4_address is not None or ipv6_address is not None:
@@ -395,6 +395,10 @@ class ClickHouseCluster:
instance.client = None
def open_bash_shell(self, instance_name):
os.system(' '.join(self.base_cmd + ['exec', instance_name, '/bin/bash']))
def get_kazoo_client(self, zoo_instance_name):
zk = KazooClient(hosts=self.get_instance_ip(zoo_instance_name))
zk.start()
@@ -435,6 +439,7 @@ services:
{odbc_bridge_volume}
{odbc_ini_path}
entrypoint: {entrypoint_cmd}
tmpfs: {tmpfs}
cap_add:
- SYS_PTRACE
depends_on: {depends_on}
@@ -456,7 +461,7 @@ class ClickHouseInstance:
self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macros,
with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_mongo, base_configs_dir, server_bin_path, odbc_bridge_bin_path,
clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables={}, image="yandex/clickhouse-integration-test",
stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False):
stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]):
self.name = name
self.base_cmd = cluster.base_cmd[:]
@@ -464,6 +469,7 @@ class ClickHouseInstance:
self.cluster = cluster
self.hostname = hostname if hostname is not None else self.name
self.tmpfs = tmpfs[:]
self.custom_config_dir = p.abspath(p.join(base_path, custom_config_dir)) if custom_config_dir else None
self.custom_main_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_main_configs]
self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs]
@@ -806,6 +812,7 @@ class ClickHouseInstance:
configs_dir=configs_dir,
config_d_dir=config_d_dir,
db_dir=db_dir,
tmpfs=str(self.tmpfs),
logs_dir=logs_dir,
depends_on=str(depends_on),
user=os.getuid(),
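With this change, an integration test can ask the cluster helper to mount tmpfs volumes into an instance; each entry is rendered into the `tmpfs:` section of the generated docker-compose service. A minimal sketch of the new parameter in use (paths and sizes are illustrative and mirror the new test added below):

```
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
# The tmpfs entries become in-memory "disks" that the server's storage
# configuration can point at.
node = cluster.add_instance('node',
                            config_dir='configs',
                            with_zookeeper=True,
                            tmpfs=['/jbod1:size=40M', '/external:size=200M'])
```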


@@ -3,11 +3,11 @@
<test_cluster>
<shard>
<replica>
<host>ch1</host>
<host>node1</host>
<port>9000</port>
</replica>
<replica>
<host>ch2</host>
<host>node2</host>
<port>9000</port>
</replica>
</shard>
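The replicas in remote_servers.xml are renamed from ch1/ch2 to node1/node2 to match the integration-test instance names. A hedged sketch of a sanity check for the renamed cluster definition (it assumes the node1 fixture from the new test file; system.clusters and its host_name column are stock ClickHouse):

```
def check_cluster_hosts(node):
    # remote_servers.xml defines 'test_cluster' with the two renamed replicas.
    hosts = node.query(
        "SELECT host_name FROM system.clusters WHERE cluster = 'test_cluster' ORDER BY host_name"
    ).splitlines()
    assert hosts == ['node1', 'node2']
```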


@@ -2,30 +2,33 @@
<storage_configuration>
<disks>
<default> <!-- path for deafult disk is provided in main config -->
<!--default> <! - - path for default disk is provided in main config - - >
<keep_free_space_bytes>0</keep_free_space_bytes>
</default>
<jbod1>
<path>/jbod1/</path> <!-- trailing slash is mandatory -->
</default-->
<mainstorage>
<path>/mainstorage/</path><!-- trailing slash is mandatory -->
<keep_free_space_bytes>1000000</keep_free_space_bytes>
</mainstorage>
<jbod1>
<path>/jbod1/</path>
<keep_free_space_bytes>10000000</keep_free_space_bytes>
</jbod1>
<jbod2>
<path>/jbod2/</path>
<keep_free_space_bytes>1000000</keep_free_space_bytes>
<keep_free_space_bytes>10000000</keep_free_space_bytes>
</jbod2>
<external>
<path>/external/</path>
<keep_free_space_bytes>1000000</keep_free_space_bytes>
<keep_free_space_bytes>10000000</keep_free_space_bytes>
</external>
</disks>
<schemas>
<!-- default: store on jbod1-->
<policies>
<!-- default: store on mainstorage -->
<default>
<volume>
<disk>jbod1</disk>
<max_data_part_size_bytes>10000000</max_data_part_size_bytes>
<!-- Q: how it will behave if the only disk has max_data_part_size_bytes limitation? -->
<disk>mainstorage</disk>
<max_data_part_size_bytes>20000000</max_data_part_size_bytes>
</volume>
</default>
@@ -34,15 +37,15 @@
<volume>
<!-- names for the volumes should be added to allow moving parts between volumes with DDL commands -->
<disk>default</disk>
<max_data_part_size_bytes>10000000</max_data_part_size_bytes>
<max_data_part_size_bytes>2000000</max_data_part_size_bytes>
</volume>
<volume>
<disk>external</disk>
<max_data_part_size_bytes>100000000</max_data_part_size_bytes>
<max_data_part_size_bytes>20000000</max_data_part_size_bytes>
</volume>
</default_disk_with_external>
<!-- store on JBOD by default (round-robin), store bif parts on external -->
<!-- store on JBOD by default (round-robin), store big parts on external -->
<jbod_with_external>
<volume>
<disk>jbod1</disk>
@@ -52,11 +55,23 @@
</volume>
<volume>
<disk>external</disk>
<!-- max_data_part_size_bytes>10000000</max_data_part_size_bytes -->
<!--max_data_part_size_bytes>30000000</max_data_part_size_bytes-->
</volume>
</jbod_with_external>
</schemas>
<mainstorage_only>
<volume>
<disk>mainstorage</disk>
<max_data_part_size_bytes>9000000</max_data_part_size_bytes>
</volume>
<volume>
<disk>external</disk>
<max_data_part_size_bytes>20000000</max_data_part_size_bytes>
</volume>
</mainstorage_only>
</policies>
</storage_configuration>
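To make the intent of the policies above concrete, here is a hedged sketch of a check for the 'default' policy. It reuses the node fixtures from the new test file and the data_paths / storage_policy columns that the test's docstring queries from system.tables; exact column names may differ in this branch:

```
def check_default_policy(node):
    # The 'default' policy points at the 'mainstorage' disk, so a table created
    # without explicit storage settings should keep its data under /mainstorage/.
    node.query("CREATE TABLE policy_smoke (id UInt64) ENGINE = MergeTree() ORDER BY id")
    node.query("INSERT INTO policy_smoke SELECT number FROM numbers(100)")
    row = node.query(
        "SELECT data_paths, storage_policy FROM system.tables WHERE name = 'policy_smoke'"
    )
    assert '/mainstorage/' in row and 'default' in row
```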


@@ -0,0 +1,232 @@
import time
import pytest
import os
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 1} )
node2 = cluster.add_instance('node2',
config_dir='configs',
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 2} )
#node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def test_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_run_shell(test_cluster):
test_cluster.open_bash_shell('node1')
#################################
# root@node1:/# clickhouse client -m
# ClickHouse client version 19.8.1.536.
# Connecting to localhost:9000 as user default.
# Connected to ClickHouse server version 19.8.1 revision 54420.
# node1 :) select * from system.disks;
# def test_same_credentials(same_credentials_cluster):
# node1.query("insert into test_table values ('2017-06-16', 111, 0)")
# time.sleep(1)
# assert node1.query("SELECT id FROM test_table order by id") == '111\n'
# assert node2.query("SELECT id FROM test_table order by id") == '111\n'
# node2.query("insert into test_table values ('2017-06-17', 222, 1)")
# time.sleep(1)
# assert node1.query("SELECT id FROM test_table order by id") == '111\n222\n'
# assert node2.query("SELECT id FROM test_table order by id") == '111\n222\n'
# node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True)
# node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True)
# @pytest.fixture(scope="module")
# def no_credentials_cluster():
# try:
# cluster.start()
# _fill_nodes([node3, node4], 2)
# yield cluster
# finally:
# cluster.shutdown()
# def test_no_credentials(no_credentials_cluster):
# node3.query("insert into test_table values ('2017-06-18', 111, 0)")
# time.sleep(1)
# assert node3.query("SELECT id FROM test_table order by id") == '111\n'
# assert node4.query("SELECT id FROM test_table order by id") == '111\n'
# node4.query("insert into test_table values ('2017-06-19', 222, 1)")
# time.sleep(1)
# assert node3.query("SELECT id FROM test_table order by id") == '111\n222\n'
# assert node4.query("SELECT id FROM test_table order by id") == '111\n222\n'
# node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True)
# node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/credentials2.xml'], with_zookeeper=True)
# @pytest.fixture(scope="module")
# def different_credentials_cluster():
# try:
# cluster.start()
# _fill_nodes([node5, node6], 3)
# yield cluster
# finally:
# cluster.shutdown()
# def test_different_credentials(different_credentials_cluster):
# node5.query("insert into test_table values ('2017-06-20', 111, 0)")
# time.sleep(1)
# assert node5.query("SELECT id FROM test_table order by id") == '111\n'
# assert node6.query("SELECT id FROM test_table order by id") == ''
# node6.query("insert into test_table values ('2017-06-21', 222, 1)")
# time.sleep(1)
# assert node5.query("SELECT id FROM test_table order by id") == '111\n'
# assert node6.query("SELECT id FROM test_table order by id") == '222\n'
# node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True)
# node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True)
# @pytest.fixture(scope="module")
# def credentials_and_no_credentials_cluster():
# try:
# cluster.start()
# _fill_nodes([node7, node8], 4)
# yield cluster
# finally:
# cluster.shutdown()
# def test_credentials_and_no_credentials(credentials_and_no_credentials_cluster):
# node7.query("insert into test_table values ('2017-06-21', 111, 0)")
# time.sleep(1)
# assert node7.query("SELECT id FROM test_table order by id") == '111\n'
# assert node8.query("SELECT id FROM test_table order by id") == ''
# node8.query("insert into test_table values ('2017-06-22', 222, 1)")
# time.sleep(1)
# assert node7.query("SELECT id FROM test_table order by id") == '111\n'
# assert node8.query("SELECT id FROM test_table order by id") == '222\n'
'''
## Test stand for multiple disks feature
Currently for manual tests; can easily be scripted to become part of the integration tests.
To run you need to have docker & docker-compose.
```
(Check makefile)
make run
make ch1_shell
> clickhouse-client
make logs # Ctrl+C
make cleanup
```
### basic
* allows configuring multiple disks & volumes & storage policies
* clickhouse checks that all disks are write-accessible
* clickhouse can create a table with the provided storage policy
### one volume-one disk custom storage policy
* clickhouse puts data into the correct folder when the storage policy is used
* clickhouse can do merges / detach / attach / freeze on that folder
### one volume-multiple disks storage policy (JBOD scenario)
* clickhouse uses round-robin to place new parts
* clickhouse can do merges / detach / attach / freeze on that folder
### two volumes-one disk per volume (fast expensive / slow cheap storage)
* clickhouse uses round-robin to place new parts
* clickhouse can do merges / detach / attach / freeze on that folder
* clickhouse puts parts on different volumes depending on part size
### use the 'default' storage policy for tables created without an explicit storage policy.
# ReplicatedMergeTree
....
For all of the above:
clickhouse respects the free space limitation setting.
ClickHouse writes important disk-related information to logs.
## Queries
```
CREATE TABLE table_with_storage_policy_default (id UInt64) Engine=MergeTree() ORDER BY (id);
select name, data_paths, storage_policy from system.tables where name='table_with_storage_policy_default';
"table_with_storage_policy_default","['/mainstorage/default/table_with_storage_policy_default/']","default"
INSERT INTO table_with_storage_policy_default SELECT rand64() FROM numbers(100);
CREATE TABLE table_with_storage_policy_default_explicit (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='default';
CREATE TABLE table_with_storage_policy_default_disk_with_external (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='default_disk_with_external';
CREATE TABLE table_with_storage_policy_jbod_with_external (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='jbod_with_external';
CREATE TABLE replicated_table_with_storage_policy_default (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id);
CREATE TABLE replicated_table_with_storage_policy_default_explicit (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='default';
CREATE TABLE replicated_table_with_storage_policy_default_disk_with_external (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='default_disk_with_external';
CREATE TABLE replicated_table_with_storage_policy_jbod_with_external (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_table_with_storage_policy_name='jbod_with_external';
```
## Extra acceptance criteria
* hardlink problems. Those statements should work properly (or give proper feedback) in multi-disk scenarios:
* ALTER TABLE ... UPDATE
* ALTER TABLE ... TABLE
* ALTER TABLE ... MODIFY COLUMN ...
* ALTER TABLE ... CLEAR COLUMN
* ALTER TABLE ... REPLACE PARTITION ...
* Maintenance - system tables show proper values:
* system.parts
* system.tables
* system.part_log (target disk?)
* New system table
* system.volumes
* system.disks
* system.storagepolicys
* chown / create needed disk folders in docker
'''
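The checks above are still manual; a minimal sketch of how the 'basic' scenario could be automated with the fixtures defined in this file (the name column of system.disks is assumed to be available in this branch):

```
def test_disks_are_visible(test_cluster):
    # Every disk declared in storage_configuration.xml should show up in system.disks.
    disks = node1.query("SELECT name FROM system.disks ORDER BY name").splitlines()
    assert {'external', 'jbod1', 'jbod2', 'mainstorage'} <= set(disks)
```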


@@ -1,42 +0,0 @@
docker_compose_version := $(shell echo $$(uname -s)-$$(uname -m))
run:
docker-compose up -d
install_docker:
@echo ""
@echo "Please remember that you need to call it with sudo!!"
@echo "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
@echo ""
-curl -sSL https://get.docker.com | bash
-usermod -aG docker $${USER}
@echo "$${USER} has been added to docker group. Please relogin"
install_docker_compose:
@echo ""
@echo "Please remember that you need to call it with sudo!!"
@echo "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
@echo ""
-curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(docker_compose_version)" -o /usr/local/bin/docker-compose
-chmod +x /usr/local/bin/docker-compose
ch1_shell:
docker-compose exec ch1 bash
ch2_shell:
docker-compose exec ch2 bash
stop:
docker-compose down
logs:
docker-compose logs -f
cleanup:
docker-compose down --rmi local --remove-orphans -v
cleanup_all:
docker container stop $$(docker container ls -a -q)
docker system prune --volumes
docker image prune --all


@@ -1,83 +0,0 @@
## Test stand for multiple disks feature
Currently for manual tests; can easily be scripted to become part of the integration tests.
To run you need to have docker & docker-compose.
```
(Check makefile)
make run
make ch1_shell
> clickhouse-client
make logs # Ctrl+C
make cleanup
```
### basic
* allows configuring multiple disks & volumes & schemas
* clickhouse checks that all disks are write-accessible
* clickhouse can create a table with the provided schema
### one volume-one disk custom schema
* clickhouse puts data into the correct folder when the schema is used
* clickhouse can do merges / detach / attach / freeze on that folder
### one volume-multiple disks schema (JBOD scenario)
* clickhouse uses round-robin to place new parts
* clickhouse can do merges / detach / attach / freeze on that folder
### two volumes-one disk per volume (fast expensive / slow cheap storage)
* clickhouse uses round-robin to place new parts
* clickhouse can do merges / detach / attach / freeze on that folder
* clickhouse puts parts on different volumes depending on part size
### use the 'default' schema for tables created without an explicit schema.
# ReplicatedMergeTree
....
For all of the above:
clickhouse respects the free space limitation setting.
ClickHouse writes important disk-related information to logs.
## Queries
```
CREATE TABLE schema_default (id UInt64) Engine=MergeTree() ORDER BY (id);
INSERT INTO schema_default SELECT * FROM numbers(1);
CREATE TABLE schema_default_explicit (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_schema_name='default';
CREATE TABLE schema_default_disk_with_external (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_schema_name='default_disk_with_external';
CREATE TABLE schema_jbod_with_external (id UInt64) Engine=MergeTree() ORDER BY (id) SETTINGS storage_schema_name='jbod_with_external';
CREATE TABLE replicated_schema_default (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id);
CREATE TABLE replicated_schema_default_explicit (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_schema_name='default';
CREATE TABLE replicated_schema_default_disk_with_external (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_schema_name='default_disk_with_external';
CREATE TABLE replicated_schema_jbod_with_external (id UInt64) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}', '{replica}') ORDER BY (id) SETTINGS storage_schema_name='jbod_with_external';
```
## Extra acceptance criteria
* hardlink problems. Those statements should work properly (or give proper feedback) in multi-disk scenarios:
* ALTER TABLE ... UPDATE
* ALTER TABLE ... TABLE
* ALTER TABLE ... MODIFY COLUMN ...
* ALTER TABLE ... CLEAR COLUMN
* ALTER TABLE ... REPLACE PARTITION ...
* Maintenance - system tables show proper values:
* system.parts
* system.tables
* system.part_log (target disk?)
* New system table
* system.volumes
* system.disks
* system.schemas
* chown / create needed disk folders in docker


@@ -1,7 +0,0 @@
<yandex>
<macros>
<cluster>test_cluster</cluster>
<shard>0</shard>
<replica>ch1</replica>
</macros>
</yandex>


@@ -1,7 +0,0 @@
<yandex>
<macros>
<cluster>test_cluster</cluster>
<shard>0</shard>
<replica>ch2</replica>
</macros>
</yandex>


@@ -1,8 +0,0 @@
<yandex>
<zookeeper>
<node>
<host>zookeeper</host>
<port>2181</port>
</node>
</zookeeper>
</yandex>


@@ -1,52 +0,0 @@
version: '3'
services:
zookeeper:
image: zookeeper
networks:
- 001_miltiple_disks
ch1:
image: yandex/clickhouse-server
hostname: ch1
depends_on:
- "zookeeper"
networks:
- 001_miltiple_disks
tmpfs:
- /jbod1:size=40M
- /jbod2:size=40M
- /external:size=200M
volumes:
- ./config/conf.d:/etc/clickhouse-server/conf.d
- ./config/ch1.metrika.xml:/etc/metrika.xml
# TODO: hacky, but "cheap" - no need to rebuild the docker image.
# For CI & test automation this should be done another way, not relying on the build path.
- ../../../../build/dbms/programs/clickhouse:/usr/bin/clickhouse
- ./test_setup:/test_setup
entrypoint: ['/test_setup/entrypoint.sh']
ch2:
image: yandex/clickhouse-server
hostname: ch2
depends_on:
- "zookeeper"
networks:
- 001_miltiple_disks
tmpfs:
- /jbod1:size=40M
- /jbod2:size=40M
- /external:size=200M
volumes:
- ./config/conf.d:/etc/clickhouse-server/conf.d
- ./config/ch2.metrika.xml:/etc/metrika.xml
# TODO: hacky, but "cheap" - no need to rebuild the docker image.
# For CI & test automation this should be done another way, not relying on the build path.
- ../../../../build/dbms/programs/clickhouse:/usr/bin/clickhouse
- ./test_setup:/test_setup
entrypoint: ['/test_setup/entrypoint.sh']
networks:
001_miltiple_disks:


@@ -1,9 +0,0 @@
#!/bin/bash
if [ -f /etc/clickhouse-server/conf.d/container_maintanence_mode.flag ]; then
echo "Starting container in maintanence mode. It will sleep unless you shutdown it"
sleep infinity
else
chown -R clickhouse:clickhouse /jbod1 /jbod2 /external
/test_setup/wait-for-it.sh zookeeper:2181 --timeout=0 --strict -- /entrypoint.sh
fi


@@ -1,182 +0,0 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available
# The MIT License (MIT)
# Copyright (c) 2016 Giles Hall
# See https://github.com/vishnubob/wait-for-it/blob/master/LICENSE
WAITFORIT_cmdname=${0##*/}
echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
usage()
{
cat << USAGE >&2
Usage:
$WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
Alternatively, you specify the host and port as host:port
-s | --strict Only execute subcommand if the test succeeds
-q | --quiet Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
wait_for()
{
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
else
echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
fi
WAITFORIT_start_ts=$(date +%s)
while :
do
if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
nc -z $WAITFORIT_HOST $WAITFORIT_PORT
WAITFORIT_result=$?
else
(echo > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
WAITFORIT_result=$?
fi
if [[ $WAITFORIT_result -eq 0 ]]; then
WAITFORIT_end_ts=$(date +%s)
echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
break
fi
sleep 1
done
return $WAITFORIT_result
}
wait_for_wrapper()
{
# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
if [[ $WAITFORIT_QUIET -eq 1 ]]; then
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
else
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
fi
WAITFORIT_PID=$!
trap "kill -INT -$WAITFORIT_PID" INT
wait $WAITFORIT_PID
WAITFORIT_RESULT=$?
if [[ $WAITFORIT_RESULT -ne 0 ]]; then
echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
fi
return $WAITFORIT_RESULT
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
*:* )
WAITFORIT_hostport=(${1//:/ })
WAITFORIT_HOST=${WAITFORIT_hostport[0]}
WAITFORIT_PORT=${WAITFORIT_hostport[1]}
shift 1
;;
--child)
WAITFORIT_CHILD=1
shift 1
;;
-q | --quiet)
WAITFORIT_QUIET=1
shift 1
;;
-s | --strict)
WAITFORIT_STRICT=1
shift 1
;;
-h)
WAITFORIT_HOST="$2"
if [[ $WAITFORIT_HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
WAITFORIT_HOST="${1#*=}"
shift 1
;;
-p)
WAITFORIT_PORT="$2"
if [[ $WAITFORIT_PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
WAITFORIT_PORT="${1#*=}"
shift 1
;;
-t)
WAITFORIT_TIMEOUT="$2"
if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
WAITFORIT_TIMEOUT="${1#*=}"
shift 1
;;
--)
shift
WAITFORIT_CLI=("$@")
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
echoerr "Error: you need to provide a host and port to test."
usage
fi
WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
# check to see if timeout is from busybox?
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
WAITFORIT_ISBUSY=1
WAITFORIT_BUSYTIMEFLAG="-t"
else
WAITFORIT_ISBUSY=0
WAITFORIT_BUSYTIMEFLAG=""
fi
if [[ $WAITFORIT_CHILD -gt 0 ]]; then
wait_for
WAITFORIT_RESULT=$?
exit $WAITFORIT_RESULT
else
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
wait_for_wrapper
WAITFORIT_RESULT=$?
else
wait_for
WAITFORIT_RESULT=$?
fi
fi
if [[ $WAITFORIT_CLI != "" ]]; then
if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
exit $WAITFORIT_RESULT
fi
exec "${WAITFORIT_CLI[@]}"
else
exit $WAITFORIT_RESULT
fi


@@ -11,6 +11,12 @@ COPY s3downloader /s3downloader
ENV DATASETS="hits visits"
# https://clang.llvm.org/docs/ThreadSanitizer.html
# https://clang.llvm.org/docs/AddressSanitizer.html
# https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
# https://clang.llvm.org/docs/MemorySanitizer.html
CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
dpkg -i package_folder/clickhouse-server_*.deb; \