Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)
Removed leftovers
commit 1bfe312834 (parent 59976318e4)
Changed file (shell test-runner script; path not shown in this view):

```diff
@@ -130,19 +130,16 @@ if [ -n "$*" ]; then
     $*
 else
     TEST_RUN=${TEST_RUN=1}
-    TEST_PERF=${TEST_PERF=1}
     TEST_DICT=${TEST_DICT=1}
     CLICKHOUSE_CLIENT_QUERY="${CLICKHOUSE_CLIENT} --config ${CLICKHOUSE_CONFIG_CLIENT} --port $CLICKHOUSE_PORT_TCP -m -n -q"
     $CLICKHOUSE_CLIENT_QUERY 'SELECT * from system.build_options; SELECT * FROM system.clusters;'
     CLICKHOUSE_TEST="env ${TEST_DIR}clickhouse-test --force-color --binary ${BIN_DIR}${CLICKHOUSE_BINARY_NAME} --configclient $CLICKHOUSE_CONFIG_CLIENT --configserver $CLICKHOUSE_CONFIG --tmp $DATA_DIR/tmp --queries $QUERIES_DIR $TEST_OPT0 $TEST_OPT"
-    CLICKHOUSE_PERFORMANCE_TEST="${BIN_DIR}clickhouse-performance-test --port $CLICKHOUSE_PORT_TCP --recursive $CUR_DIR/performance --skip-tags=long"
     if [ "${TEST_RUN_STRESS}" ]; then
         # Running test in parallel will fail some results (tests can create/fill/drop same tables)
         TEST_NPROC=${TEST_NPROC:=$(( `nproc || sysctl -n hw.ncpu || echo 2` * 2))}
         for i in `seq 1 ${TEST_NPROC}`; do
             $CLICKHOUSE_TEST --order=random --testname --tmp=$DATA_DIR/tmp/tmp${i} &
         done
-        $CLICKHOUSE_PERFORMANCE_TEST &
     fi
 
     if [ "${TEST_RUN_PARALLEL}" ]; then
@@ -164,8 +161,6 @@ else
         ( [ "$TEST_RUN" ] && $CLICKHOUSE_TEST ) || ${TEST_TRUE:=false}
     fi
 
-    ( [ "$TEST_PERF" ] && $CLICKHOUSE_PERFORMANCE_TEST $* ) || true
-    #( [ "$TEST_DICT" ] && mkdir -p $DATA_DIR/etc/dictionaries/ && cd $CUR_DIR/external_dictionaries && python generate_and_test.py --port=$CLICKHOUSE_PORT_TCP --client=$CLICKHOUSE_CLIENT --source=$CUR_DIR/external_dictionaries/source.tsv --reference=$CUR_DIR/external_dictionaries/reference --generated=$DATA_DIR/etc/dictionaries/ --no_mysql --no_mongo ) || true
     $CLICKHOUSE_CLIENT_QUERY "SELECT event, value FROM system.events; SELECT metric, value FROM system.metrics; SELECT metric, value FROM system.asynchronous_metrics;"
     $CLICKHOUSE_CLIENT_QUERY "SELECT 'Still alive'"
 fi
```
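For readers skimming the hunk above: the surviving stress branch sizes its worker pool from the host CPU count, falling back to BSD's `sysctl` and then to a hard-coded 2. A standalone, runnable sketch of just that fallback chain (the `echo` is added here for illustration):

```bash
#!/bin/bash
# The TEST_NPROC logic from the diff above, isolated: try GNU nproc,
# fall back to BSD's sysctl, finally assume 2 cores; run twice that
# many parallel test runners.
TEST_NPROC=${TEST_NPROC:=$(( $(nproc || sysctl -n hw.ncpu || echo 2) * 2 ))}
echo "would launch ${TEST_NPROC} test runner processes"
```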
debian/clickhouse-test.install (vendored, 1 line removed):
```diff
@@ -1,6 +1,5 @@
 usr/bin/clickhouse-test
 usr/bin/clickhouse-test-server
-usr/bin/clickhouse-performance-test
 usr/share/clickhouse-test/*
 etc/clickhouse-client/client-test.xml
 etc/clickhouse-server/server-test.xml
```
Changed file (documentation listing the clickhouse binary's symlinks):

````diff
@@ -36,6 +36,5 @@ lrwxrwxrwx 1 root root 10 clickhouse-lld -> clickhouse
 lrwxrwxrwx 1 root root 10 clickhouse-local -> clickhouse
 lrwxrwxrwx 1 root root 10 clickhouse-obfuscator -> clickhouse
 lrwxrwxrwx 1 root root 10 clickhouse-odbc-bridge -> clickhouse
-lrwxrwxrwx 1 root root 10 clickhouse-performance-test -> clickhouse
 lrwxrwxrwx 1 root root 10 clickhouse-server -> clickhouse
 ```
````
Deleted file (the yandex/clickhouse-performance-test Dockerfile, per its own header comment):

```diff
@@ -1,16 +0,0 @@
-# docker build -t yandex/clickhouse-performance-test .
-FROM yandex/clickhouse-stateful-test
-
-RUN apt-get update -y \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get install --yes --no-install-recommends \
-            python-requests
-
-COPY s3downloader /s3downloader
-COPY run.sh /run.sh
-
-ENV OPEN_DATASETS="hits values_with_expressions"
-ENV PRIVATE_DATASETS="hits_100m_single hits_10m_single"
-ENV DOWNLOAD_DATASETS=1
-
-CMD /run.sh
```
Deleted file (run.sh, copied into the image above):

```diff
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-install_packages() {
-    dpkg -i package_folder/clickhouse-common-static_*.deb
-    dpkg -i package_folder/clickhouse-server_*.deb
-    dpkg -i package_folder/clickhouse-client_*.deb
-    dpkg -i package_folder/clickhouse-test_*.deb
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
-    service clickhouse-server start && sleep 5
-}
-
-download_data() {
-    clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
-    clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
-    /s3downloader --dataset-names $OPEN_DATASETS
-    /s3downloader --dataset-names $PRIVATE_DATASETS --url 'https://s3.mds.yandex.net/clickhouse-private-datasets'
-    chmod 777 -R /var/lib/clickhouse
-    service clickhouse-server restart && sleep 5
-    clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
-}
-
-run() {
-    clickhouse-performance-test $TESTS_TO_RUN | tee test_output/test_result.json
-}
-
-install_packages
-
-if [ $DOWNLOAD_DATASETS -eq 1 ]; then
-    download_data
-fi
-
-clickhouse-client --query "select * from system.settings where name = 'log_queries'"
-tree /etc/clickhouse-server
-cat /var/lib/clickhouse/preprocessed_configs/config.xml
-cat /var/lib/clickhouse/preprocessed_configs/users.xml
-
-run
```
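Taken together, the deleted Dockerfile and run.sh imply a workflow like the following. The image tag comes from the Dockerfile's own header comment, `package_folder` and `test_output` from run.sh; the host-side paths are illustrative:

```bash
# Build the image (command taken from the Dockerfile's header comment):
docker build -t yandex/clickhouse-performance-test .

# Run it: run.sh installs .deb packages from package_folder and tees
# results into test_output/, so mount both; DOWNLOAD_DATASETS=1 (the
# image default) makes run.sh fetch the datasets before testing.
docker run \
    -v "$(pwd)/packages:/package_folder" \
    -v "$(pwd)/results:/test_output" \
    -e DOWNLOAD_DATASETS=1 \
    yandex/clickhouse-performance-test
```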
Deleted file (s3downloader, copied into the image above):

```diff
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import os
-import sys
-import tarfile
-import logging
-import argparse
-import requests
-import tempfile
-
-
-DEFAULT_URL = 'https://clickhouse-datasets.s3.yandex.net'
-
-AVAILABLE_DATASETS = {
-    'hits': 'hits_v1.tar',
-    'visits': 'visits_v1.tar',
-    'values_with_expressions': 'test_values.tar',
-    'hits_100m_single': 'hits_100m_single.tar',
-    'hits_1000m_single': 'hits_1000m_single.tar',
-    'hits_10m_single': 'hits_10m_single.tar',
-    'trips_mergetree': 'trips_mergetree.tar',
-}
-
-def _get_temp_file_name():
-    return os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
-
-def build_url(base_url, dataset):
-    return os.path.join(base_url, dataset, 'partitions', AVAILABLE_DATASETS[dataset])
-
-def dowload_with_progress(url, path):
-    logging.info("Downloading from %s to temp path %s", url, path)
-    with open(path, 'w') as f:
-        response = requests.get(url, stream=True)
-        response.raise_for_status()
-        total_length = response.headers.get('content-length')
-        if total_length is None or int(total_length) == 0:
-            logging.info("No content-length, will download file without progress")
-            f.write(response.content)
-        else:
-            dl = 0
-            total_length = int(total_length)
-            logging.info("Content length is %ld bytes", total_length)
-            counter = 0
-            for data in response.iter_content(chunk_size=4096):
-                dl += len(data)
-                counter += 1
-                f.write(data)
-                done = int(50 * dl / total_length)
-                percent = int(100 * float(dl) / total_length)
-                if sys.stdout.isatty():
-                    sys.stdout.write("\r[{}{}] {}%".format('=' * done, ' ' * (50-done), percent))
-                    sys.stdout.flush()
-                elif counter % 1000 == 0:
-                    sys.stdout.write("{}%".format(percent))
-                    sys.stdout.flush()
-    sys.stdout.write("\n")
-    logging.info("Downloading finished")
-
-def unpack_to_clickhouse_directory(tar_path, clickhouse_path):
-    logging.info("Will unpack data from temp path %s to clickhouse db %s", tar_path, clickhouse_path)
-    with tarfile.open(tar_path, 'r') as comp_file:
-        comp_file.extractall(path=clickhouse_path)
-    logging.info("Unpack finished")
-
-
-if __name__ == "__main__":
-    logging.basicConfig(
-        level=logging.INFO,
-        format='%(asctime)s %(levelname)s: %(message)s')
-
-    parser = argparse.ArgumentParser(
-        description="Simple tool for dowloading datasets for clickhouse from S3")
-
-    parser.add_argument('--dataset-names', required=True, nargs='+', choices=AVAILABLE_DATASETS.keys())
-    parser.add_argument('--url-prefix', default=DEFAULT_URL)
-    parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')
-
-    args = parser.parse_args()
-    datasets = args.dataset_names
-    logging.info("Will fetch following datasets: %s", ', '.join(datasets))
-    for dataset in datasets:
-        logging.info("Processing %s", dataset)
-        temp_archive_path = _get_temp_file_name()
-        try:
-            download_url_for_dataset = build_url(args.url_prefix, dataset)
-            dowload_with_progress(download_url_for_dataset, temp_archive_path)
-            unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path)
-        except Exception as ex:
-            logging.info("Some exception occured %s", str(ex))
-            raise
-        finally:
-            logging.info("Will remove dowloaded file %s from filesystem if it exists", temp_archive_path)
-            if os.path.exists(temp_archive_path):
-                os.remove(temp_archive_path)
-        logging.info("Processing of %s finished, table placed at", dataset)
-    logging.info("Fetch finished, enjoy your tables!")
-
-
```
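The deleted s3downloader reduces to: build `<url-prefix>/<dataset>/partitions/<archive>`, stream the tarball to a temporary file, and unpack it into the ClickHouse data directory. Note that it reached into private tempfile internals (`_get_default_tempdir`, `_get_candidate_names`) and opened the output in text mode (`'w'`) for binary data. A minimal shell sketch of the same flow for the `hits` dataset, using `mktemp` instead (flags and paths illustrative):

```bash
#!/bin/bash
# Core flow of the deleted s3downloader for one dataset. The URL layout
# and the hits -> hits_v1.tar mapping come from the script above; the
# rest is illustrative.
set -e
URL_PREFIX='https://clickhouse-datasets.s3.yandex.net'
TMP_ARCHIVE=$(mktemp --suffix=.tar)
trap 'rm -f "$TMP_ARCHIVE"' EXIT    # mirrors the script's finally: cleanup
curl -sSf "${URL_PREFIX}/hits/partitions/hits_v1.tar" -o "$TMP_ARCHIVE"
tar -xf "$TMP_ARCHIVE" -C /var/lib/clickhouse/
```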