Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 08:02:02 +00:00)
Convert to python3 (#15007)
parent 404c199448
commit 9cb3c743bd
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3


-from __future__ import print_function
 import sys
 import json

@@ -99,7 +99,7 @@ def gen_html_json(options, arguments):
 tuples = read_stats_file(options, arguments[1])
 print('{')
 print('"system: GreenPlum(x2),')
-print('"version": "%s",' % '4.3.9.1')
+print(('"version": "%s",' % '4.3.9.1'))
 print('"data_size": 10000000,')
 print('"time": "",')
 print('"comments": "",')
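The doubled parentheses in the new `print((...))` lines are an artifact of the 2to3 `print` fixer, which wraps whatever followed the old `print` statement in one more pair of parentheses; when the argument was already parenthesized, the extra pair is redundant but harmless. A minimal sketch with an illustrative value (not from the repository):

```python
version = '4.3.9.1'  # illustrative value

print(('"version": "%s",' % version))  # what 2to3 emits: extra parens, same output
print('"version": "%s",' % version)    # equivalent, cleaner Python 3 form
```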
debian/control (vendored)
@@ -62,5 +62,5 @@ Description: debugging symbols for clickhouse-common-static
 Package: clickhouse-test
 Priority: optional
 Architecture: all
-Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python, python-lxml, python-termcolor, python-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet, brotli, bsdutils
+Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python3, python3-lxml, python3-termcolor, python3-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet, brotli, bsdutils
 Description: ClickHouse tests
@@ -25,10 +25,10 @@ RUN apt-get update \
 ninja-build \
 perl \
 pkg-config \
-python \
-python-lxml \
-python-requests \
-python-termcolor \
+python3 \
+python3-lxml \
+python3-requests \
+python3-termcolor \
 tzdata \
 llvm-${LLVM_VERSION} \
 clang-${LLVM_VERSION} \
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #-*- coding: utf-8 -*-
 import subprocess
 import os
@@ -52,10 +52,10 @@ RUN apt-get update \
 moreutils \
 ninja-build \
 psmisc \
-python \
-python-lxml \
-python-requests \
-python-termcolor \
+python3 \
+python3-lxml \
+python3-requests \
+python3-termcolor \
 qemu-user-static \
 rename \
 software-properties-common \
@@ -4,7 +4,7 @@ FROM yandex/clickhouse-test-base
 RUN apt-get update \
 && env DEBIAN_FRONTEND=noninteractive apt-get -y install \
 tzdata \
-python \
+python3 \
 libreadline-dev \
 libicu-dev \
 bsdutils \
@@ -16,13 +16,13 @@ RUN apt-get update \
 iproute2 \
 module-init-tools \
 cgroupfs-mount \
-python-pip \
+python3-pip \
 tzdata \
 libreadline-dev \
 libicu-dev \
 bsdutils \
 curl \
-python-pika \
+python3-pika \
 liblua5.1-dev \
 luajit \
 libssl-dev \
@@ -37,7 +37,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry grpcio grpcio-tools cassandra-driver
+RUN python3 -m pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio grpcio grpcio-tools cassandra-driver confluent-kafka avro

 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 17.09.1-ce
@@ -312,7 +312,7 @@ def add_errors_explained():


 if args.report == 'main':
-print(header_template.format())
+print((header_template.format()))

 add_tested_commits()

@@ -571,14 +571,14 @@ if args.report == 'main':
 status = 'failure'
 message = 'Errors while building the report.'

-print("""
+print(("""
 <!--status: {status}-->
 <!--message: {message}-->
-""".format(status=status, message=message))
+""".format(status=status, message=message)))

 elif args.report == 'all-queries':

-print(header_template.format())
+print((header_template.format()))

 add_tested_commits()

@@ -4,7 +4,7 @@ FROM yandex/clickhouse-stateless-test
 RUN apt-get update -y \
 && env DEBIAN_FRONTEND=noninteractive \
 apt-get install --yes --no-install-recommends \
-python-requests \
+python3-requests \
 llvm-9

 COPY s3downloader /s3downloader
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
 import sys
@@ -29,7 +29,7 @@ def dowload_with_progress(url, path):
 logging.info("Downloading from %s to temp path %s", url, path)
 for i in range(RETRIES_COUNT):
 try:
-with open(path, 'w') as f:
+with open(path, 'wb') as f:
 response = requests.get(url, stream=True)
 response.raise_for_status()
 total_length = response.headers.get('content-length')
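The `'w'` → `'wb'` switch matters under Python 3 because the chunks streamed back by `requests` are `bytes`, and writing them into a text-mode file raises `TypeError`. A minimal sketch of the same pattern, with a hypothetical URL and path (not taken from the repository):

```python
import requests

def download(url: str, path: str) -> None:
    # Stream the response and write raw bytes; 'wb' is required in Python 3
    # because each chunk is a bytes object, not str.
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)

# download("https://example.com/dataset.tar", "/tmp/dataset.tar")  # illustrative
```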
@@ -74,7 +74,7 @@ if __name__ == "__main__":
 parser = argparse.ArgumentParser(
 description="Simple tool for dowloading datasets for clickhouse from S3")

-parser.add_argument('--dataset-names', required=True, nargs='+', choices=AVAILABLE_DATASETS.keys())
+parser.add_argument('--dataset-names', required=True, nargs='+', choices=list(AVAILABLE_DATASETS.keys()))
 parser.add_argument('--url-prefix', default=DEFAULT_URL)
 parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')

@@ -6,7 +6,7 @@ RUN echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9
 RUN apt-get update -y \
 && env DEBIAN_FRONTEND=noninteractive \
 apt-get install --yes --no-install-recommends \
-python-requests
+python3-requests

 COPY s3downloader /s3downloader
 COPY run.sh /run.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
 import sys
@@ -74,7 +74,7 @@ if __name__ == "__main__":
 parser = argparse.ArgumentParser(
 description="Simple tool for dowloading datasets for clickhouse from S3")

-parser.add_argument('--dataset-names', required=True, nargs='+', choices=AVAILABLE_DATASETS.keys())
+parser.add_argument('--dataset-names', required=True, nargs='+', choices=list(AVAILABLE_DATASETS.keys()))
 parser.add_argument('--url-prefix', default=DEFAULT_URL)
 parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')

@@ -12,10 +12,10 @@ RUN apt-get update -y \
 ncdu \
 netcat-openbsd \
 openssl \
-python \
-python-lxml \
-python-requests \
-python-termcolor \
+python3 \
+python3-lxml \
+python3-requests \
+python3-termcolor \
 qemu-user-static \
 sudo \
 telnet \
@@ -3,10 +3,10 @@ FROM yandex/clickhouse-test-base

 RUN apt-get update -y && \
 apt-get install -y --no-install-recommends \
-python-pip \
-python-setuptools
+python3-pip \
+python3-setuptools

-RUN pip install \
+RUN python3 -m pip install \
 pytest \
 pytest-html \
 pytest-timeout \
@@ -17,4 +17,4 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
 dpkg -i package_folder/clickhouse-server_*.deb; \
 dpkg -i package_folder/clickhouse-client_*.deb; \
 dpkg -i package_folder/clickhouse-test_*.deb; \
-python -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --html=test_output/report.html --self-contained-html
+python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --html=test_output/report.html --self-contained-html
@@ -54,10 +54,10 @@ RUN apt-get --allow-unauthenticated update -y \
 perl \
 pigz \
 pkg-config \
-python \
-python-lxml \
-python-requests \
-python-termcolor \
+python3 \
+python3-lxml \
+python3-requests \
+python3-termcolor \
 qemu-user-static \
 sudo \
 telnet \
@@ -12,10 +12,10 @@ RUN apt-get update -y \
 fakeroot \
 debhelper \
 expect \
-python \
-python-lxml \
-python-termcolor \
-python-requests \
+python3 \
+python3-lxml \
+python3-termcolor \
+python3-requests \
 sudo \
 openssl \
 ncdu \
@@ -10,10 +10,10 @@ RUN apt-get update -y \
 debhelper \
 parallel \
 expect \
-python \
-python-lxml \
-python-termcolor \
-python-requests \
+python3 \
+python3-lxml \
+python3-termcolor \
+python3-requests \
 curl \
 sudo \
 openssl \
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 from multiprocessing import cpu_count
 from subprocess import Popen, check_call
@@ -116,7 +116,7 @@ ninja
 Example for Fedora Rawhide:
 ``` bash
 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ Ejemplo de OpenSUSE Tumbleweed:
 Ejemplo de Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -103,7 +103,7 @@ $ cd ..
 به عنوان مثال برای فدورا پوست دباغی نشده:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ Exemple Pour openSUSE Tumbleweed:
 Exemple Pour Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ OpenSUSEタンブルウィードの例:
 Fedora Rawhideの例:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-

 import os, sys
@@ -71,8 +71,8 @@ def choose_latest_releases(args):
 logging.fatal('Unexpected GitHub response: %s', str(candidates))
 sys.exit(1)

-logging.info('Found LTS releases: %s', ', '.join(seen_lts.keys()))
-logging.info('Found stable releases: %s', ', '.join(seen_stable.keys()))
+logging.info('Found LTS releases: %s', ', '.join(list(seen_lts.keys())))
+logging.info('Found stable releases: %s', ', '.join(list(seen_stable.keys())))
 return sorted(list(seen_lts.items()) + list(seen_stable.items()))


@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals

 import datetime
 import os
@@ -59,7 +59,7 @@ def build_docs_nav(lang, args):
 _, _, nav = build_nav_entry(docs_dir, args)
 result = []
 index_key = None
-for key, value in nav.items():
+for key, value in list(nav.items()):
 if key and value:
 if value == 'index.md':
 index_key = key
@@ -59,7 +59,7 @@ def convert_to_dicts(changed_files, batch_size):
 def post_data(prepared_batches, token):
 headers = {"Authorization": "Bearer {}".format(token)}
 for batch in prepared_batches:
-print("Pugring cache for", ", ".join(batch["files"]))
+print(("Pugring cache for", ", ".join(batch["files"])))
 response = requests.post(CLOUDFLARE_URL, json=batch, headers=headers)
 response.raise_for_status()
 time.sleep(3)
|
|||||||
raise Exception("Env variable CLOUDFLARE_TOKEN is empty")
|
raise Exception("Env variable CLOUDFLARE_TOKEN is empty")
|
||||||
base_domain = os.getenv("BASE_DOMAIN", "https://content.clickhouse.tech/")
|
base_domain = os.getenv("BASE_DOMAIN", "https://content.clickhouse.tech/")
|
||||||
changed_files = collect_changed_files()
|
changed_files = collect_changed_files()
|
||||||
print("Found", len(changed_files), "changed files")
|
print(("Found", len(changed_files), "changed files"))
|
||||||
filtered_files = filter_and_transform_changed_files(changed_files, base_domain)
|
filtered_files = filter_and_transform_changed_files(changed_files, base_domain)
|
||||||
print("Files rest after filtering", len(filtered_files))
|
print(("Files rest after filtering", len(filtered_files)))
|
||||||
prepared_batches = convert_to_dicts(filtered_files, 25)
|
prepared_batches = convert_to_dicts(filtered_files, 25)
|
||||||
post_data(prepared_batches, token)
|
post_data(prepared_batches, token)
|
||||||
|
@@ -15,7 +15,7 @@ import website

 def recursive_values(item):
 if isinstance(item, dict):
-for _, value in item.items():
+for _, value in list(item.items()):
 yield from recursive_values(value)
 elif isinstance(item, list):
 for value in item:
@@ -1,6 +1,5 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals

 import logging
 import os
 import sys
@@ -42,4 +42,4 @@ def typograph(text):

 if __name__ == '__main__':
 import sys
-print(typograph(sys.stdin.read()))
+print((typograph(sys.stdin.read())))
@@ -102,7 +102,7 @@ OpenSUSE Tumbleweed için örnek:
 Fedora Rawhide için örnek:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
release
@@ -66,7 +66,7 @@ do
 shift
 elif [[ $1 == '--fast' ]]; then
 # Wrong but fast pbuilder mode: create base package with all depends
-EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python python-lxml python-termcolor python-requests curl perl sudo openssl netcat-openbsd"
+EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python3 python3-lxml python3-termcolor python3-requests curl perl sudo openssl netcat-openbsd"
 shift
 elif [[ $1 == '--rpm' ]]; then
 MAKE_RPM=1
@@ -24,16 +24,16 @@ for s in sys.stdin.read().split():
 parts[m1].append((i1, i2, l, s))

 for m, ps in sorted(parts.items()):
-ps.sort(key=lambda (i1, i2, l, s): (i1, -i2, -l))
+ps.sort(key=lambda i1_i2_l_s: (i1_i2_l_s[0], -i1_i2_l_s[1], -i1_i2_l_s[2]))
 (x2, y2, l2, s2) = (-1, -1, -1, -1)
 for x1, y1, l1, s1 in ps:
 if x1 >= x2 and y1 <= y2 and l1 < l2 and (x1, y1) != (x2, y2): # 2 contains 1
 pass
 elif x1 > y2: # 1 is to the right of 2
 if x1 != y2 + 1 and y2 != -1:
-print # to see the missing numbers
+print() # to see the missing numbers
 (x2, y2, l2, s2) = (x1, y1, l1, s1)
-print s1
+print(s1)
 else:
 raise Exception('invalid parts intersection: ' + s1 + ' and ' + s2)
-print
+print()
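The awkward `lambda i1_i2_l_s: ...` rewrite is forced by Python 3's removal of tuple parameter unpacking in function signatures (PEP 3113), which `lambda (i1, i2, l, s): ...` relied on. A small illustration with made-up data; `sort_key` is a hypothetical helper, not from the script:

```python
parts = [(3, 5, 2, 'a'), (1, 4, 7, 'b'), (1, 2, 9, 'c')]  # illustrative tuples

# Python 2 only:  parts.sort(key=lambda (i1, i2, l, s): (i1, -i2, -l))
# Python 3: index into the tuple, as the converted code does...
parts.sort(key=lambda t: (t[0], -t[1], -t[2]))

# ...or unpack inside a named helper, which reads better:
def sort_key(item):
    i1, i2, l, s = item
    return (i1, -i2, -l)

parts.sort(key=sort_key)
```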
@@ -1,5 +1,5 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
-from __future__ import print_function
 import sys
 import os
 import os.path
@@ -23,7 +23,7 @@ try:
 except ImportError:
 termcolor = None
 from random import random
-import commands
+import subprocess
 import multiprocessing
 from contextlib import closing

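`import commands` has to go because the `commands` module was removed in Python 3; its `getstatusoutput` helper moved to `subprocess` with the same call shape, which is what the later hunks in this file switch to. A quick sketch (the shell command is illustrative):

```python
import subprocess

# Python 2: code, out = commands.getstatusoutput('ls /tmp | wc -l')
code, out = subprocess.getstatusoutput('ls /tmp | wc -l')  # runs via the shell
print(code, out)
```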
@@ -99,7 +99,7 @@ def remove_control_characters(s):
 """
 def str_to_int(s, default, base=10):
 if int(s, base) < 0x10000:
-return unichr(int(s, base))
+return chr(int(s, base))
 return default
 s = re.sub(r"&#(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
 s = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
@@ -129,8 +129,8 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
 return ''.join(random.choice(alphabet) for _ in range(length))
 database = 'test_{suffix}'.format(suffix=random_str())

-clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-clickhouse_proc_create.communicate("CREATE DATABASE " + database + get_db_engine(args))
+clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+clickhouse_proc_create.communicate(("CREATE DATABASE " + database + get_db_engine(args)))

 os.environ["CLICKHOUSE_DATABASE"] = database

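Adding `universal_newlines=True` switches the `Popen` pipes to text mode, so `communicate()` keeps accepting and returning `str` as it did under Python 2; without it the pipes carry `bytes`. A minimal sketch, with `clickhouse-client` used only as a stand-in command:

```python
from subprocess import Popen, PIPE

proc = Popen(['clickhouse-client'], stdin=PIPE, stdout=PIPE, stderr=PIPE,
             universal_newlines=True)  # text mode: str in, str out
out, err = proc.communicate('CREATE DATABASE IF NOT EXISTS test_db')
```

On Python 3.7+ the same switch can be spelled `text=True`.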
@@ -157,8 +157,8 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
 sleep(0.01)

 if not args.database:
-clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-clickhouse_proc_create.communicate("DROP DATABASE " + database)
+clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+clickhouse_proc_create.communicate(("DROP DATABASE " + database))

 total_time = (datetime.now() - start_time).total_seconds()

@@ -166,10 +166,10 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
 os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stdout_file))
 os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stderr_file))

-stdout = open(stdout_file, 'r').read() if os.path.exists(stdout_file) else ''
-stdout = unicode(stdout, errors='replace', encoding='utf-8')
-stderr = open(stderr_file, 'r').read() if os.path.exists(stderr_file) else ''
-stderr = unicode(stderr, errors='replace', encoding='utf-8')
+stdout = open(stdout_file, 'rb').read() if os.path.exists(stdout_file) else b''
+stdout = str(stdout, errors='replace', encoding='utf-8')
+stderr = open(stderr_file, 'rb').read() if os.path.exists(stderr_file) else b''
+stderr = str(stderr, errors='replace', encoding='utf-8')

 return proc, stdout, stderr, total_time

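Reading the captured output in binary mode (`'rb'`, with a `b''` fallback) and then decoding via `str(..., encoding='utf-8', errors='replace')` is the Python 3 counterpart of the old `unicode(...)` call: it tolerates invalid byte sequences in test output instead of raising. Sketch with a hypothetical file name:

```python
import os

stdout_file = 'test.stdout'  # illustrative path
raw = open(stdout_file, 'rb').read() if os.path.exists(stdout_file) else b''
text = str(raw, encoding='utf-8', errors='replace')  # same as raw.decode('utf-8', 'replace')
```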
@@ -300,8 +300,8 @@ def run_tests_array(all_tests_with_params):
 else:

 if args.testname:
-clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-clickhouse_proc.communicate("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite))
+clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+clickhouse_proc.communicate(("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite)))

 if clickhouse_proc.returncode != 0:
 failures += 1
@@ -342,7 +342,7 @@ def run_tests_array(all_tests_with_params):
 print(" - return code {}".format(proc.returncode))

 if stderr:
-print(stderr.encode('utf-8'))
+print(stderr)

 # Stop on fatal errors like segmentation fault. They are send to client via logs.
 if ' <Fatal> ' in stderr:
@@ -360,22 +360,22 @@ def run_tests_array(all_tests_with_params):
 failures_chain += 1
 print(MSG_FAIL, end='')
 print_test_time(total_time)
-print(" - having stderror:\n{}".format(stderr.encode('utf-8')))
+print(" - having stderror:\n{}".format(stderr))
 elif 'Exception' in stdout:
 failures += 1
 failures_chain += 1
 print(MSG_FAIL, end='')
 print_test_time(total_time)
-print(" - having exception:\n{}".format(stdout.encode('utf-8')))
+print(" - having exception:\n{}".format(stdout))
 elif not os.path.isfile(reference_file):
 print(MSG_UNKNOWN, end='')
 print_test_time(total_time)
 print(" - no reference file")
 else:
-result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout = PIPE)
+result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout=PIPE)

 if result_is_different:
-diff = Popen(['diff', '-U', str(args.unified), reference_file, stdout_file], stdout = PIPE).communicate()[0]
+diff = Popen(['diff', '-U', str(args.unified), reference_file, stdout_file], stdout=PIPE, universal_newlines=True).communicate()[0]
 failures += 1
 print(MSG_FAIL, end='')
 print_test_time(total_time)
@@ -419,9 +419,9 @@ def check_server_started(client, retry_count):
 sys.stdout.flush()
 while retry_count > 0:
 clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("SELECT 1")
+(stdout, stderr) = clickhouse_proc.communicate(b"SELECT 1")

-if clickhouse_proc.returncode == 0 and stdout.startswith("1"):
+if clickhouse_proc.returncode == 0 and stdout.startswith(b"1"):
 print(" OK")
 sys.stdout.flush()
 return True
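Here the `Popen` pipes are left in the default binary mode, so the query handed to `communicate()` must be a bytes literal and the returned `stdout` is compared against bytes as well; mixing `str` and `bytes` raises `TypeError` in Python 3. Sketch with a stand-in client command:

```python
from subprocess import Popen, PIPE
import shlex

client = 'clickhouse-client'  # illustrative
proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate(b"SELECT 1")   # bytes in, bytes out
server_up = proc.returncode == 0 and stdout.startswith(b"1")
```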
@@ -468,46 +468,46 @@ class BuildFlags(object):

 def collect_build_flags(client):
 clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
+(stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
 result = []

 if clickhouse_proc.returncode == 0:
-if '-fsanitize=thread' in stdout:
+if b'-fsanitize=thread' in stdout:
 result.append(BuildFlags.THREAD)
-elif '-fsanitize=address' in stdout:
+elif b'-fsanitize=address' in stdout:
 result.append(BuildFlags.ADDRESS)
-elif '-fsanitize=undefined' in stdout:
+elif b'-fsanitize=undefined' in stdout:
 result.append(BuildFlags.UNDEFINED)
-elif '-fsanitize=memory' in stdout:
+elif b'-fsanitize=memory' in stdout:
 result.append(BuildFlags.MEMORY)
 else:
 raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

 clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'")
+(stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'")

 if clickhouse_proc.returncode == 0:
-if 'Debug' in stdout:
+if b'Debug' in stdout:
 result.append(BuildFlags.DEBUG)
-elif 'RelWithDebInfo' in stdout or 'Release' in stdout:
+elif b'RelWithDebInfo' in stdout or b'Release' in stdout:
 result.append(BuildFlags.RELEASE)
 else:
 raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

 clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'")
+(stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'")

 if clickhouse_proc.returncode == 0:
-if 'ON' in stdout or '1' in stdout:
+if b'ON' in stdout or b'1' in stdout:
 result.append(BuildFlags.UNBUNDLED)
 else:
 raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

 clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.settings WHERE name = 'default_database_engine'")
+(stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.settings WHERE name = 'default_database_engine'")

 if clickhouse_proc.returncode == 0:
-if 'Ordinary' in stdout:
+if b'Ordinary' in stdout:
 result.append(BuildFlags.DATABASE_ORDINARY)
 else:
 raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))
@@ -523,11 +523,11 @@ def main(args):

 def is_data_present():
 clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-(stdout, stderr) = clickhouse_proc.communicate("EXISTS TABLE test.hits")
+(stdout, stderr) = clickhouse_proc.communicate(b"EXISTS TABLE test.hits")
 if clickhouse_proc.returncode != 0:
 raise CalledProcessError(clickhouse_proc.returncode, args.client, stderr)

-return stdout.startswith('1')
+return stdout.startswith(b'1')

 if not check_server_started(args.client, args.server_check_retries):
 raise Exception("clickhouse-server is not responding. Cannot execute 'SELECT 1' query.")
@@ -562,7 +562,7 @@ def main(args):
 stop_time = time() + args.global_time_limit

 if args.zookeeper is None:
-code, out = commands.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l')
+code, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l')
 try:
 if int(out) > 0:
 args.zookeeper = True
@@ -572,18 +572,18 @@ def main(args):
 args.zookeeper = False

 if args.shard is None:
-code, out = commands.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key listen_host | grep -E "127.0.0.2|::"')
+code, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key listen_host | grep -E "127.0.0.2|::"')
 if out:
 args.shard = True
 else:
 args.shard = False

 if args.database and args.database != "test":
-clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS " + args.database + get_db_engine(args))
+clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + args.database + get_db_engine(args)))

-clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS test" + get_db_engine(args))
+clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS test" + get_db_engine(args)))

 def is_test_from_dir(suite_dir, case):
 case_file = os.path.join(suite_dir, case)
@@ -595,14 +595,14 @@ def main(args):
 return random()

 if -1 == item.find('_'):
-return 99998
+return 99998, ''

 prefix, suffix = item.split('_', 1)

 try:
 return int(prefix), suffix
 except ValueError:
-return 99997
+return 99997, ''

 total_tests_run = 0
 for suite in sorted(os.listdir(base_dir), key=sute_key_func):
@@ -650,7 +650,7 @@ def main(args):
 return 99997

 all_tests = os.listdir(suite_dir)
-all_tests = filter(lambda case: is_test_from_dir(suite_dir, case), all_tests)
+all_tests = [case for case in all_tests if is_test_from_dir(suite_dir, case)]
 if args.test:
 all_tests = [t for t in all_tests if any([re.search(r, t) for r in args.test])]
 all_tests.sort(key=key_func)
@@ -670,7 +670,7 @@ def main(args):
 if jobs > run_total:
 run_total = jobs

-batch_size = len(all_tests) / jobs
+batch_size = len(all_tests) // jobs
 all_tests_array = []
 for i in range(0, len(all_tests), batch_size):
 all_tests_array.append((all_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
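`len(all_tests) / jobs` has to become `//` because `/` is true division in Python 3 and yields a float, which `range()` (used just below as the slicing step) would reject. A small sketch with made-up values:

```python
all_tests = ['t1', 't2', 't3', 't4', 't5']   # illustrative
jobs = 2
batch_size = len(all_tests) // jobs          # 2, an int; '/' would give 2.5
batches = [all_tests[i:i + batch_size] for i in range(0, len(all_tests), batch_size)]
```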
@@ -37,7 +37,7 @@ class ClickHouseServer:

 s.connect(('localhost', port))
 except socket.error as socketerror:
-print "Error: ", socketerror
+print("Error: ", socketerror)
 raise

 def shutdown(self, timeout=10):
@@ -1,6 +1,6 @@
-from server import ClickHouseServer
-from client import ClickHouseClient
-from table import ClickHouseTable
+from .server import ClickHouseServer
+from .client import ClickHouseClient
+from .table import ClickHouseTable
 import os
 import errno
 from shutil import rmtree
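The leading dots are required because Python 3 dropped implicit relative imports (PEP 328): inside a package, `from server import ...` now means an absolute import of a top-level `server` module, so sibling modules must be imported with an explicit relative path. A sketch assuming a package layout like the one implied here (names are hypothetical):

```python
# Hypothetical layout:
#   catboost_helpers/__init__.py
#   catboost_helpers/server.py   (defines ClickHouseServer)
#   catboost_helpers/client.py   (defines ClickHouseClient)

# In catboost_helpers/__init__.py under Python 3:
from .server import ClickHouseServer   # explicit relative import
from .client import ClickHouseClient
```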
@@ -140,7 +140,7 @@ class ClickHouseServerWithCatboostModels:
 if not os.path.exists(self.models_dir):
 os.makedirs(self.models_dir)

-for name, model in self.models.items():
+for name, model in list(self.models.items()):
 model_path = os.path.join(self.models_dir, name + '.cbm')
 config_path = os.path.join(self.models_dir, name + '_model.xml')
 params = {
@@ -1,5 +1,5 @@
-from server import ClickHouseServer
-from client import ClickHouseClient
+from .server import ClickHouseServer
+from .client import ClickHouseClient
 from pandas import DataFrame
 import os
 import threading
@@ -40,7 +40,7 @@ class ClickHouseTable:
 column_types = list(self.df.dtypes)
 column_names = list(self.df)
 schema = ', '.join((name + ' ' + self._convert(str(t)) for name, t in zip(column_names, column_types)))
-print 'schema:', schema
+print('schema:', schema)

 create_query = 'create table test.{} (date Date DEFAULT today(), {}) engine = MergeTree(date, (date), 8192)'
 self.client.query(create_query.format(self.table_name, schema))
@@ -58,10 +58,10 @@ class ClickHouseTable:
 result = self.client.query(query.format(model_name, columns, self.table_name))

 def parse_row(row):
-values = tuple(map(float, filter(len, map(str.strip, row.replace('(', '').replace(')', '').split(',')))))
+values = tuple(map(float, list(filter(len, list(map(str.strip, row.replace('(', '').replace(')', '').split(',')))))))
 return values if len(values) != 1 else values[0]

-return tuple(map(parse_row, filter(len, map(str.strip, result.split('\n')))))
+return tuple(map(parse_row, list(filter(len, list(map(str.strip, result.split('\n')))))))

 def _drop_table(self):
 self.client.query('drop table test.{}'.format(self.table_name))
@@ -19,10 +19,10 @@ def train_catboost_model(df, target, cat_features, params, verbose=True):
 if not isinstance(df, DataFrame):
 raise Exception('DataFrame object expected, but got ' + repr(df))

-print 'features:', df.columns.tolist()
+print('features:', df.columns.tolist())

 cat_features_index = list(df.columns.get_loc(feature) for feature in cat_features)
-print 'cat features:', cat_features_index
+print('cat features:', cat_features_index)
 model = CatBoostClassifier(**params)
 model.fit(df, target, cat_features=cat_features_index, verbose=verbose)
 return model
@@ -23,7 +23,7 @@ def check_predictions(test_name, target, pred_python, pred_ch, acc_threshold):

 acc = 1 - np.sum(np.abs(ch_class - np.array(target))) / (len(target) + .0)
 assert acc >= acc_threshold
-print test_name, 'accuracy: {:.10f}'.format(acc)
+print(test_name, 'accuracy: {:.10f}'.format(acc))


 def test_apply_float_features_only():
@@ -52,9 +52,9 @@ def test_apply_float_features_only():
 train_target = get_target(train_df)
 test_target = get_target(test_df)

-print
-print 'train target', train_target
-print 'test target', test_target
+print()
+print('train target', train_target)
+print('test target', test_target)

 params = {
 'iterations': 4,
@@ -71,8 +71,8 @@ def test_apply_float_features_only():
 with server:
 pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-print 'python predictions', pred_python
-print 'clickhouse predictions', pred_ch
+print('python predictions', pred_python)
+print('clickhouse predictions', pred_ch)

 check_predictions(name, test_target, pred_python, pred_ch, 0.9)

@@ -105,9 +105,9 @@ def test_apply_float_features_with_string_cat_features():
 train_target = get_target(train_df)
 test_target = get_target(test_df)

-print
-print 'train target', train_target
-print 'test target', test_target
+print()
+print('train target', train_target)
+print('test target', test_target)

 params = {
 'iterations': 6,
@@ -124,8 +124,8 @@ def test_apply_float_features_with_string_cat_features():
 with server:
 pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-print 'python predictions', pred_python
-print 'clickhouse predictions', pred_ch
+print('python predictions', pred_python)
+print('clickhouse predictions', pred_ch)

 check_predictions(name, test_target, pred_python, pred_ch, 0.9)

@@ -158,9 +158,9 @@ def test_apply_float_features_with_int_cat_features():
 train_target = get_target(train_df)
 test_target = get_target(test_df)

-print
-print 'train target', train_target
-print 'test target', test_target
+print()
+print('train target', train_target)
+print('test target', test_target)

 params = {
 'iterations': 6,
@@ -177,8 +177,8 @@ def test_apply_float_features_with_int_cat_features():
 with server:
 pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-print 'python predictions', pred_python
-print 'clickhouse predictions', pred_ch
+print('python predictions', pred_python)
+print('clickhouse predictions', pred_ch)

 check_predictions(name, test_target, pred_python, pred_ch, 0.9)

@@ -211,9 +211,9 @@ def test_apply_float_features_with_mixed_cat_features():
 train_target = get_target(train_df)
 test_target = get_target(test_df)

-print
-print 'train target', train_target
-print 'test target', test_target
+print()
+print('train target', train_target)
+print('test target', test_target)

 params = {
 'iterations': 6,
@@ -230,8 +230,8 @@ def test_apply_float_features_with_mixed_cat_features():
 with server:
 pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-print 'python predictions', pred_python
-print 'clickhouse predictions', pred_ch
+print('python predictions', pred_python)
+print('clickhouse predictions', pred_ch)

 check_predictions(name, test_target, pred_python, pred_ch, 0.9)

@@ -269,9 +269,9 @@ def test_apply_multiclass():
 train_target = get_target(train_df)
 test_target = get_target(test_df)

-print
-print 'train target', train_target
-print 'test target', test_target
+print()
+print('train target', train_target)
+print('test target', test_target)

 params = {
 'iterations': 10,
@@ -288,7 +288,7 @@ def test_apply_multiclass():
 with server:
 pred_ch = np.argmax(np.array(server.apply_model(name, test_df, [])), axis=1)

-print 'python predictions', pred_python
-print 'clickhouse predictions', pred_ch
+print('python predictions', pred_python)
+print('clickhouse predictions', pred_ch)

 check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -12,11 +12,11 @@ You must install latest Docker from
 https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository
 Don't use Docker from your system repository.

-* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev`
+* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev`
 * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
-* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry`
+* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio confluent-kafka avro`

-(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2 python-kafka python-pytest-timeout python-minio`
+(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`

 If you want to run the tests under a non-privileged user, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and re-login.
 (You must close all your sessions (for example, restart your computer))
@@ -31,7 +31,7 @@ class Client:
 command += ['--query', sql]

 if settings is not None:
-for setting, value in settings.iteritems():
+for setting, value in settings.items():
 command += ['--' + setting, str(value)]

 if user is not None:
|
|||||||
class CommandRequest:
|
class CommandRequest:
|
||||||
def __init__(self, command, stdin=None, timeout=None, ignore_error=False):
|
def __init__(self, command, stdin=None, timeout=None, ignore_error=False):
|
||||||
# Write data to tmp file to avoid PIPEs and execution blocking
|
# Write data to tmp file to avoid PIPEs and execution blocking
|
||||||
stdin_file = tempfile.TemporaryFile()
|
stdin_file = tempfile.TemporaryFile(mode='w+')
|
||||||
stdin_file.write(stdin)
|
stdin_file.write(stdin)
|
||||||
stdin_file.seek(0)
|
stdin_file.seek(0)
|
||||||
self.stdout_file = tempfile.TemporaryFile()
|
self.stdout_file = tempfile.TemporaryFile()
|
||||||
@ -80,7 +80,7 @@ class CommandRequest:
|
|||||||
# can print some debug information there
|
# can print some debug information there
|
||||||
env = {}
|
env = {}
|
||||||
env["TSAN_OPTIONS"] = "verbosity=0"
|
env["TSAN_OPTIONS"] = "verbosity=0"
|
||||||
self.process = sp.Popen(command, stdin=stdin_file, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
|
self.process = sp.Popen(command, stdin=stdin_file, stdout=self.stdout_file, stderr=self.stderr_file, env=env, universal_newlines=True)
|
||||||
|
|
||||||
self.timer = None
|
self.timer = None
|
||||||
self.process_finished_before_timeout = True
|
self.process_finished_before_timeout = True
|
||||||
@ -98,8 +98,8 @@ class CommandRequest:
|
|||||||
self.stdout_file.seek(0)
|
self.stdout_file.seek(0)
|
||||||
self.stderr_file.seek(0)
|
self.stderr_file.seek(0)
|
||||||
|
|
||||||
stdout = self.stdout_file.read()
|
stdout = self.stdout_file.read().decode()
|
||||||
stderr = self.stderr_file.read()
|
stderr = self.stderr_file.read().decode()
|
||||||
|
|
||||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||||
raise QueryTimeoutExceedException('Client timed out!')
|
raise QueryTimeoutExceedException('Client timed out!')
|
||||||
@ -115,8 +115,8 @@ class CommandRequest:
|
|||||||
self.stdout_file.seek(0)
|
self.stdout_file.seek(0)
|
||||||
self.stderr_file.seek(0)
|
self.stderr_file.seek(0)
|
||||||
|
|
||||||
stdout = self.stdout_file.read()
|
stdout = self.stdout_file.read().decode()
|
||||||
stderr = self.stderr_file.read()
|
stderr = self.stderr_file.read().decode()
|
||||||
|
|
||||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||||
raise QueryTimeoutExceedException('Client timed out!')
|
raise QueryTimeoutExceedException('Client timed out!')
|
||||||
@ -131,8 +131,8 @@ class CommandRequest:
|
|||||||
self.stdout_file.seek(0)
|
self.stdout_file.seek(0)
|
||||||
self.stderr_file.seek(0)
|
self.stderr_file.seek(0)
|
||||||
|
|
||||||
stdout = self.stdout_file.read()
|
stdout = self.stdout_file.read().decode()
|
||||||
stderr = self.stderr_file.read()
|
stderr = self.stderr_file.read().decode()
|
||||||
|
|
||||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||||
raise QueryTimeoutExceedException('Client timed out!')
|
raise QueryTimeoutExceedException('Client timed out!')
|
||||||
|
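The CommandRequest changes above follow a recurring Python 3 pattern: either open the temporary files and the subprocess in text mode, or keep everything binary and decode explicitly. A minimal sketch of the same idea (not from this commit), using `cat` as a stand-in for the real client binary:

```python
import subprocess as sp
import tempfile

# Text-mode temp file: .write() expects str in Python 3 (Python 2 accepted bytes too).
stdin_file = tempfile.TemporaryFile(mode='w+')
stdin_file.write("SELECT 1\n")
stdin_file.seek(0)

# universal_newlines=True makes Popen exchange str instead of bytes.
proc = sp.Popen(["cat"], stdin=stdin_file, stdout=sp.PIPE, universal_newlines=True)
out, _ = proc.communicate()
print(out.strip())  # -> SELECT 1
```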
@@ -1,6 +1,6 @@
 import base64
 import errno
-import httplib
+import http.client
 import logging
 import os
 import os.path as p
@@ -12,7 +12,7 @@ import socket
 import subprocess
 import time
 import traceback
-import urllib
+import urllib.parse
 
 import cassandra.cluster
 import docker
@@ -21,7 +21,7 @@ import pymongo
 import pymysql
 import requests
 import xml.dom.minidom
-from confluent.schemaregistry.client import CachedSchemaRegistryClient
+from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
 from dicttoxml import dicttoxml
 from kazoo.client import KazooClient
 from kazoo.exceptions import KazooException
@@ -41,7 +41,7 @@ SANITIZER_SIGN = "=================="
 def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
     full_path = os.path.join(path, fname)
     with open(full_path, 'w') as f:
-        for var, value in variables.items():
+        for var, value in list(variables.items()):
             f.write("=".join([var, value]) + "\n")
     return full_path
 
@@ -76,7 +76,7 @@ def get_docker_compose_path():
     if os.path.exists(os.path.dirname('/compose/')):
         return os.path.dirname('/compose/')  # default in docker runner container
     else:
-        print("Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {}".format(LOCAL_DOCKER_COMPOSE_DIR))
+        print(("Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {}".format(LOCAL_DOCKER_COMPOSE_DIR)))
     return LOCAL_DOCKER_COMPOSE_DIR
 
 
@@ -91,8 +91,8 @@ class ClickHouseCluster:
 
     def __init__(self, base_path, name=None, base_config_dir=None, server_bin_path=None, client_bin_path=None,
                  odbc_bridge_bin_path=None, zookeeper_config_path=None, custom_dockerd_host=None):
-        for param in os.environ.keys():
-            print "ENV %40s %s" % (param, os.environ[param])
+        for param in list(os.environ.keys()):
+            print("ENV %40s %s" % (param, os.environ[param]))
         self.base_dir = p.dirname(base_path)
         self.name = name if name is not None else ''
 
@@ -160,7 +160,7 @@ class ClickHouseCluster:
 
         self.docker_client = None
         self.is_up = False
-        print "CLUSTER INIT base_config_dir:{}".format(self.base_config_dir)
+        print("CLUSTER INIT base_config_dir:{}".format(self.base_config_dir))
 
     def get_client_cmd(self):
         cmd = self.client_bin_path
@@ -386,7 +386,7 @@ class ClickHouseCluster:
     def get_instance_ip(self, instance_name):
         docker_id = self.get_instance_docker_id(instance_name)
         handle = self.docker_client.containers.get(docker_id)
-        return handle.attrs['NetworkSettings']['Networks'].values()[0]['IPAddress']
+        return list(handle.attrs['NetworkSettings']['Networks'].values())[0]['IPAddress']
 
     def get_container_id(self, instance_name):
         docker_id = self.get_instance_docker_id(instance_name)
@@ -395,22 +395,21 @@ class ClickHouseCluster:
 
     def get_container_logs(self, instance_name):
         container_id = self.get_container_id(instance_name)
-        return self.docker_client.api.logs(container_id)
+        return self.docker_client.api.logs(container_id).decode()
 
     def exec_in_container(self, container_id, cmd, detach=False, nothrow=False, **kwargs):
         exec_id = self.docker_client.api.exec_create(container_id, cmd, **kwargs)
         output = self.docker_client.api.exec_start(exec_id, detach=detach)
 
-        output = output.decode('utf8')
         exit_code = self.docker_client.api.exec_inspect(exec_id)['ExitCode']
         if exit_code:
             container_info = self.docker_client.api.inspect_container(container_id)
             image_id = container_info.get('Image')
             image_info = self.docker_client.api.inspect_image(image_id)
-            print("Command failed in container {}: ".format(container_id))
+            print(("Command failed in container {}: ".format(container_id)))
             pprint.pprint(container_info)
             print("")
-            print("Container {} uses image {}: ".format(container_id, image_id))
+            print(("Container {} uses image {}: ".format(container_id, image_id)))
             pprint.pprint(image_info)
             print("")
             message = 'Cmd "{}" failed in container {}. Return code {}. Output: {}'.format(' '.join(cmd), container_id,
@@ -419,14 +418,17 @@ class ClickHouseCluster:
                 print(message)
             else:
                 raise Exception(message)
+        if not detach:
+            return output.decode()
         return output
 
     def copy_file_to_container(self, container_id, local_path, dest_path):
-        with open(local_path, 'r') as fdata:
+        with open(local_path, "r") as fdata:
             data = fdata.read()
-            encoded_data = base64.b64encode(data)
+            encodedBytes = base64.b64encode(data.encode("utf-8"))
+            encodedStr = str(encodedBytes, "utf-8")
             self.exec_in_container(container_id,
-                                   ["bash", "-c", "echo {} | base64 --decode > {}".format(encoded_data, dest_path)],
+                                   ["bash", "-c", "echo {} | base64 --decode > {}".format(encodedStr, dest_path)],
                                    user='root')
 
     def wait_mysql_to_start(self, timeout=60):
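The copy_file_to_container rewrite illustrates another recurring Python 3 detail: `base64.b64encode` takes and returns bytes, so text has to be encoded first and the result turned back into str before it is spliced into a shell command. A standalone sketch of that round trip (the sample XML is made up for illustration):

```python
import base64

data = "<yandex>\n    <max_query_size>77777</max_query_size>\n</yandex>\n"

# b64encode needs bytes in Python 3; encode the str first.
encoded_bytes = base64.b64encode(data.encode("utf-8"))
# Turn the bytes result back into str so it can be formatted into a shell command.
encoded_str = str(encoded_bytes, "utf-8")

command = "echo {} | base64 --decode > /tmp/config.xml".format(encoded_str)
print(command)

# Decoding restores the original text.
assert base64.b64decode(encoded_str).decode("utf-8") == data
```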
@@ -435,10 +437,10 @@ class ClickHouseCluster:
             try:
                 conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308)
                 conn.close()
-                print "Mysql Started"
+                print("Mysql Started")
                 return
             except Exception as ex:
-                print "Can't connect to MySQL " + str(ex)
+                print("Can't connect to MySQL " + str(ex))
                 time.sleep(0.5)
 
         subprocess_call(['docker-compose', 'ps', '--services', '--all'])
@@ -451,10 +453,10 @@ class ClickHouseCluster:
                 conn_string = "host='localhost' user='postgres' password='mysecretpassword'"
                 conn = psycopg2.connect(conn_string)
                 conn.close()
-                print "Postgres Started"
+                print("Postgres Started")
                 return
             except Exception as ex:
-                print "Can't connect to Postgres " + str(ex)
+                print("Can't connect to Postgres " + str(ex))
                 time.sleep(0.5)
 
         raise Exception("Cannot wait Postgres container")
@@ -466,10 +468,10 @@ class ClickHouseCluster:
                 for instance in ['zoo1', 'zoo2', 'zoo3']:
                     conn = self.get_kazoo_client(instance)
                     conn.get_children('/')
-                print "All instances of ZooKeeper started"
+                print("All instances of ZooKeeper started")
                 return
             except Exception as ex:
-                print "Can't connect to ZooKeeper " + str(ex)
+                print("Can't connect to ZooKeeper " + str(ex))
                 time.sleep(0.5)
 
         raise Exception("Cannot wait ZooKeeper container")
@@ -480,10 +482,10 @@ class ClickHouseCluster:
         while time.time() - start < timeout:
             try:
                 hdfs_api.write_data("/somefilewithrandomname222", "1")
-                print "Connected to HDFS and SafeMode disabled! "
+                print("Connected to HDFS and SafeMode disabled! ")
                 return
             except Exception as ex:
-                print "Can't connect to HDFS " + str(ex)
+                print("Can't connect to HDFS " + str(ex))
                 time.sleep(1)
 
         raise Exception("Can't wait HDFS to start")
@@ -496,10 +498,10 @@ class ClickHouseCluster:
         while time.time() - start < timeout:
             try:
                 connection.list_database_names()
-                print "Connected to Mongo dbs:", connection.list_database_names()
+                print("Connected to Mongo dbs:", connection.database_names())
                 return
             except Exception as ex:
-                print "Can't connect to Mongo " + str(ex)
+                print("Can't connect to Mongo " + str(ex))
                 time.sleep(1)
 
     def wait_minio_to_start(self, timeout=30, secure=False):
@@ -519,12 +521,12 @@ class ClickHouseCluster:
 
                 minio_client.make_bucket(self.minio_bucket)
 
-                print("S3 bucket '%s' created", self.minio_bucket)
+                print(("S3 bucket '%s' created", self.minio_bucket))
 
                 self.minio_client = minio_client
                 return
             except Exception as ex:
-                print("Can't connect to Minio: %s", str(ex))
+                print(("Can't connect to Minio: %s", str(ex)))
                 time.sleep(1)
 
         raise Exception("Can't wait Minio to start")
@@ -539,7 +541,7 @@ class ClickHouseCluster:
                 print("Connected to SchemaRegistry")
                 return
             except Exception as ex:
-                print("Can't connect to SchemaRegistry: %s", str(ex))
+                print(("Can't connect to SchemaRegistry: %s", str(ex)))
                 time.sleep(1)
 
     def wait_cassandra_to_start(self, timeout=30):
@@ -555,7 +557,7 @@ class ClickHouseCluster:
                 time.sleep(1)
 
     def start(self, destroy_dirs=True):
-        print "Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs)
+        print("Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs))
         if self.is_up:
             return
 
@@ -571,11 +573,11 @@ class ClickHouseCluster:
 
         try:
             if destroy_dirs and p.exists(self.instances_dir):
-                print("Removing instances dir %s", self.instances_dir)
+                print(("Removing instances dir %s", self.instances_dir))
                 shutil.rmtree(self.instances_dir)
 
-            for instance in self.instances.values():
-                print('Setup directory for instance: {} destroy_dirs: {}'.format(instance.name, destroy_dirs))
+            for instance in list(self.instances.values()):
+                print(('Setup directory for instance: {} destroy_dirs: {}'.format(instance.name, destroy_dirs)))
                 instance.create_dir(destroy_dir=destroy_dirs)
 
             self.docker_client = docker.from_env(version=self.docker_api_version)
@@ -676,12 +678,12 @@ class ClickHouseCluster:
                 self.wait_cassandra_to_start()
 
             clickhouse_start_cmd = self.base_cmd + ['up', '-d', '--no-recreate']
-            print("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd)))
+            print(("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd))))
             subprocess_check_call(clickhouse_start_cmd)
             print("ClickHouse instance created")
 
             start_deadline = time.time() + 20.0  # seconds
-            for instance in self.instances.itervalues():
+            for instance in self.instances.values():
                 instance.docker_client = self.docker_client
                 instance.ip_address = self.get_instance_ip(instance.name)
 
@@ -693,10 +695,10 @@ class ClickHouseCluster:
 
             self.is_up = True
 
-        except BaseException, e:
-            print "Failed to start cluster: "
-            print str(e)
-            print traceback.print_exc()
+        except BaseException as e:
+            print("Failed to start cluster: ")
+            print(str(e))
+            print(traceback.print_exc())
             raise
 
     def shutdown(self, kill=True):
@@ -705,7 +707,7 @@ class ClickHouseCluster:
             try:
                 subprocess.check_call(self.base_cmd + ['logs'], stdout=f)
             except Exception as e:
                print "Unable to get logs from docker." becomes:
-                print "Unable to get logs from docker."
+                print("Unable to get logs from docker.")
            f.seek(0)
             for line in f:
                 if SANITIZER_SIGN in line:
@@ -716,18 +718,18 @@ class ClickHouseCluster:
             try:
                 subprocess_check_call(self.base_cmd + ['kill'])
             except Exception as e:
-                print "Kill command failed durung shutdown. {}".format(repr(e))
+                print("Kill command failed durung shutdown. {}".format(repr(e)))
 
             try:
                 subprocess_check_call(self.base_cmd + ['down', '--volumes', '--remove-orphans'])
             except Exception as e:
-                print "Down + remove orphans failed durung shutdown. {}".format(repr(e))
+                print("Down + remove orphans failed durung shutdown. {}".format(repr(e)))
 
         self.is_up = False
 
         self.docker_client = None
 
-        for instance in self.instances.values():
+        for instance in list(self.instances.values()):
             instance.docker_client = None
             instance.ip_address = None
             instance.client = None
@@ -769,7 +771,7 @@ class ClickHouseCluster:
                 kazoo_callback(self.get_kazoo_client(zoo_instance_name))
                 return
             except KazooException as e:
                print repr(e) becomes:
-                print repr(e)
+                print(repr(e))
                time.sleep(sleep_for)
 
        kazoo_callback(self.get_kazoo_client(zoo_instance_name))
@@ -922,7 +924,7 @@ class ClickHouseInstance:
                     return result
                 time.sleep(sleep_time)
             except Exception as ex:
-                print "Retry {} got exception {}".format(i + 1, ex)
+                print("Retry {} got exception {}".format(i + 1, ex))
                 time.sleep(sleep_time)
 
         if result is not None:
@@ -954,28 +956,30 @@ class ClickHouseInstance:
 
         params["query"] = sql
 
-        auth = ""
+        auth = None
         if user and password:
-            auth = "{}:{}@".format(user, password)
+            auth = requests.auth.HTTPBasicAuth(user, password)
         elif user:
-            auth = "{}@".format(user)
+            auth = requests.auth.HTTPBasicAuth(user, '')
+        url = "http://" + self.ip_address + ":8123/?" + urllib.parse.urlencode(params)
 
-        url = "http://" + auth + self.ip_address + ":8123/?" + urllib.urlencode(params)
-
-        open_result = urllib.urlopen(url, data)
+        if data:
+            r = requests.post(url, data, auth=auth)
+        else:
+            r = requests.get(url, auth=auth)
 
         def http_code_and_message():
-            return str(open_result.getcode()) + " " + httplib.responses[
-                open_result.getcode()] + ": " + open_result.read()
+            code = r.status_code
+            return str(code) + " " + http.client.responses[code] + ": " + r.text
 
         if expect_fail_and_get_error:
-            if open_result.getcode() == 200:
-                raise Exception("ClickHouse HTTP server is expected to fail, but succeeded: " + open_result.read())
+            if r.ok:
+                raise Exception("ClickHouse HTTP server is expected to fail, but succeeded: " + r.text)
             return http_code_and_message()
         else:
-            if open_result.getcode() != 200:
+            if not r.ok:
                 raise Exception("ClickHouse HTTP server returned " + http_code_and_message())
-            return open_result.read()
+            return r.text
 
     # Connects to the instance via HTTP interface, sends a query and returns the answer
     def http_request(self, url, method='GET', params=None, data=None, headers=None):
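The http_query change replaces urllib with credentials embedded in the URL by requests plus HTTPBasicAuth, which also removes the manual status-code handling. A reduced sketch of the same approach (not from this commit), assuming a ClickHouse HTTP endpoint listening on localhost:8123 with the default user:

```python
import urllib.parse
import requests

params = {"query": "SELECT 1"}
url = "http://127.0.0.1:8123/?" + urllib.parse.urlencode(params)

# Credentials go into an auth object instead of being spliced into the URL.
auth = requests.auth.HTTPBasicAuth("default", "")

r = requests.get(url, auth=auth)
if not r.ok:
    raise Exception("ClickHouse HTTP server returned {} {}: {}".format(
        r.status_code, r.reason, r.text))
print(r.text.strip())  # -> 1
```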
@@ -1161,9 +1165,9 @@ class ClickHouseInstance:
 
     def _create_odbc_config_file(self):
         with open(self.odbc_ini_path.split(':')[0], 'w') as f:
-            for driver_setup in self.odbc_drivers.values():
+            for driver_setup in list(self.odbc_drivers.values()):
                 f.write("[{}]\n".format(driver_setup["DSN"]))
-                for key, value in driver_setup.items():
+                for key, value in list(driver_setup.items()):
                     if key != "DSN":
                         f.write(key + "=" + value + "\n")
 
@@ -1183,16 +1187,16 @@ class ClickHouseInstance:
         instance_config_dir = p.abspath(p.join(self.path, 'configs'))
         os.makedirs(instance_config_dir)
 
-        print "Copy common default production configuration from {}".format(self.base_config_dir)
+        print("Copy common default production configuration from {}".format(self.base_config_dir))
         shutil.copyfile(p.join(self.base_config_dir, 'config.xml'), p.join(instance_config_dir, 'config.xml'))
         shutil.copyfile(p.join(self.base_config_dir, 'users.xml'), p.join(instance_config_dir, 'users.xml'))
 
-        print "Create directory for configuration generated in this helper"
+        print("Create directory for configuration generated in this helper")
         # used by all utils with any config
         conf_d_dir = p.abspath(p.join(instance_config_dir, 'conf.d'))
         os.mkdir(conf_d_dir)
 
-        print "Create directory for common tests configuration"
+        print("Create directory for common tests configuration")
         # used by server with main config.xml
         self.config_d_dir = p.abspath(p.join(instance_config_dir, 'config.d'))
         os.mkdir(self.config_d_dir)
@@ -1201,14 +1205,14 @@ class ClickHouseInstance:
         dictionaries_dir = p.abspath(p.join(instance_config_dir, 'dictionaries'))
         os.mkdir(dictionaries_dir)
 
-        print "Copy common configuration from helpers"
+        print("Copy common configuration from helpers")
         # The file is named with 0_ prefix to be processed before other configuration overloads.
         shutil.copy(p.join(HELPERS_DIR, '0_common_instance_config.xml'), self.config_d_dir)
         shutil.copy(p.join(HELPERS_DIR, '0_common_instance_users.xml'), users_d_dir)
         if len(self.custom_dictionaries_paths):
             shutil.copy(p.join(HELPERS_DIR, '0_common_enable_dictionaries.xml'), self.config_d_dir)
 
-        print "Generate and write macros file"
+        print("Generate and write macros file")
         macros = self.macros.copy()
         macros['instance'] = self.name
         with open(p.join(conf_d_dir, 'macros.xml'), 'w') as macros_config:
@@ -1222,7 +1226,7 @@ class ClickHouseInstance:
             shutil.copytree(self.kerberos_secrets_dir, p.abspath(p.join(self.path, 'secrets')))
 
         # Copy config.d configs
-        print "Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir)
+        print("Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir))
         for path in self.custom_main_config_paths:
             shutil.copy(path, self.config_d_dir)
 
@@ -1235,16 +1239,16 @@ class ClickHouseInstance:
             shutil.copy(path, dictionaries_dir)
 
         db_dir = p.abspath(p.join(self.path, 'database'))
-        print "Setup database dir {}".format(db_dir)
+        print("Setup database dir {}".format(db_dir))
         if self.clickhouse_path_dir is not None:
-            print "Database files taken from {}".format(self.clickhouse_path_dir)
+            print("Database files taken from {}".format(self.clickhouse_path_dir))
             shutil.copytree(self.clickhouse_path_dir, db_dir)
-            print "Database copied from {} to {}".format(self.clickhouse_path_dir, db_dir)
+            print("Database copied from {} to {}".format(self.clickhouse_path_dir, db_dir))
         else:
             os.mkdir(db_dir)
 
         logs_dir = p.abspath(p.join(self.path, 'logs'))
-        print "Setup logs dir {}".format(logs_dir)
+        print("Setup logs dir {}".format(logs_dir))
         os.mkdir(logs_dir)
 
         depends_on = []
@@ -1272,7 +1276,7 @@ class ClickHouseInstance:
 
         env_file = _create_env_file(os.path.dirname(self.docker_compose_path), self.env_variables)
 
-        print "Env {} stored in {}".format(self.env_variables, env_file)
+        print("Env {} stored in {}".format(self.env_variables, env_file))
 
         odbc_ini_path = ""
         if self.odbc_ini_path:
@@ -1284,7 +1288,7 @@ class ClickHouseInstance:
         if self.stay_alive:
             entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND
 
-        print "Entrypoint cmd: {}".format(entrypoint_cmd)
+        print("Entrypoint cmd: {}".format(entrypoint_cmd))
 
         networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = ""
         if self.ipv4_address is not None or self.ipv6_address is not None or self.hostname != self.name:
@@ -176,7 +176,7 @@ class SourceMongo(ExternalSource):
         to_insert = []
         for row in data:
             row_dict = {}
-            for cell_name, cell_value in row.data.items():
+            for cell_name, cell_value in list(row.data.items()):
                 row_dict[cell_name] = self.converters[cell_name](cell_value)
             to_insert.append(row_dict)
 
@@ -387,7 +387,7 @@ class SourceHTTPBase(ExternalSource):
         self.node.exec_in_container([
             "bash",
             "-c",
-            "python2 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format(
+            "python3 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format(
                 tbl=path, schema=self._get_schema(), host=self.docker_hostname, port=self.http_port)
         ], detach=True)
         self.ordered_names = structure.get_ordered_names()
@@ -573,12 +573,14 @@ class SourceAerospike(ExternalSource):
     def _flush_aerospike_db(self):
         keys = []
 
-        def handle_record((key, metadata, record)):
-            print("Handle record {} {}".format(key, record))
+        def handle_record(xxx_todo_changeme):
+            (key, metadata, record) = xxx_todo_changeme
+            print(("Handle record {} {}".format(key, record)))
             keys.append(key)
 
-        def print_record((key, metadata, record)):
-            print("Print record {} {}".format(key, record))
+        def print_record(xxx_todo_changeme1):
+            (key, metadata, record) = xxx_todo_changeme1
+            print(("Print record {} {}".format(key, record)))
 
         scan = self.client.scan(self.namespace, self.set)
         scan.foreach(handle_record)
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-import StringIO
+import io
 import gzip
 import subprocess
 from tempfile import NamedTemporaryFile
@@ -14,7 +14,7 @@ class HDFSApi(object):
         self.http_data_port = "50075"
         self.user = user
 
-    def read_data(self, path):
+    def read_data(self, path, universal_newlines=True):
         response = requests.get(
             "http://{host}:{port}/webhdfs/v1{path}?op=OPEN".format(host=self.host, port=self.http_proxy_port,
                                                                    path=path), allow_redirects=False)
@@ -27,7 +27,10 @@ class HDFSApi(object):
         if response_data.status_code != 200:
             response_data.raise_for_status()
 
-        return response_data.content
+        if universal_newlines:
+            return response_data.text
+        else:
+            return response_data.content
 
     # Requests can't put file
     def _curl_to_put(self, filename, path, params):
@@ -35,12 +38,14 @@ class HDFSApi(object):
                                                                               port=self.http_data_port, path=path,
                                                                               params=params)
         cmd = "curl -s -i -X PUT -T {fname} '{url}'".format(fname=filename, url=url)
-        output = subprocess.check_output(cmd, shell=True)
+        output = subprocess.check_output(cmd, shell=True, universal_newlines=True)
         return output
 
     def write_data(self, path, content):
-        named_file = NamedTemporaryFile()
+        named_file = NamedTemporaryFile(mode='wb+')
         fpath = named_file.name
+        if isinstance(content, str):
+            content = content.encode()
         named_file.write(content)
         named_file.flush()
         response = requests.put(
@@ -58,10 +63,12 @@ class HDFSApi(object):
             raise Exception("Can't create file on hdfs:\n {}".format(output))
 
     def write_gzip_data(self, path, content):
-        out = StringIO.StringIO()
-        with gzip.GzipFile(fileobj=out, mode="w") as f:
+        if isinstance(content, str):
+            content = content.encode()
+        out = io.BytesIO()
+        with gzip.GzipFile(fileobj=out, mode="wb") as f:
             f.write(content)
         self.write_data(path, out.getvalue())
 
     def read_gzip_data(self, path):
-        return gzip.GzipFile(fileobj=StringIO.StringIO(self.read_data(path))).read()
+        return gzip.GzipFile(fileobj=io.BytesIO(self.read_data(path, universal_newlines=False))).read().decode()
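The gzip helpers above switch from StringIO to io.BytesIO because GzipFile always works on bytes in Python 3; text is encoded before compression and decoded after decompression. A self-contained sketch of that round trip (not from this commit):

```python
import gzip
import io

def gzip_text(content: str) -> bytes:
    out = io.BytesIO()
    with gzip.GzipFile(fileobj=out, mode="wb") as f:
        f.write(content.encode())        # GzipFile wants bytes
    return out.getvalue()

def gunzip_text(blob: bytes) -> str:
    return gzip.GzipFile(fileobj=io.BytesIO(blob)).read().decode()

payload = "1\thello\n2\tworld\n"
assert gunzip_text(gzip_text(payload)) == payload
print("gzip round trip ok")
```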
@@ -3,7 +3,7 @@ import argparse
 import csv
 import socket
 import ssl
-from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+from http.server import BaseHTTPRequestHandler, HTTPServer
 
 
 # Decorator used to see if authentication works for external dictionary who use a HTTP source.
@@ -29,7 +29,7 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
        @check_auth
        def do_POST(self):
            ids = self.__read_and_decode_post_ids()
-            print "ids=", ids
+            print("ids=", ids)
            self.__send_headers()
            self.__send_data(ids)
 
@@ -43,26 +43,26 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
                reader = csv.reader(fl, delimiter='\t')
                for row in reader:
                    if not only_ids or (row[0] in only_ids):
-                        self.wfile.write('\t'.join(row) + '\n')
+                        self.wfile.write(('\t'.join(row) + '\n').encode())
 
        def __read_and_decode_post_ids(self):
            data = self.__read_and_decode_post_data()
-            return filter(None, data.split())
+            return [_f for _f in data.split() if _f]
 
        def __read_and_decode_post_data(self):
            transfer_encoding = self.headers.get("Transfer-encoding")
            decoded = "";
            if transfer_encoding == "chunked":
                while True:
-                    s = self.rfile.readline()
+                    s = self.rfile.readline().decode()
                    chunk_length = int(s, 16)
                    if not chunk_length:
                        break
-                    decoded += self.rfile.read(chunk_length)
-                    self.rfile.readline()
+                    decoded += self.rfile.read(chunk_length).decode()
+                    self.rfile.readline().decode()
            else:
                content_length = int(self.headers.get("Content-Length", 0))
-                decoded = self.rfile.read(content_length)
+                decoded = self.rfile.read(content_length).decode()
            return decoded
 
    if address_family == "ipv6":
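In the mock HTTP server, BaseHTTPRequestHandler exposes rfile/wfile as binary streams in Python 3, so the chunked body is decoded chunk by chunk and responses are encoded before writing. A trimmed sketch of the body-reading logic under that assumption (not from this commit):

```python
def read_post_body(handler):
    """Read a request body from a BaseHTTPRequestHandler-like object.

    handler.rfile is a binary stream in Python 3, so every read is decoded.
    """
    decoded = ""
    if handler.headers.get("Transfer-encoding") == "chunked":
        while True:
            size_line = handler.rfile.readline().decode()   # hex chunk size
            chunk_length = int(size_line, 16)
            if not chunk_length:
                break
            decoded += handler.rfile.read(chunk_length).decode()
            handler.rfile.readline()                         # trailing CRLF
    else:
        content_length = int(handler.headers.get("Content-Length", 0))
        decoded = handler.rfile.read(content_length).decode()
    return decoded
```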
@@ -183,7 +183,7 @@ class _NetworkManager:
         exit_code = self._docker_client.api.exec_inspect(handle)['ExitCode']
 
         if exit_code != 0:
-            print output
+            print(output)
             raise subprocess.CalledProcessError(exit_code, cmd)
 
         return output
@@ -1,14 +1,15 @@
 import difflib
 import time
+from io import IOBase
 
 
 class TSV:
     """Helper to get pretty diffs between expected and actual tab-separated value files"""
 
     def __init__(self, contents):
-        if isinstance(contents, file):
+        if isinstance(contents, IOBase):
             raw_lines = contents.readlines()
-        elif isinstance(contents, str) or isinstance(contents, unicode):
+        elif isinstance(contents, str) or isinstance(contents, str):
             raw_lines = contents.splitlines(True)
         elif isinstance(contents, list):
             raw_lines = ['\t'.join(map(str, l)) if isinstance(l, list) else str(l) for l in contents]
@@ -29,7 +30,7 @@ class TSV:
             return self != TSV(other)
         return self.lines != other.lines
 
-    def diff(self, other, n1=None, n2=None):
+    def diff(self, other, n1='', n2=''):
         if not isinstance(other, TSV):
             return self.diff(TSV(other), n1=n1, n2=n2)
         return list(line.rstrip() for line in difflib.unified_diff(self.lines, other.lines, fromfile=n1, tofile=n2))[2:]
@@ -45,14 +46,14 @@ class TSV:
 def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_time=0.5, stdin=None, timeout=None,
                          settings=None, user=None, ignore_error=False):
     expectation_tsv = TSV(expectation)
-    for i in xrange(retry_count):
+    for i in range(retry_count):
         try:
             if TSV(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,
                                   ignore_error=ignore_error)) == expectation_tsv:
                 break
             time.sleep(sleep_time)
         except Exception as ex:
-            print "assert_eq_with_retry retry {} exception {}".format(i + 1, ex)
+            print(("assert_eq_with_retry retry {} exception {}".format(i + 1, ex)))
            time.sleep(sleep_time)
    else:
        val = TSV(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,
@@ -66,13 +67,13 @@ def assert_logs_contain(instance, substring):
        raise AssertionError("'{}' not found in logs".format(substring))
 
 def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_time=0.5):
-    for i in xrange(retry_count):
+    for i in range(retry_count):
        try:
            if instance.contains_in_log(substring):
                break
            time.sleep(sleep_time)
        except Exception as ex:
-            print "contains_in_log_with_retry retry {} exception {}".format(i + 1, ex)
+            print("contains_in_log_with_retry retry {} exception {}".format(i + 1, ex))
            time.sleep(sleep_time)
    else:
        raise AssertionError("'{}' not found in logs".format(substring))
@@ -6,7 +6,7 @@ CURDIR = os.path.dirname(os.path.realpath(__file__))
 
 sys.path.insert(0, os.path.join(CURDIR))
 
-import uexpect
+from . import uexpect
 
 prompt = ':\) '
 end_of_block = r'.*\r\n.*\r\n'
@@ -15,7 +15,7 @@ import os
 import pty
 import re
 import time
-from Queue import Queue, Empty
+from queue import Queue, Empty
 from subprocess import Popen
 from threading import Thread, Event
 
@@ -118,7 +118,7 @@ class IO(object):
         return self.write(data + eol)
 
     def write(self, data):
-        return os.write(self.master, data)
+        return os.write(self.master, data.encode())
 
     def expect(self, pattern, timeout=None, escape=False):
         self.match = None
@@ -201,7 +201,8 @@ def spawn(command):
 def reader(process, out, queue, kill_event):
     while True:
         try:
-            data = os.read(out, 65536)
+            # TODO: there are some issues with 1<<16 buffer size
+            data = os.read(out, 1<<17).decode(errors='replace')
            queue.put(data)
        except:
            if kill_event.is_set():
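The uexpect reader now decodes what os.read returns, since in Python 3 it yields bytes; errors='replace' keeps the pseudo-terminal reader alive even when a read lands in the middle of a multi-byte UTF-8 sequence. A minimal sketch of the pattern (not from this commit):

```python
import os

def read_decoded(fd, bufsize=1 << 17):
    # os.read returns bytes in Python 3; decode with replacement so a torn
    # multi-byte sequence cannot raise and kill the reader thread.
    data = os.read(fd, bufsize)
    return data.decode(errors='replace')

r, w = os.pipe()
os.write(w, "ClickHouse \N{ELEPHANT}".encode())
print(read_decoded(r))
```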
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #-*- coding: utf-8 -*-
 import subprocess
 import os
@@ -188,5 +188,5 @@ if __name__ == "__main__":
         command=args.command
     )
 
-    print("Running pytest container as: '" + cmd + "'.")
+    print(("Running pytest container as: '" + cmd + "'."))
     subprocess.check_call(cmd, shell=True)
@@ -371,7 +371,7 @@ def test_version_update_two_nodes(start_dynamic_cluster):
             node12.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=120)
             break
         except Exception as ex:
-            print("Exception during replica sync", ex)
+            print(("Exception during replica sync", ex))
             node11.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
             node12.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
             time.sleep(2 * i)
@@ -386,7 +386,7 @@ def test_version_update_two_nodes(start_dynamic_cluster):
             node12.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=120)
             break
         except Exception as ex:
-            print("Exception during replica sync", ex)
+            print(("Exception during replica sync", ex))
             node11.query("SYSTEM RESTART REPLICA table_with_default_granularity")
             node12.query("SYSTEM RESTART REPLICA table_with_default_granularity")
             time.sleep(2 * i)
@@ -50,8 +50,8 @@ def started_cluster():
         }
     }
 
-    for cluster_name, shards in clusters_schema.iteritems():
-        for shard_name, replicas in shards.iteritems():
+    for cluster_name, shards in clusters_schema.items():
+        for shard_name, replicas in shards.items():
             for replica_name in replicas:
                 name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
                 cluster.add_instance(name,
@@ -235,16 +235,16 @@ def execute_task(task, cmd_options):
     task.start()
 
     zk = cluster.get_kazoo_client('zoo1')
-    print "Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])
+    print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
 
     try:
         zk.delete("/clickhouse-copier", recursive=True)
     except kazoo.exceptions.NoNodeError:
-        print "No node /clickhouse-copier. It is Ok in first test."
+        print("No node /clickhouse-copier. It is Ok in first test.")
 
     zk_task_path = task.zk_task_path
     zk.ensure_path(zk_task_path)
-    zk.create(zk_task_path + "/description", task.copier_task_config)
+    zk.create(zk_task_path + "/description", task.copier_task_config.encode())
 
     # Run cluster-copier processes on each node
     docker_api = docker.from_env().api
@@ -256,19 +256,19 @@ def execute_task(task, cmd_options):
            '--base-dir', '/var/log/clickhouse-server/copier']
    cmd += cmd_options
 
-    copiers = random.sample(cluster.instances.keys(), 3)
+    copiers = random.sample(list(cluster.instances.keys()), 3)
 
    for instance_name in copiers:
        instance = cluster.instances[instance_name]
        container = instance.get_docker_handle()
        instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"),
                                        "/etc/clickhouse-server/config-copier.xml")
-        print "Copied copier config to {}".format(instance.name)
+        print("Copied copier config to {}".format(instance.name))
        exec_id = docker_api.exec_create(container.id, cmd, stderr=True)
        output = docker_api.exec_start(exec_id).decode('utf8')
        print(output)
        copiers_exec_ids.append(exec_id)
-        print "Copier for {} ({}) has started".format(instance.name, instance.ip_address)
+        print("Copier for {} ({}) has started".format(instance.name, instance.ip_address))
 
    # Wait for copiers stopping and check their return codes
    for exec_id, instance_name in zip(copiers_exec_ids, copiers):
@@ -362,6 +362,6 @@ def test_no_arg(started_cluster):
 
 if __name__ == '__main__':
     with contextmanager(started_cluster)() as cluster:
-        for name, instance in cluster.instances.items():
-            print name, instance.ip_address
-        raw_input("Cluster created, press any key to destroy...")
+        for name, instance in list(cluster.instances.items()):
+            print(name, instance.ip_address)
+        input("Cluster created, press any key to destroy...")
@@ -27,8 +27,8 @@ def started_cluster():
 
     cluster = ClickHouseCluster(__file__)
 
-    for cluster_name, shards in clusters_schema.iteritems():
-        for shard_name, replicas in shards.iteritems():
+    for cluster_name, shards in clusters_schema.items():
+        for shard_name, replicas in shards.items():
             for replica_name in replicas:
                 name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
                 cluster.add_instance(name,
@@ -83,7 +83,7 @@ def execute_task(task, cmd_options):
     task.start()
 
     zk = cluster.get_kazoo_client('zoo1')
-    print "Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])
+    print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
 
     zk_task_path = task.zk_task_path
     zk.ensure_path(zk_task_path)
@@ -101,16 +101,16 @@ def execute_task(task, cmd_options):
 
     print(cmd)
 
-    for instance_name, instance in cluster.instances.iteritems():
+    for instance_name, instance in cluster.instances.items():
         container = instance.get_docker_handle()
         exec_id = docker_api.exec_create(container.id, cmd, stderr=True)
         docker_api.exec_start(exec_id, detach=True)
 
         copiers_exec_ids.append(exec_id)
-        print "Copier for {} ({}) has started".format(instance.name, instance.ip_address)
+        print("Copier for {} ({}) has started".format(instance.name, instance.ip_address))
 
     # Wait for copiers stopping and check their return codes
-    for exec_id, instance in zip(copiers_exec_ids, cluster.instances.itervalues()):
+    for exec_id, instance in zip(copiers_exec_ids, iter(cluster.instances.values())):
         while True:
             res = docker_api.exec_inspect(exec_id)
             if not res['Running']:
@@ -175,6 +175,6 @@ def test_trivial_copy_with_move_fault(started_cluster, use_sample_offset):
 
 if __name__ == '__main__':
     with contextmanager(started_cluster)() as cluster:
-        for name, instance in cluster.instances.items():
-            print name, instance.ip_address
-        raw_input("Cluster created, press any key to destroy...")
+        for name, instance in list(cluster.instances.items()):
+            print(name, instance.ip_address)
+        input("Cluster created, press any key to destroy...")
@@ -26,14 +26,14 @@ def test_exception_message(started_cluster):
     assert node1.query("select number from nums order by number") == "0\n1\n"
 
     def node_busy(_):
-        for i in xrange(10):
+        for i in range(10):
             node1.query("select sleep(2)", user='default')
 
     busy_pool = Pool(3)
-    busy_pool.map_async(node_busy, xrange(3))
+    busy_pool.map_async(node_busy, range(3))
     time.sleep(1)  # wait a little until polling starts
     try:
         assert node2.query("select number from remote('node1', 'default', 'nums')", user='good') == "0\n1\n"
     except Exception as ex:
-        print ex.message
+        print(ex.message)
         assert False, "Exception thrown while max_concurrent_queries_for_user is not exceeded"
@@ -66,7 +66,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
     node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0")
 
     while count_running_mutations(node1, "test_ttl") < 6:
-        print "Mutations count", count_running_mutations(node1, "test_ttl")
+        print("Mutations count", count_running_mutations(node1, "test_ttl"))
         assert count_ttl_merges_in_background_pool(node1, "test_ttl") == 0
         time.sleep(0.5)
 
@@ -74,7 +74,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
 
     rows_count = []
     while count_running_mutations(node1, "test_ttl") == 6:
-        print "Mutations count after start TTL", count_running_mutations(node1, "test_ttl")
+        print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl"))
         rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip()))
         time.sleep(0.5)
 
@ -19,7 +19,7 @@ node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from
|
|||||||
def start_cluster():
|
def start_cluster():
|
||||||
try:
|
try:
|
||||||
def create_zk_roots(zk):
|
def create_zk_roots(zk):
|
||||||
zk.create(path="/setting/max_query_size", value="77777", makepath=True)
|
zk.create(path="/setting/max_query_size", value=b"77777", makepath=True)
|
||||||
|
|
||||||
cluster.add_zookeeper_startup_command(create_zk_roots)
|
cluster.add_zookeeper_startup_command(create_zk_roots)
|
||||||
|
|
||||||
|
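kazoo stores znode data as raw bytes, so under Python 3 the value passed to create() must be a bytes literal, hence b"77777" above. A sketch of the same call, assuming a ZooKeeper instance reachable at 127.0.0.1:2181 (the address is illustrative only):

from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
# str values must be encoded explicitly; passing "77777" raises a TypeError in kazoo.
zk.create(path="/setting/max_query_size", value=b"77777", makepath=True)
zk.stop()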
@ -33,7 +33,7 @@ def start_cluster():
|
|||||||
initialize_database([node1, node2], 1)
|
initialize_database([node1, node2], 1)
|
||||||
yield cluster
|
yield cluster
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print ex
|
print(ex)
|
||||||
finally:
|
finally:
|
||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ def start_cluster():
|
|||||||
fill_nodes([node1, node2], 1)
|
fill_nodes([node1, node2], 1)
|
||||||
yield cluster
|
yield cluster
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print ex
|
print(ex)
|
||||||
finally:
|
finally:
|
||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
|
|
||||||
|
@ -83,11 +83,11 @@ def test(started_cluster):
|
|||||||
assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
|
assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
|
||||||
|
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(Exception):
|
||||||
print node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5)
|
print(node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
with contextmanager(started_cluster)() as cluster:
|
with contextmanager(started_cluster)() as cluster:
|
||||||
for name, instance in cluster.instances.items():
|
for name, instance in list(cluster.instances.items()):
|
||||||
print name, instance.ip_address
|
print(name, instance.ip_address)
|
||||||
raw_input("Cluster created, press any key to destroy...")
|
input("Cluster created, press any key to destroy...")
|
||||||
|
@ -98,12 +98,12 @@ SELECT sum(x) FROM distributed SETTINGS
|
|||||||
|
|
||||||
# If we forbid stale replicas, the query must fail.
|
# If we forbid stale replicas, the query must fail.
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(Exception):
|
||||||
print instance_with_dist_table.query('''
|
print(instance_with_dist_table.query('''
|
||||||
SELECT count() FROM distributed SETTINGS
|
SELECT count() FROM distributed SETTINGS
|
||||||
load_balancing='in_order',
|
load_balancing='in_order',
|
||||||
max_replica_delay_for_distributed_queries=1,
|
max_replica_delay_for_distributed_queries=1,
|
||||||
fallback_to_stale_replicas_for_distributed_queries=0
|
fallback_to_stale_replicas_for_distributed_queries=0
|
||||||
''')
|
'''))
|
||||||
|
|
||||||
# Now partition off the remote replica of the local shard and test that failover still works.
|
# Now partition off the remote replica of the local shard and test that failover still works.
|
||||||
pm.partition_instances(node_1_1, node_1_2, port=9000)
|
pm.partition_instances(node_1_1, node_1_2, port=9000)
|
||||||
|
@ -113,12 +113,12 @@ class SimpleLayoutTester:
|
|||||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||||
|
|
||||||
def prepare(self, cluster_):
|
def prepare(self, cluster_):
|
||||||
for _, dictionary in self.layout_to_dictionary.items():
|
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||||
dictionary.prepare_source(cluster_)
|
dictionary.prepare_source(cluster_)
|
||||||
dictionary.load_data(self.data)
|
dictionary.load_data(self.data)
|
||||||
|
|
||||||
def execute(self, layout_name, node):
|
def execute(self, layout_name, node):
|
||||||
if not self.layout_to_dictionary.has_key(layout_name):
|
if layout_name not in self.layout_to_dictionary:
|
||||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||||
|
|
||||||
dct = self.layout_to_dictionary[layout_name]
|
dct = self.layout_to_dictionary[layout_name]
|
||||||
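dict.has_key() was dropped in Python 3; membership tests use the in operator, as the SimpleLayoutTester.execute change above shows. A tiny sketch with a hypothetical layout map:

# Hypothetical mapping; the real one holds dictionary objects built by get_dict().
layout_to_dictionary = {'flat': object(), 'hashed': object()}

for layout_name in ('flat', 'cache'):
    if layout_name not in layout_to_dictionary:     # has_key() is gone
        print("Source doesn't support layout: {}".format(layout_name))
    else:
        print("Executing layout", layout_name)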
@ -170,12 +170,12 @@ class ComplexLayoutTester:
|
|||||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||||
|
|
||||||
def prepare(self, cluster_):
|
def prepare(self, cluster_):
|
||||||
for _, dictionary in self.layout_to_dictionary.items():
|
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||||
dictionary.prepare_source(cluster_)
|
dictionary.prepare_source(cluster_)
|
||||||
dictionary.load_data(self.data)
|
dictionary.load_data(self.data)
|
||||||
|
|
||||||
def execute(self, layout_name, node):
|
def execute(self, layout_name, node):
|
||||||
if not self.layout_to_dictionary.has_key(layout_name):
|
if layout_name not in self.layout_to_dictionary:
|
||||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||||
|
|
||||||
dct = self.layout_to_dictionary[layout_name]
|
dct = self.layout_to_dictionary[layout_name]
|
||||||
@ -213,13 +213,13 @@ class RangedLayoutTester:
|
|||||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||||
|
|
||||||
def prepare(self, cluster_):
|
def prepare(self, cluster_):
|
||||||
for _, dictionary in self.layout_to_dictionary.items():
|
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||||
dictionary.prepare_source(cluster_)
|
dictionary.prepare_source(cluster_)
|
||||||
dictionary.load_data(self.data)
|
dictionary.load_data(self.data)
|
||||||
|
|
||||||
def execute(self, layout_name, node):
|
def execute(self, layout_name, node):
|
||||||
|
|
||||||
if not self.layout_to_dictionary.has_key(layout_name):
|
if layout_name not in self.layout_to_dictionary:
|
||||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||||
|
|
||||||
dct = self.layout_to_dictionary[layout_name]
|
dct = self.layout_to_dictionary[layout_name]
|
||||||
|
@ -42,7 +42,7 @@ def test_memory_consumption(cluster):
|
|||||||
allocated_first = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
allocated_first = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||||
|
|
||||||
alloc_array = []
|
alloc_array = []
|
||||||
for i in xrange(5):
|
for i in range(5):
|
||||||
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
||||||
|
|
||||||
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||||
@ -51,7 +51,7 @@ def test_memory_consumption(cluster):
|
|||||||
# size doesn't grow
|
# size doesn't grow
|
||||||
assert all(allocated_first >= a for a in alloc_array)
|
assert all(allocated_first >= a for a in alloc_array)
|
||||||
|
|
||||||
for i in xrange(5):
|
for i in range(5):
|
||||||
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
||||||
|
|
||||||
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||||
|
@ -106,7 +106,7 @@ def setup_module(module):
|
|||||||
for source in sources:
|
for source in sources:
|
||||||
for layout in LAYOUTS:
|
for layout in LAYOUTS:
|
||||||
if not source.compatible_with_layout(layout):
|
if not source.compatible_with_layout(layout):
|
||||||
print "Source", source.name, "incompatible with layout", layout.name
|
print("Source", source.name, "incompatible with layout", layout.name)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
fields = KEY_FIELDS[layout.layout_type] + [field]
|
fields = KEY_FIELDS[layout.layout_type] + [field]
|
||||||
@ -128,9 +128,9 @@ def started_cluster():
|
|||||||
assert len(FIELDS) == len(VALUES)
|
assert len(FIELDS) == len(VALUES)
|
||||||
for dicts in DICTIONARIES:
|
for dicts in DICTIONARIES:
|
||||||
for dictionary in dicts:
|
for dictionary in dicts:
|
||||||
print "Preparing", dictionary.name
|
print("Preparing", dictionary.name)
|
||||||
dictionary.prepare_source(cluster)
|
dictionary.prepare_source(cluster)
|
||||||
print "Prepared"
|
print("Prepared")
|
||||||
|
|
||||||
yield cluster
|
yield cluster
|
||||||
|
|
||||||
@ -138,9 +138,9 @@ def started_cluster():
|
|||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("id", range(len(FIELDS)))
|
@pytest.mark.parametrize("id", list(range(len(FIELDS))))
|
||||||
def test_redis_dictionaries(started_cluster, id):
|
def test_redis_dictionaries(started_cluster, id):
|
||||||
print 'id:', id
|
print('id:', id)
|
||||||
|
|
||||||
dicts = DICTIONARIES[id]
|
dicts = DICTIONARIES[id]
|
||||||
values = VALUES[id]
|
values = VALUES[id]
|
||||||
@ -173,7 +173,7 @@ def test_redis_dictionaries(started_cluster, id):
|
|||||||
node.query("system reload dictionary {}".format(dct.name))
|
node.query("system reload dictionary {}".format(dct.name))
|
||||||
|
|
||||||
for query, answer in queries_with_answers:
|
for query, answer in queries_with_answers:
|
||||||
print query
|
print(query)
|
||||||
assert node.query(query) == str(answer) + '\n'
|
assert node.query(query) == str(answer) + '\n'
|
||||||
|
|
||||||
# Checks, that dictionaries can be reloaded.
|
# Checks, that dictionaries can be reloaded.
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
import difflib
|
import difflib
|
||||||
import os
|
import os
|
||||||
|
from functools import reduce
|
||||||
|
|
||||||
files = ['key_simple.tsv', 'key_complex_integers.tsv', 'key_complex_mixed.tsv']
|
files = ['key_simple.tsv', 'key_complex_integers.tsv', 'key_complex_mixed.tsv']
|
||||||
|
|
||||||
@ -78,8 +79,9 @@ def generate_dictionaries(path, structure):
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
dictionary_skeleton = \
|
dictionary_skeleton = \
|
||||||
dictionary_skeleton % reduce(lambda xml, (type, default): xml + attribute_skeleton % (type, type, default),
|
dictionary_skeleton % reduce(
|
||||||
zip(types, implicit_defaults), '')
|
lambda xml, type_default: xml + attribute_skeleton % (type_default[0], type_default[0], type_default[1]),
|
||||||
|
list(zip(types, implicit_defaults)), '')
|
||||||
|
|
||||||
source_clickhouse = '''
|
source_clickhouse = '''
|
||||||
<clickhouse>
|
<clickhouse>
|
||||||
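Two things changed in the skeleton-building code above: reduce() moved to functools, and tuple parameter unpacking in function signatures (the old lambda xml, (type, default): form) was removed by PEP 3113, so the lambda now takes a single pair and indexes into it. A compact sketch of the rewritten accumulation, with a toy attribute template standing in for the real XML skeleton:

from functools import reduce          # reduce() left the builtins in Python 3

attribute_skeleton = '<attribute><type>%s</type><name>%s</name><null_value>%s</null_value></attribute>\n'
types = ['UInt8', 'String']
implicit_defaults = ['0', "''"]

# lambda xml, (type, default): ...  is a SyntaxError in Python 3.
attributes = reduce(
    lambda xml, type_default: xml + attribute_skeleton % (type_default[0], type_default[0], type_default[1]),
    zip(types, implicit_defaults), '')
print(attributes)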
@ -195,7 +197,7 @@ class DictionaryTestTable:
|
|||||||
String_ String,
|
String_ String,
|
||||||
Date_ Date, DateTime_ DateTime, Parent UInt64'''
|
Date_ Date, DateTime_ DateTime, Parent UInt64'''
|
||||||
|
|
||||||
self.names_and_types = map(str.split, self.structure.split(','))
|
self.names_and_types = list(map(str.split, self.structure.split(',')))
|
||||||
self.keys_names_and_types = self.names_and_types[:6]
|
self.keys_names_and_types = self.names_and_types[:6]
|
||||||
self.values_names_and_types = self.names_and_types[6:]
|
self.values_names_and_types = self.names_and_types[6:]
|
||||||
self.source_file_name = source_file_name
|
self.source_file_name = source_file_name
|
||||||
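map() returns a lazy iterator in Python 3, so the constructor above wraps it in list() before slicing names_and_types. The same pattern in isolation, with a shortened structure string:

structure = 'KeyField1 UInt64, KeyField2 UInt8, String_ String, Date_ Date'

# Materialize the map() result before slicing or iterating it more than once.
names_and_types = list(map(str.split, structure.split(',')))
keys_names_and_types = names_and_types[:2]
values_names_and_types = names_and_types[2:]
print(keys_names_and_types)    # [['KeyField1', 'UInt64'], ['KeyField2', 'UInt8']]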
@ -223,10 +225,10 @@ class DictionaryTestTable:
|
|||||||
def make_tuple(line):
|
def make_tuple(line):
|
||||||
row = tuple(line.split('\t'))
|
row = tuple(line.split('\t'))
|
||||||
self.rows.append(row)
|
self.rows.append(row)
|
||||||
return '(' + ','.join(map(wrap_value, zip(row, types))) + ')'
|
return '(' + ','.join(map(wrap_value, list(zip(row, types)))) + ')'
|
||||||
|
|
||||||
values = ','.join(map(make_tuple, lines))
|
values = ','.join(map(make_tuple, lines))
|
||||||
print query % (self.structure, values)
|
print(query % (self.structure, values))
|
||||||
instance.query(query % (self.structure, values))
|
instance.query(query % (self.structure, values))
|
||||||
|
|
||||||
def get_structure_for_keys(self, keys, enable_parent=True):
|
def get_structure_for_keys(self, keys, enable_parent=True):
|
||||||
@ -245,7 +247,7 @@ class DictionaryTestTable:
|
|||||||
for row in rows:
|
for row in rows:
|
||||||
key = '\t'.join(row[:len(keys)])
|
key = '\t'.join(row[:len(keys)])
|
||||||
value = '\t'.join(row[len(keys):])
|
value = '\t'.join(row[len(keys):])
|
||||||
if key in lines_map.keys():
|
if key in list(lines_map.keys()):
|
||||||
pattern_value = lines_map[key]
|
pattern_value = lines_map[key]
|
||||||
del lines_map[key]
|
del lines_map[key]
|
||||||
if not value == pattern_value:
|
if not value == pattern_value:
|
||||||
@ -256,7 +258,7 @@ class DictionaryTestTable:
|
|||||||
diff.append((key + '\t' + value, ''))
|
diff.append((key + '\t' + value, ''))
|
||||||
|
|
||||||
if add_not_found_rows:
|
if add_not_found_rows:
|
||||||
for key, value in lines_map.items():
|
for key, value in list(lines_map.items()):
|
||||||
diff.append(('', key + '\t' + value))
|
diff.append(('', key + '\t' + value))
|
||||||
|
|
||||||
if not diff:
|
if not diff:
|
||||||
|
@ -4,7 +4,7 @@ import pytest
|
|||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster
|
||||||
from helpers.test_tools import TSV
|
from helpers.test_tools import TSV
|
||||||
|
|
||||||
from generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable
|
from .generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable
|
||||||
|
|
||||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
@ -32,7 +32,7 @@ def started_cluster():
|
|||||||
cluster.start()
|
cluster.start()
|
||||||
test_table.create_clickhouse_source(instance)
|
test_table.create_clickhouse_source(instance)
|
||||||
for line in TSV(instance.query('select name from system.dictionaries')).lines:
|
for line in TSV(instance.query('select name from system.dictionaries')).lines:
|
||||||
print line,
|
print(line, end=' ')
|
||||||
|
|
||||||
yield cluster
|
yield cluster
|
||||||
|
|
||||||
@ -72,7 +72,7 @@ def test_select_all(dictionary_structure):
|
|||||||
result = TSV(query('select * from test.{0}'.format(name)))
|
result = TSV(query('select * from test.{0}'.format(name)))
|
||||||
|
|
||||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
||||||
print test_table.process_diff(diff)
|
print(test_table.process_diff(diff))
|
||||||
assert not diff
|
assert not diff
|
||||||
|
|
||||||
|
|
||||||
@ -103,7 +103,7 @@ def test_select_all_from_cached(cached_dictionary_structure):
|
|||||||
for i in range(4):
|
for i in range(4):
|
||||||
result = TSV(query('select * from test.{0}'.format(name)))
|
result = TSV(query('select * from test.{0}'.format(name)))
|
||||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=False)
|
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=False)
|
||||||
print test_table.process_diff(diff)
|
print(test_table.process_diff(diff))
|
||||||
assert not diff
|
assert not diff
|
||||||
|
|
||||||
key = []
|
key = []
|
||||||
@ -120,5 +120,5 @@ def test_select_all_from_cached(cached_dictionary_structure):
|
|||||||
|
|
||||||
result = TSV(query('select * from test.{0}'.format(name)))
|
result = TSV(query('select * from test.{0}'.format(name)))
|
||||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
||||||
print test_table.process_diff(diff)
|
print(test_table.process_diff(diff))
|
||||||
assert not diff
|
assert not diff
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ import argparse
|
|||||||
import csv
|
import csv
|
||||||
import socket
|
import socket
|
||||||
import ssl
|
import ssl
|
||||||
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
|
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||||
|
|
||||||
|
|
||||||
# Decorator used to see if authentication works for external dictionary who use a HTTP source.
|
# Decorator used to see if authentication works for external dictionary who use a HTTP source.
|
||||||
@ -29,7 +29,7 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
|||||||
@check_auth
|
@check_auth
|
||||||
def do_POST(self):
|
def do_POST(self):
|
||||||
ids = self.__read_and_decode_post_ids()
|
ids = self.__read_and_decode_post_ids()
|
||||||
print "ids=", ids
|
print("ids=", ids)
|
||||||
self.__send_headers()
|
self.__send_headers()
|
||||||
self.__send_data(ids)
|
self.__send_data(ids)
|
||||||
|
|
||||||
@ -43,11 +43,11 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
|||||||
reader = csv.reader(fl, delimiter='\t')
|
reader = csv.reader(fl, delimiter='\t')
|
||||||
for row in reader:
|
for row in reader:
|
||||||
if not only_ids or (row[0] in only_ids):
|
if not only_ids or (row[0] in only_ids):
|
||||||
self.wfile.write('\t'.join(row) + '\n')
|
self.wfile.write(('\t'.join(row) + '\n').encode())
|
||||||
|
|
||||||
def __read_and_decode_post_ids(self):
|
def __read_and_decode_post_ids(self):
|
||||||
data = self.__read_and_decode_post_data()
|
data = self.__read_and_decode_post_data()
|
||||||
return filter(None, data.split())
|
return [_f for _f in data.split() if _f]
|
||||||
|
|
||||||
def __read_and_decode_post_data(self):
|
def __read_and_decode_post_data(self):
|
||||||
transfer_encoding = self.headers.get("Transfer-encoding")
|
transfer_encoding = self.headers.get("Transfer-encoding")
|
||||||
@ -58,11 +58,11 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
|||||||
chunk_length = int(s, 16)
|
chunk_length = int(s, 16)
|
||||||
if not chunk_length:
|
if not chunk_length:
|
||||||
break
|
break
|
||||||
decoded += self.rfile.read(chunk_length)
|
decoded += self.rfile.read(chunk_length).decode()
|
||||||
self.rfile.readline()
|
self.rfile.readline()
|
||||||
else:
|
else:
|
||||||
content_length = int(self.headers.get("Content-Length", 0))
|
content_length = int(self.headers.get("Content-Length", 0))
|
||||||
decoded = self.rfile.read(content_length)
|
decoded = self.rfile.read(content_length).decode()
|
||||||
return decoded
|
return decoded
|
||||||
|
|
||||||
if address_family == "ipv6":
|
if address_family == "ipv6":
|
||||||
|
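The test HTTP server above shows the network-facing side of the conversion: BaseHTTPServer became http.server, filter() now returns an iterator (replaced by a list comprehension), and the socket file objects deal in bytes, so wfile.write() needs encoded data and rfile.read() needs decoding. A stripped-down handler with a hard-coded one-row payload instead of the real dictionary data (port and content are illustrative):

from http.server import BaseHTTPRequestHandler, HTTPServer

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-Type', 'text/tab-separated-values')
        self.end_headers()
        # wfile expects bytes in Python 3: encode the tab-separated row.
        self.wfile.write(('\t'.join(['1', 'value']) + '\n').encode())

    def do_POST(self):
        length = int(self.headers.get('Content-Length', 0))
        body = self.rfile.read(length).decode()            # rfile yields bytes; decode to str
        ids = [part for part in body.split() if part]      # filter(None, ...) equivalent
        self.send_response(200)
        self.end_headers()
        self.wfile.write(('\t'.join(ids) + '\n').encode())

if __name__ == '__main__':
    HTTPServer(('127.0.0.1', 5555), Handler).serve_forever()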
@ -26,7 +26,7 @@ def prepare():
|
|||||||
node.exec_in_container([
|
node.exec_in_container([
|
||||||
"bash",
|
"bash",
|
||||||
"-c",
|
"-c",
|
||||||
"python2 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format(
|
"python3 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format(
|
||||||
tbl=path)
|
tbl=path)
|
||||||
], detach=True)
|
], detach=True)
|
||||||
|
|
||||||
|
@ -33,5 +33,5 @@ def test_different_types(cluster):
|
|||||||
|
|
||||||
def test_select_by_type(cluster):
|
def test_select_by_type(cluster):
|
||||||
node = cluster.instances["node"]
|
node = cluster.instances["node"]
|
||||||
for name, disk_type in disk_types.items():
|
for name, disk_type in list(disk_types.items()):
|
||||||
assert node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") == name + "\n"
|
assert node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") == name + "\n"
|
||||||
|
@ -26,12 +26,12 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
|||||||
main_configs += [os.path.join(self.test_config_dir, f) for f in
|
main_configs += [os.path.join(self.test_config_dir, f) for f in
|
||||||
["server.crt", "server.key", "dhparam.pem", "config.d/ssl_conf.xml"]]
|
["server.crt", "server.key", "dhparam.pem", "config.d/ssl_conf.xml"]]
|
||||||
|
|
||||||
for i in xrange(4):
|
for i in range(4):
|
||||||
self.add_instance(
|
self.add_instance(
|
||||||
'ch{}'.format(i + 1),
|
'ch{}'.format(i + 1),
|
||||||
main_configs=main_configs,
|
main_configs=main_configs,
|
||||||
user_configs=user_configs,
|
user_configs=user_configs,
|
||||||
macros={"layer": 0, "shard": i / 2 + 1, "replica": i % 2 + 1},
|
macros={"layer": 0, "shard": i // 2 + 1, "replica": i % 2 + 1},
|
||||||
with_zookeeper=True)
|
with_zookeeper=True)
|
||||||
|
|
||||||
self.start()
|
self.start()
|
||||||
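Division is the other silent behaviour change: in Python 3 the / operator always yields a float, so shard numbers derived from the loop index now use floor division //, as in the macros dict above. In isolation:

for i in range(4):
    # Python 2: i / 2 was integer division.  Python 3: use // to stay integral.
    macros = {"layer": 0, "shard": i // 2 + 1, "replica": i % 2 + 1}
    print('ch{}'.format(i + 1), macros)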
@ -62,11 +62,11 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
|||||||
self.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test ON CLUSTER 'cluster'")
|
self.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test ON CLUSTER 'cluster'")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print e
|
print(e)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def sync_replicas(self, table, timeout=5):
|
def sync_replicas(self, table, timeout=5):
|
||||||
for instance in self.instances.values():
|
for instance in list(self.instances.values()):
|
||||||
instance.query("SYSTEM SYNC REPLICA {}".format(table), timeout=timeout)
|
instance.query("SYSTEM SYNC REPLICA {}".format(table), timeout=timeout)
|
||||||
|
|
||||||
def check_all_hosts_successfully_executed(self, tsv_content, num_hosts=None):
|
def check_all_hosts_successfully_executed(self, tsv_content, num_hosts=None):
|
||||||
@ -90,7 +90,7 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
|||||||
def replace_domains_to_ip_addresses_in_cluster_config(self, instances_to_replace):
|
def replace_domains_to_ip_addresses_in_cluster_config(self, instances_to_replace):
|
||||||
clusters_config = open(p.join(self.base_dir, '{}/config.d/clusters.xml'.format(self.test_config_dir))).read()
|
clusters_config = open(p.join(self.base_dir, '{}/config.d/clusters.xml'.format(self.test_config_dir))).read()
|
||||||
|
|
||||||
for inst_name, inst in self.instances.items():
|
for inst_name, inst in list(self.instances.items()):
|
||||||
clusters_config = clusters_config.replace(inst_name, str(inst.ip_address))
|
clusters_config = clusters_config.replace(inst_name, str(inst.ip_address))
|
||||||
|
|
||||||
for inst_name in instances_to_replace:
|
for inst_name in instances_to_replace:
|
||||||
@ -113,7 +113,7 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
|||||||
Make retries in case of UNKNOWN_STATUS_OF_INSERT or zkutil::KeeperException errors
|
Make retries in case of UNKNOWN_STATUS_OF_INSERT or zkutil::KeeperException errors
|
||||||
"""
|
"""
|
||||||
|
|
||||||
for i in xrange(100):
|
for i in range(100):
|
||||||
try:
|
try:
|
||||||
instance.query(query_insert)
|
instance.query(query_insert)
|
||||||
return
|
return
|
||||||
|
@ -27,7 +27,7 @@ def test_cluster(request):
|
|||||||
|
|
||||||
# Check query log to ensure that DDL queries are not executed twice
|
# Check query log to ensure that DDL queries are not executed twice
|
||||||
time.sleep(1.5)
|
time.sleep(1.5)
|
||||||
for instance in cluster.instances.values():
|
for instance in list(cluster.instances.values()):
|
||||||
cluster.ddl_check_there_are_no_dublicates(instance)
|
cluster.ddl_check_there_are_no_dublicates(instance)
|
||||||
|
|
||||||
cluster.pm_random_drops.heal_all()
|
cluster.pm_random_drops.heal_all()
|
||||||
@ -133,12 +133,12 @@ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER '{cluster}' (p Date, i Int64,
|
|||||||
ENGINE = Distributed('{cluster}', default, merge, i)
|
ENGINE = Distributed('{cluster}', default, merge, i)
|
||||||
""")
|
""")
|
||||||
|
|
||||||
for i in xrange(0, 4, 2):
|
for i in range(0, 4, 2):
|
||||||
k = (i / 2) * 2
|
k = (i // 2) * 2
|
||||||
test_cluster.instances['ch{}'.format(i + 1)].query("INSERT INTO merge (i) VALUES ({})({})".format(k, k + 1))
|
test_cluster.instances['ch{}'.format(i + 1)].query("INSERT INTO merge (i) VALUES ({})({})".format(k, k + 1))
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\n'.format(x) for x in xrange(4)]))
|
''.join(['{}\n'.format(x) for x in range(4)]))
|
||||||
|
|
||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' MODIFY COLUMN i Int64")
|
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' MODIFY COLUMN i Int64")
|
||||||
@ -147,19 +147,19 @@ ENGINE = Distributed('{cluster}', default, merge, i)
|
|||||||
"ALTER TABLE merge ON CLUSTER '{cluster}' ADD COLUMN s String DEFAULT toString(i) FORMAT TSV")
|
"ALTER TABLE merge ON CLUSTER '{cluster}' ADD COLUMN s String DEFAULT toString(i) FORMAT TSV")
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||||
|
|
||||||
for i in xrange(0, 4, 2):
|
for i in range(0, 4, 2):
|
||||||
k = (i / 2) * 2 + 4
|
k = (i // 2) * 2 + 4
|
||||||
test_cluster.instances['ch{}'.format(i + 1)].query(
|
test_cluster.instances['ch{}'.format(i + 1)].query(
|
||||||
"INSERT INTO merge (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
"INSERT INTO merge (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(8)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(8)]))
|
||||||
|
|
||||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' DETACH PARTITION 197002")
|
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' DETACH PARTITION 197002")
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||||
|
|
||||||
test_cluster.ddl_check_query(instance, "DROP TABLE merge ON CLUSTER '{cluster}'")
|
test_cluster.ddl_check_query(instance, "DROP TABLE merge ON CLUSTER '{cluster}'")
|
||||||
test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER '{cluster}'")
|
test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER '{cluster}'")
|
||||||
@ -170,7 +170,7 @@ def test_macro(test_cluster):
|
|||||||
instance = test_cluster.instances['ch2']
|
instance = test_cluster.instances['ch2']
|
||||||
test_cluster.ddl_check_query(instance, "CREATE TABLE tab ON CLUSTER '{cluster}' (value UInt8) ENGINE = Memory")
|
test_cluster.ddl_check_query(instance, "CREATE TABLE tab ON CLUSTER '{cluster}' (value UInt8) ENGINE = Memory")
|
||||||
|
|
||||||
for i in xrange(4):
|
for i in range(4):
|
||||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||||
"INSERT INTO tab VALUES ({})".format(i))
|
"INSERT INTO tab VALUES ({})".format(i))
|
||||||
|
|
||||||
@ -359,6 +359,6 @@ def test_replicated_without_arguments(test_cluster):
|
|||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
with contextmanager(test_cluster)() as ctx_cluster:
|
with contextmanager(test_cluster)() as ctx_cluster:
|
||||||
for name, instance in ctx_cluster.instances.items():
|
for name, instance in list(ctx_cluster.instances.items()):
|
||||||
print name, instance.ip_address
|
print(name, instance.ip_address)
|
||||||
raw_input("Cluster created, press any key to destroy...")
|
input("Cluster created, press any key to destroy...")
|
||||||
|
@ -26,7 +26,7 @@ def test_cluster(request):
|
|||||||
|
|
||||||
# Check query log to ensure that DDL queries are not executed twice
|
# Check query log to ensure that DDL queries are not executed twice
|
||||||
time.sleep(1.5)
|
time.sleep(1.5)
|
||||||
for instance in cluster.instances.values():
|
for instance in list(cluster.instances.values()):
|
||||||
cluster.ddl_check_there_are_no_dublicates(instance)
|
cluster.ddl_check_there_are_no_dublicates(instance)
|
||||||
|
|
||||||
cluster.pm_random_drops.heal_all()
|
cluster.pm_random_drops.heal_all()
|
||||||
@ -59,36 +59,36 @@ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER cluster (p Date, i Int64, s S
|
|||||||
ENGINE = Distributed(cluster, default, merge_for_alter, i)
|
ENGINE = Distributed(cluster, default, merge_for_alter, i)
|
||||||
""")
|
""")
|
||||||
|
|
||||||
for i in xrange(4):
|
for i in range(4):
|
||||||
k = (i / 2) * 2
|
k = (i // 2) * 2
|
||||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||||
"INSERT INTO merge_for_alter (i) VALUES ({})({})".format(k, k + 1))
|
"INSERT INTO merge_for_alter (i) VALUES ({})({})".format(k, k + 1))
|
||||||
|
|
||||||
test_cluster.sync_replicas("merge_for_alter")
|
test_cluster.sync_replicas("merge_for_alter")
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\n'.format(x) for x in xrange(4)]))
|
''.join(['{}\n'.format(x) for x in range(4)]))
|
||||||
|
|
||||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64")
|
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64")
|
||||||
test_cluster.ddl_check_query(instance,
|
test_cluster.ddl_check_query(instance,
|
||||||
"ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)")
|
"ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)")
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||||
|
|
||||||
for i in xrange(4):
|
for i in range(4):
|
||||||
k = (i / 2) * 2 + 4
|
k = (i // 2) * 2 + 4
|
||||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||||
"INSERT INTO merge_for_alter (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
"INSERT INTO merge_for_alter (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
||||||
|
|
||||||
test_cluster.sync_replicas("merge_for_alter")
|
test_cluster.sync_replicas("merge_for_alter")
|
||||||
|
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(8)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(8)]))
|
||||||
|
|
||||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster DETACH PARTITION 197002")
|
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster DETACH PARTITION 197002")
|
||||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||||
|
|
||||||
test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster")
|
test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster")
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ users = pytest.mark.parametrize('user,password', [
|
|||||||
])
|
])
|
||||||
|
|
||||||
def bootstrap():
|
def bootstrap():
|
||||||
for n in cluster.instances.values():
|
for n in list(cluster.instances.values()):
|
||||||
n.query('DROP TABLE IF EXISTS data')
|
n.query('DROP TABLE IF EXISTS data')
|
||||||
n.query('DROP TABLE IF EXISTS dist')
|
n.query('DROP TABLE IF EXISTS dist')
|
||||||
n.query('CREATE TABLE data (key Int) Engine=Memory()')
|
n.query('CREATE TABLE data (key Int) Engine=Memory()')
|
||||||
|
@ -18,7 +18,7 @@ queries = nodes * 5
|
|||||||
|
|
||||||
|
|
||||||
def bootstrap():
|
def bootstrap():
|
||||||
for n in cluster.instances.values():
|
for n in list(cluster.instances.values()):
|
||||||
# At startup, server loads configuration files.
|
# At startup, server loads configuration files.
|
||||||
#
|
#
|
||||||
# However ConfigReloader does not know about already loaded files
|
# However ConfigReloader does not know about already loaded files
|
||||||
@ -90,7 +90,7 @@ def get_node(query_node, table='dist', *args, **kwargs):
|
|||||||
|
|
||||||
query_node.query('SELECT * FROM ' + table, *args, **kwargs)
|
query_node.query('SELECT * FROM ' + table, *args, **kwargs)
|
||||||
|
|
||||||
for n in cluster.instances.values():
|
for n in list(cluster.instances.values()):
|
||||||
n.query('SYSTEM FLUSH LOGS')
|
n.query('SYSTEM FLUSH LOGS')
|
||||||
|
|
||||||
rows = query_node.query("""
|
rows = query_node.query("""
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
# This test is a subset of the 01223_dist_on_dist.
|
# This test is a subset of the 01223_dist_on_dist.
|
||||||
# (just in case, with real separate instances).
|
# (just in case, with real separate instances).
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster
|
||||||
@ -51,7 +51,7 @@ def started_cluster():
|
|||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("node", NODES.values())
|
@pytest.mark.parametrize("node", list(NODES.values()))
|
||||||
@pytest.mark.parametrize("source",
|
@pytest.mark.parametrize("source",
|
||||||
["distributed_over_distributed_table", "cluster('test_cluster', default, distributed_table)"])
|
["distributed_over_distributed_table", "cluster('test_cluster', default, distributed_table)"])
|
||||||
class TestDistributedOverDistributedSuite:
|
class TestDistributedOverDistributedSuite:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
@ -9,6 +9,9 @@ from helpers.uclient import client, prompt, end_of_block
|
|||||||
|
|
||||||
cluster = ClickHouseCluster(__file__)
|
cluster = ClickHouseCluster(__file__)
|
||||||
|
|
||||||
|
# log = sys.stdout
|
||||||
|
log = None
|
||||||
|
|
||||||
NODES = {'node' + str(i): cluster.add_instance(
|
NODES = {'node' + str(i): cluster.add_instance(
|
||||||
'node' + str(i),
|
'node' + str(i),
|
||||||
main_configs=['configs/remote_servers.xml'],
|
main_configs=['configs/remote_servers.xml'],
|
||||||
@ -63,12 +66,11 @@ def poll_query(node, query, expected, timeout):
|
|||||||
pass
|
pass
|
||||||
assert node.query(query) == expected
|
assert node.query(query) == expected
|
||||||
|
|
||||||
@pytest.mark.parametrize("node", NODES.values()[:1])
|
@pytest.mark.parametrize("node", list(NODES.values())[:1])
|
||||||
@pytest.mark.parametrize("source", ["lv_over_distributed_table"])
|
@pytest.mark.parametrize("source", ["lv_over_distributed_table"])
|
||||||
class TestLiveViewOverDistributedSuite:
|
class TestLiveViewOverDistributedSuite:
|
||||||
def test_distributed_over_live_view_order_by_node(self, started_cluster, node, source):
|
def test_distributed_over_live_view_order_by_node(self, started_cluster, node, source):
|
||||||
log = sys.stdout
|
node0, node1 = list(NODES.values())
|
||||||
node0, node1 = NODES.values()
|
|
||||||
|
|
||||||
select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV"
|
select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV"
|
||||||
select_query_dist_table = "SELECT * FROM distributed_table ORDER BY node, key FORMAT CSV"
|
select_query_dist_table = "SELECT * FROM distributed_table ORDER BY node, key FORMAT CSV"
|
||||||
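dict views are iterable but not indexable or sliceable, so picking the first node for parametrize requires list(NODES.values())[:1], as the change above does. Sketch with hypothetical node names standing in for the cluster instances:

NODES = {'node1': 'instance-1', 'node2': 'instance-2'}   # hypothetical stand-ins

# NODES.values()[:1] raises TypeError in Python 3: views do not support slicing.
first_node = list(NODES.values())[:1]
node0, node1 = NODES.values()      # plain unpacking of a view still works
print(first_node, node0, node1)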
@ -118,8 +120,7 @@ class TestLiveViewOverDistributedSuite:
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source):
|
def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source):
|
||||||
log = sys.stdout
|
node0, node1 = list(NODES.values())
|
||||||
node0, node1 = NODES.values()
|
|
||||||
|
|
||||||
select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV"
|
select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV"
|
||||||
select_count_query = "SELECT count() FROM distributed_over_lv"
|
select_count_query = "SELECT count() FROM distributed_over_lv"
|
||||||
@ -160,8 +161,7 @@ class TestLiveViewOverDistributedSuite:
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source):
|
def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source):
|
||||||
log = sys.stdout
|
node0, node1 = list(NODES.values())
|
||||||
node0, node1 = NODES.values()
|
|
||||||
|
|
||||||
select_query = "SELECT node, SUM(value) FROM distributed_over_lv GROUP BY node ORDER BY node FORMAT CSV"
|
select_query = "SELECT node, SUM(value) FROM distributed_over_lv GROUP BY node ORDER BY node FORMAT CSV"
|
||||||
|
|
||||||
@ -204,8 +204,7 @@ class TestLiveViewOverDistributedSuite:
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source):
|
def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source):
|
||||||
log = sys.stdout
|
node0, node1 = list(NODES.values())
|
||||||
node0, node1 = NODES.values()
|
|
||||||
|
|
||||||
select_query = "SELECT key, SUM(value) FROM distributed_over_lv GROUP BY key ORDER BY key FORMAT CSV"
|
select_query = "SELECT key, SUM(value) FROM distributed_over_lv GROUP BY key ORDER BY key FORMAT CSV"
|
||||||
|
|
||||||
@ -249,8 +248,7 @@ class TestLiveViewOverDistributedSuite:
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
def test_distributed_over_live_view_sum(self, started_cluster, node, source):
|
def test_distributed_over_live_view_sum(self, started_cluster, node, source):
|
||||||
log = sys.stdout
|
node0, node1 = list(NODES.values())
|
||||||
node0, node1 = NODES.values()
|
|
||||||
|
|
||||||
with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \
|
with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \
|
||||||
client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2:
|
client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2:
|
||||||
|
@ -103,7 +103,7 @@ def started_cluster(request):
|
|||||||
try:
|
try:
|
||||||
cluster.start()
|
cluster.start()
|
||||||
|
|
||||||
for node_id, node in NODES.items():
|
for node_id, node in list(NODES.items()):
|
||||||
node.query(CREATE_TABLES_SQL)
|
node.query(CREATE_TABLES_SQL)
|
||||||
node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id))
|
node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id))
|
||||||
|
|
||||||
@ -155,7 +155,7 @@ def test_reconnect(started_cluster, node_name, first_user, query_base):
|
|||||||
|
|
||||||
with PartitionManager() as pm:
|
with PartitionManager() as pm:
|
||||||
# Break the connection.
|
# Break the connection.
|
||||||
pm.partition_instances(*NODES.values())
|
pm.partition_instances(*list(NODES.values()))
|
||||||
|
|
||||||
# Now it shouldn't:
|
# Now it shouldn't:
|
||||||
_check_timeout_and_exception(node, first_user, query_base, query)
|
_check_timeout_and_exception(node, first_user, query_base, query)
|
||||||
|
@ -65,7 +65,7 @@ def start_cluster():
|
|||||||
yield cluster
|
yield cluster
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print ex
|
print(ex)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from helpers.client import QueryRuntimeException
|
from helpers.client import QueryRuntimeException
|
||||||
|
@ -3,7 +3,7 @@ import logging
|
|||||||
|
|
||||||
import avro.schema
|
import avro.schema
|
||||||
import pytest
|
import pytest
|
||||||
from confluent.schemaregistry.serializers import MessageSerializer
|
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
|
||||||
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
|
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
|
||||||
|
|
||||||
logging.getLogger().setLevel(logging.INFO)
|
logging.getLogger().setLevel(logging.INFO)
|
||||||
|
@ -226,8 +226,8 @@ def test_introspection():
|
|||||||
|
|
||||||
assert instance.query(
|
assert instance.query(
|
||||||
"SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") == \
|
"SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") == \
|
||||||
TSV([["A", "\N", "SELECT", "test", "table", "\N", 0, 0],
|
TSV([["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0],
|
||||||
["B", "\N", "CREATE", "\N", "\N", "\N", 0, 1]])
|
["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1]])
|
||||||
|
|
||||||
|
|
||||||
def test_current_database():
|
def test_current_database():
|
||||||
|
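In Python 3 string literals, \N must introduce a named Unicode escape (\N{...}), so the bare \N used as the TSV NULL marker is now written with an escaped backslash, as in the grants assertion above. In isolation:

# "\N" on its own is a SyntaxError in Python 3; escape the backslash instead.
null_marker = "\\N"
print(null_marker)        # prints \N, the marker ClickHouse emits for NULL
print("\N{BULLET}")       # the \N{name} form is what the escape is reserved for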
@ -301,7 +301,7 @@ CREATE TABLE test.graphite2
|
|||||||
"AND table='graphite2'"))
|
"AND table='graphite2'"))
|
||||||
if parts == 1:
|
if parts == 1:
|
||||||
break
|
break
|
||||||
print('Parts', parts)
|
print(('Parts', parts))
|
||||||
|
|
||||||
assert TSV(
|
assert TSV(
|
||||||
q("SELECT value, timestamp, date, updated FROM test.graphite2")
|
q("SELECT value, timestamp, date, updated FROM test.graphite2")
|
||||||
|
@ -35,7 +35,7 @@ def cluster_without_dns_cache_update():
|
|||||||
yield cluster
|
yield cluster
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print ex
|
print(ex)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
@ -90,7 +90,7 @@ def cluster_with_dns_cache_update():
|
|||||||
yield cluster
|
yield cluster
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print ex
|
print(ex)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
cluster.shutdown()
|
cluster.shutdown()
|
||||||
@ -117,7 +117,7 @@ def test_ip_change_update_dns_cache(cluster_with_dns_cache_update):
|
|||||||
curl_result = node4.exec_in_container(["bash", "-c", "curl -s 'node3:8123'"])
|
curl_result = node4.exec_in_container(["bash", "-c", "curl -s 'node3:8123'"])
|
||||||
assert curl_result == 'Ok.\n'
|
assert curl_result == 'Ok.\n'
|
||||||
cat_resolv = node4.exec_in_container(["bash", "-c", "cat /etc/resolv.conf"])
|
cat_resolv = node4.exec_in_container(["bash", "-c", "cat /etc/resolv.conf"])
|
||||||
print("RESOLV {}".format(cat_resolv))
|
print(("RESOLV {}".format(cat_resolv)))
|
||||||
|
|
||||||
assert_eq_with_retry(node4, "SELECT * FROM remote('node3', 'system', 'one')", "0", sleep_time=0.5)
|
assert_eq_with_retry(node4, "SELECT * FROM remote('node3', 'system', 'one')", "0", sleep_time=0.5)
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
import contextlib
|
import contextlib
|
||||||
import os
|
import os
|
||||||
import urllib
|
import urllib.request, urllib.parse, urllib.error
|
||||||
|
|
||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster
|
||||||
|
|
||||||
@ -22,7 +22,7 @@ class SimpleCluster:
|
|||||||
def test_dynamic_query_handler():
|
def test_dynamic_query_handler():
|
||||||
with contextlib.closing(
|
with contextlib.closing(
|
||||||
SimpleCluster(ClickHouseCluster(__file__), "dynamic_handler", "test_dynamic_handler")) as cluster:
|
SimpleCluster(ClickHouseCluster(__file__), "dynamic_handler", "test_dynamic_handler")) as cluster:
|
||||||
test_query = urllib.quote_plus('SELECT * FROM system.settings WHERE name = \'max_threads\'')
|
test_query = urllib.parse.quote_plus('SELECT * FROM system.settings WHERE name = \'max_threads\'')
|
||||||
|
|
||||||
assert 404 == cluster.instance.http_request('?max_threads=1', method='GET', headers={'XXX': 'xxx'}).status_code
|
assert 404 == cluster.instance.http_request('?max_threads=1', method='GET', headers={'XXX': 'xxx'}).status_code
|
||||||
|
|
||||||
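urllib was split into submodules; quote_plus now lives in urllib.parse, matching the rewritten import and call above. In isolation:

import urllib.parse

test_query = urllib.parse.quote_plus("SELECT * FROM system.settings WHERE name = 'max_threads'")
print(test_query)    # spaces become '+', quotes and '*' are percent-encoded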
@ -54,11 +54,11 @@ def test_predefined_query_handler():
|
|||||||
assert 500 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='GET',
|
assert 500 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='GET',
|
||||||
headers={'XXX': 'xxx'}).status_code
|
headers={'XXX': 'xxx'}).status_code
|
||||||
|
|
||||||
assert 'max_threads\t1\n' == cluster.instance.http_request(
|
assert b'max_threads\t1\n' == cluster.instance.http_request(
|
||||||
'test_predefined_handler_get?max_threads=1&setting_name=max_threads', method='GET',
|
'test_predefined_handler_get?max_threads=1&setting_name=max_threads', method='GET',
|
||||||
headers={'XXX': 'xxx'}).content
|
headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
assert 'max_threads\t1\nmax_alter_threads\t1\n' == cluster.instance.http_request(
|
assert b'max_threads\t1\nmax_alter_threads\t1\n' == cluster.instance.http_request(
|
||||||
'query_param_with_url/max_threads?max_threads=1&max_alter_threads=1',
|
'query_param_with_url/max_threads?max_threads=1&max_alter_threads=1',
|
||||||
headers={'XXX': 'max_alter_threads'}).content
|
headers={'XXX': 'max_alter_threads'}).content
|
||||||
|
|
||||||
@ -79,7 +79,7 @@ def test_fixed_static_handler():
|
|||||||
assert 'text/html; charset=UTF-8' == \
|
assert 'text/html; charset=UTF-8' == \
|
||||||
cluster.instance.http_request('test_get_fixed_static_handler', method='GET',
|
cluster.instance.http_request('test_get_fixed_static_handler', method='GET',
|
||||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||||
assert 'Test get static handler and fix content' == cluster.instance.http_request(
|
assert b'Test get static handler and fix content' == cluster.instance.http_request(
|
||||||
'test_get_fixed_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
'test_get_fixed_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
|
|
||||||
@ -100,7 +100,7 @@ def test_config_static_handler():
|
|||||||
assert 'text/plain; charset=UTF-8' == \
|
assert 'text/plain; charset=UTF-8' == \
|
||||||
cluster.instance.http_request('test_get_config_static_handler', method='GET',
|
cluster.instance.http_request('test_get_config_static_handler', method='GET',
|
||||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||||
assert 'Test get static handler and config content' == cluster.instance.http_request(
|
assert b'Test get static handler and config content' == cluster.instance.http_request(
|
||||||
'test_get_config_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
'test_get_config_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
|
|
||||||
@ -126,7 +126,7 @@ def test_absolute_path_static_handler():
|
|||||||
assert 'text/html; charset=UTF-8' == \
|
assert 'text/html; charset=UTF-8' == \
|
||||||
cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET',
|
cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET',
|
||||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||||
assert '<html><body>Absolute Path File</body></html>\n' == cluster.instance.http_request(
|
assert b'<html><body>Absolute Path File</body></html>\n' == cluster.instance.http_request(
|
||||||
'test_get_absolute_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
'test_get_absolute_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
|
|
||||||
@ -152,7 +152,7 @@ def test_relative_path_static_handler():
|
|||||||
assert 'text/html; charset=UTF-8' == \
|
assert 'text/html; charset=UTF-8' == \
|
||||||
cluster.instance.http_request('test_get_relative_path_static_handler', method='GET',
|
cluster.instance.http_request('test_get_relative_path_static_handler', method='GET',
|
||||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||||
assert '<html><body>Relative Path File</body></html>\n' == cluster.instance.http_request(
|
assert b'<html><body>Relative Path File</body></html>\n' == cluster.instance.http_request(
|
||||||
'test_get_relative_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
'test_get_relative_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
|
|
||||||
@ -160,19 +160,19 @@ def test_defaults_http_handlers():
|
|||||||
with contextlib.closing(
|
with contextlib.closing(
|
||||||
SimpleCluster(ClickHouseCluster(__file__), "defaults_handlers", "test_defaults_handlers")) as cluster:
|
SimpleCluster(ClickHouseCluster(__file__), "defaults_handlers", "test_defaults_handlers")) as cluster:
|
||||||
assert 200 == cluster.instance.http_request('', method='GET').status_code
|
assert 200 == cluster.instance.http_request('', method='GET').status_code
|
||||||
assert 'Default server response' == cluster.instance.http_request('', method='GET').content
|
assert b'Default server response' == cluster.instance.http_request('', method='GET').content
|
||||||
|
|
||||||
assert 200 == cluster.instance.http_request('ping', method='GET').status_code
|
assert 200 == cluster.instance.http_request('ping', method='GET').status_code
|
||||||
assert 'Ok.\n' == cluster.instance.http_request('ping', method='GET').content
|
assert b'Ok.\n' == cluster.instance.http_request('ping', method='GET').content
|
||||||
|
|
||||||
assert 200 == cluster.instance.http_request('replicas_status', method='get').status_code
|
assert 200 == cluster.instance.http_request('replicas_status', method='get').status_code
|
||||||
assert 'Ok.\n' == cluster.instance.http_request('replicas_status', method='get').content
|
assert b'Ok.\n' == cluster.instance.http_request('replicas_status', method='get').content
|
||||||
|
|
||||||
assert 200 == cluster.instance.http_request('replicas_status?verbose=1', method='get').status_code
|
assert 200 == cluster.instance.http_request('replicas_status?verbose=1', method='get').status_code
|
||||||
assert '' == cluster.instance.http_request('replicas_status?verbose=1', method='get').content
|
assert b'' == cluster.instance.http_request('replicas_status?verbose=1', method='get').content
|
||||||
|
|
||||||
assert 200 == cluster.instance.http_request('?query=SELECT+1', method='GET').status_code
|
assert 200 == cluster.instance.http_request('?query=SELECT+1', method='GET').status_code
|
||||||
assert '1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content
|
assert b'1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content
|
||||||
|
|
||||||
|
|
||||||
def test_prometheus_handler():
|
def test_prometheus_handler():
|
||||||
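The content assertions above now compare against bytes literals because, under Python 3, the body of an HTTP response is bytes. Assuming the http_request helper wraps the requests library, the distinction looks like this (endpoint and expected body are illustrative, mirroring ClickHouse's /ping):

import requests

r = requests.get('http://localhost:8123/ping')   # assumed local ClickHouse HTTP port
assert r.content == b'Ok.\n'    # .content is bytes in Python 3
assert r.text == 'Ok.\n'        # .text is the decoded str, if comparing strings reads better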
@ -186,7 +186,7 @@ def test_prometheus_handler():
|
|||||||
headers={'XXX': 'xxx'}).status_code
|
headers={'XXX': 'xxx'}).status_code
|
||||||
|
|
||||||
assert 200 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).status_code
|
assert 200 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).status_code
|
||||||
assert 'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET',
|
assert b'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET',
|
||||||
headers={'XXX': 'xxx'}).content
|
headers={'XXX': 'xxx'}).content
|
||||||
|
|
||||||
|
|
||||||
@ -203,5 +203,5 @@ def test_replicas_status_handler():
|
|||||||
|
|
||||||
assert 200 == cluster.instance.http_request('test_replicas_status', method='GET',
|
assert 200 == cluster.instance.http_request('test_replicas_status', method='GET',
|
||||||
headers={'XXX': 'xxx'}).status_code
|
headers={'XXX': 'xxx'}).status_code
|
||||||
assert 'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET',
|
assert b'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET',
|
||||||
headers={'XXX': 'xxx'}).content
|
headers={'XXX': 'xxx'}).content
|
||||||
|
@ -75,7 +75,7 @@ def test_replication_after_partition(both_https_cluster):
|
|||||||
closing_pool = Pool(1)
|
closing_pool = Pool(1)
|
||||||
inserting_pool = Pool(5)
|
inserting_pool = Pool(5)
|
||||||
cres = closing_pool.map_async(close, [random.randint(1, 3) for _ in range(10)])
|
cres = closing_pool.map_async(close, [random.randint(1, 3) for _ in range(10)])
|
||||||
ires = inserting_pool.map_async(insert_data_and_check, range(100))
|
ires = inserting_pool.map_async(insert_data_and_check, list(range(100)))
|
||||||
|
|
||||||
cres.wait()
|
cres.wait()
|
||||||
ires.wait()
|
ires.wait()
|
||||||
|
@ -1,4 +1,3 @@
|
|||||||
#!/usr/bin/env python2
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
@ -119,6 +118,6 @@ def test_async_inserts_into_local_shard(started_cluster):
|
|||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
with contextmanager(started_cluster)() as cluster:
|
with contextmanager(started_cluster)() as cluster:
|
||||||
for name, instance in cluster.instances.items():
|
for name, instance in list(cluster.instances.items()):
|
||||||
print name, instance.ip_address
|
print(name, instance.ip_address)
|
||||||
raw_input("Cluster created, press any key to destroy...")
|
input("Cluster created, press any key to destroy...")
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -7,6 +7,8 @@ from helpers.cluster import ClickHouseCluster
|
|||||||
from helpers.uclient import client, prompt, end_of_block
|
from helpers.uclient import client, prompt, end_of_block
|
||||||
|
|
||||||
cluster = ClickHouseCluster(__file__)
|
cluster = ClickHouseCluster(__file__)
|
||||||
|
# log = sys.stdout
|
||||||
|
log = None
|
||||||
|
|
||||||
NODES = {'node' + str(i): cluster.add_instance(
|
NODES = {'node' + str(i): cluster.add_instance(
|
||||||
'node' + str(i),
|
'node' + str(i),
|
||||||
@@ -55,7 +57,7 @@ def started_cluster():
         cluster.shutdown()


-@pytest.mark.parametrize("node", NODES.values()[:1])
+@pytest.mark.parametrize("node", list(NODES.values())[:1])
 @pytest.mark.parametrize("source", ["lv_over_distributed_table"])
 class TestLiveViewOverDistributedSuite:
     def test_select_with_order_by_node(self, started_cluster, node, source):
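The list(...) wrapper in the parametrize decorator above is not cosmetic: in Python 3 dict.values() returns a view object that cannot be sliced, so the old NODES.values()[:1] would raise TypeError. A short self-contained sketch, with a stand-in NODES dict:

# Minimal sketch (stand-in dict): dict views cannot be sliced in Python 3.
NODES = {'node1': object(), 'node2': object()}

try:
    NODES.values()[:1]                  # worked in Python 2, raises TypeError in Python 3
except TypeError:
    pass

first = list(NODES.values())[:1]        # materialize the view first, as the patch does
assert len(first) == 1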
@@ -87,7 +89,6 @@ node2\t1\t11
                == "22\n"

     def test_watch_live_view_order_by_node(self, started_cluster, node, source):
-        log = sys.stdout
         command = " ".join(node.client.command)
         args = dict(log=log, command=command)

@@ -130,7 +131,6 @@ node2\t1\t11
         client1.expect('"node3",3,3,3')

     def test_watch_live_view_order_by_key(self, started_cluster, node, source):
-        log = sys.stdout
         command = " ".join(node.client.command)
         args = dict(log=log, command=command)

@@ -173,7 +173,6 @@ node2\t1\t11
         client1.expect('"node3",3,3,3')

     def test_watch_live_view_group_by_node(self, started_cluster, node, source):
-        log = sys.stdout
         command = " ".join(node.client.command)
         args = dict(log=log, command=command)

@@ -208,7 +207,6 @@ node2\t1\t11
         client1.expect('"node3",3,3')

     def test_watch_live_view_group_by_key(self, started_cluster, node, source):
-        log = sys.stdout
         command = " ".join(node.client.command)
         args = dict(log=log, command=command)
         sep = ' \xe2\x94\x82'
@@ -245,7 +243,6 @@ node2\t1\t11
         client1.expect('3,3,3')

     def test_watch_live_view_sum(self, started_cluster, node, source):
-        log = sys.stdout
         command = " ".join(node.client.command)
         args = dict(log=log, command=command)

@@ -11,7 +11,7 @@ def check_query(clickhouse_node, query, result_set, retry_count=3, interval_seco
         if result_set == lastest_result:
             return

-        print lastest_result
+        print(lastest_result)
         time.sleep(interval_seconds)

     assert lastest_result == result_set
@@ -6,7 +6,7 @@ import pymysql.cursors
 import pytest
 from helpers.cluster import ClickHouseCluster, get_docker_compose_path

-import materialize_with_ddl
+from . import materialize_with_ddl

 DOCKER_COMPOSE_PATH = get_docker_compose_path()
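The import rewrite above follows from Python 3 dropping implicit relative imports: a bare import materialize_with_ddl no longer resolves to a sibling module of the test file, so the commit switches to an explicit relative import. A sketch of the two spellings; the package layout described is an assumption, not taken from the diff:

# Minimal sketch (assumed layout: a test package containing materialize_with_ddl.py
# and test.py side by side, with an __init__.py so the directory is a package).
#
# Python 2 only -- implicit relative import, removed in Python 3:
#     import materialize_with_ddl
#
# Python 3 -- explicit relative import, as in the patch (works only when test.py is
# imported as part of its package, which is how pytest collects it):
#     from . import materialize_with_ddl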
@@ -50,10 +50,10 @@ class MySQLNodeInstance:
         while time.time() - start < timeout:
             try:
                 self.alloc_connection()
-                print "Mysql Started"
+                print("Mysql Started")
                 return
             except Exception as ex:
-                print "Can't connect to MySQL " + str(ex)
+                print("Can't connect to MySQL " + str(ex))
                 time.sleep(0.5)

         subprocess.check_call(['docker-compose', 'ps', '--services', 'all'])
@@ -119,8 +119,8 @@ def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_
         materialize_with_ddl.alter_modify_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
                                                                                  "mysql1")
     except:
-        print(clickhouse_node.query(
-            "select '\n', thread_id, query_id, arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym from system.stack_trace format TSVRaw"))
+        print((clickhouse_node.query(
+            "select '\n', thread_id, query_id, arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym from system.stack_trace format TSVRaw")))
         raise

@@ -44,12 +44,12 @@ def start_small_cluster():

 def test_single_endpoint_connections_count(start_small_cluster):
     def task(count):
-        print("Inserting ten times from {}".format(count))
-        for i in xrange(count, count + 10):
+        print(("Inserting ten times from {}".format(count)))
+        for i in range(count, count + 10):
             node1.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

     p = Pool(10)
-    p.map(task, xrange(0, 100, 10))
+    p.map(task, range(0, 100, 10))

     assert_eq_with_retry(node1, "select count() from test_table", "100")
     assert_eq_with_retry(node2, "select count() from test_table", "100")
@@ -97,17 +97,17 @@ def start_big_cluster():

 def test_multiple_endpoint_connections_count(start_big_cluster):
     def task(count):
-        print("Inserting ten times from {}".format(count))
+        print(("Inserting ten times from {}".format(count)))
         if (count / 10) % 2 == 1:
             node = node3
         else:
             node = node4

-        for i in xrange(count, count + 10):
+        for i in range(count, count + 10):
             node.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

     p = Pool(10)
-    p.map(task, xrange(0, 100, 10))
+    p.map(task, range(0, 100, 10))

     assert_eq_with_retry(node3, "select count() from test_table", "100")
     assert_eq_with_retry(node4, "select count() from test_table", "100")
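xrange() is gone in Python 3; range() is now itself the lazy sequence, so the renames above keep the same behaviour, and Pool.map() happily consumes the range directly. Note also that (count / 10) in the hunk above is now true division, which still picks the right node here because a whole-number float compares equal to the int 1. A small self-contained sketch of the range part (simplified worker, no multiprocessing involved):

# Minimal sketch (simplified worker, no Pool): range() replaces xrange() in Python 3.
counts = range(0, 100, 10)                 # lazy, like Python 2's xrange(0, 100, 10)
assert list(counts) == [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]

def task(count):
    # Each call covers ten consecutive ids, mirroring the insert loops above.
    return [i for i in range(count, count + 10)]

assert task(20) == list(range(20, 30))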
@@ -68,6 +68,6 @@ def test_select_table_name_from_merge_over_distributed(started_cluster):

 if __name__ == '__main__':
     with contextmanager(started_cluster)() as cluster:
-        for name, instance in cluster.instances.items():
-            print name, instance.ip_address
-        raw_input("Cluster created, press any key to destroy...")
+        for name, instance in list(cluster.instances.items()):
+            print(name, instance.ip_address)
+        input("Cluster created, press any key to destroy...")
|
|||||||
if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "":
|
if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "":
|
||||||
assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
|
assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
|
||||||
else: # mutation failed, let's try on another disk
|
else: # mutation failed, let's try on another disk
|
||||||
print "Mutation failed"
|
print("Mutation failed")
|
||||||
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
|
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
|
||||||
node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
|
node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
|
||||||
retry = 20
|
retry = 20
|
||||||
@@ -1114,7 +1114,7 @@ def test_download_appropriate_disk(start_cluster):

     for _ in range(10):
         try:
-            print "Syncing replica"
+            print("Syncing replica")
             node2.query("SYSTEM SYNC REPLICA replicated_table_for_download")
             break
         except:
@@ -122,7 +122,7 @@ def test_delete_and_drop_mutation(started_cluster):
             if int(result.strip()) == 2:
                 break
         except:
-            print "Result", result
+            print("Result", result)
             pass

         time.sleep(0.5)
@@ -44,8 +44,8 @@ def test_mutations_with_merge_background_task(started_cluster):
             all_done = True
             break

-    print instance_test_mutations.query(
-        "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")
+    print(instance_test_mutations.query(
+        "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames"))
     assert all_done

@@ -44,7 +44,7 @@ class MySQLNodeInstance:
             res = "\n".join(rows)
             return res

-        if isinstance(execution_query, (str, bytes, unicode)):
+        if isinstance(execution_query, (str, bytes)):
             return execute(execution_query)
         else:
             return [execute(q) for q in execution_query]
@@ -256,7 +256,7 @@ def test_mysql_types(started_cluster, case_name, mysql_type, expected_ch_type, m
         res = node.query(query, **kwargs)
         return res if isinstance(res, int) else res.rstrip('\n\r')

-    if isinstance(query, (str, bytes, unicode)):
+    if isinstance(query, (str, bytes)):
         return do_execute(query)
     else:
         return [do_execute(q) for q in query]
|
|||||||
-e "SELECT 1;"
|
-e "SELECT 1;"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stdout == '\n'.join(['1', '1', ''])
|
assert stdout.decode() == '\n'.join(['1', '1', ''])
|
||||||
|
|
||||||
code, (stdout, stderr) = mysql_client.exec_run('''
|
code, (stdout, stderr) = mysql_client.exec_run('''
|
||||||
mysql --protocol tcp -h {host} -P {port} default -u default --password=123
|
mysql --protocol tcp -h {host} -P {port} default -u default --password=123
|
||||||
@ -106,13 +106,13 @@ def test_mysql_client(mysql_client, server_address):
|
|||||||
-e "SELECT 'тест' as b;"
|
-e "SELECT 'тест' as b;"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stdout == '\n'.join(['a', '1', 'b', 'тест', ''])
|
assert stdout.decode() == '\n'.join(['a', '1', 'b', 'тест', ''])
|
||||||
|
|
||||||
code, (stdout, stderr) = mysql_client.exec_run('''
|
code, (stdout, stderr) = mysql_client.exec_run('''
|
||||||
mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;"
|
mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stderr == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \
|
assert stderr.decode() == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \
|
||||||
'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n'
|
'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n'
|
||||||
|
|
||||||
code, (stdout, stderr) = mysql_client.exec_run('''
|
code, (stdout, stderr) = mysql_client.exec_run('''
|
||||||
@ -122,8 +122,8 @@ def test_mysql_client(mysql_client, server_address):
|
|||||||
-e "use system2;"
|
-e "use system2;"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stdout == 'count()\n1\n'
|
assert stdout.decode() == 'count()\n1\n'
|
||||||
assert stderr[0:182] == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
assert stderr[0:182].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
||||||
"ERROR 81 (00000) at line 1: Code: 81, e.displayText() = DB::Exception: Database system2 doesn't exist"
|
"ERROR 81 (00000) at line 1: Code: 81, e.displayText() = DB::Exception: Database system2 doesn't exist"
|
||||||
|
|
||||||
code, (stdout, stderr) = mysql_client.exec_run('''
|
code, (stdout, stderr) = mysql_client.exec_run('''
|
||||||
@ -140,7 +140,7 @@ def test_mysql_client(mysql_client, server_address):
|
|||||||
-e "SELECT * FROM tmp ORDER BY tmp_column;"
|
-e "SELECT * FROM tmp ORDER BY tmp_column;"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stdout == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', ''])
|
assert stdout.decode() == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', ''])
|
||||||
|
|
||||||
|
|
||||||
def test_mysql_client_exception(mysql_client, server_address):
|
def test_mysql_client_exception(mysql_client, server_address):
|
||||||
@ -150,7 +150,7 @@ def test_mysql_client_exception(mysql_client, server_address):
|
|||||||
-e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');"
|
-e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert stderr[0:266] == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
assert stderr[0:266].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
||||||
"ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 2002, e.displayText() = mysqlxx::ConnectionFailed: Can't connect to MySQL server on '127.0.0.1' (115) ((nullptr):0)"
|
"ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 2002, e.displayText() = mysqlxx::ConnectionFailed: Can't connect to MySQL server on '127.0.0.1' (115) ((nullptr):0)"
|
||||||
|
|
||||||
|
|
||||||
@ -188,14 +188,14 @@ def test_mysql_replacement_query(mysql_client, server_address):
|
|||||||
--password=123 -e "select database();"
|
--password=123 -e "select database();"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'database()\ndefault\n'
|
assert stdout.decode() == 'database()\ndefault\n'
|
||||||
|
|
||||||
code, (stdout, stderr) = mysql_client.exec_run('''
|
code, (stdout, stderr) = mysql_client.exec_run('''
|
||||||
mysql --protocol tcp -h {host} -P {port} default -u default
|
mysql --protocol tcp -h {host} -P {port} default -u default
|
||||||
--password=123 -e "select DATABASE();"
|
--password=123 -e "select DATABASE();"
|
||||||
'''.format(host=server_address, port=server_port), demux=True)
|
'''.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'DATABASE()\ndefault\n'
|
assert stdout.decode() == 'DATABASE()\ndefault\n'
|
||||||
|
|
||||||
|
|
||||||
def test_mysql_explain(mysql_client, server_address):
|
def test_mysql_explain(mysql_client, server_address):
|
||||||
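All of the .decode() calls added in the hunks above come from one change: docker-py's exec_run() hands back the captured stdout/stderr as bytes, and under Python 3 bytes never compare equal to the str literals the tests expect. Decoding once before the comparison is enough. A minimal sketch with a stand-in value, so it runs without Docker:

# Minimal sketch (stand-in bytes, no Docker involved): compare decoded text, not bytes.
stdout = b'database()\ndefault\n'        # exec_run(..., demux=True) yields bytes streams

assert stdout != 'database()\ndefault\n'            # bytes vs str is always unequal
assert stdout.decode() == 'database()\ndefault\n'   # decode (UTF-8 by default), then compare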
@@ -238,6 +238,7 @@ def test_mysql_federated(mysql_server, server_address):
     node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"})

     def check_retryable_error_in_stderr(stderr):
+        stderr = stderr.decode()
         return ("Can't connect to local MySQL server through socket" in stderr
                 or "MySQL server has gone away" in stderr
                 or "Server shutdown in progress" in stderr)
@@ -252,8 +253,8 @@ def test_mysql_federated(mysql_server, server_address):
         '''.format(host=server_address, port=server_port), demux=True)

         if code != 0:
-            print("stdout", stdout)
-            print("stderr", stderr)
+            print(("stdout", stdout))
+            print(("stderr", stderr))
             if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
                 time.sleep(1)
                 continue
@@ -266,14 +267,14 @@ def test_mysql_federated(mysql_server, server_address):
         '''.format(host=server_address, port=server_port), demux=True)

         if code != 0:
-            print("stdout", stdout)
-            print("stderr", stderr)
+            print(("stdout", stdout))
+            print(("stderr", stderr))
             if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
                 time.sleep(1)
                 continue
         assert code == 0

-        assert stdout == '\n'.join(['col', '0', '1', '5', ''])
+        assert stdout.decode() == '\n'.join(['col', '0', '1', '5', ''])

         code, (stdout, stderr) = mysql_server.exec_run('''
             mysql
@@ -282,14 +283,14 @@ def test_mysql_federated(mysql_server, server_address):
         '''.format(host=server_address, port=server_port), demux=True)

         if code != 0:
-            print("stdout", stdout)
-            print("stderr", stderr)
+            print(("stdout", stdout))
+            print(("stderr", stderr))
             if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
                 time.sleep(1)
                 continue
         assert code == 0

-        assert stdout == '\n'.join(['col', '0', '0', '1', '1', '5', '5', ''])
+        assert stdout.decode() == '\n'.join(['col', '0', '0', '1', '1', '5', '5', ''])


 def test_mysql_set_variables(mysql_client, server_address):
|
|||||||
|
|
||||||
def test_golang_client(server_address, golang_container):
|
def test_golang_client(server_address, golang_container):
|
||||||
# type: (str, Container) -> None
|
# type: (str, Container) -> None
|
||||||
with open(os.path.join(SCRIPT_DIR, 'golang.reference')) as fp:
|
with open(os.path.join(SCRIPT_DIR, 'golang.reference'), 'rb') as fp:
|
||||||
reference = fp.read()
|
reference = fp.read()
|
||||||
|
|
||||||
code, (stdout, stderr) = golang_container.exec_run(
|
code, (stdout, stderr) = golang_container.exec_run(
|
||||||
@ -370,7 +371,7 @@ def test_golang_client(server_address, golang_container):
|
|||||||
'abc'.format(host=server_address, port=server_port), demux=True)
|
'abc'.format(host=server_address, port=server_port), demux=True)
|
||||||
|
|
||||||
assert code == 1
|
assert code == 1
|
||||||
assert stderr == "Error 81: Database abc doesn't exist\n"
|
assert stderr.decode() == "Error 81: Database abc doesn't exist\n"
|
||||||
|
|
||||||
code, (stdout, stderr) = golang_container.exec_run(
|
code, (stdout, stderr) = golang_container.exec_run(
|
||||||
'./main --host {host} --port {port} --user default --password 123 --database '
|
'./main --host {host} --port {port} --user default --password 123 --database '
|
||||||
@ -391,31 +392,31 @@ def test_php_client(server_address, php_container):
|
|||||||
code, (stdout, stderr) = php_container.exec_run(
|
code, (stdout, stderr) = php_container.exec_run(
|
||||||
'php -f test.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
|
'php -f test.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'tables\n'
|
assert stdout.decode() == 'tables\n'
|
||||||
|
|
||||||
code, (stdout, stderr) = php_container.exec_run(
|
code, (stdout, stderr) = php_container.exec_run(
|
||||||
'php -f test_ssl.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
|
'php -f test_ssl.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'tables\n'
|
assert stdout.decode() == 'tables\n'
|
||||||
|
|
||||||
code, (stdout, stderr) = php_container.exec_run(
|
code, (stdout, stderr) = php_container.exec_run(
|
||||||
'php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
|
'php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
|
||||||
demux=True)
|
demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'tables\n'
|
assert stdout.decode() == 'tables\n'
|
||||||
|
|
||||||
code, (stdout, stderr) = php_container.exec_run(
|
code, (stdout, stderr) = php_container.exec_run(
|
||||||
'php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
|
'php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
|
||||||
demux=True)
|
demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == 'tables\n'
|
assert stdout.decode() == 'tables\n'
|
||||||
|
|
||||||
|
|
||||||
def test_mysqljs_client(server_address, nodejs_container):
|
def test_mysqljs_client(server_address, nodejs_container):
|
||||||
code, (_, stderr) = nodejs_container.exec_run(
|
code, (_, stderr) = nodejs_container.exec_run(
|
||||||
'node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True)
|
'node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 1
|
assert code == 1
|
||||||
assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr
|
assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr.decode()
|
||||||
|
|
||||||
code, (_, stderr) = nodejs_container.exec_run(
|
code, (_, stderr) = nodejs_container.exec_run(
|
||||||
'node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port),
|
'node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port),
|
||||||
@ -449,21 +450,21 @@ def test_java_client(server_address, java_container):
|
|||||||
'java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database '
|
'java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database '
|
||||||
'default'.format(host=server_address, port=server_port), demux=True)
|
'default'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == reference
|
assert stdout.decode() == reference
|
||||||
|
|
||||||
# non-empty password passed.
|
# non-empty password passed.
|
||||||
code, (stdout, stderr) = java_container.exec_run(
|
code, (stdout, stderr) = java_container.exec_run(
|
||||||
'java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database '
|
'java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database '
|
||||||
'default'.format(host=server_address, port=server_port), demux=True)
|
'default'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == reference
|
assert stdout.decode() == reference
|
||||||
|
|
||||||
# double-sha1 password passed.
|
# double-sha1 password passed.
|
||||||
code, (stdout, stderr) = java_container.exec_run(
|
code, (stdout, stderr) = java_container.exec_run(
|
||||||
'java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database '
|
'java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database '
|
||||||
'default'.format(host=server_address, port=server_port), demux=True)
|
'default'.format(host=server_address, port=server_port), demux=True)
|
||||||
assert code == 0
|
assert code == 0
|
||||||
assert stdout == reference
|
assert stdout.decode() == reference
|
||||||
|
|
||||||
|
|
||||||
def test_types(server_address):
|
def test_types(server_address):
|
||||||
|
Some files were not shown because too many files have changed in this diff.