Mirror of https://github.com/ClickHouse/ClickHouse.git

Convert to python3 (#15007)
Commit 9cb3c743bd (parent 404c199448)
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3


 from __future__ import print_function
 import sys
 import json

@@ -99,7 +99,7 @@ def gen_html_json(options, arguments):
     tuples = read_stats_file(options, arguments[1])
     print('{')
     print('"system: GreenPlum(x2),')
-    print('"version": "%s",' % '4.3.9.1')
+    print(('"version": "%s",' % '4.3.9.1'))
     print('"data_size": 10000000,')
     print('"time": "",')
     print('"comments": "",')
debian/control (2 changes, vendored)
@@ -62,5 +62,5 @@ Description: debugging symbols for clickhouse-common-static
 Package: clickhouse-test
 Priority: optional
 Architecture: all
-Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python, python-lxml, python-termcolor, python-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet, brotli, bsdutils
+Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python3, python3-lxml, python3-termcolor, python3-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet, brotli, bsdutils
 Description: ClickHouse tests
@@ -25,10 +25,10 @@ RUN apt-get update \
     ninja-build \
     perl \
     pkg-config \
-    python \
-    python-lxml \
-    python-requests \
-    python-termcolor \
+    python3 \
+    python3-lxml \
+    python3-requests \
+    python3-termcolor \
     tzdata \
     llvm-${LLVM_VERSION} \
     clang-${LLVM_VERSION} \
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #-*- coding: utf-8 -*-
 import subprocess
 import os
@@ -52,10 +52,10 @@ RUN apt-get update \
     moreutils \
     ninja-build \
     psmisc \
-    python \
-    python-lxml \
-    python-requests \
-    python-termcolor \
+    python3 \
+    python3-lxml \
+    python3-requests \
+    python3-termcolor \
     qemu-user-static \
     rename \
    software-properties-common \
@@ -4,7 +4,7 @@ FROM yandex/clickhouse-test-base
 RUN apt-get update \
     && env DEBIAN_FRONTEND=noninteractive apt-get -y install \
         tzdata \
-        python \
+        python3 \
         libreadline-dev \
         libicu-dev \
         bsdutils \
@@ -16,13 +16,13 @@ RUN apt-get update \
     iproute2 \
     module-init-tools \
     cgroupfs-mount \
-    python-pip \
+    python3-pip \
     tzdata \
     libreadline-dev \
     libicu-dev \
     bsdutils \
     curl \
-    python-pika \
+    python3-pika \
     liblua5.1-dev \
     luajit \
     libssl-dev \

@@ -37,7 +37,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry grpcio grpcio-tools cassandra-driver
+RUN python3 -m pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio grpcio grpcio-tools cassandra-driver confluent-kafka avro

 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 17.09.1-ce
@@ -312,7 +312,7 @@ def add_errors_explained():


 if args.report == 'main':
-    print(header_template.format())
+    print((header_template.format()))

     add_tested_commits()

@@ -571,14 +571,14 @@ if args.report == 'main':
        status = 'failure'
        message = 'Errors while building the report.'

-    print("""
+    print(("""
    <!--status: {status}-->
    <!--message: {message}-->
-    """.format(status=status, message=message))
+    """.format(status=status, message=message)))

 elif args.report == 'all-queries':

-    print(header_template.format())
+    print((header_template.format()))

     add_tested_commits()
@@ -4,7 +4,7 @@ FROM yandex/clickhouse-stateless-test
 RUN apt-get update -y \
     && env DEBIAN_FRONTEND=noninteractive \
         apt-get install --yes --no-install-recommends \
-            python-requests \
+            python3-requests \
             llvm-9

 COPY s3downloader /s3downloader
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
 import sys

@@ -29,7 +29,7 @@ def dowload_with_progress(url, path):
     logging.info("Downloading from %s to temp path %s", url, path)
     for i in range(RETRIES_COUNT):
         try:
-            with open(path, 'w') as f:
+            with open(path, 'wb') as f:
                 response = requests.get(url, stream=True)
                 response.raise_for_status()
                 total_length = response.headers.get('content-length')

@@ -74,7 +74,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Simple tool for dowloading datasets for clickhouse from S3")

-    parser.add_argument('--dataset-names', required=True, nargs='+', choices=AVAILABLE_DATASETS.keys())
+    parser.add_argument('--dataset-names', required=True, nargs='+', choices=list(AVAILABLE_DATASETS.keys()))
     parser.add_argument('--url-prefix', default=DEFAULT_URL)
     parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')
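The mode change from 'w' to 'wb' matters because requests hands back response bodies as bytes on Python 3, and bytes cannot be written to a text-mode file. A minimal sketch of the same download pattern (illustrative only, not code from this commit; the function name is made up):

```python
import requests

def download(url, path):
    # Open in binary mode: response.iter_content() yields bytes in Python 3,
    # and bytes cannot be written to a file opened in text mode.
    with open(path, 'wb') as f:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size=64 * 1024):
            if chunk:
                f.write(chunk)
```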
@@ -6,7 +6,7 @@ RUN echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9
 RUN apt-get update -y \
     && env DEBIAN_FRONTEND=noninteractive \
         apt-get install --yes --no-install-recommends \
-            python-requests
+            python3-requests

 COPY s3downloader /s3downloader
 COPY run.sh /run.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
 import sys

@@ -74,7 +74,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Simple tool for dowloading datasets for clickhouse from S3")

-    parser.add_argument('--dataset-names', required=True, nargs='+', choices=AVAILABLE_DATASETS.keys())
+    parser.add_argument('--dataset-names', required=True, nargs='+', choices=list(AVAILABLE_DATASETS.keys()))
     parser.add_argument('--url-prefix', default=DEFAULT_URL)
     parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')
@@ -12,10 +12,10 @@ RUN apt-get update -y \
     ncdu \
     netcat-openbsd \
     openssl \
-    python \
-    python-lxml \
-    python-requests \
-    python-termcolor \
+    python3 \
+    python3-lxml \
+    python3-requests \
+    python3-termcolor \
     qemu-user-static \
     sudo \
     telnet \
@@ -3,10 +3,10 @@ FROM yandex/clickhouse-test-base

 RUN apt-get update -y && \
     apt-get install -y --no-install-recommends \
-        python-pip \
-        python-setuptools
+        python3-pip \
+        python3-setuptools

-RUN pip install \
+RUN python3 -m pip install \
     pytest \
     pytest-html \
     pytest-timeout \

@@ -17,4 +17,4 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
     dpkg -i package_folder/clickhouse-server_*.deb; \
     dpkg -i package_folder/clickhouse-client_*.deb; \
     dpkg -i package_folder/clickhouse-test_*.deb; \
-    python -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --html=test_output/report.html --self-contained-html
+    python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --html=test_output/report.html --self-contained-html
@@ -54,10 +54,10 @@ RUN apt-get --allow-unauthenticated update -y \
     perl \
     pigz \
     pkg-config \
-    python \
-    python-lxml \
-    python-requests \
-    python-termcolor \
+    python3 \
+    python3-lxml \
+    python3-requests \
+    python3-termcolor \
     qemu-user-static \
     sudo \
     telnet \
@@ -12,10 +12,10 @@ RUN apt-get update -y \
     fakeroot \
     debhelper \
     expect \
-    python \
-    python-lxml \
-    python-termcolor \
-    python-requests \
+    python3 \
+    python3-lxml \
+    python3-termcolor \
+    python3-requests \
     sudo \
     openssl \
     ncdu \
@@ -10,10 +10,10 @@ RUN apt-get update -y \
     debhelper \
     parallel \
     expect \
-    python \
-    python-lxml \
-    python-termcolor \
-    python-requests \
+    python3 \
+    python3-lxml \
+    python3-termcolor \
+    python3-requests \
     curl \
     sudo \
     openssl \
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 from multiprocessing import cpu_count
 from subprocess import Popen, check_call
@@ -116,7 +116,7 @@ ninja
 Example for Fedora Rawhide:
 ``` bash
 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ Example for OpenSUSE Tumbleweed:
 Example for Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -103,7 +103,7 @@ $ cd ..
 Example for Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ Example for openSUSE Tumbleweed:
 Example for Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -102,7 +102,7 @@ Example for OpenSUSE Tumbleweed:
 Example for Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-

 import os, sys
@@ -71,8 +71,8 @@ def choose_latest_releases(args):
         logging.fatal('Unexpected GitHub response: %s', str(candidates))
         sys.exit(1)

-    logging.info('Found LTS releases: %s', ', '.join(seen_lts.keys()))
-    logging.info('Found stable releases: %s', ', '.join(seen_stable.keys()))
+    logging.info('Found LTS releases: %s', ', '.join(list(seen_lts.keys())))
+    logging.info('Found stable releases: %s', ', '.join(list(seen_stable.keys())))
     return sorted(list(seen_lts.items()) + list(seen_stable.items()))
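Wrapping dict methods in list(...) is a typical 2to3 artifact: in Python 3, keys()/values()/items() return view objects rather than lists. For ', '.join(...) the wrapper is not strictly needed, but it is required wherever the result is indexed or the dict is mutated while iterating. A small illustration with made-up data:

```python
releases = {'v20.3-lts': 1, 'v20.8-lts': 2}

# Views are iterable, so join() works on them directly in Python 3:
print(', '.join(releases.keys()))

# But views are not sequences: indexing requires materializing a list.
first = list(releases.keys())[0]

# Mutating a dict while iterating over a live view raises RuntimeError,
# which is why loops that delete entries iterate over list(d.keys()).
for key in list(releases.keys()):
    if key.endswith('-lts'):
        releases.pop(key)
```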
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals

 import datetime
 import os
@@ -59,7 +59,7 @@ def build_docs_nav(lang, args):
     _, _, nav = build_nav_entry(docs_dir, args)
     result = []
     index_key = None
-    for key, value in nav.items():
+    for key, value in list(nav.items()):
         if key and value:
             if value == 'index.md':
                 index_key = key
@@ -59,7 +59,7 @@ def convert_to_dicts(changed_files, batch_size):
 def post_data(prepared_batches, token):
     headers = {"Authorization": "Bearer {}".format(token)}
     for batch in prepared_batches:
-        print("Pugring cache for", ", ".join(batch["files"]))
+        print(("Pugring cache for", ", ".join(batch["files"])))
         response = requests.post(CLOUDFLARE_URL, json=batch, headers=headers)
         response.raise_for_status()
         time.sleep(3)

@@ -71,8 +71,8 @@ if __name__ == "__main__":
         raise Exception("Env variable CLOUDFLARE_TOKEN is empty")
     base_domain = os.getenv("BASE_DOMAIN", "https://content.clickhouse.tech/")
     changed_files = collect_changed_files()
-    print("Found", len(changed_files), "changed files")
+    print(("Found", len(changed_files), "changed files"))
     filtered_files = filter_and_transform_changed_files(changed_files, base_domain)
-    print("Files rest after filtering", len(filtered_files))
+    print(("Files rest after filtering", len(filtered_files)))
     prepared_batches = convert_to_dicts(filtered_files, 25)
     post_data(prepared_batches, token)
@@ -15,7 +15,7 @@ import website

 def recursive_values(item):
     if isinstance(item, dict):
-        for _, value in item.items():
+        for _, value in list(item.items()):
             yield from recursive_values(value)
     elif isinstance(item, list):
         for value in item:
@@ -1,6 +1,5 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals

 import logging
 import os
 import sys

@@ -42,4 +42,4 @@ def typograph(text):

 if __name__ == '__main__':
     import sys
-    print(typograph(sys.stdin.read()))
+    print((typograph(sys.stdin.read())))
@@ -102,7 +102,7 @@ Example for OpenSUSE Tumbleweed:
 Example for Fedora Rawhide:

 sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
+yum --nogpg install git cmake make gcc-c++ python3
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build && cd build
 cmake ../ClickHouse
release (2 changes)
@@ -66,7 +66,7 @@ do
         shift
     elif [[ $1 == '--fast' ]]; then
         # Wrong but fast pbuilder mode: create base package with all depends
-        EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python python-lxml python-termcolor python-requests curl perl sudo openssl netcat-openbsd"
+        EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python3 python3-lxml python3-termcolor python3-requests curl perl sudo openssl netcat-openbsd"
         shift
     elif [[ $1 == '--rpm' ]]; then
         MAKE_RPM=1
@@ -24,16 +24,16 @@ for s in sys.stdin.read().split():
     parts[m1].append((i1, i2, l, s))

 for m, ps in sorted(parts.items()):
-    ps.sort(key=lambda (i1, i2, l, s): (i1, -i2, -l))
+    ps.sort(key=lambda i1_i2_l_s: (i1_i2_l_s[0], -i1_i2_l_s[1], -i1_i2_l_s[2]))
     (x2, y2, l2, s2) = (-1, -1, -1, -1)
     for x1, y1, l1, s1 in ps:
         if x1 >= x2 and y1 <= y2 and l1 < l2 and (x1, y1) != (x2, y2): # 2 contains 1
             pass
         elif x1 > y2: # 1 is to the right of 2
             if x1 != y2 + 1 and y2 != -1:
-                print # to see the missing numbers
+                print() # to see the missing numbers
             (x2, y2, l2, s2) = (x1, y1, l1, s1)
-            print s1
+            print(s1)
         else:
             raise Exception('invalid parts intersection: ' + s1 + ' and ' + s2)
-print
+print()
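The rewritten sort key reflects PEP 3113: Python 3 removed tuple parameter unpacking in function and lambda signatures, so lambda (i1, i2, l, s): ... is now a syntax error and the single argument has to be indexed instead. A hedged sketch of both spellings:

```python
parts = [(3, 4, 2, 'b'), (1, 2, 2, 'a')]

# Python 2 only (SyntaxError on Python 3):
#   parts.sort(key=lambda (i1, i2, l, s): (i1, -i2, -l))

# Python 3: take one argument and index it, as 2to3 generates...
parts.sort(key=lambda t: (t[0], -t[1], -t[2]))

# ...or unpack explicitly in a named helper, which reads better.
def sort_key(part):
    i1, i2, length, _s = part
    return (i1, -i2, -length)

parts.sort(key=sort_key)
```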
@@ -1,5 +1,5 @@
-#!/usr/bin/env python2
-from __future__ import print_function
+#!/usr/bin/env python3

 import sys
 import os
 import os.path

@@ -23,7 +23,7 @@ try:
 except ImportError:
     termcolor = None
 from random import random
-import commands
+import subprocess
 import multiprocessing
 from contextlib import closing
@@ -99,7 +99,7 @@ def remove_control_characters(s):
     """
     def str_to_int(s, default, base=10):
         if int(s, base) < 0x10000:
-            return unichr(int(s, base))
+            return chr(int(s, base))
         return default
     s = re.sub(r"&#(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
     s = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
@@ -129,8 +129,8 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
         return ''.join(random.choice(alphabet) for _ in range(length))
     database = 'test_{suffix}'.format(suffix=random_str())

-    clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    clickhouse_proc_create.communicate("CREATE DATABASE " + database + get_db_engine(args))
+    clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+    clickhouse_proc_create.communicate(("CREATE DATABASE " + database + get_db_engine(args)))

     os.environ["CLICKHOUSE_DATABASE"] = database

@@ -157,8 +157,8 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
             sleep(0.01)

     if not args.database:
-        clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-        clickhouse_proc_create.communicate("DROP DATABASE " + database)
+        clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+        clickhouse_proc_create.communicate(("DROP DATABASE " + database))

     total_time = (datetime.now() - start_time).total_seconds()
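A Popen pipe works in bytes on Python 3 unless universal_newlines=True (or text=True / an explicit encoding in newer versions) is passed, so communicate() must receive str in text mode and bytes otherwise, which is why the calls above add the flag. A small sketch, using cat as a stand-in for the real client command:

```python
from subprocess import Popen, PIPE

# Text mode: communicate() accepts and returns str.
proc = Popen(['cat'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, _ = proc.communicate("CREATE DATABASE test_example")
assert isinstance(out, str)

# Default (binary) mode: everything is bytes.
proc = Popen(['cat'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, _ = proc.communicate(b"CREATE DATABASE test_example")
assert isinstance(out, bytes)
```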
@@ -166,10 +166,10 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
         os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stdout_file))
         os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stderr_file))

-    stdout = open(stdout_file, 'r').read() if os.path.exists(stdout_file) else ''
-    stdout = unicode(stdout, errors='replace', encoding='utf-8')
-    stderr = open(stderr_file, 'r').read() if os.path.exists(stderr_file) else ''
-    stderr = unicode(stderr, errors='replace', encoding='utf-8')
+    stdout = open(stdout_file, 'rb').read() if os.path.exists(stdout_file) else b''
+    stdout = str(stdout, errors='replace', encoding='utf-8')
+    stderr = open(stderr_file, 'rb').read() if os.path.exists(stderr_file) else b''
+    stderr = str(stderr, errors='replace', encoding='utf-8')

     return proc, stdout, stderr, total_time
@@ -300,8 +300,8 @@ def run_tests_array(all_tests_with_params):
                 else:

                     if args.testname:
-                        clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-                        clickhouse_proc.communicate("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite))
+                        clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+                        clickhouse_proc.communicate(("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite)))

                     if clickhouse_proc.returncode != 0:
                         failures += 1
@@ -342,7 +342,7 @@ def run_tests_array(all_tests_with_params):
                             print(" - return code {}".format(proc.returncode))

                         if stderr:
-                            print(stderr.encode('utf-8'))
+                            print(stderr)

                         # Stop on fatal errors like segmentation fault. They are send to client via logs.
                         if ' <Fatal> ' in stderr:
@@ -360,22 +360,22 @@ def run_tests_array(all_tests_with_params):
                             failures_chain += 1
                             print(MSG_FAIL, end='')
                             print_test_time(total_time)
-                            print(" - having stderror:\n{}".format(stderr.encode('utf-8')))
+                            print(" - having stderror:\n{}".format(stderr))
                         elif 'Exception' in stdout:
                             failures += 1
                             failures_chain += 1
                             print(MSG_FAIL, end='')
                             print_test_time(total_time)
-                            print(" - having exception:\n{}".format(stdout.encode('utf-8')))
+                            print(" - having exception:\n{}".format(stdout))
                         elif not os.path.isfile(reference_file):
                             print(MSG_UNKNOWN, end='')
                             print_test_time(total_time)
                             print(" - no reference file")
                         else:
-                            result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout = PIPE)
+                            result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout=PIPE)

                             if result_is_different:
-                                diff = Popen(['diff', '-U', str(args.unified), reference_file, stdout_file], stdout = PIPE).communicate()[0]
+                                diff = Popen(['diff', '-U', str(args.unified), reference_file, stdout_file], stdout=PIPE, universal_newlines=True).communicate()[0]
                                 failures += 1
                                 print(MSG_FAIL, end='')
                                 print_test_time(total_time)
@@ -419,9 +419,9 @@ def check_server_started(client, retry_count):
     sys.stdout.flush()
     while retry_count > 0:
         clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-        (stdout, stderr) = clickhouse_proc.communicate("SELECT 1")
-        if clickhouse_proc.returncode == 0 and stdout.startswith("1"):
+        (stdout, stderr) = clickhouse_proc.communicate(b"SELECT 1")
+        if clickhouse_proc.returncode == 0 and stdout.startswith(b"1"):
             print(" OK")
             sys.stdout.flush()
             return True
@@ -468,46 +468,46 @@ class BuildFlags(object):

 def collect_build_flags(client):
     clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
+    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
     result = []

     if clickhouse_proc.returncode == 0:
-        if '-fsanitize=thread' in stdout:
+        if b'-fsanitize=thread' in stdout:
             result.append(BuildFlags.THREAD)
-        elif '-fsanitize=address' in stdout:
+        elif b'-fsanitize=address' in stdout:
             result.append(BuildFlags.ADDRESS)
-        elif '-fsanitize=undefined' in stdout:
+        elif b'-fsanitize=undefined' in stdout:
             result.append(BuildFlags.UNDEFINED)
-        elif '-fsanitize=memory' in stdout:
+        elif b'-fsanitize=memory' in stdout:
             result.append(BuildFlags.MEMORY)
     else:
         raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

     clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'")
+    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'")

     if clickhouse_proc.returncode == 0:
-        if 'Debug' in stdout:
+        if b'Debug' in stdout:
             result.append(BuildFlags.DEBUG)
-        elif 'RelWithDebInfo' in stdout or 'Release' in stdout:
+        elif b'RelWithDebInfo' in stdout or b'Release' in stdout:
             result.append(BuildFlags.RELEASE)
     else:
         raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

     clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'")
+    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'")

     if clickhouse_proc.returncode == 0:
-        if 'ON' in stdout or '1' in stdout:
+        if b'ON' in stdout or b'1' in stdout:
             result.append(BuildFlags.UNBUNDLED)
     else:
         raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

     clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.settings WHERE name = 'default_database_engine'")
+    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.settings WHERE name = 'default_database_engine'")

     if clickhouse_proc.returncode == 0:
-        if 'Ordinary' in stdout:
+        if b'Ordinary' in stdout:
             result.append(BuildFlags.DATABASE_ORDINARY)
     else:
         raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))
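Because these Popen calls stay in the default binary mode, stdout is bytes and a comparison like 'Debug' in stdout would raise TypeError, hence the b'...' literals above. An alternative is to decode once at the boundary, sketched below (an illustration, not how the commit does it; query_build_option and client_cmd are made-up names):

```python
import shlex
from subprocess import Popen, PIPE

def query_build_option(client_cmd, name):
    proc = Popen(shlex.split(client_cmd), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate(
        ("SELECT value FROM system.build_options WHERE name = '%s'" % name).encode())
    if proc.returncode != 0:
        raise RuntimeError(stderr.decode(errors='replace'))
    # Decode once at the boundary; all later comparisons use ordinary str.
    return stdout.decode('utf-8', errors='replace')
```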
@@ -523,11 +523,11 @@ def main(args):

     def is_data_present():
         clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-        (stdout, stderr) = clickhouse_proc.communicate("EXISTS TABLE test.hits")
+        (stdout, stderr) = clickhouse_proc.communicate(b"EXISTS TABLE test.hits")
         if clickhouse_proc.returncode != 0:
             raise CalledProcessError(clickhouse_proc.returncode, args.client, stderr)

-        return stdout.startswith('1')
+        return stdout.startswith(b'1')

     if not check_server_started(args.client, args.server_check_retries):
         raise Exception("clickhouse-server is not responding. Cannot execute 'SELECT 1' query.")
@@ -562,7 +562,7 @@ def main(args):
         stop_time = time() + args.global_time_limit

     if args.zookeeper is None:
-        code, out = commands.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l')
+        code, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l')
         try:
             if int(out) > 0:
                 args.zookeeper = True
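commands.getstatusoutput went away with the commands module in Python 3; subprocess.getstatusoutput is the drop-in replacement, likewise running the command through the shell and returning an (exit_code, output) pair. For example:

```python
import subprocess

# Returns (0, "1") on a typical shell; non-zero exit codes signal failure.
code, out = subprocess.getstatusoutput("echo 127.0.0.1 | grep . | wc -l")
if code == 0 and int(out) > 0:
    print("found listen hosts:", out)
```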
@@ -572,18 +572,18 @@ def main(args):
             args.zookeeper = False

     if args.shard is None:
-        code, out = commands.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key listen_host | grep -E "127.0.0.2|::"')
+        code, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key listen_host | grep -E "127.0.0.2|::"')
         if out:
             args.shard = True
         else:
             args.shard = False

     if args.database and args.database != "test":
-        clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-        clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS " + args.database + get_db_engine(args))
+        clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+        clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + args.database + get_db_engine(args)))

-    clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS test" + get_db_engine(args))
+    clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+    clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS test" + get_db_engine(args)))

     def is_test_from_dir(suite_dir, case):
         case_file = os.path.join(suite_dir, case)
@@ -595,14 +595,14 @@ def main(args):
             return random()

         if -1 == item.find('_'):
-            return 99998
+            return 99998, ''

         prefix, suffix = item.split('_', 1)

         try:
             return int(prefix), suffix
         except ValueError:
-            return 99997
+            return 99997, ''

     total_tests_run = 0
     for suite in sorted(os.listdir(base_dir), key=sute_key_func):
@@ -650,7 +650,7 @@ def main(args):
                 return 99997

             all_tests = os.listdir(suite_dir)
-            all_tests = filter(lambda case: is_test_from_dir(suite_dir, case), all_tests)
+            all_tests = [case for case in all_tests if is_test_from_dir(suite_dir, case)]
             if args.test:
                 all_tests = [t for t in all_tests if any([re.search(r, t) for r in args.test])]
             all_tests.sort(key=key_func)
@@ -670,7 +670,7 @@ def main(args):
             if jobs > run_total:
                 run_total = jobs

-            batch_size = len(all_tests) / jobs
+            batch_size = len(all_tests) // jobs
             all_tests_array = []
             for i in range(0, len(all_tests), batch_size):
                 all_tests_array.append((all_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
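The // matters here: in Python 3, / always yields a float, and a float batch_size would make range(0, len(all_tests), batch_size) fail with a TypeError. A short illustration with toy values:

```python
tests = list(range(10))
jobs = 3

batch_size = len(tests) // jobs   # 3 (int); len(tests) / jobs would be 3.333...
batches = [tests[i:i + batch_size] for i in range(0, len(tests), batch_size)]
```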
@@ -37,7 +37,7 @@ class ClickHouseServer:

             s.connect(('localhost', port))
         except socket.error as socketerror:
-            print "Error: ", socketerror
+            print("Error: ", socketerror)
             raise

     def shutdown(self, timeout=10):
@@ -1,6 +1,6 @@
-from server import ClickHouseServer
-from client import ClickHouseClient
-from table import ClickHouseTable
+from .server import ClickHouseServer
+from .client import ClickHouseClient
+from .table import ClickHouseTable
 import os
 import errno
 from shutil import rmtree

@@ -140,7 +140,7 @@ class ClickHouseServerWithCatboostModels:
         if not os.path.exists(self.models_dir):
             os.makedirs(self.models_dir)

-        for name, model in self.models.items():
+        for name, model in list(self.models.items()):
             model_path = os.path.join(self.models_dir, name + '.cbm')
             config_path = os.path.join(self.models_dir, name + '_model.xml')
             params = {
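Python 3 dropped implicit relative imports (PEP 328), so from server import ... inside a package would now look for a top-level module named server; the leading dot makes the intra-package import explicit. A sketch with a hypothetical package layout:

```python
# mypackage/__init__.py  (hypothetical layout, for illustration only)
from .server import ClickHouseServer   # explicit relative import, works on Python 2 and 3
from .client import ClickHouseClient

# "from server import ClickHouseServer" would only work on Python 2,
# or on Python 3 if a top-level module named "server" happened to exist.
```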
@@ -1,5 +1,5 @@
-from server import ClickHouseServer
-from client import ClickHouseClient
+from .server import ClickHouseServer
+from .client import ClickHouseClient
 from pandas import DataFrame
 import os
 import threading

@@ -40,7 +40,7 @@ class ClickHouseTable:
         column_types = list(self.df.dtypes)
         column_names = list(self.df)
         schema = ', '.join((name + ' ' + self._convert(str(t)) for name, t in zip(column_names, column_types)))
-        print 'schema:', schema
+        print('schema:', schema)

         create_query = 'create table test.{} (date Date DEFAULT today(), {}) engine = MergeTree(date, (date), 8192)'
         self.client.query(create_query.format(self.table_name, schema))

@@ -58,10 +58,10 @@ class ClickHouseTable:
         result = self.client.query(query.format(model_name, columns, self.table_name))

         def parse_row(row):
-            values = tuple(map(float, filter(len, map(str.strip, row.replace('(', '').replace(')', '').split(',')))))
+            values = tuple(map(float, list(filter(len, list(map(str.strip, row.replace('(', '').replace(')', '').split(',')))))))
             return values if len(values) != 1 else values[0]

-        return tuple(map(parse_row, filter(len, map(str.strip, result.split('\n')))))
+        return tuple(map(parse_row, list(filter(len, list(map(str.strip, result.split('\n')))))))

     def _drop_table(self):
         self.client.query('drop table test.{}'.format(self.table_name))
@@ -19,10 +19,10 @@ def train_catboost_model(df, target, cat_features, params, verbose=True):
     if not isinstance(df, DataFrame):
         raise Exception('DataFrame object expected, but got ' + repr(df))

-    print 'features:', df.columns.tolist()
+    print('features:', df.columns.tolist())

     cat_features_index = list(df.columns.get_loc(feature) for feature in cat_features)
-    print 'cat features:', cat_features_index
+    print('cat features:', cat_features_index)
     model = CatBoostClassifier(**params)
     model.fit(df, target, cat_features=cat_features_index, verbose=verbose)
     return model
@@ -23,7 +23,7 @@ def check_predictions(test_name, target, pred_python, pred_ch, acc_threshold):

     acc = 1 - np.sum(np.abs(ch_class - np.array(target))) / (len(target) + .0)
     assert acc >= acc_threshold
-    print test_name, 'accuracy: {:.10f}'.format(acc)
+    print(test_name, 'accuracy: {:.10f}'.format(acc))


 def test_apply_float_features_only():
@@ -52,9 +52,9 @@ def test_apply_float_features_only():
     train_target = get_target(train_df)
     test_target = get_target(test_df)

-    print
-    print 'train target', train_target
-    print 'test target', test_target
+    print()
+    print('train target', train_target)
+    print('test target', test_target)

     params = {
         'iterations': 4,

@@ -71,8 +71,8 @@ def test_apply_float_features_only():
     with server:
         pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-    print 'python predictions', pred_python
-    print 'clickhouse predictions', pred_ch
+    print('python predictions', pred_python)
+    print('clickhouse predictions', pred_ch)

     check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -105,9 +105,9 @@ def test_apply_float_features_with_string_cat_features():
     train_target = get_target(train_df)
     test_target = get_target(test_df)

-    print
-    print 'train target', train_target
-    print 'test target', test_target
+    print()
+    print('train target', train_target)
+    print('test target', test_target)

     params = {
         'iterations': 6,

@@ -124,8 +124,8 @@ def test_apply_float_features_with_string_cat_features():
     with server:
         pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-    print 'python predictions', pred_python
-    print 'clickhouse predictions', pred_ch
+    print('python predictions', pred_python)
+    print('clickhouse predictions', pred_ch)

     check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -158,9 +158,9 @@ def test_apply_float_features_with_int_cat_features():
     train_target = get_target(train_df)
     test_target = get_target(test_df)

-    print
-    print 'train target', train_target
-    print 'test target', test_target
+    print()
+    print('train target', train_target)
+    print('test target', test_target)

     params = {
         'iterations': 6,

@@ -177,8 +177,8 @@ def test_apply_float_features_with_int_cat_features():
     with server:
         pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-    print 'python predictions', pred_python
-    print 'clickhouse predictions', pred_ch
+    print('python predictions', pred_python)
+    print('clickhouse predictions', pred_ch)

     check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -211,9 +211,9 @@ def test_apply_float_features_with_mixed_cat_features():
     train_target = get_target(train_df)
     test_target = get_target(test_df)

-    print
-    print 'train target', train_target
-    print 'test target', test_target
+    print()
+    print('train target', train_target)
+    print('test target', test_target)

     params = {
         'iterations': 6,

@@ -230,8 +230,8 @@ def test_apply_float_features_with_mixed_cat_features():
     with server:
         pred_ch = (np.array(server.apply_model(name, test_df, [])) > 0).astype(int)

-    print 'python predictions', pred_python
-    print 'clickhouse predictions', pred_ch
+    print('python predictions', pred_python)
+    print('clickhouse predictions', pred_ch)

     check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -269,9 +269,9 @@ def test_apply_multiclass():
     train_target = get_target(train_df)
     test_target = get_target(test_df)

-    print
-    print 'train target', train_target
-    print 'test target', test_target
+    print()
+    print('train target', train_target)
+    print('test target', test_target)

     params = {
         'iterations': 10,

@@ -288,7 +288,7 @@ def test_apply_multiclass():
     with server:
         pred_ch = np.argmax(np.array(server.apply_model(name, test_df, [])), axis=1)

-    print 'python predictions', pred_python
-    print 'clickhouse predictions', pred_ch
+    print('python predictions', pred_python)
+    print('clickhouse predictions', pred_ch)

     check_predictions(name, test_target, pred_python, pred_ch, 0.9)
@@ -12,11 +12,11 @@ You must install latest Docker from
 https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository
 Don't use Docker from your system repository.

-* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev`
+* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev`
 * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
-* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry`
+* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio confluent-kafka avro

-(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2 python-kafka python-pytest-timeout python-minio`
+(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio`

 If you want to run the tests under a non-privileged user, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and re-login.
 (You must close all your sessions (for example, restart your computer))
@@ -31,7 +31,7 @@ class Client:
         command += ['--query', sql]

         if settings is not None:
-            for setting, value in settings.iteritems():
+            for setting, value in settings.items():
                 command += ['--' + setting, str(value)]

         if user is not None:
@@ -67,7 +67,7 @@ class QueryRuntimeException(Exception):
 class CommandRequest:
     def __init__(self, command, stdin=None, timeout=None, ignore_error=False):
         # Write data to tmp file to avoid PIPEs and execution blocking
-        stdin_file = tempfile.TemporaryFile()
+        stdin_file = tempfile.TemporaryFile(mode='w+')
         stdin_file.write(stdin)
         stdin_file.seek(0)
         self.stdout_file = tempfile.TemporaryFile()
@@ -80,7 +80,7 @@ class CommandRequest:
         # can print some debug information there
         env = {}
         env["TSAN_OPTIONS"] = "verbosity=0"
-        self.process = sp.Popen(command, stdin=stdin_file, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
+        self.process = sp.Popen(command, stdin=stdin_file, stdout=self.stdout_file, stderr=self.stderr_file, env=env, universal_newlines=True)

         self.timer = None
         self.process_finished_before_timeout = True
@@ -98,8 +98,8 @@ class CommandRequest:
         self.stdout_file.seek(0)
         self.stderr_file.seek(0)

-        stdout = self.stdout_file.read()
-        stderr = self.stderr_file.read()
+        stdout = self.stdout_file.read().decode()
+        stderr = self.stderr_file.read().decode()

         if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
             raise QueryTimeoutExceedException('Client timed out!')

@@ -115,8 +115,8 @@ class CommandRequest:
         self.stdout_file.seek(0)
         self.stderr_file.seek(0)

-        stdout = self.stdout_file.read()
-        stderr = self.stderr_file.read()
+        stdout = self.stdout_file.read().decode()
+        stderr = self.stderr_file.read().decode()

         if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
             raise QueryTimeoutExceedException('Client timed out!')

@@ -131,8 +131,8 @@ class CommandRequest:
         self.stdout_file.seek(0)
         self.stderr_file.seek(0)

-        stdout = self.stdout_file.read()
-        stderr = self.stderr_file.read()
+        stdout = self.stdout_file.read().decode()
+        stderr = self.stderr_file.read().decode()

         if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
             raise QueryTimeoutExceedException('Client timed out!')
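This is the same bytes/str boundary again: tempfile.TemporaryFile() defaults to binary mode ('w+b'), so read() returns bytes and needs a .decode(), while the stdin file is opened with mode='w+' so a query string can be written into it. A compressed sketch of the pattern (cat stands in for the real command):

```python
import subprocess
import tempfile

stdin_file = tempfile.TemporaryFile(mode='w+')   # text mode: accepts str
stdin_file.write("SELECT 1")
stdin_file.seek(0)

stdout_file = tempfile.TemporaryFile()           # binary mode by default
subprocess.Popen(['cat'], stdin=stdin_file, stdout=stdout_file).wait()

stdout_file.seek(0)
text = stdout_file.read().decode()               # bytes -> str
```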
@@ -1,6 +1,6 @@
 import base64
 import errno
-import httplib
+import http.client
 import logging
 import os
 import os.path as p

@@ -12,7 +12,7 @@ import socket
 import subprocess
 import time
 import traceback
-import urllib
+import urllib.parse

 import cassandra.cluster
 import docker

@@ -21,7 +21,7 @@ import pymongo
 import pymysql
 import requests
 import xml.dom.minidom
-from confluent.schemaregistry.client import CachedSchemaRegistryClient
+from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
 from dicttoxml import dicttoxml
 from kazoo.client import KazooClient
 from kazoo.exceptions import KazooException
@@ -41,7 +41,7 @@ SANITIZER_SIGN = "=================="
 def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
     full_path = os.path.join(path, fname)
     with open(full_path, 'w') as f:
-        for var, value in variables.items():
+        for var, value in list(variables.items()):
             f.write("=".join([var, value]) + "\n")
     return full_path

@@ -76,7 +76,7 @@ def get_docker_compose_path():
     if os.path.exists(os.path.dirname('/compose/')):
         return os.path.dirname('/compose/')  # default in docker runner container
     else:
-        print("Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {}".format(LOCAL_DOCKER_COMPOSE_DIR))
+        print(("Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {}".format(LOCAL_DOCKER_COMPOSE_DIR)))
         return LOCAL_DOCKER_COMPOSE_DIR
@@ -91,8 +91,8 @@ class ClickHouseCluster:

     def __init__(self, base_path, name=None, base_config_dir=None, server_bin_path=None, client_bin_path=None,
                  odbc_bridge_bin_path=None, zookeeper_config_path=None, custom_dockerd_host=None):
-        for param in os.environ.keys():
-            print "ENV %40s %s" % (param, os.environ[param])
+        for param in list(os.environ.keys()):
+            print("ENV %40s %s" % (param, os.environ[param]))
         self.base_dir = p.dirname(base_path)
         self.name = name if name is not None else ''

@@ -160,7 +160,7 @@ class ClickHouseCluster:

         self.docker_client = None
         self.is_up = False
-        print "CLUSTER INIT base_config_dir:{}".format(self.base_config_dir)
+        print("CLUSTER INIT base_config_dir:{}".format(self.base_config_dir))

     def get_client_cmd(self):
         cmd = self.client_bin_path
@@ -386,7 +386,7 @@ class ClickHouseCluster:
     def get_instance_ip(self, instance_name):
         docker_id = self.get_instance_docker_id(instance_name)
         handle = self.docker_client.containers.get(docker_id)
-        return handle.attrs['NetworkSettings']['Networks'].values()[0]['IPAddress']
+        return list(handle.attrs['NetworkSettings']['Networks'].values())[0]['IPAddress']

     def get_container_id(self, instance_name):
         docker_id = self.get_instance_docker_id(instance_name)
@@ -395,22 +395,21 @@ class ClickHouseCluster:

     def get_container_logs(self, instance_name):
         container_id = self.get_container_id(instance_name)
-        return self.docker_client.api.logs(container_id)
+        return self.docker_client.api.logs(container_id).decode()

     def exec_in_container(self, container_id, cmd, detach=False, nothrow=False, **kwargs):
         exec_id = self.docker_client.api.exec_create(container_id, cmd, **kwargs)
         output = self.docker_client.api.exec_start(exec_id, detach=detach)

-        output = output.decode('utf8')
         exit_code = self.docker_client.api.exec_inspect(exec_id)['ExitCode']
         if exit_code:
             container_info = self.docker_client.api.inspect_container(container_id)
             image_id = container_info.get('Image')
             image_info = self.docker_client.api.inspect_image(image_id)
-            print("Command failed in container {}: ".format(container_id))
+            print(("Command failed in container {}: ".format(container_id)))
             pprint.pprint(container_info)
             print("")
-            print("Container {} uses image {}: ".format(container_id, image_id))
+            print(("Container {} uses image {}: ".format(container_id, image_id)))
             pprint.pprint(image_info)
             print("")
             message = 'Cmd "{}" failed in container {}. Return code {}. Output: {}'.format(' '.join(cmd), container_id,
@@ -419,14 +418,17 @@ class ClickHouseCluster:
                 print(message)
             else:
                 raise Exception(message)
+        if not detach:
+            return output.decode()
         return output

     def copy_file_to_container(self, container_id, local_path, dest_path):
-        with open(local_path, 'r') as fdata:
+        with open(local_path, "r") as fdata:
             data = fdata.read()
-            encoded_data = base64.b64encode(data)
+            encodedBytes = base64.b64encode(data.encode("utf-8"))
+            encodedStr = str(encodedBytes, "utf-8")
             self.exec_in_container(container_id,
-                                   ["bash", "-c", "echo {} | base64 --decode > {}".format(encoded_data, dest_path)],
+                                   ["bash", "-c", "echo {} | base64 --decode > {}".format(encodedStr, dest_path)],
                                    user='root')

     def wait_mysql_to_start(self, timeout=60):
@@ -435,10 +437,10 @@ class ClickHouseCluster:
             try:
                 conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308)
                 conn.close()
-                print "Mysql Started"
+                print("Mysql Started")
                 return
             except Exception as ex:
-                print "Can't connect to MySQL " + str(ex)
+                print("Can't connect to MySQL " + str(ex))
                 time.sleep(0.5)

         subprocess_call(['docker-compose', 'ps', '--services', '--all'])
@@ -451,10 +453,10 @@ class ClickHouseCluster:
                 conn_string = "host='localhost' user='postgres' password='mysecretpassword'"
                 conn = psycopg2.connect(conn_string)
                 conn.close()
-                print "Postgres Started"
+                print("Postgres Started")
                 return
             except Exception as ex:
-                print "Can't connect to Postgres " + str(ex)
+                print("Can't connect to Postgres " + str(ex))
                 time.sleep(0.5)

         raise Exception("Cannot wait Postgres container")
@@ -466,10 +468,10 @@ class ClickHouseCluster:
             for instance in ['zoo1', 'zoo2', 'zoo3']:
                 conn = self.get_kazoo_client(instance)
                 conn.get_children('/')
-            print "All instances of ZooKeeper started"
+            print("All instances of ZooKeeper started")
             return
         except Exception as ex:
-            print "Can't connect to ZooKeeper " + str(ex)
+            print("Can't connect to ZooKeeper " + str(ex))
             time.sleep(0.5)

         raise Exception("Cannot wait ZooKeeper container")
@@ -480,10 +482,10 @@ class ClickHouseCluster:
         while time.time() - start < timeout:
             try:
                 hdfs_api.write_data("/somefilewithrandomname222", "1")
-                print "Connected to HDFS and SafeMode disabled! "
+                print("Connected to HDFS and SafeMode disabled! ")
                 return
             except Exception as ex:
-                print "Can't connect to HDFS " + str(ex)
+                print("Can't connect to HDFS " + str(ex))
                 time.sleep(1)

         raise Exception("Can't wait HDFS to start")
@@ -496,10 +498,10 @@ class ClickHouseCluster:
         while time.time() - start < timeout:
             try:
                 connection.list_database_names()
-                print "Connected to Mongo dbs:", connection.database_names()
+                print("Connected to Mongo dbs:", connection.database_names())
                 return
             except Exception as ex:
-                print "Can't connect to Mongo " + str(ex)
+                print("Can't connect to Mongo " + str(ex))
                 time.sleep(1)

     def wait_minio_to_start(self, timeout=30, secure=False):
@@ -519,12 +521,12 @@ class ClickHouseCluster:

             minio_client.make_bucket(self.minio_bucket)

-            print("S3 bucket '%s' created", self.minio_bucket)
+            print(("S3 bucket '%s' created", self.minio_bucket))

             self.minio_client = minio_client
             return
         except Exception as ex:
-            print("Can't connect to Minio: %s", str(ex))
+            print(("Can't connect to Minio: %s", str(ex)))
             time.sleep(1)

         raise Exception("Can't wait Minio to start")
@@ -539,7 +541,7 @@ class ClickHouseCluster:
             print("Connected to SchemaRegistry")
             return
         except Exception as ex:
-            print("Can't connect to SchemaRegistry: %s", str(ex))
+            print(("Can't connect to SchemaRegistry: %s", str(ex)))
             time.sleep(1)

     def wait_cassandra_to_start(self, timeout=30):
@@ -555,7 +557,7 @@ class ClickHouseCluster:
             time.sleep(1)

     def start(self, destroy_dirs=True):
-        print "Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs)
+        print("Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs))
         if self.is_up:
             return
@@ -571,11 +573,11 @@ class ClickHouseCluster:

         try:
             if destroy_dirs and p.exists(self.instances_dir):
-                print("Removing instances dir %s", self.instances_dir)
+                print(("Removing instances dir %s", self.instances_dir))
                 shutil.rmtree(self.instances_dir)

-            for instance in self.instances.values():
-                print('Setup directory for instance: {} destroy_dirs: {}'.format(instance.name, destroy_dirs))
+            for instance in list(self.instances.values()):
+                print(('Setup directory for instance: {} destroy_dirs: {}'.format(instance.name, destroy_dirs)))
                 instance.create_dir(destroy_dir=destroy_dirs)

             self.docker_client = docker.from_env(version=self.docker_api_version)
@@ -676,12 +678,12 @@ class ClickHouseCluster:
                 self.wait_cassandra_to_start()

             clickhouse_start_cmd = self.base_cmd + ['up', '-d', '--no-recreate']
-            print("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd)))
+            print(("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd))))
             subprocess_check_call(clickhouse_start_cmd)
             print("ClickHouse instance created")

             start_deadline = time.time() + 20.0 # seconds
-            for instance in self.instances.itervalues():
+            for instance in self.instances.values():
                 instance.docker_client = self.docker_client
                 instance.ip_address = self.get_instance_ip(instance.name)
@@ -693,10 +695,10 @@ class ClickHouseCluster:

             self.is_up = True

-        except BaseException, e:
-            print "Failed to start cluster: "
-            print str(e)
-            print traceback.print_exc()
+        except BaseException as e:
+            print("Failed to start cluster: ")
+            print(str(e))
+            print(traceback.print_exc())
             raise

     def shutdown(self, kill=True):
@@ -705,7 +707,7 @@ class ClickHouseCluster:
             try:
                 subprocess.check_call(self.base_cmd + ['logs'], stdout=f)
             except Exception as e:
-                print "Unable to get logs from docker."
+                print("Unable to get logs from docker.")
             f.seek(0)
             for line in f:
                 if SANITIZER_SIGN in line:
@@ -716,18 +718,18 @@ class ClickHouseCluster:
         try:
             subprocess_check_call(self.base_cmd + ['kill'])
         except Exception as e:
-            print "Kill command failed durung shutdown. {}".format(repr(e))
+            print("Kill command failed durung shutdown. {}".format(repr(e)))

         try:
             subprocess_check_call(self.base_cmd + ['down', '--volumes', '--remove-orphans'])
         except Exception as e:
-            print "Down + remove orphans failed durung shutdown. {}".format(repr(e))
+            print("Down + remove orphans failed durung shutdown. {}".format(repr(e)))

         self.is_up = False

         self.docker_client = None

-        for instance in self.instances.values():
+        for instance in list(self.instances.values()):
             instance.docker_client = None
             instance.ip_address = None
             instance.client = None
@@ -769,7 +771,7 @@ class ClickHouseCluster:
                 kazoo_callback(self.get_kazoo_client(zoo_instance_name))
                 return
             except KazooException as e:
-                print repr(e)
+                print(repr(e))
                 time.sleep(sleep_for)

         kazoo_callback(self.get_kazoo_client(zoo_instance_name))
@@ -922,7 +924,7 @@ class ClickHouseInstance:
                     return result
                 time.sleep(sleep_time)
             except Exception as ex:
-                print "Retry {} got exception {}".format(i + 1, ex)
+                print("Retry {} got exception {}".format(i + 1, ex))
                 time.sleep(sleep_time)

         if result is not None:
@ -954,28 +956,30 @@ class ClickHouseInstance:
|
||||
|
||||
params["query"] = sql
|
||||
|
||||
auth = ""
|
||||
auth = None
|
||||
if user and password:
|
||||
auth = "{}:{}@".format(user, password)
|
||||
auth = requests.auth.HTTPBasicAuth(user, password)
|
||||
elif user:
|
||||
auth = "{}@".format(user)
|
||||
auth = requests.auth.HTTPBasicAuth(user, '')
|
||||
url = "http://" + self.ip_address + ":8123/?" + urllib.parse.urlencode(params)
|
||||
|
||||
url = "http://" + auth + self.ip_address + ":8123/?" + urllib.urlencode(params)
|
||||
|
||||
open_result = urllib.urlopen(url, data)
|
||||
if data:
|
||||
r = requests.post(url, data, auth=auth)
|
||||
else:
|
||||
r = requests.get(url, auth=auth)
|
||||
|
||||
def http_code_and_message():
|
||||
return str(open_result.getcode()) + " " + httplib.responses[
|
||||
open_result.getcode()] + ": " + open_result.read()
|
||||
code = r.status_code
|
||||
return str(code) + " " + http.client.responses[code] + ": " + r.text
|
||||
|
||||
if expect_fail_and_get_error:
|
||||
if open_result.getcode() == 200:
|
||||
raise Exception("ClickHouse HTTP server is expected to fail, but succeeded: " + open_result.read())
|
||||
if r.ok:
|
||||
raise Exception("ClickHouse HTTP server is expected to fail, but succeeded: " + r.text)
|
||||
return http_code_and_message()
|
||||
else:
|
||||
if open_result.getcode() != 200:
|
||||
if not r.ok:
|
||||
raise Exception("ClickHouse HTTP server returned " + http_code_and_message())
|
||||
return open_result.read()
|
||||
return r.text
|
||||
|
||||
# Connects to the instance via HTTP interface, sends a query and returns the answer
|
||||
def http_request(self, url, method='GET', params=None, data=None, headers=None):
|
||||
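The http_query helper drops urllib.urlopen with credentials embedded in the URL and uses requests with HTTPBasicAuth instead. A rough sketch of the same idea, assuming a locally reachable ClickHouse HTTP port (the host, user and query here are placeholders):

import urllib.parse
import requests
from requests.auth import HTTPBasicAuth

params = {"query": "SELECT 1"}
url = "http://127.0.0.1:8123/?" + urllib.parse.urlencode(params)
auth = HTTPBasicAuth("default", "")            # user without a password
r = requests.get(url, auth=auth)
if not r.ok:
    raise Exception("ClickHouse HTTP server returned " + str(r.status_code) + ": " + r.text)
print(r.text)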
@ -1161,9 +1165,9 @@ class ClickHouseInstance:
|
||||
|
||||
def _create_odbc_config_file(self):
|
||||
with open(self.odbc_ini_path.split(':')[0], 'w') as f:
|
||||
for driver_setup in self.odbc_drivers.values():
|
||||
for driver_setup in list(self.odbc_drivers.values()):
|
||||
f.write("[{}]\n".format(driver_setup["DSN"]))
|
||||
for key, value in driver_setup.items():
|
||||
for key, value in list(driver_setup.items()):
|
||||
if key != "DSN":
|
||||
f.write(key + "=" + value + "\n")
|
||||
|
||||
@ -1183,16 +1187,16 @@ class ClickHouseInstance:
|
||||
instance_config_dir = p.abspath(p.join(self.path, 'configs'))
|
||||
os.makedirs(instance_config_dir)
|
||||
|
||||
print "Copy common default production configuration from {}".format(self.base_config_dir)
|
||||
print("Copy common default production configuration from {}".format(self.base_config_dir))
|
||||
shutil.copyfile(p.join(self.base_config_dir, 'config.xml'), p.join(instance_config_dir, 'config.xml'))
|
||||
shutil.copyfile(p.join(self.base_config_dir, 'users.xml'), p.join(instance_config_dir, 'users.xml'))
|
||||
|
||||
print "Create directory for configuration generated in this helper"
|
||||
print("Create directory for configuration generated in this helper")
|
||||
# used by all utils with any config
|
||||
conf_d_dir = p.abspath(p.join(instance_config_dir, 'conf.d'))
|
||||
os.mkdir(conf_d_dir)
|
||||
|
||||
print "Create directory for common tests configuration"
|
||||
print("Create directory for common tests configuration")
|
||||
# used by server with main config.xml
|
||||
self.config_d_dir = p.abspath(p.join(instance_config_dir, 'config.d'))
|
||||
os.mkdir(self.config_d_dir)
|
||||
@ -1201,14 +1205,14 @@ class ClickHouseInstance:
|
||||
dictionaries_dir = p.abspath(p.join(instance_config_dir, 'dictionaries'))
|
||||
os.mkdir(dictionaries_dir)
|
||||
|
||||
print "Copy common configuration from helpers"
|
||||
print("Copy common configuration from helpers")
|
||||
# The file is named with 0_ prefix to be processed before other configuration overloads.
|
||||
shutil.copy(p.join(HELPERS_DIR, '0_common_instance_config.xml'), self.config_d_dir)
|
||||
shutil.copy(p.join(HELPERS_DIR, '0_common_instance_users.xml'), users_d_dir)
|
||||
if len(self.custom_dictionaries_paths):
|
||||
shutil.copy(p.join(HELPERS_DIR, '0_common_enable_dictionaries.xml'), self.config_d_dir)
|
||||
|
||||
print "Generate and write macros file"
|
||||
print("Generate and write macros file")
|
||||
macros = self.macros.copy()
|
||||
macros['instance'] = self.name
|
||||
with open(p.join(conf_d_dir, 'macros.xml'), 'w') as macros_config:
|
||||
@ -1222,7 +1226,7 @@ class ClickHouseInstance:
|
||||
shutil.copytree(self.kerberos_secrets_dir, p.abspath(p.join(self.path, 'secrets')))
|
||||
|
||||
# Copy config.d configs
|
||||
print "Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir)
|
||||
print("Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir))
|
||||
for path in self.custom_main_config_paths:
|
||||
shutil.copy(path, self.config_d_dir)
|
||||
|
||||
@ -1235,16 +1239,16 @@ class ClickHouseInstance:
|
||||
shutil.copy(path, dictionaries_dir)
|
||||
|
||||
db_dir = p.abspath(p.join(self.path, 'database'))
|
||||
print "Setup database dir {}".format(db_dir)
|
||||
print("Setup database dir {}".format(db_dir))
|
||||
if self.clickhouse_path_dir is not None:
|
||||
print "Database files taken from {}".format(self.clickhouse_path_dir)
|
||||
print("Database files taken from {}".format(self.clickhouse_path_dir))
|
||||
shutil.copytree(self.clickhouse_path_dir, db_dir)
|
||||
print "Database copied from {} to {}".format(self.clickhouse_path_dir, db_dir)
|
||||
print("Database copied from {} to {}".format(self.clickhouse_path_dir, db_dir))
|
||||
else:
|
||||
os.mkdir(db_dir)
|
||||
|
||||
logs_dir = p.abspath(p.join(self.path, 'logs'))
|
||||
print "Setup logs dir {}".format(logs_dir)
|
||||
print("Setup logs dir {}".format(logs_dir))
|
||||
os.mkdir(logs_dir)
|
||||
|
||||
depends_on = []
|
||||
@ -1272,7 +1276,7 @@ class ClickHouseInstance:
|
||||
|
||||
env_file = _create_env_file(os.path.dirname(self.docker_compose_path), self.env_variables)
|
||||
|
||||
print "Env {} stored in {}".format(self.env_variables, env_file)
|
||||
print("Env {} stored in {}".format(self.env_variables, env_file))
|
||||
|
||||
odbc_ini_path = ""
|
||||
if self.odbc_ini_path:
|
||||
@ -1284,7 +1288,7 @@ class ClickHouseInstance:
|
||||
if self.stay_alive:
|
||||
entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND
|
||||
|
||||
print "Entrypoint cmd: {}".format(entrypoint_cmd)
|
||||
print("Entrypoint cmd: {}".format(entrypoint_cmd))
|
||||
|
||||
networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = ""
|
||||
if self.ipv4_address is not None or self.ipv6_address is not None or self.hostname != self.name:
|
||||
|
@ -176,7 +176,7 @@ class SourceMongo(ExternalSource):
|
||||
to_insert = []
|
||||
for row in data:
|
||||
row_dict = {}
|
||||
for cell_name, cell_value in row.data.items():
|
||||
for cell_name, cell_value in list(row.data.items()):
|
||||
row_dict[cell_name] = self.converters[cell_name](cell_value)
|
||||
to_insert.append(row_dict)
|
||||
|
||||
@ -387,7 +387,7 @@ class SourceHTTPBase(ExternalSource):
|
||||
self.node.exec_in_container([
|
||||
"bash",
|
||||
"-c",
|
||||
"python2 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format(
|
||||
"python3 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format(
|
||||
tbl=path, schema=self._get_schema(), host=self.docker_hostname, port=self.http_port)
|
||||
], detach=True)
|
||||
self.ordered_names = structure.get_ordered_names()
|
||||
@ -573,12 +573,14 @@ class SourceAerospike(ExternalSource):
|
||||
def _flush_aerospike_db(self):
|
||||
keys = []
|
||||
|
||||
def handle_record((key, metadata, record)):
|
||||
print("Handle record {} {}".format(key, record))
|
||||
def handle_record(xxx_todo_changeme):
|
||||
(key, metadata, record) = xxx_todo_changeme
|
||||
print(("Handle record {} {}".format(key, record)))
|
||||
keys.append(key)
|
||||
|
||||
def print_record((key, metadata, record)):
|
||||
print("Print record {} {}".format(key, record))
|
||||
def print_record(xxx_todo_changeme1):
|
||||
(key, metadata, record) = xxx_todo_changeme1
|
||||
print(("Print record {} {}".format(key, record)))
|
||||
|
||||
scan = self.client.scan(self.namespace, self.set)
|
||||
scan.foreach(handle_record)
|
||||
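Python 3 removed tuple parameters in function signatures (PEP 3113), which is why 2to3 rewrites def handle_record((key, metadata, record)) into a one-argument function with an explicit unpack (xxx_todo_changeme is the 2to3 placeholder name). A hand-written equivalent, with an invented record tuple:

def handle_record(record_tuple):
    key, metadata, record = record_tuple      # unpack in the body instead of the signature
    print("Handle record {} {}".format(key, record))

handle_record((("ns", "set", "pk"), {"ttl": 0}, {"value": 1}))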
|
@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import StringIO
|
||||
import io
|
||||
import gzip
|
||||
import subprocess
|
||||
from tempfile import NamedTemporaryFile
|
||||
@ -14,7 +14,7 @@ class HDFSApi(object):
|
||||
self.http_data_port = "50075"
|
||||
self.user = user
|
||||
|
||||
def read_data(self, path):
|
||||
def read_data(self, path, universal_newlines=True):
|
||||
response = requests.get(
|
||||
"http://{host}:{port}/webhdfs/v1{path}?op=OPEN".format(host=self.host, port=self.http_proxy_port,
|
||||
path=path), allow_redirects=False)
|
||||
@ -27,7 +27,10 @@ class HDFSApi(object):
|
||||
if response_data.status_code != 200:
|
||||
response_data.raise_for_status()
|
||||
|
||||
return response_data.content
|
||||
if universal_newlines:
|
||||
return response_data.text
|
||||
else:
|
||||
return response_data.content
|
||||
|
||||
# Requests can't put file
|
||||
def _curl_to_put(self, filename, path, params):
|
||||
@ -35,12 +38,14 @@ class HDFSApi(object):
|
||||
port=self.http_data_port, path=path,
|
||||
params=params)
|
||||
cmd = "curl -s -i -X PUT -T {fname} '{url}'".format(fname=filename, url=url)
|
||||
output = subprocess.check_output(cmd, shell=True)
|
||||
output = subprocess.check_output(cmd, shell=True, universal_newlines=True)
|
||||
return output
|
||||
|
||||
def write_data(self, path, content):
|
||||
named_file = NamedTemporaryFile()
|
||||
named_file = NamedTemporaryFile(mode='wb+')
|
||||
fpath = named_file.name
|
||||
if isinstance(content, str):
|
||||
content = content.encode()
|
||||
named_file.write(content)
|
||||
named_file.flush()
|
||||
response = requests.put(
|
||||
@ -58,10 +63,12 @@ class HDFSApi(object):
|
||||
raise Exception("Can't create file on hdfs:\n {}".format(output))
|
||||
|
||||
def write_gzip_data(self, path, content):
|
||||
out = StringIO.StringIO()
|
||||
with gzip.GzipFile(fileobj=out, mode="w") as f:
|
||||
if isinstance(content, str):
|
||||
content = content.encode()
|
||||
out = io.BytesIO()
|
||||
with gzip.GzipFile(fileobj=out, mode="wb") as f:
|
||||
f.write(content)
|
||||
self.write_data(path, out.getvalue())
|
||||
|
||||
def read_gzip_data(self, path):
|
||||
return gzip.GzipFile(fileobj=StringIO.StringIO(self.read_data(path))).read()
|
||||
return gzip.GzipFile(fileobj=io.BytesIO(self.read_data(path, universal_newlines=False))).read().decode()
|
||||
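gzip works on bytes in Python 3, so the StringIO buffer becomes io.BytesIO and text is encoded before compression and decoded after decompression. A self-contained sketch of the round trip behind write_gzip_data/read_gzip_data:

import gzip
import io

def gzip_text(content):
    out = io.BytesIO()
    with gzip.GzipFile(fileobj=out, mode="wb") as f:
        f.write(content.encode())             # GzipFile expects bytes
    return out.getvalue()

def gunzip_text(blob):
    return gzip.GzipFile(fileobj=io.BytesIO(blob)).read().decode()

assert gunzip_text(gzip_text("1\thello\n")) == "1\thello\n"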
|
@ -3,7 +3,7 @@ import argparse
|
||||
import csv
|
||||
import socket
|
||||
import ssl
|
||||
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
|
||||
|
||||
# Decorator used to see if authentication works for external dictionary who use a HTTP source.
|
||||
@ -29,7 +29,7 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
||||
@check_auth
|
||||
def do_POST(self):
|
||||
ids = self.__read_and_decode_post_ids()
|
||||
print "ids=", ids
|
||||
print("ids=", ids)
|
||||
self.__send_headers()
|
||||
self.__send_data(ids)
|
||||
|
||||
@ -43,26 +43,26 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
||||
reader = csv.reader(fl, delimiter='\t')
|
||||
for row in reader:
|
||||
if not only_ids or (row[0] in only_ids):
|
||||
self.wfile.write('\t'.join(row) + '\n')
|
||||
self.wfile.write(('\t'.join(row) + '\n').encode())
|
||||
|
||||
def __read_and_decode_post_ids(self):
|
||||
data = self.__read_and_decode_post_data()
|
||||
return filter(None, data.split())
|
||||
return [_f for _f in data.split() if _f]
|
||||
|
||||
def __read_and_decode_post_data(self):
|
||||
transfer_encoding = self.headers.get("Transfer-encoding")
|
||||
decoded = "";
|
||||
if transfer_encoding == "chunked":
|
||||
while True:
|
||||
s = self.rfile.readline()
|
||||
s = self.rfile.readline().decode()
|
||||
chunk_length = int(s, 16)
|
||||
if not chunk_length:
|
||||
break
|
||||
decoded += self.rfile.read(chunk_length)
|
||||
self.rfile.readline()
|
||||
decoded += self.rfile.read(chunk_length).decode()
|
||||
self.rfile.readline().decode()
|
||||
else:
|
||||
content_length = int(self.headers.get("Content-Length", 0))
|
||||
decoded = self.rfile.read(content_length)
|
||||
decoded = self.rfile.read(content_length).decode()
|
||||
return decoded
|
||||
|
||||
if address_family == "ipv6":
|
||||
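The mock dictionary HTTP server moves from BaseHTTPServer to http.server, and because rfile/wfile now carry bytes, responses are encoded and request bodies decoded. A stripped-down handler showing just those two points (the port and behaviour are illustrative, not the test server):

from http.server import BaseHTTPRequestHandler, HTTPServer

class EchoHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(length).decode()               # rfile yields bytes
        self.send_response(200)
        self.end_headers()
        self.wfile.write(("echo: " + body + "\n").encode())   # wfile wants bytes

if __name__ == "__main__":
    HTTPServer(("localhost", 5555), EchoHandler).serve_forever()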
|
@ -183,7 +183,7 @@ class _NetworkManager:
|
||||
exit_code = self._docker_client.api.exec_inspect(handle)['ExitCode']
|
||||
|
||||
if exit_code != 0:
|
||||
print output
|
||||
print(output)
|
||||
raise subprocess.CalledProcessError(exit_code, cmd)
|
||||
|
||||
return output
|
||||
|
@ -1,14 +1,15 @@
|
||||
import difflib
|
||||
import time
|
||||
from io import IOBase
|
||||
|
||||
|
||||
class TSV:
|
||||
"""Helper to get pretty diffs between expected and actual tab-separated value files"""
|
||||
|
||||
def __init__(self, contents):
|
||||
if isinstance(contents, file):
|
||||
if isinstance(contents, IOBase):
|
||||
raw_lines = contents.readlines()
|
||||
elif isinstance(contents, str) or isinstance(contents, unicode):
|
||||
elif isinstance(contents, str) or isinstance(contents, str):
|
||||
raw_lines = contents.splitlines(True)
|
||||
elif isinstance(contents, list):
|
||||
raw_lines = ['\t'.join(map(str, l)) if isinstance(l, list) else str(l) for l in contents]
|
||||
@ -29,7 +30,7 @@ class TSV:
|
||||
return self != TSV(other)
|
||||
return self.lines != other.lines
|
||||
|
||||
def diff(self, other, n1=None, n2=None):
|
||||
def diff(self, other, n1='', n2=''):
|
||||
if not isinstance(other, TSV):
|
||||
return self.diff(TSV(other), n1=n1, n2=n2)
|
||||
return list(line.rstrip() for line in difflib.unified_diff(self.lines, other.lines, fromfile=n1, tofile=n2))[2:]
|
||||
@ -45,14 +46,14 @@ class TSV:
|
||||
def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_time=0.5, stdin=None, timeout=None,
|
||||
settings=None, user=None, ignore_error=False):
|
||||
expectation_tsv = TSV(expectation)
|
||||
for i in xrange(retry_count):
|
||||
for i in range(retry_count):
|
||||
try:
|
||||
if TSV(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,
|
||||
ignore_error=ignore_error)) == expectation_tsv:
|
||||
break
|
||||
time.sleep(sleep_time)
|
||||
except Exception as ex:
|
||||
print "assert_eq_with_retry retry {} exception {}".format(i + 1, ex)
|
||||
print(("assert_eq_with_retry retry {} exception {}".format(i + 1, ex)))
|
||||
time.sleep(sleep_time)
|
||||
else:
|
||||
val = TSV(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,
|
||||
@ -66,13 +67,13 @@ def assert_logs_contain(instance, substring):
|
||||
raise AssertionError("'{}' not found in logs".format(substring))
|
||||
|
||||
def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_time=0.5):
|
||||
for i in xrange(retry_count):
|
||||
for i in range(retry_count):
|
||||
try:
|
||||
if instance.contains_in_log(substring):
|
||||
break
|
||||
time.sleep(sleep_time)
|
||||
except Exception as ex:
|
||||
print "contains_in_log_with_retry retry {} exception {}".format(i + 1, ex)
|
||||
print("contains_in_log_with_retry retry {} exception {}".format(i + 1, ex))
|
||||
time.sleep(sleep_time)
|
||||
else:
|
||||
raise AssertionError("'{}' not found in logs".format(substring))
|
||||
|
@ -6,7 +6,7 @@ CURDIR = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
sys.path.insert(0, os.path.join(CURDIR))
|
||||
|
||||
import uexpect
|
||||
from . import uexpect
|
||||
|
||||
prompt = ':\) '
|
||||
end_of_block = r'.*\r\n.*\r\n'
|
||||
|
@ -15,7 +15,7 @@ import os
|
||||
import pty
|
||||
import re
|
||||
import time
|
||||
from Queue import Queue, Empty
|
||||
from queue import Queue, Empty
|
||||
from subprocess import Popen
|
||||
from threading import Thread, Event
|
||||
|
||||
@ -118,7 +118,7 @@ class IO(object):
|
||||
return self.write(data + eol)
|
||||
|
||||
def write(self, data):
|
||||
return os.write(self.master, data)
|
||||
return os.write(self.master, data.encode())
|
||||
|
||||
def expect(self, pattern, timeout=None, escape=False):
|
||||
self.match = None
|
||||
@ -201,7 +201,8 @@ def spawn(command):
|
||||
def reader(process, out, queue, kill_event):
|
||||
while True:
|
||||
try:
|
||||
data = os.read(out, 65536)
|
||||
# TODO: there are some issues with 1<<16 buffer size
|
||||
data = os.read(out, 1<<17).decode(errors='replace')
|
||||
queue.put(data)
|
||||
except:
|
||||
if kill_event.is_set():
|
||||
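uexpect talks to a pseudo-terminal through os.read/os.write, which exchange bytes in Python 3, hence the .encode() on writes and .decode(errors='replace') on reads. The same boundary shown with a plain pipe:

import os

r, w = os.pipe()
os.write(w, "SELECT 1\n".encode())                       # os.write() needs bytes
data = os.read(r, 1 << 16).decode(errors="replace")      # and os.read() returns bytes
print(data, end="")
os.close(r)
os.close(w)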
|
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python3
|
||||
#-*- coding: utf-8 -*-
|
||||
import subprocess
|
||||
import os
|
||||
@ -188,5 +188,5 @@ if __name__ == "__main__":
|
||||
command=args.command
|
||||
)
|
||||
|
||||
print("Running pytest container as: '" + cmd + "'.")
|
||||
print(("Running pytest container as: '" + cmd + "'."))
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
@ -371,7 +371,7 @@ def test_version_update_two_nodes(start_dynamic_cluster):
|
||||
node12.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=120)
|
||||
break
|
||||
except Exception as ex:
|
||||
print("Exception during replica sync", ex)
|
||||
print(("Exception during replica sync", ex))
|
||||
node11.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
|
||||
node12.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
|
||||
time.sleep(2 * i)
|
||||
@ -386,7 +386,7 @@ def test_version_update_two_nodes(start_dynamic_cluster):
|
||||
node12.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=120)
|
||||
break
|
||||
except Exception as ex:
|
||||
print("Exception during replica sync", ex)
|
||||
print(("Exception during replica sync", ex))
|
||||
node11.query("SYSTEM RESTART REPLICA table_with_default_granularity")
|
||||
node12.query("SYSTEM RESTART REPLICA table_with_default_granularity")
|
||||
time.sleep(2 * i)
|
||||
|
@ -50,8 +50,8 @@ def started_cluster():
|
||||
}
|
||||
}
|
||||
|
||||
for cluster_name, shards in clusters_schema.iteritems():
|
||||
for shard_name, replicas in shards.iteritems():
|
||||
for cluster_name, shards in clusters_schema.items():
|
||||
for shard_name, replicas in shards.items():
|
||||
for replica_name in replicas:
|
||||
name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
|
||||
cluster.add_instance(name,
|
||||
@ -235,16 +235,16 @@ def execute_task(task, cmd_options):
|
||||
task.start()
|
||||
|
||||
zk = cluster.get_kazoo_client('zoo1')
|
||||
print "Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])
|
||||
print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
|
||||
|
||||
try:
|
||||
zk.delete("/clickhouse-copier", recursive=True)
|
||||
except kazoo.exceptions.NoNodeError:
|
||||
print "No node /clickhouse-copier. It is Ok in first test."
|
||||
print("No node /clickhouse-copier. It is Ok in first test.")
|
||||
|
||||
zk_task_path = task.zk_task_path
|
||||
zk.ensure_path(zk_task_path)
|
||||
zk.create(zk_task_path + "/description", task.copier_task_config)
|
||||
zk.create(zk_task_path + "/description", task.copier_task_config.encode())
|
||||
|
||||
# Run cluster-copier processes on each node
|
||||
docker_api = docker.from_env().api
|
||||
@ -256,19 +256,19 @@ def execute_task(task, cmd_options):
|
||||
'--base-dir', '/var/log/clickhouse-server/copier']
|
||||
cmd += cmd_options
|
||||
|
||||
copiers = random.sample(cluster.instances.keys(), 3)
|
||||
copiers = random.sample(list(cluster.instances.keys()), 3)
|
||||
|
||||
for instance_name in copiers:
|
||||
instance = cluster.instances[instance_name]
|
||||
container = instance.get_docker_handle()
|
||||
instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"),
|
||||
"/etc/clickhouse-server/config-copier.xml")
|
||||
print "Copied copier config to {}".format(instance.name)
|
||||
print("Copied copier config to {}".format(instance.name))
|
||||
exec_id = docker_api.exec_create(container.id, cmd, stderr=True)
|
||||
output = docker_api.exec_start(exec_id).decode('utf8')
|
||||
print(output)
|
||||
copiers_exec_ids.append(exec_id)
|
||||
print "Copier for {} ({}) has started".format(instance.name, instance.ip_address)
|
||||
print("Copier for {} ({}) has started".format(instance.name, instance.ip_address))
|
||||
|
||||
# Wait for copiers stopping and check their return codes
|
||||
for exec_id, instance_name in zip(copiers_exec_ids, copiers):
|
||||
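Two Python 3 details drive this hunk: kazoo's zk.create() stores bytes, so the copier task config is .encode()d, and random.sample() no longer accepts a dict view, so the instance names are wrapped in list(). A hedged sketch (the ZooKeeper address and task payload are placeholders; it assumes the kazoo package and a reachable server):

import random
from kazoo.client import KazooClient

instances = {"s0_0_0": 1, "s0_0_1": 2, "s0_1_0": 3, "s1_0_0": 4}
copiers = random.sample(list(instances.keys()), 3)       # dict views are not sequences

zk = KazooClient(hosts="localhost:2181")                 # placeholder address
zk.start()
zk.ensure_path("/clickhouse-copier/task")
zk.create("/clickhouse-copier/task/description", "<task/>".encode())  # value must be bytes
zk.stop()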
@ -362,6 +362,6 @@ def test_no_arg(started_cluster):
|
||||
|
||||
if __name__ == '__main__':
|
||||
with contextmanager(started_cluster)() as cluster:
|
||||
for name, instance in cluster.instances.items():
|
||||
print name, instance.ip_address
|
||||
raw_input("Cluster created, press any key to destroy...")
|
||||
for name, instance in list(cluster.instances.items()):
|
||||
print(name, instance.ip_address)
|
||||
input("Cluster created, press any key to destroy...")
|
||||
|
@ -27,8 +27,8 @@ def started_cluster():
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
for cluster_name, shards in clusters_schema.iteritems():
|
||||
for shard_name, replicas in shards.iteritems():
|
||||
for cluster_name, shards in clusters_schema.items():
|
||||
for shard_name, replicas in shards.items():
|
||||
for replica_name in replicas:
|
||||
name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
|
||||
cluster.add_instance(name,
|
||||
@ -83,7 +83,7 @@ def execute_task(task, cmd_options):
|
||||
task.start()
|
||||
|
||||
zk = cluster.get_kazoo_client('zoo1')
|
||||
print "Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])
|
||||
print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
|
||||
|
||||
zk_task_path = task.zk_task_path
|
||||
zk.ensure_path(zk_task_path)
|
||||
@ -101,16 +101,16 @@ def execute_task(task, cmd_options):
|
||||
|
||||
print(cmd)
|
||||
|
||||
for instance_name, instance in cluster.instances.iteritems():
|
||||
for instance_name, instance in cluster.instances.items():
|
||||
container = instance.get_docker_handle()
|
||||
exec_id = docker_api.exec_create(container.id, cmd, stderr=True)
|
||||
docker_api.exec_start(exec_id, detach=True)
|
||||
|
||||
copiers_exec_ids.append(exec_id)
|
||||
print "Copier for {} ({}) has started".format(instance.name, instance.ip_address)
|
||||
print("Copier for {} ({}) has started".format(instance.name, instance.ip_address))
|
||||
|
||||
# Wait for copiers stopping and check their return codes
|
||||
for exec_id, instance in zip(copiers_exec_ids, cluster.instances.itervalues()):
|
||||
for exec_id, instance in zip(copiers_exec_ids, iter(cluster.instances.values())):
|
||||
while True:
|
||||
res = docker_api.exec_inspect(exec_id)
|
||||
if not res['Running']:
|
||||
@ -175,6 +175,6 @@ def test_trivial_copy_with_move_fault(started_cluster, use_sample_offset):
|
||||
|
||||
if __name__ == '__main__':
|
||||
with contextmanager(started_cluster)() as cluster:
|
||||
for name, instance in cluster.instances.items():
|
||||
print name, instance.ip_address
|
||||
raw_input("Cluster created, press any key to destroy...")
|
||||
for name, instance in list(cluster.instances.items()):
|
||||
print(name, instance.ip_address)
|
||||
input("Cluster created, press any key to destroy...")
|
||||
|
@ -26,14 +26,14 @@ def test_exception_message(started_cluster):
|
||||
assert node1.query("select number from nums order by number") == "0\n1\n"
|
||||
|
||||
def node_busy(_):
|
||||
for i in xrange(10):
|
||||
for i in range(10):
|
||||
node1.query("select sleep(2)", user='default')
|
||||
|
||||
busy_pool = Pool(3)
|
||||
busy_pool.map_async(node_busy, xrange(3))
|
||||
busy_pool.map_async(node_busy, range(3))
|
||||
time.sleep(1) # wait a little until polling starts
|
||||
try:
|
||||
assert node2.query("select number from remote('node1', 'default', 'nums')", user='good') == "0\n1\n"
|
||||
except Exception as ex:
|
||||
print ex.message
|
||||
print(ex)
|
||||
assert False, "Exception thrown while max_concurrent_queries_for_user is not exceeded"
|
||||
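A related Python 3 pitfall in this test: exceptions no longer carry a .message attribute, so the converted handler prints the exception object itself; str(ex) gives the same text. For example:

try:
    raise RuntimeError("too many simultaneous queries")  # stand-in for the real failure
except Exception as ex:
    print(ex)
    print(str(ex))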
|
@ -66,7 +66,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
|
||||
node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0")
|
||||
|
||||
while count_running_mutations(node1, "test_ttl") < 6:
|
||||
print "Mutations count", count_running_mutations(node1, "test_ttl")
|
||||
print("Mutations count", count_running_mutations(node1, "test_ttl"))
|
||||
assert count_ttl_merges_in_background_pool(node1, "test_ttl") == 0
|
||||
time.sleep(0.5)
|
||||
|
||||
@ -74,7 +74,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
|
||||
|
||||
rows_count = []
|
||||
while count_running_mutations(node1, "test_ttl") == 6:
|
||||
print "Mutations count after start TTL", count_running_mutations(node1, "test_ttl")
|
||||
print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl"))
|
||||
rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip()))
|
||||
time.sleep(0.5)
|
||||
|
||||
|
@ -19,7 +19,7 @@ node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from
|
||||
def start_cluster():
|
||||
try:
|
||||
def create_zk_roots(zk):
|
||||
zk.create(path="/setting/max_query_size", value="77777", makepath=True)
|
||||
zk.create(path="/setting/max_query_size", value=b"77777", makepath=True)
|
||||
|
||||
cluster.add_zookeeper_startup_command(create_zk_roots)
|
||||
|
||||
|
@ -33,7 +33,7 @@ def start_cluster():
|
||||
initialize_database([node1, node2], 1)
|
||||
yield cluster
|
||||
except Exception as ex:
|
||||
print ex
|
||||
print(ex)
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
@ -29,7 +29,7 @@ def start_cluster():
|
||||
fill_nodes([node1, node2], 1)
|
||||
yield cluster
|
||||
except Exception as ex:
|
||||
print ex
|
||||
print(ex)
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
@ -83,11 +83,11 @@ def test(started_cluster):
|
||||
assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
|
||||
|
||||
with pytest.raises(Exception):
|
||||
print node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5)
|
||||
print(node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with contextmanager(started_cluster)() as cluster:
|
||||
for name, instance in cluster.instances.items():
|
||||
print name, instance.ip_address
|
||||
raw_input("Cluster created, press any key to destroy...")
|
||||
for name, instance in list(cluster.instances.items()):
|
||||
print(name, instance.ip_address)
|
||||
input("Cluster created, press any key to destroy...")
|
||||
|
@ -98,12 +98,12 @@ SELECT sum(x) FROM distributed SETTINGS
|
||||
|
||||
# If we forbid stale replicas, the query must fail.
|
||||
with pytest.raises(Exception):
|
||||
print instance_with_dist_table.query('''
|
||||
print(instance_with_dist_table.query('''
|
||||
SELECT count() FROM distributed SETTINGS
|
||||
load_balancing='in_order',
|
||||
max_replica_delay_for_distributed_queries=1,
|
||||
fallback_to_stale_replicas_for_distributed_queries=0
|
||||
''')
|
||||
'''))
|
||||
|
||||
# Now partition off the remote replica of the local shard and test that failover still works.
|
||||
pm.partition_instances(node_1_1, node_1_2, port=9000)
|
||||
|
@ -113,12 +113,12 @@ class SimpleLayoutTester:
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
if layout_name not in self.layout_to_dictionary:
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
@ -170,12 +170,12 @@ class ComplexLayoutTester:
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
if layout_name not in self.layout_to_dictionary:
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
@ -213,13 +213,13 @@ class RangedLayoutTester:
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
for _, dictionary in list(self.layout_to_dictionary.items()):
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
if layout_name not in self.layout_to_dictionary:
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
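dict.has_key() no longer exists in Python 3; membership tests use the in / not in operators, as the layout testers now do. A short illustration:

layout_to_dictionary = {"flat": object(), "hashed": object()}
for layout_name in ("flat", "cache"):
    if layout_name not in layout_to_dictionary:          # Python 2: has_key(layout_name)
        print("Source doesn't support layout: {}".format(layout_name))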
|
@ -42,7 +42,7 @@ def test_memory_consumption(cluster):
|
||||
allocated_first = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||
|
||||
alloc_array = []
|
||||
for i in xrange(5):
|
||||
for i in range(5):
|
||||
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
||||
|
||||
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||
@ -51,7 +51,7 @@ def test_memory_consumption(cluster):
|
||||
# size doesn't grow
|
||||
assert all(allocated_first >= a for a in alloc_array)
|
||||
|
||||
for i in xrange(5):
|
||||
for i in range(5):
|
||||
node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
|
||||
|
||||
allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())
|
||||
|
@ -106,7 +106,7 @@ def setup_module(module):
|
||||
for source in sources:
|
||||
for layout in LAYOUTS:
|
||||
if not source.compatible_with_layout(layout):
|
||||
print "Source", source.name, "incompatible with layout", layout.name
|
||||
print("Source", source.name, "incompatible with layout", layout.name)
|
||||
continue
|
||||
|
||||
fields = KEY_FIELDS[layout.layout_type] + [field]
|
||||
@ -128,9 +128,9 @@ def started_cluster():
|
||||
assert len(FIELDS) == len(VALUES)
|
||||
for dicts in DICTIONARIES:
|
||||
for dictionary in dicts:
|
||||
print "Preparing", dictionary.name
|
||||
print("Preparing", dictionary.name)
|
||||
dictionary.prepare_source(cluster)
|
||||
print "Prepared"
|
||||
print("Prepared")
|
||||
|
||||
yield cluster
|
||||
|
||||
@ -138,9 +138,9 @@ def started_cluster():
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("id", range(len(FIELDS)))
|
||||
@pytest.mark.parametrize("id", list(range(len(FIELDS))))
|
||||
def test_redis_dictionaries(started_cluster, id):
|
||||
print 'id:', id
|
||||
print('id:', id)
|
||||
|
||||
dicts = DICTIONARIES[id]
|
||||
values = VALUES[id]
|
||||
@ -173,7 +173,7 @@ def test_redis_dictionaries(started_cluster, id):
|
||||
node.query("system reload dictionary {}".format(dct.name))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
print query
|
||||
print(query)
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
# Checks, that dictionaries can be reloaded.
|
||||
|
@ -1,5 +1,6 @@
|
||||
import difflib
|
||||
import os
|
||||
from functools import reduce
|
||||
|
||||
files = ['key_simple.tsv', 'key_complex_integers.tsv', 'key_complex_mixed.tsv']
|
||||
|
||||
@ -78,8 +79,9 @@ def generate_dictionaries(path, structure):
|
||||
'''
|
||||
|
||||
dictionary_skeleton = \
|
||||
dictionary_skeleton % reduce(lambda xml, (type, default): xml + attribute_skeleton % (type, type, default),
|
||||
zip(types, implicit_defaults), '')
|
||||
dictionary_skeleton % reduce(
|
||||
lambda xml, type_default: xml + attribute_skeleton % (type_default[0], type_default[0], type_default[1]),
|
||||
list(zip(types, implicit_defaults)), '')
|
||||
|
||||
source_clickhouse = '''
|
||||
<clickhouse>
|
||||
@ -195,7 +197,7 @@ class DictionaryTestTable:
|
||||
String_ String,
|
||||
Date_ Date, DateTime_ DateTime, Parent UInt64'''
|
||||
|
||||
self.names_and_types = map(str.split, self.structure.split(','))
|
||||
self.names_and_types = list(map(str.split, self.structure.split(',')))
|
||||
self.keys_names_and_types = self.names_and_types[:6]
|
||||
self.values_names_and_types = self.names_and_types[6:]
|
||||
self.source_file_name = source_file_name
|
||||
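map() and zip() return one-shot iterators in Python 3, so results that are sliced or reused get wrapped in list(), as names_and_types is above. A compact example of why:

structure = "id UInt64, UInt8_ UInt8, String_ String"
names_and_types = list(map(str.split, structure.split(',')))
print(names_and_types[:1], names_and_types[1:])          # slicing needs a real list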
@ -223,10 +225,10 @@ class DictionaryTestTable:
|
||||
def make_tuple(line):
|
||||
row = tuple(line.split('\t'))
|
||||
self.rows.append(row)
|
||||
return '(' + ','.join(map(wrap_value, zip(row, types))) + ')'
|
||||
return '(' + ','.join(map(wrap_value, list(zip(row, types)))) + ')'
|
||||
|
||||
values = ','.join(map(make_tuple, lines))
|
||||
print query % (self.structure, values)
|
||||
print(query % (self.structure, values))
|
||||
instance.query(query % (self.structure, values))
|
||||
|
||||
def get_structure_for_keys(self, keys, enable_parent=True):
|
||||
@ -245,7 +247,7 @@ class DictionaryTestTable:
|
||||
for row in rows:
|
||||
key = '\t'.join(row[:len(keys)])
|
||||
value = '\t'.join(row[len(keys):])
|
||||
if key in lines_map.keys():
|
||||
if key in list(lines_map.keys()):
|
||||
pattern_value = lines_map[key]
|
||||
del lines_map[key]
|
||||
if not value == pattern_value:
|
||||
@ -256,7 +258,7 @@ class DictionaryTestTable:
|
||||
diff.append((key + '\t' + value, ''))
|
||||
|
||||
if add_not_found_rows:
|
||||
for key, value in lines_map.items():
|
||||
for key, value in list(lines_map.items()):
|
||||
diff.append(('', key + '\t' + value))
|
||||
|
||||
if not diff:
|
||||
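iteritems()/itervalues()/iterkeys() are gone; .items() and friends return live views, so the converted code wraps them in list() wherever the dict is mutated while iterating, or where the result is indexed or sampled. A small sketch of the pattern, with made-up rows:

lines_map = {"1": "a", "2": "b", "3": "c"}
rows = [("1", "a"), ("2", "x")]
diff = []
for key, value in rows:
    if key in lines_map:
        if lines_map[key] != value:
            diff.append((value, lines_map[key]))
        del lines_map[key]
for key, value in list(lines_map.items()):               # list() snapshots the view, as in the converted code
    diff.append(("", key + "\t" + value))
print(diff)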
|
@ -4,7 +4,7 @@ import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import TSV
|
||||
|
||||
from generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable
|
||||
from .generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
@ -32,7 +32,7 @@ def started_cluster():
|
||||
cluster.start()
|
||||
test_table.create_clickhouse_source(instance)
|
||||
for line in TSV(instance.query('select name from system.dictionaries')).lines:
|
||||
print line,
|
||||
print(line, end=' ')
|
||||
|
||||
yield cluster
|
||||
|
||||
@ -72,7 +72,7 @@ def test_select_all(dictionary_structure):
|
||||
result = TSV(query('select * from test.{0}'.format(name)))
|
||||
|
||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
||||
print test_table.process_diff(diff)
|
||||
print(test_table.process_diff(diff))
|
||||
assert not diff
|
||||
|
||||
|
||||
@ -103,7 +103,7 @@ def test_select_all_from_cached(cached_dictionary_structure):
|
||||
for i in range(4):
|
||||
result = TSV(query('select * from test.{0}'.format(name)))
|
||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=False)
|
||||
print test_table.process_diff(diff)
|
||||
print(test_table.process_diff(diff))
|
||||
assert not diff
|
||||
|
||||
key = []
|
||||
@ -120,5 +120,5 @@ def test_select_all_from_cached(cached_dictionary_structure):
|
||||
|
||||
result = TSV(query('select * from test.{0}'.format(name)))
|
||||
diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True)
|
||||
print test_table.process_diff(diff)
|
||||
print(test_table.process_diff(diff))
|
||||
assert not diff
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import time
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import os
|
||||
import random
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import time
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import time
|
||||
|
||||
|
@ -3,7 +3,7 @@ import argparse
|
||||
import csv
|
||||
import socket
|
||||
import ssl
|
||||
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
|
||||
|
||||
# Decorator used to see if authentication works for external dictionary who use a HTTP source.
|
||||
@ -29,7 +29,7 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
||||
@check_auth
|
||||
def do_POST(self):
|
||||
ids = self.__read_and_decode_post_ids()
|
||||
print "ids=", ids
|
||||
print("ids=", ids)
|
||||
self.__send_headers()
|
||||
self.__send_data(ids)
|
||||
|
||||
@ -43,11 +43,11 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
||||
reader = csv.reader(fl, delimiter='\t')
|
||||
for row in reader:
|
||||
if not only_ids or (row[0] in only_ids):
|
||||
self.wfile.write('\t'.join(row) + '\n')
|
||||
self.wfile.write(('\t'.join(row) + '\n').encode())
|
||||
|
||||
def __read_and_decode_post_ids(self):
|
||||
data = self.__read_and_decode_post_data()
|
||||
return filter(None, data.split())
|
||||
return [_f for _f in data.split() if _f]
|
||||
|
||||
def __read_and_decode_post_data(self):
|
||||
transfer_encoding = self.headers.get("Transfer-encoding")
|
||||
@ -58,11 +58,11 @@ def start_server(server_address, data_path, schema, cert_path, address_family):
|
||||
chunk_length = int(s, 16)
|
||||
if not chunk_length:
|
||||
break
|
||||
decoded += self.rfile.read(chunk_length)
|
||||
decoded += self.rfile.read(chunk_length).decode()
|
||||
self.rfile.readline()
|
||||
else:
|
||||
content_length = int(self.headers.get("Content-Length", 0))
|
||||
decoded = self.rfile.read(content_length)
|
||||
decoded = self.rfile.read(content_length).decode()
|
||||
return decoded
|
||||
|
||||
if address_family == "ipv6":
|
||||
|
@ -26,7 +26,7 @@ def prepare():
|
||||
node.exec_in_container([
|
||||
"bash",
|
||||
"-c",
|
||||
"python2 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format(
|
||||
"python3 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format(
|
||||
tbl=path)
|
||||
], detach=True)
|
||||
|
||||
|
@ -33,5 +33,5 @@ def test_different_types(cluster):
|
||||
|
||||
def test_select_by_type(cluster):
|
||||
node = cluster.instances["node"]
|
||||
for name, disk_type in disk_types.items():
|
||||
for name, disk_type in list(disk_types.items()):
|
||||
assert node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") == name + "\n"
|
||||
|
@ -26,12 +26,12 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
||||
main_configs += [os.path.join(self.test_config_dir, f) for f in
|
||||
["server.crt", "server.key", "dhparam.pem", "config.d/ssl_conf.xml"]]
|
||||
|
||||
for i in xrange(4):
|
||||
for i in range(4):
|
||||
self.add_instance(
|
||||
'ch{}'.format(i + 1),
|
||||
main_configs=main_configs,
|
||||
user_configs=user_configs,
|
||||
macros={"layer": 0, "shard": i / 2 + 1, "replica": i % 2 + 1},
|
||||
macros={"layer": 0, "shard": i // 2 + 1, "replica": i % 2 + 1},
|
||||
with_zookeeper=True)
|
||||
|
||||
self.start()
|
||||
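Under Python 3, / is true division and always produces a float, so a shard number computed as i / 2 + 1 would come out as 1.0, 1.5, and so on; the macros now use floor division //. For example:

for i in range(4):
    shard, replica = i // 2 + 1, i % 2 + 1
    assert isinstance(shard, int)                        # // keeps the shard index integral
    print("shard", shard, "replica", replica, "py2-style /", i / 2 + 1)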
@ -62,11 +62,11 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
||||
self.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test ON CLUSTER 'cluster'")
|
||||
|
||||
except Exception as e:
|
||||
print e
|
||||
print(e)
|
||||
raise
|
||||
|
||||
def sync_replicas(self, table, timeout=5):
|
||||
for instance in self.instances.values():
|
||||
for instance in list(self.instances.values()):
|
||||
instance.query("SYSTEM SYNC REPLICA {}".format(table), timeout=timeout)
|
||||
|
||||
def check_all_hosts_successfully_executed(self, tsv_content, num_hosts=None):
|
||||
@ -90,7 +90,7 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
||||
def replace_domains_to_ip_addresses_in_cluster_config(self, instances_to_replace):
|
||||
clusters_config = open(p.join(self.base_dir, '{}/config.d/clusters.xml'.format(self.test_config_dir))).read()
|
||||
|
||||
for inst_name, inst in self.instances.items():
|
||||
for inst_name, inst in list(self.instances.items()):
|
||||
clusters_config = clusters_config.replace(inst_name, str(inst.ip_address))
|
||||
|
||||
for inst_name in instances_to_replace:
|
||||
@ -113,7 +113,7 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster):
|
||||
Make retries in case of UNKNOWN_STATUS_OF_INSERT or zkutil::KeeperException errors
|
||||
"""
|
||||
|
||||
for i in xrange(100):
|
||||
for i in range(100):
|
||||
try:
|
||||
instance.query(query_insert)
|
||||
return
|
||||
|
@ -27,7 +27,7 @@ def test_cluster(request):
|
||||
|
||||
# Check query log to ensure that DDL queries are not executed twice
|
||||
time.sleep(1.5)
|
||||
for instance in cluster.instances.values():
|
||||
for instance in list(cluster.instances.values()):
|
||||
cluster.ddl_check_there_are_no_dublicates(instance)
|
||||
|
||||
cluster.pm_random_drops.heal_all()
|
||||
@ -133,12 +133,12 @@ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER '{cluster}' (p Date, i Int64,
|
||||
ENGINE = Distributed('{cluster}', default, merge, i)
|
||||
""")
|
||||
|
||||
for i in xrange(0, 4, 2):
|
||||
for i in range(0, 4, 2):
|
||||
k = (i / 2) * 2
|
||||
test_cluster.instances['ch{}'.format(i + 1)].query("INSERT INTO merge (i) VALUES ({})({})".format(k, k + 1))
|
||||
|
||||
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
||||
''.join(['{}\n'.format(x) for x in xrange(4)]))
|
||||
''.join(['{}\n'.format(x) for x in range(4)]))
|
||||
|
||||
time.sleep(5)
|
||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' MODIFY COLUMN i Int64")
|
||||
@ -147,19 +147,19 @@ ENGINE = Distributed('{cluster}', default, merge, i)
|
||||
"ALTER TABLE merge ON CLUSTER '{cluster}' ADD COLUMN s String DEFAULT toString(i) FORMAT TSV")
|
||||
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||
|
||||
for i in xrange(0, 4, 2):
|
||||
for i in range(0, 4, 2):
|
||||
k = (i / 2) * 2 + 4
|
||||
test_cluster.instances['ch{}'.format(i + 1)].query(
|
||||
"INSERT INTO merge (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
||||
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(8)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(8)]))
|
||||
|
||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' DETACH PARTITION 197002")
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE merge ON CLUSTER '{cluster}'")
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER '{cluster}'")
|
||||
@ -170,7 +170,7 @@ def test_macro(test_cluster):
|
||||
instance = test_cluster.instances['ch2']
|
||||
test_cluster.ddl_check_query(instance, "CREATE TABLE tab ON CLUSTER '{cluster}' (value UInt8) ENGINE = Memory")
|
||||
|
||||
for i in xrange(4):
|
||||
for i in range(4):
|
||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||
"INSERT INTO tab VALUES ({})".format(i))
|
||||
|
||||
@ -359,6 +359,6 @@ def test_replicated_without_arguments(test_cluster):
|
||||
|
||||
if __name__ == '__main__':
|
||||
with contextmanager(test_cluster)() as ctx_cluster:
|
||||
for name, instance in ctx_cluster.instances.items():
|
||||
print name, instance.ip_address
|
||||
raw_input("Cluster created, press any key to destroy...")
|
||||
for name, instance in list(ctx_cluster.instances.items()):
|
||||
print(name, instance.ip_address)
|
||||
input("Cluster created, press any key to destroy...")
|
||||
|
@ -26,7 +26,7 @@ def test_cluster(request):
|
||||
|
||||
# Check query log to ensure that DDL queries are not executed twice
|
||||
time.sleep(1.5)
|
||||
for instance in cluster.instances.values():
|
||||
for instance in list(cluster.instances.values()):
|
||||
cluster.ddl_check_there_are_no_dublicates(instance)
|
||||
|
||||
cluster.pm_random_drops.heal_all()
|
||||
@ -59,36 +59,36 @@ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER cluster (p Date, i Int64, s S
|
||||
ENGINE = Distributed(cluster, default, merge_for_alter, i)
|
||||
""")
|
||||
|
||||
for i in xrange(4):
|
||||
k = (i / 2) * 2
|
||||
for i in range(4):
|
||||
k = (i // 2) * 2
|
||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||
"INSERT INTO merge_for_alter (i) VALUES ({})({})".format(k, k + 1))
|
||||
|
||||
test_cluster.sync_replicas("merge_for_alter")
|
||||
|
||||
assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV(
|
||||
''.join(['{}\n'.format(x) for x in xrange(4)]))
|
||||
''.join(['{}\n'.format(x) for x in range(4)]))
|
||||
|
||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64")
|
||||
test_cluster.ddl_check_query(instance,
|
||||
"ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)")
|
||||
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||
|
||||
for i in xrange(4):
|
||||
k = (i / 2) * 2 + 4
|
||||
for i in range(4):
|
||||
k = (i // 2) * 2 + 4
|
||||
test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)],
|
||||
"INSERT INTO merge_for_alter (p, i) VALUES (31, {})(31, {})".format(k, k + 1))
|
||||
|
||||
test_cluster.sync_replicas("merge_for_alter")
|
||||
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(8)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(8)]))
|
||||
|
||||
test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster DETACH PARTITION 197002")
|
||||
assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in xrange(4)]))
|
||||
''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))
|
||||
|
||||
test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster")
|
||||
|
||||
|
@ -25,7 +25,7 @@ users = pytest.mark.parametrize('user,password', [
|
||||
])
|
||||
|
||||
def bootstrap():
|
||||
for n in cluster.instances.values():
|
||||
for n in list(cluster.instances.values()):
|
||||
n.query('DROP TABLE IF EXISTS data')
|
||||
n.query('DROP TABLE IF EXISTS dist')
|
||||
n.query('CREATE TABLE data (key Int) Engine=Memory()')
|
||||
|
@ -18,7 +18,7 @@ queries = nodes * 5
|
||||
|
||||
|
||||
def bootstrap():
|
||||
for n in cluster.instances.values():
|
||||
for n in list(cluster.instances.values()):
|
||||
# At startup, server loads configuration files.
|
||||
#
|
||||
# However ConfigReloader does not know about already loaded files
|
||||
@ -90,7 +90,7 @@ def get_node(query_node, table='dist', *args, **kwargs):
|
||||
|
||||
query_node.query('SELECT * FROM ' + table, *args, **kwargs)
|
||||
|
||||
for n in cluster.instances.values():
|
||||
for n in list(cluster.instances.values()):
|
||||
n.query('SYSTEM FLUSH LOGS')
|
||||
|
||||
rows = query_node.query("""
|
||||
|
@ -1,7 +1,7 @@
|
||||
# This test is a subset of the 01223_dist_on_dist.
|
||||
# (just in case, with real separate instances).
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
@ -51,7 +51,7 @@ def started_cluster():
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("node", NODES.values())
|
||||
@pytest.mark.parametrize("node", list(NODES.values()))
|
||||
@pytest.mark.parametrize("source",
|
||||
["distributed_over_distributed_table", "cluster('test_cluster', default, distributed_table)"])
|
||||
class TestDistributedOverDistributedSuite:
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import sys
|
||||
import time
|
||||
@ -9,6 +9,9 @@ from helpers.uclient import client, prompt, end_of_block
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
# log = sys.stdout
|
||||
log = None
|
||||
|
||||
NODES = {'node' + str(i): cluster.add_instance(
|
||||
'node' + str(i),
|
||||
main_configs=['configs/remote_servers.xml'],
|
||||
@ -63,12 +66,11 @@ def poll_query(node, query, expected, timeout):
|
||||
pass
|
||||
assert node.query(query) == expected
|
||||
|
||||
@pytest.mark.parametrize("node", NODES.values()[:1])
|
||||
@pytest.mark.parametrize("node", list(NODES.values())[:1])
|
||||
@pytest.mark.parametrize("source", ["lv_over_distributed_table"])
|
||||
class TestLiveViewOverDistributedSuite:
|
||||
def test_distributed_over_live_view_order_by_node(self, started_cluster, node, source):
|
||||
log = sys.stdout
|
||||
node0, node1 = NODES.values()
|
||||
node0, node1 = list(NODES.values())
|
||||
|
||||
select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV"
|
||||
select_query_dist_table = "SELECT * FROM distributed_table ORDER BY node, key FORMAT CSV"
|
||||
@ -118,8 +120,7 @@ class TestLiveViewOverDistributedSuite:
|
||||
client1.expect(prompt)
|
||||
|
||||
def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source):
|
||||
log = sys.stdout
|
||||
node0, node1 = NODES.values()
|
||||
node0, node1 = list(NODES.values())
|
||||
|
||||
select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV"
|
||||
select_count_query = "SELECT count() FROM distributed_over_lv"
|
||||
@ -160,8 +161,7 @@ class TestLiveViewOverDistributedSuite:
|
||||
client1.expect(prompt)
|
||||
|
||||
def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source):
|
||||
log = sys.stdout
|
||||
node0, node1 = NODES.values()
|
||||
node0, node1 = list(NODES.values())
|
||||
|
||||
select_query = "SELECT node, SUM(value) FROM distributed_over_lv GROUP BY node ORDER BY node FORMAT CSV"
|
||||
|
||||
@ -204,8 +204,7 @@ class TestLiveViewOverDistributedSuite:
|
||||
client1.expect(prompt)
|
||||
|
||||
def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source):
|
||||
log = sys.stdout
|
||||
node0, node1 = NODES.values()
|
||||
node0, node1 = list(NODES.values())
|
||||
|
||||
select_query = "SELECT key, SUM(value) FROM distributed_over_lv GROUP BY key ORDER BY key FORMAT CSV"
|
||||
|
||||
@ -249,8 +248,7 @@ class TestLiveViewOverDistributedSuite:
|
||||
client1.expect(prompt)
|
||||
|
||||
def test_distributed_over_live_view_sum(self, started_cluster, node, source):
|
||||
log = sys.stdout
|
||||
node0, node1 = NODES.values()
|
||||
node0, node1 = list(NODES.values())
|
||||
|
||||
with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \
|
||||
client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2:
|
||||
|
@ -103,7 +103,7 @@ def started_cluster(request):
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
for node_id, node in NODES.items():
|
||||
for node_id, node in list(NODES.items()):
|
||||
node.query(CREATE_TABLES_SQL)
|
||||
node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id))
|
||||
|
||||
@ -155,7 +155,7 @@ def test_reconnect(started_cluster, node_name, first_user, query_base):
|
||||
|
||||
with PartitionManager() as pm:
|
||||
# Break the connection.
|
||||
pm.partition_instances(*NODES.values())
|
||||
pm.partition_instances(*list(NODES.values()))
|
||||
|
||||
# Now it shouldn't:
|
||||
_check_timeout_and_exception(node, first_user, query_base, query)
|
||||
|
@ -65,7 +65,7 @@ def start_cluster():
|
||||
yield cluster
|
||||
|
||||
except Exception as ex:
|
||||
print ex
|
||||
print(ex)
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import pytest
|
||||
from helpers.client import QueryRuntimeException
|
||||
|
@ -3,7 +3,7 @@ import logging
|
||||
|
||||
import avro.schema
|
||||
import pytest
|
||||
from confluent.schemaregistry.serializers import MessageSerializer
|
||||
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
|
||||
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
|
||||
|
||||
logging.getLogger().setLevel(logging.INFO)
|
||||
|
@ -226,8 +226,8 @@ def test_introspection():
|
||||
|
||||
assert instance.query(
|
||||
"SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") == \
|
||||
TSV([["A", "\N", "SELECT", "test", "table", "\N", 0, 0],
|
||||
["B", "\N", "CREATE", "\N", "\N", "\N", 0, 1]])
|
||||
TSV([["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0],
|
||||
["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1]])
|
||||
|
||||
|
||||
def test_current_database():
|
||||
|
@ -301,7 +301,7 @@ CREATE TABLE test.graphite2
|
||||
"AND table='graphite2'"))
|
||||
if parts == 1:
|
||||
break
|
||||
print('Parts', parts)
|
||||
print(('Parts', parts))
|
||||
|
||||
assert TSV(
|
||||
q("SELECT value, timestamp, date, updated FROM test.graphite2")
|
||||
|
@ -35,7 +35,7 @@ def cluster_without_dns_cache_update():
|
||||
yield cluster
|
||||
|
||||
except Exception as ex:
|
||||
print ex
|
||||
print(ex)
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
@ -90,7 +90,7 @@ def cluster_with_dns_cache_update():
|
||||
yield cluster
|
||||
|
||||
except Exception as ex:
|
||||
print ex
|
||||
print(ex)
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
@ -117,7 +117,7 @@ def test_ip_change_update_dns_cache(cluster_with_dns_cache_update):
|
||||
curl_result = node4.exec_in_container(["bash", "-c", "curl -s 'node3:8123'"])
|
||||
assert curl_result == 'Ok.\n'
|
||||
cat_resolv = node4.exec_in_container(["bash", "-c", "cat /etc/resolv.conf"])
|
||||
print("RESOLV {}".format(cat_resolv))
|
||||
print(("RESOLV {}".format(cat_resolv)))
|
||||
|
||||
assert_eq_with_retry(node4, "SELECT * FROM remote('node3', 'system', 'one')", "0", sleep_time=0.5)
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
import contextlib
|
||||
import os
|
||||
import urllib
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
|
||||
@ -22,7 +22,7 @@ class SimpleCluster:
|
||||
def test_dynamic_query_handler():
|
||||
with contextlib.closing(
|
||||
SimpleCluster(ClickHouseCluster(__file__), "dynamic_handler", "test_dynamic_handler")) as cluster:
|
||||
test_query = urllib.quote_plus('SELECT * FROM system.settings WHERE name = \'max_threads\'')
|
||||
test_query = urllib.parse.quote_plus('SELECT * FROM system.settings WHERE name = \'max_threads\'')
|
||||
|
||||
assert 404 == cluster.instance.http_request('?max_threads=1', method='GET', headers={'XXX': 'xxx'}).status_code
|
||||
|
||||
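Python 3 split the old urllib module: quoting helpers now live in urllib.parse and URL opening in urllib.request, hence the changed import and the urllib.parse.quote_plus call. A one-liner of the pattern:

import urllib.parse

test_query = urllib.parse.quote_plus("SELECT * FROM system.settings WHERE name = 'max_threads'")
print(test_query)                                        # spaces become '+', quotes are percent-encoded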
@ -54,11 +54,11 @@ def test_predefined_query_handler():
|
||||
assert 500 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='GET',
|
||||
headers={'XXX': 'xxx'}).status_code
|
||||
|
||||
assert 'max_threads\t1\n' == cluster.instance.http_request(
|
||||
assert b'max_threads\t1\n' == cluster.instance.http_request(
|
||||
'test_predefined_handler_get?max_threads=1&setting_name=max_threads', method='GET',
|
||||
headers={'XXX': 'xxx'}).content
|
||||
|
||||
assert 'max_threads\t1\nmax_alter_threads\t1\n' == cluster.instance.http_request(
|
||||
assert b'max_threads\t1\nmax_alter_threads\t1\n' == cluster.instance.http_request(
|
||||
'query_param_with_url/max_threads?max_threads=1&max_alter_threads=1',
|
||||
headers={'XXX': 'max_alter_threads'}).content
|
||||
|
||||
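requests exposes a response body both as bytes (.content) and as decoded text (.text); the assertions now compare .content against bytes literals (b'...'). A minimal sketch against a hypothetical local ClickHouse HTTP endpoint:

import requests

r = requests.get("http://127.0.0.1:8123/?query=SELECT+1")   # placeholder URL
assert r.content == b"1\n"                                   # raw bytes
assert r.text == "1\n"                                       # decoded str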
@ -79,7 +79,7 @@ def test_fixed_static_handler():
|
||||
assert 'text/html; charset=UTF-8' == \
|
||||
cluster.instance.http_request('test_get_fixed_static_handler', method='GET',
|
||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||
assert 'Test get static handler and fix content' == cluster.instance.http_request(
|
||||
assert b'Test get static handler and fix content' == cluster.instance.http_request(
|
||||
'test_get_fixed_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||
|
||||
|
||||
@ -100,7 +100,7 @@ def test_config_static_handler():
|
||||
assert 'text/plain; charset=UTF-8' == \
|
||||
cluster.instance.http_request('test_get_config_static_handler', method='GET',
|
||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||
assert 'Test get static handler and config content' == cluster.instance.http_request(
|
||||
assert b'Test get static handler and config content' == cluster.instance.http_request(
|
||||
'test_get_config_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||
|
||||
|
||||
@ -126,7 +126,7 @@ def test_absolute_path_static_handler():
|
||||
assert 'text/html; charset=UTF-8' == \
|
||||
cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET',
|
||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||
assert '<html><body>Absolute Path File</body></html>\n' == cluster.instance.http_request(
|
||||
assert b'<html><body>Absolute Path File</body></html>\n' == cluster.instance.http_request(
|
||||
'test_get_absolute_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||
|
||||
|
||||
@ -152,7 +152,7 @@ def test_relative_path_static_handler():
|
||||
assert 'text/html; charset=UTF-8' == \
|
||||
cluster.instance.http_request('test_get_relative_path_static_handler', method='GET',
|
||||
headers={'XXX': 'xxx'}).headers['Content-Type']
|
||||
assert '<html><body>Relative Path File</body></html>\n' == cluster.instance.http_request(
|
||||
assert b'<html><body>Relative Path File</body></html>\n' == cluster.instance.http_request(
|
||||
'test_get_relative_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content
|
||||
|
||||
|
||||
@ -160,19 +160,19 @@ def test_defaults_http_handlers():
|
||||
with contextlib.closing(
|
||||
SimpleCluster(ClickHouseCluster(__file__), "defaults_handlers", "test_defaults_handlers")) as cluster:
|
||||
assert 200 == cluster.instance.http_request('', method='GET').status_code
|
||||
assert 'Default server response' == cluster.instance.http_request('', method='GET').content
|
||||
assert b'Default server response' == cluster.instance.http_request('', method='GET').content
|
||||
|
||||
assert 200 == cluster.instance.http_request('ping', method='GET').status_code
|
||||
assert 'Ok.\n' == cluster.instance.http_request('ping', method='GET').content
|
||||
assert b'Ok.\n' == cluster.instance.http_request('ping', method='GET').content
|
||||
|
||||
assert 200 == cluster.instance.http_request('replicas_status', method='get').status_code
|
||||
assert 'Ok.\n' == cluster.instance.http_request('replicas_status', method='get').content
|
||||
assert b'Ok.\n' == cluster.instance.http_request('replicas_status', method='get').content
|
||||
|
||||
assert 200 == cluster.instance.http_request('replicas_status?verbose=1', method='get').status_code
|
||||
assert '' == cluster.instance.http_request('replicas_status?verbose=1', method='get').content
|
||||
assert b'' == cluster.instance.http_request('replicas_status?verbose=1', method='get').content
|
||||
|
||||
assert 200 == cluster.instance.http_request('?query=SELECT+1', method='GET').status_code
|
||||
assert '1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content
|
||||
assert b'1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content
|
||||
|
||||
|
||||
def test_prometheus_handler():
|
||||
@ -186,7 +186,7 @@ def test_prometheus_handler():
|
||||
headers={'XXX': 'xxx'}).status_code
|
||||
|
||||
assert 200 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).status_code
|
||||
assert 'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET',
|
||||
assert b'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET',
|
||||
headers={'XXX': 'xxx'}).content
|
||||
|
||||
|
||||
@ -203,5 +203,5 @@ def test_replicas_status_handler():
|
||||
|
||||
assert 200 == cluster.instance.http_request('test_replicas_status', method='GET',
|
||||
headers={'XXX': 'xxx'}).status_code
|
||||
assert 'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET',
|
||||
assert b'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET',
|
||||
headers={'XXX': 'xxx'}).content
|
||||
|
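A note on the pattern above (illustration only, not part of the diff): under Python 3 an HTTP response body such as .content is bytes, so the expected values become byte literals, or the body is decoded before comparing against text. A minimal sketch:

# Python 3 separates text (str) from binary data (bytes).
payload = b'Ok.\n'                   # stand-in for what an HTTP client returns as .content
assert payload == b'Ok.\n'           # compare against a byte literal ...
assert payload.decode() == 'Ok.\n'   # ... or decode and compare as text
assert payload != 'Ok.\n'            # bytes never compare equal to str in Python 3
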
@ -75,7 +75,7 @@ def test_replication_after_partition(both_https_cluster):
closing_pool = Pool(1)
inserting_pool = Pool(5)
cres = closing_pool.map_async(close, [random.randint(1, 3) for _ in range(10)])
ires = inserting_pool.map_async(insert_data_and_check, range(100))
ires = inserting_pool.map_async(insert_data_and_check, list(range(100)))

cres.wait()
ires.wait()

@ -1,4 +1,3 @@
#!/usr/bin/env python2
import os
import sys
from contextlib import contextmanager
@ -119,6 +118,6 @@ def test_async_inserts_into_local_shard(started_cluster):

if __name__ == '__main__':
with contextmanager(started_cluster)() as cluster:
for name, instance in cluster.instances.items():
print name, instance.ip_address
raw_input("Cluster created, press any key to destroy...")
for name, instance in list(cluster.instances.items()):
print(name, instance.ip_address)
input("Cluster created, press any key to destroy...")

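For context (illustration only, not part of the diff): the recurring replacements above follow directly from Python 3 semantics — print is a function, dict.items() returns a view, and raw_input() was renamed to input(). A small self-contained sketch with hypothetical sample data:

# Hypothetical sample data standing in for cluster.instances
instances = {'node1': '10.5.0.2', 'node2': '10.5.0.3'}

for name, ip_address in list(instances.items()):  # items() is a view; list() takes a snapshot
    print(name, ip_address)                        # print is a function in Python 3

# raw_input() no longer exists; the equivalent prompt would be:
# input("Cluster created, press any key to destroy...")
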
@ -1,4 +1,4 @@
from __future__ import print_function

import sys

@ -7,6 +7,8 @@ from helpers.cluster import ClickHouseCluster
from helpers.uclient import client, prompt, end_of_block

cluster = ClickHouseCluster(__file__)
# log = sys.stdout
log = None

NODES = {'node' + str(i): cluster.add_instance(
'node' + str(i),
@ -55,7 +57,7 @@ def started_cluster():
cluster.shutdown()

@pytest.mark.parametrize("node", NODES.values()[:1])
@pytest.mark.parametrize("node", list(NODES.values())[:1])
@pytest.mark.parametrize("source", ["lv_over_distributed_table"])
class TestLiveViewOverDistributedSuite:
def test_select_with_order_by_node(self, started_cluster, node, source):
@ -87,7 +89,6 @@ node2\t1\t11
== "22\n"

def test_watch_live_view_order_by_node(self, started_cluster, node, source):
log = sys.stdout
command = " ".join(node.client.command)
args = dict(log=log, command=command)

@ -130,7 +131,6 @@ node2\t1\t11
client1.expect('"node3",3,3,3')

def test_watch_live_view_order_by_key(self, started_cluster, node, source):
log = sys.stdout
command = " ".join(node.client.command)
args = dict(log=log, command=command)

@ -173,7 +173,6 @@ node2\t1\t11
client1.expect('"node3",3,3,3')

def test_watch_live_view_group_by_node(self, started_cluster, node, source):
log = sys.stdout
command = " ".join(node.client.command)
args = dict(log=log, command=command)

@ -208,7 +207,6 @@ node2\t1\t11
client1.expect('"node3",3,3')

def test_watch_live_view_group_by_key(self, started_cluster, node, source):
log = sys.stdout
command = " ".join(node.client.command)
args = dict(log=log, command=command)
sep = ' \xe2\x94\x82'
@ -245,7 +243,6 @@ node2\t1\t11
client1.expect('3,3,3')

def test_watch_live_view_sum(self, started_cluster, node, source):
log = sys.stdout
command = " ".join(node.client.command)
args = dict(log=log, command=command)

@ -11,7 +11,7 @@ def check_query(clickhouse_node, query, result_set, retry_count=3, interval_seco
if result_set == lastest_result:
return

print lastest_result
print(lastest_result)
time.sleep(interval_seconds)

assert lastest_result == result_set

@ -6,7 +6,7 @@ import pymysql.cursors
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path

import materialize_with_ddl
from . import materialize_with_ddl

DOCKER_COMPOSE_PATH = get_docker_compose_path()

@ -50,10 +50,10 @@ class MySQLNodeInstance:
while time.time() - start < timeout:
try:
self.alloc_connection()
print "Mysql Started"
print("Mysql Started")
return
except Exception as ex:
print "Can't connect to MySQL " + str(ex)
print("Can't connect to MySQL " + str(ex))
time.sleep(0.5)

subprocess.check_call(['docker-compose', 'ps', '--services', 'all'])
@ -119,8 +119,8 @@ def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_
materialize_with_ddl.alter_modify_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
"mysql1")
except:
print(clickhouse_node.query(
"select '\n', thread_id, query_id, arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym from system.stack_trace format TSVRaw"))
print((clickhouse_node.query(
"select '\n', thread_id, query_id, arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym from system.stack_trace format TSVRaw")))
raise

@ -44,12 +44,12 @@ def start_small_cluster():

def test_single_endpoint_connections_count(start_small_cluster):
def task(count):
print("Inserting ten times from {}".format(count))
for i in xrange(count, count + 10):
print(("Inserting ten times from {}".format(count)))
for i in range(count, count + 10):
node1.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

p = Pool(10)
p.map(task, xrange(0, 100, 10))
p.map(task, range(0, 100, 10))

assert_eq_with_retry(node1, "select count() from test_table", "100")
assert_eq_with_retry(node2, "select count() from test_table", "100")
@ -97,17 +97,17 @@ def start_big_cluster():

def test_multiple_endpoint_connections_count(start_big_cluster):
def task(count):
print("Inserting ten times from {}".format(count))
print(("Inserting ten times from {}".format(count)))
if (count / 10) % 2 == 1:
node = node3
else:
node = node4

for i in xrange(count, count + 10):
for i in range(count, count + 10):
node.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

p = Pool(10)
p.map(task, xrange(0, 100, 10))
p.map(task, range(0, 100, 10))

assert_eq_with_retry(node3, "select count() from test_table", "100")
assert_eq_with_retry(node4, "select count() from test_table", "100")

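For context (illustration only, not part of the diff): Python 3 drops xrange(); range() is already lazy, so a bare swap suffices wherever the result is only iterated, and list(range(...)) is used where an actual list is needed. A minimal sketch:

# range() in Python 3 behaves like Python 2's xrange(): it is evaluated lazily.
total = 0
for i in range(0, 100, 10):      # replaces xrange(0, 100, 10)
    total += i
assert total == 450

steps = list(range(0, 100, 10))  # materialise a real list only when one is required
assert steps[0] == 0 and steps[-1] == 90
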
@ -68,6 +68,6 @@ def test_select_table_name_from_merge_over_distributed(started_cluster):

if __name__ == '__main__':
with contextmanager(started_cluster)() as cluster:
for name, instance in cluster.instances.items():
print name, instance.ip_address
raw_input("Cluster created, press any key to destroy...")
for name, instance in list(cluster.instances.items()):
print(name, instance.ip_address)
input("Cluster created, press any key to destroy...")

@ -954,7 +954,7 @@ def test_mutate_to_another_disk(start_cluster, name, engine):
if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "":
assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
else: # mutation failed, let's try on another disk
print "Mutation failed"
print("Mutation failed")
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
retry = 20
@ -1114,7 +1114,7 @@ def test_download_appropriate_disk(start_cluster):

for _ in range(10):
try:
print "Syncing replica"
print("Syncing replica")
node2.query("SYSTEM SYNC REPLICA replicated_table_for_download")
break
except:

@ -122,7 +122,7 @@ def test_delete_and_drop_mutation(started_cluster):
if int(result.strip()) == 2:
break
except:
print "Result", result
print("Result", result)
pass

time.sleep(0.5)

@ -44,8 +44,8 @@ def test_mutations_with_merge_background_task(started_cluster):
all_done = True
break

print instance_test_mutations.query(
"SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")
print(instance_test_mutations.query(
"SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames"))
assert all_done

@ -44,7 +44,7 @@ class MySQLNodeInstance:
res = "\n".join(rows)
return res

if isinstance(execution_query, (str, bytes, unicode)):
if isinstance(execution_query, (str, bytes)):
return execute(execution_query)
else:
return [execute(q) for q in execution_query]
@ -256,7 +256,7 @@ def test_mysql_types(started_cluster, case_name, mysql_type, expected_ch_type, m
res = node.query(query, **kwargs)
return res if isinstance(res, int) else res.rstrip('\n\r')

if isinstance(query, (str, bytes, unicode)):
if isinstance(query, (str, bytes)):
return do_execute(query)
else:
return [do_execute(q) for q in query]

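For context (illustration only, not part of the diff): Python 3 has no separate unicode type — str is already Unicode text — so the type checks simply drop it. A small sketch with a hypothetical helper:

# Hypothetical helper mirroring the isinstance() change above.
def as_query_list(query):
    if isinstance(query, (str, bytes)):   # 'unicode' no longer exists in Python 3
        return [query]
    return list(query)

assert as_query_list("SELECT 1") == ["SELECT 1"]
assert as_query_list(["SELECT 1", "SELECT 2"]) == ["SELECT 1", "SELECT 2"]
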
@ -98,7 +98,7 @@ def test_mysql_client(mysql_client, server_address):
-e "SELECT 1;"
'''.format(host=server_address, port=server_port), demux=True)

assert stdout == '\n'.join(['1', '1', ''])
assert stdout.decode() == '\n'.join(['1', '1', ''])

code, (stdout, stderr) = mysql_client.exec_run('''
mysql --protocol tcp -h {host} -P {port} default -u default --password=123
@ -106,13 +106,13 @@ def test_mysql_client(mysql_client, server_address):
-e "SELECT 'тест' as b;"
'''.format(host=server_address, port=server_port), demux=True)

assert stdout == '\n'.join(['a', '1', 'b', 'тест', ''])
assert stdout.decode() == '\n'.join(['a', '1', 'b', 'тест', ''])

code, (stdout, stderr) = mysql_client.exec_run('''
mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;"
'''.format(host=server_address, port=server_port), demux=True)

assert stderr == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \
assert stderr.decode() == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \
'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n'

code, (stdout, stderr) = mysql_client.exec_run('''
@ -122,8 +122,8 @@ def test_mysql_client(mysql_client, server_address):
-e "use system2;"
'''.format(host=server_address, port=server_port), demux=True)

assert stdout == 'count()\n1\n'
assert stderr[0:182] == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
assert stdout.decode() == 'count()\n1\n'
assert stderr[0:182].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
"ERROR 81 (00000) at line 1: Code: 81, e.displayText() = DB::Exception: Database system2 doesn't exist"

code, (stdout, stderr) = mysql_client.exec_run('''
@ -140,7 +140,7 @@ def test_mysql_client(mysql_client, server_address):
-e "SELECT * FROM tmp ORDER BY tmp_column;"
'''.format(host=server_address, port=server_port), demux=True)

assert stdout == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', ''])
assert stdout.decode() == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', ''])

def test_mysql_client_exception(mysql_client, server_address):
@ -150,7 +150,7 @@ def test_mysql_client_exception(mysql_client, server_address):
-e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');"
'''.format(host=server_address, port=server_port), demux=True)

assert stderr[0:266] == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
assert stderr[0:266].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
"ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 2002, e.displayText() = mysqlxx::ConnectionFailed: Can't connect to MySQL server on '127.0.0.1' (115) ((nullptr):0)"

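For context (illustration only, not part of the diff): output captured from container exec_run() or a subprocess arrives as bytes under Python 3, which is why the assertions above either decode the stream or compare against byte literals. A minimal sketch:

# Output captured from a subprocess or container exec is bytes in Python 3.
stdout = b'tables\n'                     # stand-in for the captured stream
assert stdout.decode() == 'tables\n'     # decode once, then compare as text
assert stdout[:6].decode() == 'tables'   # slicing bytes yields bytes; decode after slicing
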
@ -188,14 +188,14 @@ def test_mysql_replacement_query(mysql_client, server_address):
--password=123 -e "select database();"
'''.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == 'database()\ndefault\n'
assert stdout.decode() == 'database()\ndefault\n'

code, (stdout, stderr) = mysql_client.exec_run('''
mysql --protocol tcp -h {host} -P {port} default -u default
--password=123 -e "select DATABASE();"
'''.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == 'DATABASE()\ndefault\n'
assert stdout.decode() == 'DATABASE()\ndefault\n'

def test_mysql_explain(mysql_client, server_address):
@ -238,6 +238,7 @@ def test_mysql_federated(mysql_server, server_address):
node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"})

def check_retryable_error_in_stderr(stderr):
stderr = stderr.decode()
return ("Can't connect to local MySQL server through socket" in stderr
or "MySQL server has gone away" in stderr
or "Server shutdown in progress" in stderr)
@ -252,8 +253,8 @@ def test_mysql_federated(mysql_server, server_address):
'''.format(host=server_address, port=server_port), demux=True)

if code != 0:
print("stdout", stdout)
print("stderr", stderr)
print(("stdout", stdout))
print(("stderr", stderr))
if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
time.sleep(1)
continue
@ -266,14 +267,14 @@ def test_mysql_federated(mysql_server, server_address):
'''.format(host=server_address, port=server_port), demux=True)

if code != 0:
print("stdout", stdout)
print("stderr", stderr)
print(("stdout", stdout))
print(("stderr", stderr))
if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
time.sleep(1)
continue
assert code == 0

assert stdout == '\n'.join(['col', '0', '1', '5', ''])
assert stdout.decode() == '\n'.join(['col', '0', '1', '5', ''])

code, (stdout, stderr) = mysql_server.exec_run('''
mysql
@ -282,14 +283,14 @@ def test_mysql_federated(mysql_server, server_address):
'''.format(host=server_address, port=server_port), demux=True)

if code != 0:
print("stdout", stdout)
print("stderr", stderr)
print(("stdout", stdout))
print(("stderr", stderr))
if try_num + 1 < retries and check_retryable_error_in_stderr(stderr):
time.sleep(1)
continue
assert code == 0

assert stdout == '\n'.join(['col', '0', '0', '1', '1', '5', '5', ''])
assert stdout.decode() == '\n'.join(['col', '0', '0', '1', '1', '5', '5', ''])

def test_mysql_set_variables(mysql_client, server_address):

@ -362,7 +363,7 @@ def test_python_client(server_address):

def test_golang_client(server_address, golang_container):
# type: (str, Container) -> None
with open(os.path.join(SCRIPT_DIR, 'golang.reference')) as fp:
with open(os.path.join(SCRIPT_DIR, 'golang.reference'), 'rb') as fp:
reference = fp.read()

code, (stdout, stderr) = golang_container.exec_run(
@ -370,7 +371,7 @@ def test_golang_client(server_address, golang_container):
'abc'.format(host=server_address, port=server_port), demux=True)

assert code == 1
assert stderr == "Error 81: Database abc doesn't exist\n"
assert stderr.decode() == "Error 81: Database abc doesn't exist\n"

code, (stdout, stderr) = golang_container.exec_run(
'./main --host {host} --port {port} --user default --password 123 --database '
@ -391,31 +392,31 @@ def test_php_client(server_address, php_container):
code, (stdout, stderr) = php_container.exec_run(
'php -f test.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == 'tables\n'
assert stdout.decode() == 'tables\n'

code, (stdout, stderr) = php_container.exec_run(
'php -f test_ssl.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == 'tables\n'
assert stdout.decode() == 'tables\n'

code, (stdout, stderr) = php_container.exec_run(
'php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
demux=True)
assert code == 0
assert stdout == 'tables\n'
assert stdout.decode() == 'tables\n'

code, (stdout, stderr) = php_container.exec_run(
'php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port),
demux=True)
assert code == 0
assert stdout == 'tables\n'
assert stdout.decode() == 'tables\n'

def test_mysqljs_client(server_address, nodejs_container):
code, (_, stderr) = nodejs_container.exec_run(
'node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True)
assert code == 1
assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr
assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr.decode()

code, (_, stderr) = nodejs_container.exec_run(
'node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port),
@ -449,21 +450,21 @@ def test_java_client(server_address, java_container):
'java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database '
'default'.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == reference
assert stdout.decode() == reference

# non-empty password passed.
code, (stdout, stderr) = java_container.exec_run(
'java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database '
'default'.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == reference
assert stdout.decode() == reference

# double-sha1 password passed.
code, (stdout, stderr) = java_container.exec_run(
'java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database '
'default'.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout == reference
assert stdout.decode() == reference

def test_types(server_address):