#!/usr/bin/python3
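"""Generate the HTML report for a ClickHouse performance comparison run.

Reads the TSV result files produced by the comparison (changed-perf.tsv,
unstable-queries.tsv, test-times.tsv, and so on) and writes the report to
stdout as a series of HTML tables.
"""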
import collections
import csv
import itertools
import os
import sys
import traceback
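# Error and query counters updated by the report sections below, plus the
# overall status/message of the run.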
report_errors = []
status = 'success'
message = 'See the report'
message_array = []
error_tests = 0
slow_average_tests = 0
faster_queries = 0
slower_queries = 0
unstable_queries = 0
print("""
ClickHouse performance comparison
""".format())
table_anchor = 0
row_anchor = 0
def nextTableAnchor():
global table_anchor
table_anchor += 1
return str(table_anchor)
def nextRowAnchor():
global row_anchor
global table_anchor
row_anchor += 1
return str(table_anchor) + "." + str(row_anchor)
def tr(x):
    a = nextRowAnchor()
    #return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
    return '<tr id={a}>{x}</tr>'.format(a=a, x=str(x))
def td(value, cell_attributes = ''):
    return '<td {cell_attributes}>{value}</td>'.format(
        cell_attributes = cell_attributes,
        value = value)
def th(x):
    return '<th>' + str(x) + '</th>'
def tableRow(cell_values, cell_attributes = []):
return tr(''.join([td(v, a)
for v, a in itertools.zip_longest(
cell_values, cell_attributes,
fillvalue = '')]))
def tableHeader(r):
return tr(''.join([th(f) for f in r]))
def tableStart(title):
    return """
<h2 id="{anchor}"><a href="#{anchor}">{title}</a></h2>
<table>""".format(
        anchor = nextTableAnchor(),
        title = title)
def tableEnd():
    return '</table>'
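# All report inputs are TSV files; a failure to read one is recorded in
# report_errors instead of aborting, so the rest of the report still builds.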
def tsvRows(n):
    try:
        with open(n, encoding='utf-8') as fd:
            return [row for row in csv.reader(fd, delimiter="\t", quotechar='"')]
    except:
        report_errors.append(
            traceback.format_exception_only(
                *sys.exc_info()[:2])[-1])
    return []
def htmlRows(n):
rawRows = tsvRows(n)
result = ''
for row in rawRows:
result += tableRow(row)
return result
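# Print a complete section (caption, header and rows) for tables that need no
# per-row styling.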
def printSimpleTable(caption, columns, rows):
if not rows:
return
print(tableStart(caption))
print(tableHeader(columns))
for row in rows:
print(tableRow(row))
print(tableEnd())
printSimpleTable('Tested commits', ['Old', 'New'],
[[open('left-commit.txt').read(), open('right-commit.txt').read()]])
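# 'Changes in performance': queries whose run time changed between the old and
# new server. A negative relative difference (column 2) means the query became
# faster and its cell is shaded blue; a positive one means slower, shaded red.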
def print_changes():
rows = tsvRows('changed-perf.tsv')
if not rows:
return
global faster_queries, slower_queries
print(tableStart('Changes in performance'))
columns = [
'Old, s', # 0
'New, s', # 1
'Relative difference (new - old)/old', # 2
        'Randomization distribution quantiles [5%, 50%, 95%, 99%]', # 3
'Test', # 4
'Query', # 5
]
print(tableHeader(columns))
attrs = ['' for c in columns]
for row in rows:
if float(row[2]) < 0.:
faster_queries += 1
attrs[2] = 'style="background: #adbdff"'
else:
slower_queries += 1
attrs[2] = 'style="background: #ffb0a0"'
print(tableRow(row, attrs))
print(tableEnd())
print_changes()
slow_on_client_rows = tsvRows('slow-on-client.tsv')
error_tests += len(slow_on_client_rows)
printSimpleTable('Slow on client',
['Client time, s', 'Server time, s', 'Ratio', 'Query'],
slow_on_client_rows)
unstable_rows = tsvRows('unstable-queries.tsv')
unstable_queries += len(unstable_rows)
printSimpleTable('Unstable queries',
[
'Old, s', 'New, s', 'Relative difference (new - old)/old',
'Randomization distribution quantiles [5%, 50%, 95%, 99%]',
'Test', 'Query'
],
unstable_rows)
run_error_rows = tsvRows('run-errors.tsv')
error_tests += len(run_error_rows)
printSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
printSimpleTable('Tests with most unstable queries',
['Test', 'Unstable', 'Changed perf', 'Total not OK'],
tsvRows('bad-tests.tsv'))
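# Per-test timing summary; a test whose average wall clock time (column 6)
# exceeds 30 s is counted as slow and its cell is highlighted.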
def print_test_times():
global slow_average_tests
rows = tsvRows('test-times.tsv')
if not rows:
return
columns = [
'Test', #0
'Wall clock time, s', #1
'Total client time, s', #2
'Total queries', #3
'Ignored short queries', #4
        'Longest query<br>(sum for all runs), s', #5
        'Avg wall clock time<br>(sum for all runs), s', #6
        'Shortest query<br>(sum for all runs), s', #7
]
print(tableStart('Test times'))
print(tableHeader(columns))
attrs = ['' for c in columns]
for r in rows:
if float(r[6]) > 30:
slow_average_tests += 1
attrs[6] = 'style="background: #ffb0a0"'
else:
attrs[6] = ''
print(tableRow(r, attrs))
print(tableEnd())
print_test_times()
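# Any errors collected while building this report are shown in a final table.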
if report_errors:
print(tableStart('Errors while building the report'))
print(tableHeader(['Error']))
for x in report_errors:
print(tableRow([x]))
print(tableEnd())
print("""
Test output