Merge branch 'master' into storages-small-improvements

Alexey Milovidov 2020-09-18 22:03:19 +03:00
commit 21ca9fce63
69 changed files with 1718 additions and 779 deletions

.gitmodules vendored

@ -37,7 +37,7 @@
url = https://github.com/ClickHouse-Extras/mariadb-connector-c.git
[submodule "contrib/jemalloc"]
path = contrib/jemalloc
url = https://github.com/jemalloc/jemalloc.git
url = https://github.com/ClickHouse-Extras/jemalloc.git
[submodule "contrib/unixodbc"]
path = contrib/unixodbc
url = https://github.com/ClickHouse-Extras/UnixODBC.git


@ -18,6 +18,7 @@ set (SRCS
terminalColors.cpp
errnoToString.cpp
getResource.cpp
StringRef.cpp
)
if (ENABLE_REPLXX)

base/common/StringRef.cpp Normal file

@ -0,0 +1,13 @@
#include <ostream>
#include "StringRef.h"
std::ostream & operator<<(std::ostream & os, const StringRef & str)
{
if (str.data)
os.write(str.data, str.size);
return os;
}


@ -4,7 +4,7 @@
#include <string>
#include <vector>
#include <functional>
#include <ostream>
#include <iosfwd>
#include <common/types.h>
#include <common/unaligned.h>
@ -322,10 +322,4 @@ inline bool operator==(StringRef lhs, const char * rhs)
return true;
}
inline std::ostream & operator<<(std::ostream & os, const StringRef & str)
{
if (str.data)
os.write(str.data, str.size);
return os;
}
std::ostream & operator<<(std::ostream & os, const StringRef & str);


@ -54,8 +54,8 @@ template <size_t Bits, typename Signed>
class integer
{
public:
using base_type = uint8_t;
using signed_base_type = int8_t;
using base_type = uint64_t;
using signed_base_type = int64_t;
// ctors
integer() = default;
@ -127,7 +127,7 @@ private:
friend class std::numeric_limits<integer<Bits, signed>>;
friend class std::numeric_limits<integer<Bits, unsigned>>;
base_type m_arr[_impl::arr_size];
base_type items[_impl::item_count];
};
template <typename T>

File diff suppressed because it is too large


@ -53,6 +53,7 @@ SRCS(
setTerminalEcho.cpp
shift10.cpp
sleep.cpp
StringRef.cpp
terminalColors.cpp
)

contrib/jemalloc vendored

@ -1 +1 @@
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
Subproject commit 026764f19995c53583ab25a3b9c06a2fd74e4689


@ -20,7 +20,7 @@ rm -f CMakeCache.txt
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS ..
ninja $NINJA_FLAGS clickhouse-bundle
mv ./programs/clickhouse* /output
mv ./src/unit_tests_dbms /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
find . -name '*.so' -print -exec mv '{}' /output \;
find . -name '*.so.*' -print -exec mv '{}' /output \;


@ -105,6 +105,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
# Create combined output archive for split build and for performance tests.
if package_type == "performance":
result.append("COMBINED_OUTPUT=performance")
cmake_flags.append("-DENABLE_TESTS=0")
elif split_binary:
result.append("COMBINED_OUTPUT=shared_build")


@ -16,7 +16,7 @@ We also consider the test to be unstable, if the observed difference is less tha
performance differences above 5% more often than in 5% of runs, so the test is likely
to have false positives.
### How to read the report
### How to Read the Report
The check status summarizes the report in a short text message like `1 faster, 10 unstable`:
* `1 faster` -- how many queries became faster,
@ -27,28 +27,50 @@ The check status summarizes the report in a short text message like `1 faster, 1
The report page itself consists of several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such a table is marked in red, this also means an error, e.g., a test is taking too long to run.
#### Tested commits
#### Tested Commits
Informational, no action required. Log messages for the commits that are tested. Note that for the right commit, we show the nominal tested commit `pull/*/head` and the real tested commit `pull/*/merge`, which is generated by GitHub by merging the latest master into `pull/*/head` and which we actually build and test in CI.
#### Run errors
Action required for every item -- these are errors that must be fixed. The errors that occurred when running some test queries. For more information about the error, download the test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below.
#### Error Summary
Action required for every item.
#### Slow on client
Action required for every item -- these are errors that must be fixed. This table shows queries that take significantly longer to process on the client than on the server. A possible reason might be sending too much data to the client, e.g., a forgotten `format Null`.
This table summarizes all errors that occurred during the test. Click the links to go to the description of a particular error.
#### Short queries not marked as short
Action required for every item -- these are errors that must be fixed. This table shows queries that are "short" but not explicitly marked as such. "Short" queries are too fast to meaningfully compare performance, because the changes are drowned by the noise. We consider all queries that run faster than 0.02 s to be "short", and only check the performance if they became slower than this threshold. Probably this mode is not what you want, so you have to increase the query run time to be between 1 and 0.1 s, so that the performance can be compared. You do want this "short" mode for queries that complete "immediately", such as some varieties of `select count(*)`. You have to mark them as "short" explicitly by writing `<query short="1">...`. The value of "short" attribute is evaluated as a python expression, and substitutions are performed, so you can write something like `<query short="{column1} = {column2}">select count(*) from table where {column1} > {column2}</query>`, to mark only a particular combination of variables as short.
#### Run Errors
Action required for every item -- these are errors that must be fixed.
#### Partial queries
Action required for the cells marked in red. Shows the queries we are unable to run on an old server -- probably because they contain a new function. You should see this table when you add a new function and a performance test for it. Check that the run time and variance are acceptable (run time between 0.1 and 1 seconds, variance below 10%). If not, they will be highlighted in red.
The errors that occurred when running some test queries. For more information about the error, download the test output archive and see `test-name-err.log`. To reproduce, see 'How to Run' below.
#### Changes in performance
Action required for the cells marked in red, and some cheering is appropriate for the cells marked in green. These are the queries for which we observe a statistically significant change in performance. Note that there will always be some false positives -- we try to filter by p < 0.001, and have 2000 queries, so two false positives per run are expected. In practice we have more -- e.g. code layout changed because of some unknowable jitter in compiler internals, so the change we observe is real, but it is a 'false positive' in the sense that it is not directly caused by your changes. If, based on your knowledge of ClickHouse internals, you can decide that the observed test changes are not relevant to the changes made in the tested PR, you can ignore them.
#### Slow on Client
Action required for every item -- these are errors that must be fixed.
This table shows queries that take significantly longer to process on the client than on the server. A possible reason might be sending too much data to the client, e.g., a forgotten `format Null`.
#### Inconsistent Short Marking
Action required for every item -- these are errors that must be fixed.
Queries that have "short" duration (on the order of 0.1 s) can't be reliably tested in the normal way, where we perform a small number (about ten) of measurements for each server, because the signal-to-noise ratio is much smaller. There is a special mode for such queries that instead runs them for a fixed amount of time, normally with a much higher number of measurements (up to thousands). This mode must be explicitly enabled by the test author to avoid accidental errors. It must be used only for queries that are meant to complete "immediately", such as `select count(*)`. If your query is not supposed to be "immediate", try to make it run longer, e.g. by processing more data.
This table shows queries for which the "short" marking is not consistent with the actual query run time -- i.e., a query runs for a long time but is marked as short, or it runs very fast but is not marked as short.
If your query is really supposed to complete "immediately" and can't be made to run longer, you have to mark it as "short". To do so, write `<query short="1">...` in the test file. The value of the "short" attribute is evaluated as a Python expression, and substitutions are performed, so you can write something like `<query short="{column1} = {column2}">select count(*) from table where {column1} > {column2}</query>` to mark only a particular combination of variables as short.
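For illustration, here is a minimal Python sketch of how such an attribute can be expanded and evaluated (the XML snippet, parameter values, and helper names below are made up for the example; the actual logic lives in `perf.py`):

```python
# A rough sketch of expanding {parameter} placeholders in a test query and in
# its "short" attribute, then evaluating the attribute as a Python expression.
# The XML, parameter values, and helper names are illustrative only.
import itertools
import xml.etree.ElementTree as et

test_xml = """
<test>
    <substitutions>
        <substitution>
            <name>rows</name>
            <values><value>100</value><value>1000000</value></values>
        </substitution>
    </substitutions>
    <query short="{rows} &lt; 1000">select sum(number) from numbers({rows})</query>
</test>
"""

root = et.fromstring(test_xml)
params = {s.find('name').text: [v.text for v in s.findall('values/value')]
          for s in root.findall('substitutions/substitution')}

def substitute(template):
    """Expand {name} placeholders with every combination of parameter values."""
    keys = list(params)
    return [template.format(**dict(zip(keys, combo)))
            for combo in itertools.product(*(params[k] for k in keys))]

for q in root.findall('query'):
    queries = substitute(q.text)
    shortness = [bool(eval(s)) for s in substitute(q.attrib.get('short', '0'))]
    for i, (query, short) in enumerate(zip(queries, shortness)):
        print('short' if short else 'normal', i, query, sep='\t')
```

Running this prints one `short` or `normal` line per expanded query, similar to the `short\t<index>` lines that the test runner emits.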
#### Partial Queries
Action required for the cells marked in red.
Shows the queries we are unable to run on an old server -- probably because they contain a new function. You should see this table when you add a new function and a performance test for it. Check that the run time and variance are acceptable (run time between 0.1 and 1 seconds, variance below 10%). If not, they will be highlighted in red.
#### Changes in Performance
Action required for the cells marked in red, and some cheering is appropriate for the cells marked in green.
These are the queries for which we observe a statistically significant change in performance. Note that there will always be some false positives -- we try to filter by p < 0.001, and have 2000 queries, so two false positives per run are expected. In practice we have more -- e.g. code layout changed because of some unknowable jitter in compiler internals, so the change we observe is real, but it is a 'false positive' in the sense that it is not directly caused by your changes. If, based on your knowledge of ClickHouse internals, you can decide that the observed test changes are not relevant to the changes made in the tested PR, you can ignore them.
You can find flame graphs for queries with performance changes in the test output archive, in files named as 'my_test_0_Cpu_SELECT 1 FROM....FORMAT Null.left.svg'. First goes the test name, then the query number in the test, then the trace type (same as in `system.trace_log`), and then the server version (left is old and right is new).
#### Unstable queries
Action required for the cells marked in red. These are queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%.
#### Unstable Queries
Action required for the cells marked in red.
These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%.
The most frequent reason for instability is that the query is just too short -- e.g. below 0.1 seconds. Bringing query time to 0.2 seconds or above usually helps.
Other reasons may include:
@ -57,24 +79,33 @@ Other reasons may include:
Investigating the instability is the hardest problem in performance testing, and we still have not been able to understand the reasons behind the instability of some queries. There is some data in the performance test output archive that can help you. Look for files named 'my_unstable_test_0_SELECT 1...FORMAT Null.{left,right}.metrics.rep'. They contain metrics from `system.query_log.ProfileEvents` and functions from stack traces from `system.trace_log` that vary significantly between query runs. The second column is an array of \[min, med, max] values for the metric. Say, if you see `PerfCacheMisses` there, it may mean that the code being tested has a not-so-cache-local memory access pattern that is sensitive to memory layout.
#### Skipped tests
Informational, no action required. Shows the tests that were skipped, and the reason for it. Normally it is because the data set required for the test was not loaded, or the test is marked as 'long' -- both cases mean that the test is too big to be run per-commit.
#### Skipped Tests
Informational, no action required.
#### Test performance changes
Informational, no action required. This table summarizes the changes in performance of queries in each test -- how many queries have changed, how many are unstable, and what is the magnitude of the changes.
Shows the tests that were skipped, and the reason for it. Normally it is because the data set required for the test was not loaded, or the test is marked as 'long' -- both cases mean that the test is too big to be run per-commit.
#### Test times
Action required for the cells marked in red. This table shows the run times for all the tests. You may have to fix two kinds of errors in this table:
#### Test Performance Changes
Informational, no action required.
This table summarizes the changes in performance of queries in each test -- how many queries have changed, how many are unstable, and what is the magnitude of the changes.
#### Test Times
Action required for the cells marked in red.
This table shows the run times for all the tests. You may have to fix two kinds of errors in this table:
1) Average query run time is too long -- this probably means that the preparatory steps such as creating the tables and filling them with data are taking too long. Try to make them faster.
2) Longest query run time is too long -- some particular queries are taking too long, try to make them faster. The ideal query run time is between 0.1 and 1 s.
#### Concurrent benchmarks
No action required. This table shows the results of a concurrent benchmark where queries from `website` are run in parallel using `clickhouse-benchmark`, and requests per second values are compared for old and new servers. It shows variability up to 20% for no apparent reason, so it's probably safe to disregard it. We have it for special cases like investigating concurrency effects in memory allocators, where it may be important.
#### Metric Changes
No action required.
#### Metric changes
No action required. These are changes in median values of metrics from `system.asynchronous_metrics_log`. Again, they are prone to unexplained variation and you can safely ignore this table unless it's interesting to you for some particular reason (e.g. you want to compare memory usage). There are also graphs of these metrics in the performance test output archive, in the `metrics` folder.
These are changes in median values of metrics from `system.asynchronous_metrics_log`. These metrics are prone to unexplained variation and you can safely ignore this table unless it's interesting to you for some particular reason (e.g. you want to compare memory usage). There are also graphs of these metrics in the performance test output archive, in the `metrics` folder.
### How to run
#### Errors while Building the Report
Ask a maintainer for help. These errors normally indicate a problem with testing infrastructure.
### How to Run
Run the entire docker container, specifying PR number (0 for master)
and SHA of the commit to test. The reference revision is determined as a nearest
ancestor testing release tag. It is possible to specify the reference revision and


@ -121,7 +121,7 @@ function run_tests
then
# Use the explicitly set path to directory with test files.
test_prefix="$CHPC_TEST_PATH"
elif [ "$PR_TO_TEST" = "0" ]
elif [ "$PR_TO_TEST" == "0" ]
then
# When testing commits from master, use the older test files. This
# allows the tests to pass even when we add new functions and tests for
@ -155,6 +155,20 @@ function run_tests
test_files=$(ls "$test_prefix"/*.xml)
fi
# For PRs, test only a subset of queries, and run them less times.
# If the corresponding environment variables are already set, keep
# those values.
if [ "$PR_TO_TEST" == "0" ]
then
CHPC_RUNS=${CHPC_RUNS:-13}
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
else
CHPC_RUNS=${CHPC_RUNS:-7}
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-20}
fi
export CHPC_RUNS
export CHPC_MAX_QUERIES
# Determine which concurrent benchmarks to run. For now, the only test
# we run as a concurrent benchmark is 'website'. Run it as benchmark if we
# are also going to run it as a normal test.
@ -184,11 +198,13 @@ function run_tests
echo test "$test_name"
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
# the grep is to filter out set -x output and keep only time output
# The grep is to filter out set -x output and keep only time output.
# The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
{ \
time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 \
--runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
-- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
} 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" \
} 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
done
@ -197,33 +213,9 @@ function run_tests
wait
}
# Run some queries concurrently and report the resulting TPS. This additional
# (relatively) short test helps detect concurrency-related effects, because the
# main performance comparison testing is done query-by-query.
function run_benchmark
{
rm -rf benchmark ||:
mkdir benchmark ||:
# The list is built by run_tests.
while IFS= read -r file
do
name=$(basename "$file" ".xml")
"$script_dir/perf.py" --print-queries "$file" > "benchmark/$name-queries.txt"
"$script_dir/perf.py" --print-settings "$file" > "benchmark/$name-settings.txt"
readarray -t settings < "benchmark/$name-settings.txt"
command=(clickhouse-benchmark --concurrency 6 --cumulative --iterations 1000 --randomize 1 --delay 0 --continue_on_errors "${settings[@]}")
"${command[@]}" --port 9001 --json "benchmark/$name-left.json" < "benchmark/$name-queries.txt"
"${command[@]}" --port 9002 --json "benchmark/$name-right.json" < "benchmark/$name-queries.txt"
done < benchmarks-to-run.txt
}
function get_profiles_watchdog
{
sleep 6000
sleep 600
echo "The trace collection did not finish in time." >> profile-errors.log
@ -490,8 +482,6 @@ build_log_column_definitions
cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:
short_query_threshold="0.02"
clickhouse-local --query "
create view query_display_names as select * from
file('analyze/query-display-names.tsv', TSV,
@ -524,18 +514,11 @@ create view query_metric_stats as
-- Main statistics for queries -- query time as reported in query log.
create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
as select
-- Comparison mode doesn't make sense for queries that complete
-- immediately (on the same order of time as noise). If query duration is
-- less than some threshold, we just skip it. If there is a significant
-- regression in such a query, the time will exceed the threshold, and we
-- will process it normally and detect the regression.
right < $short_query_threshold as short,
abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
not short and abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
not short and abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
not short and not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
not short and not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
left, right, diff, stat_threshold,
if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
@ -640,9 +623,9 @@ create table wall_clock_time_per_test engine Memory as select *
create table test_time engine Memory as
select test, sum(client) total_client_time,
maxIf(client, not short) query_max,
minIf(client, not short) query_min,
count(*) queries, sum(short) short_queries
max(client) query_max,
min(client) query_min,
count(*) queries
from total_client_time_per_query full join queries using (test, query_index)
group by test;
@ -650,7 +633,6 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as
select wall_clock_time_per_test.test, real,
toDecimal64(total_client_time, 3),
queries,
short_queries,
toDecimal64(query_max, 3),
toDecimal64(real / queries, 3) avg_real_per_query,
toDecimal64(query_min, 3)
@ -685,32 +667,47 @@ create table queries_for_flamegraph engine File(TSVWithNamesAndTypes,
select test, query_index from queries where unstable_show or changed_show
;
-- List of queries that have 'short' duration, but are not marked as 'short' by
-- the test author (we report them).
create table unmarked_short_queries_report
engine File(TSV, 'report/unmarked-short-queries.tsv')
as select time, test, query_index, query_display_name
create view shortness
as select
(test, query_index) in
(select * from file('analyze/marked-short-queries.tsv', TSV,
'test text, query_index int'))
as marked_short,
time, test, query_index, query_display_name
from (
select right time, test, query_index from queries where short
select right time, test, query_index from queries
union all
select time_median, test, query_index from partial_query_times
where time_median < $short_query_threshold
) times
left join query_display_names
on times.test = query_display_names.test
and times.query_index = query_display_names.query_index
where (test, query_index) not in
(select * from file('analyze/marked-short-queries.tsv', TSV,
'test text, query_index int'))
order by test, query_index
;
-- Report of queries that have inconsistent 'short' markings:
-- 1) have short duration, but are not marked as 'short'
-- 2) the reverse -- marked 'short' but take too long.
-- The threshold for 2) is twice the threshold for 1), to avoid jitter.
create table inconsistent_short_marking_report
engine File(TSV, 'report/inconsistent-short-marking.tsv')
as select
multiIf(marked_short and time > 0.1, 'marked as short but is too long',
not marked_short and time < 0.02, 'is short but not marked as such',
'') problem,
marked_short, time,
test, query_index, query_display_name
from shortness
where problem != ''
;
--------------------------------------------------------------------------------
-- various compatibility data formats follow, not related to the main report
-- keep the table in old format so that we can analyze new and old data together
create table queries_old_format engine File(TSVWithNamesAndTypes, 'queries.rep')
as select short, changed_fail, unstable_fail, left, right, diff,
as select 0 short, changed_fail, unstable_fail, left, right, diff,
stat_threshold, test, query_display_name query
from queries
;
@ -1008,9 +1005,6 @@ case "$stage" in
# Ignore the errors to collect the log and build at least some report, anyway
time run_tests ||:
;&
"run_benchmark")
time run_benchmark 2> >(tee -a run-errors.tsv 1>&2) ||:
;&
"get_profiles")
# Check for huge pages.
cat /sys/kernel/mm/transparent_hugepage/enabled > thp-enabled.txt ||:


@ -1,16 +1,20 @@
#!/usr/bin/python3
import os
import sys
import itertools
import clickhouse_driver
import xml.etree.ElementTree as et
import argparse
import clickhouse_driver
import itertools
import functools
import math
import os
import pprint
import random
import re
import statistics
import string
import sys
import time
import traceback
import xml.etree.ElementTree as et
def tsv_escape(s):
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
@ -20,7 +24,8 @@ parser = argparse.ArgumentParser(description='Run performance test.')
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.')
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
@ -62,18 +67,13 @@ def substitute_parameters(query_templates, other_templates = []):
# Build a list of test queries, substituting parameters to query templates,
# and reporting the queries marked as short.
test_queries = []
is_short = []
for e in root.findall('query'):
new_queries = []
if 'short' in e.attrib:
new_queries, [is_short] = substitute_parameters([e.text], [[e.attrib['short']]])
for i, s in enumerate(is_short):
# Don't print this if we only need to print the queries.
if eval(s) and not args.print_queries:
print(f'short\t{i + len(test_queries)}')
else:
new_queries = substitute_parameters([e.text])
new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]])
test_queries += new_queries
is_short += [eval(s) for s in new_is_short]
assert(len(test_queries) == len(is_short))
# If we're only asked to print the queries, do that and exit
@ -82,6 +82,11 @@ if args.print_queries:
print(q)
exit(0)
# Print short queries
for i, s in enumerate(is_short):
if s:
print(f'short\t{i}')
# If we're only asked to print the settings, do that and exit. These are settings
# for clickhouse-benchmark, so we print them as command line arguments, e.g.
# '--max_memory_usage=10000000'.
@ -116,7 +121,7 @@ if 'max_ignored_relative_change' in root.attrib:
# Open connections
servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)]
connections = [clickhouse_driver.Client(**server) for server in servers]
all_connections = [clickhouse_driver.Client(**server) for server in servers]
for s in servers:
print('server\t{}\t{}'.format(s['host'], s['port']))
@ -126,7 +131,7 @@ for s in servers:
# connection loses the changes in settings.
drop_query_templates = [q.text for q in root.findall('drop_query')]
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(connections):
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
try:
c.execute(q)
@ -142,7 +147,7 @@ for conn_index, c in enumerate(connections):
# configurable). So the end result is uncertain, but hopefully we'll be able to
# run at least some queries.
settings = root.findall('settings/*')
for conn_index, c in enumerate(connections):
for conn_index, c in enumerate(all_connections):
for s in settings:
try:
q = f"set {s.tag} = '{s.text}'"
@ -154,7 +159,7 @@ for conn_index, c in enumerate(connections):
# Check tables that should exist. If they don't exist, just skip this test.
tables = [e.text for e in root.findall('preconditions/table_exists')]
for t in tables:
for c in connections:
for c in all_connections:
try:
res = c.execute("select 1 from {} limit 1".format(t))
except:
@ -176,7 +181,7 @@ for q in create_queries:
file = sys.stderr)
sys.exit(1)
for conn_index, c in enumerate(connections):
for conn_index, c in enumerate(all_connections):
for q in create_queries:
c.execute(q)
print(f'create\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
@ -184,13 +189,19 @@ for conn_index, c in enumerate(connections):
# Run fill queries
fill_query_templates = [q.text for q in root.findall('fill_query')]
fill_queries = substitute_parameters(fill_query_templates)
for conn_index, c in enumerate(connections):
for conn_index, c in enumerate(all_connections):
for q in fill_queries:
c.execute(q)
print(f'fill\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
# Run the queries in randomized order, but preserve their indexes as specified
# in the test XML. To avoid using too much time, limit the number of queries
# we run per test.
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries)))
# Run test queries.
for query_index, q in enumerate(test_queries):
for query_index in queries_to_run:
q = test_queries[query_index]
query_prefix = f'{test_name}.query{query_index}'
# We have some crazy long queries (about 100kB), so trim them to a sane
@ -208,8 +219,8 @@ for query_index, q in enumerate(test_queries):
# new one. We want to run them on the new server only, so that the PR author
# can ensure that the test works properly. Remember the errors we had on
# each server.
query_error_on_connection = [None] * len(connections);
for conn_index, c in enumerate(connections):
query_error_on_connection = [None] * len(all_connections);
for conn_index, c in enumerate(all_connections):
try:
prewarm_id = f'{query_prefix}.prewarm0'
res = c.execute(q, query_id = prewarm_id)
@ -236,21 +247,22 @@ for query_index, q in enumerate(test_queries):
if len(no_errors) == 0:
continue
elif len(no_errors) < len(connections):
elif len(no_errors) < len(all_connections):
print(f'partial\t{query_index}\t{no_errors}')
this_query_connections = [all_connections[index] for index in no_errors]
# Now, perform measured runs.
# Track the time spent by the client to process this query, so that we can
# notice the queries that take long to process on the client side, e.g. by
# sending excessive data.
start_seconds = time.perf_counter()
server_seconds = 0
for run in range(0, args.runs):
run = 0
while True:
run_id = f'{query_prefix}.run{run}'
for conn_index, c in enumerate(connections):
if query_error_on_connection[conn_index]:
continue
for conn_index, c in enumerate(this_query_connections):
try:
res = c.execute(q, query_id = run_id)
except Exception as e:
@ -259,8 +271,8 @@ for query_index, q in enumerate(test_queries):
e.message = run_id + ': ' + e.message
raise
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
server_seconds += c.last_query.elapsed
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
if c.last_query.elapsed > 10:
# Stop processing pathologically slow queries, to avoid timing out
@ -269,12 +281,37 @@ for query_index, q in enumerate(test_queries):
print(f'The query no. {query_index} is taking too long to run ({c.last_query.elapsed} s)', file=sys.stderr)
exit(2)
# Be careful with the counter, after this line it's the next iteration
# already.
run += 1
# Try to run any query for at least the specified number of times,
# before considering other stop conditions.
if run < args.runs:
continue
# For very short queries we have a special mode where we run them for at
# least some time. The recommended lower bound of run time for "normal"
# queries is about 0.1 s, and we run them about 10 times, giving the
# time per query per server of about one second. Use this value as a
# reference for "short" queries.
if is_short[query_index]:
if server_seconds >= 2 * len(this_query_connections):
break
# Also limit the number of runs, so that we don't go crazy processing
# the results -- 'eqmed.sql' is really suboptimal.
if run >= 500:
break
else:
if run >= args.runs:
break
client_seconds = time.perf_counter() - start_seconds
print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}')
# Run drop queries
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(connections):
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')


@ -98,6 +98,9 @@ th {{
tr:nth-child(odd) td {{filter: brightness(90%);}}
.inconsistent-short-marking tr :nth-child(2),
.inconsistent-short-marking tr :nth-child(3),
.inconsistent-short-marking tr :nth-child(5),
.all-query-times tr :nth-child(1),
.all-query-times tr :nth-child(2),
.all-query-times tr :nth-child(3),
@ -126,7 +129,6 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
.test-times tr :nth-child(5),
.test-times tr :nth-child(6),
.test-times tr :nth-child(7),
.test-times tr :nth-child(8),
.concurrent-benchmarks tr :nth-child(2),
.concurrent-benchmarks tr :nth-child(3),
.concurrent-benchmarks tr :nth-child(4),
@ -205,9 +207,11 @@ def tableStart(title):
global table_anchor
table_anchor = cls
anchor = currentTableAnchor()
help_anchor = '-'.join(title.lower().split(' '));
return f"""
<h2 id="{anchor}">
<a class="cancela" href="#{anchor}">{title}</a>
<a class="cancela" href="https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#{help_anchor}"><sup style="color: #888">?</sup></a>
</h2>
<table class="{cls}">
"""
@ -250,7 +254,7 @@ def addSimpleTable(caption, columns, rows, pos=None):
def add_tested_commits():
global report_errors
try:
addSimpleTable('Tested commits', ['Old', 'New'],
addSimpleTable('Tested Commits', ['Old', 'New'],
[['<pre>{}</pre>'.format(x) for x in
[open('left-commit.txt').read(),
open('right-commit.txt').read()]]])
@ -276,7 +280,7 @@ def add_report_errors():
if not report_errors:
return
text = tableStart('Errors while building the report')
text = tableStart('Errors while Building the Report')
text += tableHeader(['Error'])
for x in report_errors:
text += tableRow([x])
@ -290,7 +294,7 @@ def add_errors_explained():
return
text = '<a name="fail1"/>'
text += tableStart('Error summary')
text += tableStart('Error Summary')
text += tableHeader(['Description'])
for row in errors_explained:
text += tableRow(row)
@ -308,26 +312,26 @@ if args.report == 'main':
run_error_rows = tsvRows('run-errors.tsv')
error_tests += len(run_error_rows)
addSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
addSimpleTable('Run Errors', ['Test', 'Error'], run_error_rows)
if run_error_rows:
errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>']);
slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
error_tests += len(slow_on_client_rows)
addSimpleTable('Slow on client',
addSimpleTable('Slow on Client',
['Client time,&nbsp;s', 'Server time,&nbsp;s', 'Ratio', 'Test', 'Query'],
slow_on_client_rows)
if slow_on_client_rows:
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>']);
unmarked_short_rows = tsvRows('report/unmarked-short-queries.tsv')
unmarked_short_rows = tsvRows('report/inconsistent-short-marking.tsv')
error_tests += len(unmarked_short_rows)
addSimpleTable('Short queries not marked as short',
['New client time, s', 'Test', '#', 'Query'],
addSimpleTable('Inconsistent Short Marking',
['Problem', 'Is marked as short', 'New client time, s', 'Test', '#', 'Query'],
unmarked_short_rows)
if unmarked_short_rows:
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have short duration but are not explicitly marked as "short"</a>']);
errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have inconsistent short marking</a>']);
def add_partial():
rows = tsvRows('report/partial-queries-report.tsv')
@ -335,7 +339,7 @@ if args.report == 'main':
return
global unstable_partial_queries, slow_average_tests, tables
text = tableStart('Partial queries')
text = tableStart('Partial Queries')
columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query']
text += tableHeader(columns)
attrs = ['' for c in columns]
@ -366,7 +370,7 @@ if args.report == 'main':
global faster_queries, slower_queries, tables
text = tableStart('Changes in performance')
text = tableStart('Changes in Performance')
columns = [
'Old,&nbsp;s', # 0
'New,&nbsp;s', # 1
@ -423,7 +427,7 @@ if args.report == 'main':
'Query' #7
]
text = tableStart('Unstable queries')
text = tableStart('Unstable Queries')
text += tableHeader(columns)
attrs = ['' for c in columns]
@ -444,9 +448,9 @@ if args.report == 'main':
add_unstable_queries()
skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
addSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)
addSimpleTable('Skipped Tests', ['Test', 'Reason'], skipped_tests_rows)
addSimpleTable('Test performance changes',
addSimpleTable('Test Performance Changes',
['Test', 'Ratio of speedup&nbsp;(-) or slowdown&nbsp;(+)', 'Queries', 'Total not OK', 'Changed perf', 'Unstable'],
tsvRows('report/test-perf-changes.tsv'))
@ -461,13 +465,12 @@ if args.report == 'main':
'Wall clock time,&nbsp;s', #1
'Total client time,&nbsp;s', #2
'Total queries', #3
'Ignored short queries', #4
'Longest query<br>(sum for all runs),&nbsp;s', #5
'Avg wall clock time<br>(sum for all runs),&nbsp;s', #6
'Shortest query<br>(sum for all runs),&nbsp;s', #7
'Longest query<br>(sum for all runs),&nbsp;s', #4
'Avg wall clock time<br>(sum for all runs),&nbsp;s', #5
'Shortest query<br>(sum for all runs),&nbsp;s', #6
]
text = tableStart('Test times')
text = tableStart('Test Times')
text += tableHeader(columns)
nominal_runs = 7 # FIXME pass this as an argument
@ -476,20 +479,20 @@ if args.report == 'main':
attrs = ['' for c in columns]
for r in rows:
anchor = f'{currentTableAnchor()}.{r[0]}'
if float(r[6]) > allowed_average_run_time * total_runs:
if float(r[5]) > allowed_average_run_time * total_runs:
# FIXME should be 15s max -- investigate parallel_insert
slow_average_tests += 1
attrs[6] = f'style="background: {color_bad}"'
attrs[5] = f'style="background: {color_bad}"'
errors_explained.append([f'<a href="#{anchor}">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
else:
attrs[6] = ''
attrs[5] = ''
if float(r[5]) > allowed_single_run_time * total_runs:
if float(r[4]) > allowed_single_run_time * total_runs:
slow_average_tests += 1
attrs[5] = f'style="background: {color_bad}"'
attrs[4] = f'style="background: {color_bad}"'
errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
else:
attrs[5] = ''
attrs[4] = ''
text += tableRow(r, attrs, anchor)
@ -498,74 +501,7 @@ if args.report == 'main':
add_test_times()
def add_benchmark_results():
if not os.path.isfile('benchmark/website-left.json'):
return
json_reports = [json.load(open(f'benchmark/website-{x}.json')) for x in ['left', 'right']]
stats = [next(iter(x.values()))["statistics"] for x in json_reports]
qps = [x["QPS"] for x in stats]
queries = [x["num_queries"] for x in stats]
errors = [x["num_errors"] for x in stats]
relative_diff = (qps[1] - qps[0]) / max(0.01, qps[0]);
times_diff = max(qps) / max(0.01, min(qps))
all_rows = []
header = ['Benchmark', 'Metric', 'Old', 'New', 'Relative difference', 'Times difference'];
attrs = ['' for x in header]
row = ['website', 'queries', f'{queries[0]:d}', f'{queries[1]:d}', '--', '--']
attrs[0] = 'rowspan=2'
all_rows.append([row, attrs])
attrs = ['' for x in header]
row = [None, 'queries/s', f'{qps[0]:.3f}', f'{qps[1]:.3f}', f'{relative_diff:.3f}', f'x{times_diff:.3f}']
if abs(relative_diff) > 0.1:
# More queries per second is better.
if relative_diff > 0.:
attrs[4] = f'style="background: {color_good}"'
else:
attrs[4] = f'style="background: {color_bad}"'
else:
attrs[4] = ''
all_rows.append([row, attrs]);
if max(errors):
all_rows[0][1][0] = "rowspan=3"
row = [''] * (len(header))
attrs = ['' for x in header]
attrs[0] = None
row[1] = 'errors'
row[2] = f'{errors[0]:d}'
row[3] = f'{errors[1]:d}'
row[4] = '--'
row[5] = '--'
if errors[0]:
attrs[2] += f' style="background: {color_bad}" '
if errors[1]:
attrs[3] += f' style="background: {color_bad}" '
all_rows.append([row, attrs])
text = tableStart('Concurrent benchmarks')
text += tableHeader(header)
for row, attrs in all_rows:
text += tableRow(row, attrs)
text += tableEnd()
global tables
tables.append(text)
try:
add_benchmark_results()
except:
report_errors.append(
traceback.format_exception_only(
*sys.exc_info()[:2])[-1])
pass
addSimpleTable('Metric changes',
addSimpleTable('Metric Changes',
['Metric', 'Old median value', 'New median value',
'Relative difference', 'Times difference'],
tsvRows('metrics/changes.tsv'))
@ -656,7 +592,7 @@ elif args.report == 'all-queries':
'Query', #9
]
text = tableStart('All query times')
text = tableStart('All Query Times')
text += tableHeader(columns)
attrs = ['' for c in columns]


@ -60,6 +60,31 @@ A maximum number of bytes (uncompressed data) that can be read from a table when
What to do when the volume of data read exceeds one of the limits: throw or break. By default, throw.
## max\_rows\_to\_read\_leaf {#max-rows-to-read-leaf}
The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.
A maximum number of rows that can be read from a local table on a leaf node when running a distributed query. While
distributed queries can issue multiple sub-queries to each shard (leaf), this limit is checked only at the read
stage on the leaf nodes and ignored at the results merging stage on the root node. For example, a cluster consists of 2 shards
and each shard contains a table with 100 rows. Then a distributed query that is supposed to read all the data from both
tables with the setting `max_rows_to_read=150` will fail, as in total that is 200 rows, while a query
with `max_rows_to_read_leaf=150` will succeed, since each leaf node reads at most 100 rows.
## max\_bytes\_to\_read\_leaf {#max-bytes-to-read-leaf}
A maximum number of bytes (uncompressed data) that can be read from a local table on a leaf node when running
a distributed query. While distributed queries can issue multiple sub-queries to each shard (leaf), this limit is
checked only at the read stage on the leaf nodes and ignored at the results merging stage on the root node.
For example, a cluster consists of 2 shards and each shard contains a table with 100 bytes of data.
Then a distributed query that is supposed to read all the data from both tables with the setting `max_bytes_to_read=150` will fail,
as in total that is 200 bytes, while a query with `max_bytes_to_read_leaf=150` will succeed, since each leaf node reads
at most 100 bytes.
## read\_overflow\_mode\_leaf {#read-overflow-mode-leaf}
What to do when the volume of data read exceeds one of the leaf limits: throw or break. By default, throw.
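As a usage sketch (not part of the original documentation), the leaf limits can be applied per query like any other setting. The example below uses the `clickhouse_driver` Python client; the server address and `some_distributed_table` are placeholders.

```python
# Illustrative sketch: per-query leaf limits via the clickhouse_driver client.
# Assumes a running ClickHouse server; host, port and table name are placeholders.
import clickhouse_driver

client = clickhouse_driver.Client(host='localhost', port=9000)

# Each leaf shard may read at most 150 rows / 150 bytes locally; the limits are
# not applied again when the initiator merges the per-shard results.
result = client.execute(
    'SELECT count() FROM some_distributed_table',
    settings={
        'max_rows_to_read_leaf': 150,
        'max_bytes_to_read_leaf': 150,
        'read_overflow_mode_leaf': 'throw',
    })
print(result)
```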
## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by}
A maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating.


@ -56,6 +56,32 @@
What to do when the volume of data read exceeds one of the limits: throw or break. By default: throw.
## max\_rows\_to\_read\_leaf {#max-rows-to-read-leaf}
The following restrictions can be checked on each block (rather than on each row). That is, the restrictions can be broken a little.
The maximum number of rows that can be read from a table on a remote server when running a distributed query.
Distributed queries can issue several sub-queries to each shard in the cluster, and then this limit is applied
when reading on the remote servers (including the initiator server) and is ignored on the initiator server
when merging the received results. For example, a cluster consists of 2 shards and each of them stores
a table with 100 rows. Then a distributed query that reads all the data from these tables with the
setting `max_rows_to_read=150` will throw an exception, since in total it reads 200 rows. But a query
with the setting `max_rows_to_read_leaf=150` will complete successfully, because each shard reads at most 100 rows.
## max\_bytes\_to\_read\_leaf {#max-bytes-to-read-leaf}
The maximum number of bytes (uncompressed data) that can be read from a table on a remote server when
running a distributed query. Distributed queries can issue several sub-queries to each shard in the
cluster, and then this limit is applied when reading on the remote servers (including the initiator server)
and is ignored on the initiator server when merging the received results. For example, a cluster consists
of 2 shards and each of them stores a table with 100 bytes. Then a distributed query that reads all the data from these tables
with the setting `max_bytes_to_read=150` will throw an exception, since in total it reads 200 bytes. But a query
with the setting `max_bytes_to_read_leaf=150` will complete successfully, because each shard reads at most 100 bytes.
## read\_overflow\_mode\_leaf {#read-overflow-mode-leaf}
What to do when the volume of data read on a remote server exceeds one of the limits: throw or break. By default: throw.
## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by}
The maximum number of unique keys received during aggregation. This setting lets you limit memory consumption when aggregating.


@ -80,7 +80,7 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
## Import Sample Dataset {#import-sample-dataset}
Now it's time to fill our ClickHouse server with some sample data. In this tutorial, we'll use Yandex's anonymized data. Metrica, the first service to run ClickHouse in production before it became open source (more about this in the [history section](../introduction/history.md)). There are [multiple ways to import the Yandex. Metrica dataset](example-datasets/metrica.md); for this tutorial, we'll use the most realistic one.
Now it's time to fill our ClickHouse server with some sample data. In this tutorial we'll use the anonymized data of Yandex.Metrica, the first service that ran ClickHouse in production before it became open source (for more on this, see the [ClickHouse history](../introduction/history.md)). There are [multiple ways to import the Yandex.Metrica dataset](example-datasets/metrica.md), and for the sake of the tutorial, we'll go with the most realistic one.
### Download and Extract Table Data {#download-and-extract-table-data}
@ -93,22 +93,22 @@ curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unx
### Create Tables {#create-tables}
As in most database management systems, ClickHouse logically groups tables into "databases". There is a `default` database, but we will create a new one named `tutorial`:
As in most database management systems, ClickHouse logically groups tables into databases. It contains a `default` database, but we will create a new database named `tutorial`:
``` bash
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
```
Compared to databases, the syntax for creating tables is much more complex (see the [reference](../sql-reference/statements/create.md)). In general a `CREATE TABLE` statement has to specify three key things:
Compared to creating a database, the syntax for creating tables is much more complex (see the [reference](../sql-reference/statements/create.md)). In general a `CREATE TABLE` statement has to specify three key things:
1. The name of the table to create.
2. Table schema, i.e. list of columns and their [data types](../sql-reference/data-types/index.md).
3. [Table engine](../engines/table-engines/index.md) and its settings, which determine all the details of how queries to this table will be physically executed.
2. The table schema, i.e. the column names and their corresponding [data types](../sql-reference/data-types/index.md).
3. The [table engine](../engines/table-engines/index.md) and its settings, which determine all the details of how queries against this table are physically executed.
YandexMetrica is a web analytics service, and the sample dataset doesn't cover its full functionality, so there are only two tables to create:
Yandex.Metrica is a web analytics service, and the sample dataset doesn't cover its full functionality, so there are only two tables to create:
- `hits` is a table with every action done by all users on all websites covered by the service.
- `visits` is a table that contains pre-built sessions instead of individual actions.
- The `hits` table contains every action done by all users on all websites covered by the service.
- The `visits` table contains pre-built sessions instead of individual actions.
Let's see and execute the real create table queries for these tables:
@ -453,9 +453,9 @@ SAMPLE BY intHash32(UserID)
SETTINGS index_granularity = 8192
```
You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
You can execute these queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance). Or, if you prefer, you can try some [alternative interfaces](../interfaces/index.md).
As we can see, `hits_v1` uses the [basic MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md), while `visits_v1` uses the [Collapsing](../engines/table-engines/mergetree-family/collapsingmergetree.md) variant.
As we can see, `hits_v1` uses the [basic MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md), while `visits_v1` uses the [CollapsingMergeTree](../engines/table-engines/mergetree-family/collapsingmergetree.md) variant.
### Import Data {#import-data}


@ -1,6 +1,6 @@
---
toc_priority: 33
toc_title: Introduction
toc_title: Aggregate Functions
---
# Aggregate Functions {#aggregate-functions}


@ -34,7 +34,7 @@
│ 2 │ 3 │
└───┴──────┘
Execute the query `SELECT multiIf(isNull(y) x, y < 3, y, NULL) FROM t_null`. Result:
Execute the query `SELECT multiIf(isNull(y), x, y < 3, y, NULL) FROM t_null`. Result:
┌─multiIf(isNull(y), x, less(y, 3), y, NULL)─┐
│ 1 │


@ -1504,7 +1504,18 @@ private:
{
/// Send data contained in the query.
ReadBufferFromMemory data_in(parsed_insert_query->data, parsed_insert_query->end - parsed_insert_query->data);
try
{
sendDataFrom(data_in, sample, columns_description);
}
catch (Exception & e)
{
/// The following query will use data from input
// "INSERT INTO data FORMAT TSV\n " < data.csv
// And may be pretty hard to debug, so add information about data source to make it easier.
e.addMessage("data for INSERT was parsed from query");
throw;
}
// Remember where the data ended. We use this info later to determine
// where the next query begins.
parsed_insert_query->end = data_in.buffer().begin() + data_in.count();
@ -1512,8 +1523,16 @@ private:
else if (!is_interactive)
{
/// Send data read from stdin.
try
{
sendDataFrom(std_in, sample, columns_description);
}
catch (Exception & e)
{
e.addMessage("data for INSERT was parsed from stdin");
throw;
}
}
else
throw Exception("No data to insert", ErrorCodes::NO_DATA_TO_INSERT);
}


@ -2,6 +2,8 @@
LIBRARY()
ADDINCL(
contrib/libs/icu/common
contrib/libs/icu/i18n
contrib/libs/pdqsort
)


@ -214,6 +214,9 @@ public:
void clear() { c_end = c_start; }
template <typename ... TAllocatorParams>
#if defined(__clang__)
ALWAYS_INLINE /// Better performance in clang build, worse performance in gcc build.
#endif
void reserve(size_t n, TAllocatorParams &&... allocator_params)
{
if (n > capacity())


@ -233,6 +233,10 @@ class IColumn;
M(UInt64, max_bytes_to_read, 0, "Limit on read bytes (after decompression) from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \
M(OverflowMode, read_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
\
M(UInt64, max_rows_to_read_leaf, 0, "Limit on read rows on the leaf nodes for distributed queries. Limit is applied for local reads only excluding the final merge stage on the root node.", 0) \
M(UInt64, max_bytes_to_read_leaf, 0, "Limit on read bytes (after decompression) on the leaf nodes for distributed queries. Limit is applied for local reads only excluding the final merge stage on the root node.", 0) \
M(OverflowMode, read_overflow_mode_leaf, OverflowMode::THROW, "What to do when the leaf limit is exceeded.", 0) \
\
M(UInt64, max_rows_to_group_by, 0, "", 0) \
M(OverflowModeGroupBy, group_by_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
M(UInt64, max_bytes_before_external_group_by, 0, "", 0) \


@ -34,11 +34,6 @@ public:
return name;
}
bool isStateful() const override
{
return true;
}
size_t getNumberOfArguments() const override
{
return 1;


@ -332,7 +332,7 @@ void AsynchronousMetrics::update()
ReadBufferFromFile buf("/proc/cpuinfo", 32768 /* buf_size */);
// We need the following lines:
// core id : 4
// processor : 4
// cpu MHz : 4052.941
// They contain tabs and are interspersed with other info.
int core_id = 0;
@ -346,7 +346,7 @@ void AsynchronousMetrics::update()
// It doesn't read the EOL itself.
++buf.position();
if (s.rfind("core id", 0) == 0)
if (s.rfind("processor", 0) == 0)
{
if (auto colon = s.find_first_of(':'))
{
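For reference, a small standalone sketch (in Python, just to show the parsing idea; it is not part of the patch) of picking the `processor` and `cpu MHz` lines out of `/proc/cpuinfo`:

```python
# Standalone sketch of the /proc/cpuinfo parsing idea: keep track of the current
# "processor" number and record the "cpu MHz" value reported for it.
# Linux-only; assumes the usual "key : value" layout of /proc/cpuinfo.
cpu_mhz = {}
core_id = 0
with open('/proc/cpuinfo') as f:
    for line in f:
        key, sep, value = line.partition(':')
        if not sep:
            continue
        key = key.strip()
        if key == 'processor':
            core_id = int(value)
        elif key == 'cpu MHz':
            cpu_mhz[core_id] = float(value)

print(cpu_mhz)  # e.g. {0: 4052.941, 1: 3800.0, ...}
```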


@ -1441,16 +1441,22 @@ void InterpreterSelectQuery::executeFetchColumns(
}
StreamLocalLimits limits;
SizeLimits leaf_limits;
std::shared_ptr<const EnabledQuota> quota;
/// Set the limits and quota for reading data, the speed and time of the query.
if (!options.ignore_limits)
{
limits = getLimitsForStorage(settings, options);
leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, settings.max_bytes_to_read_leaf,
settings.read_overflow_mode_leaf);
}
if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete))
quota = context->getQuota();
storage->read(query_plan, table_lock, metadata_snapshot, limits, std::move(quota),
storage->read(query_plan, table_lock, metadata_snapshot, limits, leaf_limits, std::move(quota),
required_columns, query_info, context, processing_stage, max_block_size, max_streams);
}
else


@ -2,6 +2,7 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/quoteString.h>
#include <common/find_symbols.h>
#include <ostream>
namespace DB


@ -1,5 +1,6 @@
#pragma once
#include <iosfwd>
#include <common/types.h>


@ -788,6 +788,15 @@ void Pipe::setLimits(const StreamLocalLimits & limits)
}
}
void Pipe::setLeafLimits(const SizeLimits & leaf_limits)
{
for (auto & processor : processors)
{
if (auto * source_with_progress = dynamic_cast<ISourceWithProgress *>(processor.get()))
source_with_progress->setLeafLimits(leaf_limits);
}
}
void Pipe::setQuota(const std::shared_ptr<const EnabledQuota> & quota)
{
for (auto & processor : processors)


@ -97,6 +97,7 @@ public:
/// Specify quotas and limits for every ISourceWithProgress.
void setLimits(const StreamLocalLimits & limits);
void setLeafLimits(const SizeLimits & leaf_limits);
void setQuota(const std::shared_ptr<const EnabledQuota> & quota);
/// Do not allow to change the table while the processors of pipe are alive.


@ -15,6 +15,7 @@ ReadFromStorageStep::ReadFromStorageStep(
TableLockHolder table_lock_,
StorageMetadataPtr metadata_snapshot_,
StreamLocalLimits & limits_,
SizeLimits & leaf_limits_,
std::shared_ptr<const EnabledQuota> quota_,
StoragePtr storage_,
const Names & required_columns_,
@ -26,6 +27,7 @@ ReadFromStorageStep::ReadFromStorageStep(
: table_lock(std::move(table_lock_))
, metadata_snapshot(std::move(metadata_snapshot_))
, limits(limits_)
, leaf_limits(leaf_limits_)
, quota(std::move(quota_))
, storage(std::move(storage_))
, required_columns(required_columns_)
@ -86,6 +88,16 @@ ReadFromStorageStep::ReadFromStorageStep(
pipe.setLimits(limits);
/**
* Leaf size limits should be applied only for local processing of distributed queries.
* Such limits allow controlling the read stage on leaf nodes and excluding the merging stage.
* Consider the case when a distributed query needs to read from multiple shards. Then the leaf
* limits will be applied on the shards only (including the root node) but will be ignored
* at the results merging stage.
*/
if (!storage->isRemote())
pipe.setLeafLimits(leaf_limits);
if (quota)
pipe.setQuota(quota);


@ -26,6 +26,7 @@ public:
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
StoragePtr storage,
const Names & required_columns,
@ -47,6 +48,7 @@ private:
TableLockHolder table_lock;
StorageMetadataPtr metadata_snapshot;
StreamLocalLimits limits;
SizeLimits leaf_limits;
std::shared_ptr<const EnabledQuota> quota;
StoragePtr storage;


@ -33,6 +33,7 @@ public:
/// Implementation for methods from ISourceWithProgress.
void setLimits(const StreamLocalLimits & limits_) final { stream->setLimits(limits_); }
void setLeafLimits(const SizeLimits &) final { }
void setQuota(const std::shared_ptr<const EnabledQuota> & quota_) final { stream->setQuota(quota_); }
void setProcessListElement(QueryStatus * elem) final { stream->setProcessListElement(elem); }
void setProgressCallback(const ProgressCallback & callback) final { stream->setProgressCallback(callback); }

View File

@ -93,6 +93,12 @@ void SourceWithProgress::progress(const Progress & value)
}
}
if (!leaf_limits.check(rows_to_check_limit, progress.read_bytes, "rows or bytes to read on leaf node",
ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES))
{
cancel();
}
size_t total_rows = progress.total_rows_to_read;
constexpr UInt64 profile_events_update_period_microseconds = 10 * 1000; // 10 milliseconds

View File

@ -17,6 +17,9 @@ public:
/// Set limitations that are checked on each chunk.
virtual void setLimits(const StreamLocalLimits & limits_) = 0;
/// Set limitations that are checked on each chunk for distributed queries on leaf nodes.
virtual void setLeafLimits(const SizeLimits & leaf_limits_) = 0;
/// Set the quota. If you set a quota on the amount of raw data,
/// then you should also set mode = LIMITS_TOTAL to LocalLimits with setLimits.
virtual void setQuota(const std::shared_ptr<const EnabledQuota> & quota_) = 0;
@ -46,6 +49,7 @@ public:
SourceWithProgress(Block header, bool enable_auto_progress);
void setLimits(const StreamLocalLimits & limits_) final { limits = limits_; }
void setLeafLimits(const SizeLimits & leaf_limits_) final { leaf_limits = leaf_limits_; }
void setQuota(const std::shared_ptr<const EnabledQuota> & quota_) final { quota = quota_; }
void setProcessListElement(QueryStatus * elem) final { process_list_elem = elem; }
void setProgressCallback(const ProgressCallback & callback) final { progress_callback = callback; }
@ -59,6 +63,7 @@ protected:
private:
StreamLocalLimits limits;
SizeLimits leaf_limits;
std::shared_ptr<const EnabledQuota> quota;
ProgressCallback progress_callback;
QueryStatus * process_list_elem = nullptr;

View File

@ -97,6 +97,7 @@ void IStorage::read(
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
const Names & column_names,
const SelectQueryInfo & query_info,
@ -106,7 +107,7 @@ void IStorage::read(
unsigned num_streams)
{
auto read_step = std::make_unique<ReadFromStorageStep>(
std::move(table_lock), std::move(metadata_snapshot), limits, std::move(quota), shared_from_this(),
std::move(table_lock), std::move(metadata_snapshot), limits, leaf_limits, std::move(quota), shared_from_this(),
column_names, query_info, std::move(context), processed_stage, max_block_size, num_streams);
read_step->setStepDescription("Read from " + getName());

View File

@ -288,6 +288,7 @@ public:
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
const Names & column_names,
const SelectQueryInfo & query_info,

View File

@ -69,20 +69,29 @@ TemporaryLiveViewCleaner::~TemporaryLiveViewCleaner()
void TemporaryLiveViewCleaner::addView(const std::shared_ptr<StorageLiveView> & view)
{
if (!view->isTemporary())
if (!view->isTemporary() || background_thread_should_exit)
return;
auto current_time = std::chrono::system_clock::now();
auto time_of_next_check = current_time + view->getTimeout();
std::lock_guard lock{mutex};
if (background_thread_should_exit)
return;
/// If views.empty() the background thread isn't running or it's going to stop right now.
bool background_thread_is_running = !views.empty();
/// Keep the vector `views` sorted by time of next check.
StorageAndTimeOfCheck storage_and_time_of_check{view, time_of_next_check};
views.insert(std::upper_bound(views.begin(), views.end(), storage_and_time_of_check), storage_and_time_of_check);
if (!background_thread.joinable())
if (!background_thread_is_running)
{
if (background_thread.joinable())
background_thread.join();
background_thread = ThreadFromGlobalPool{&TemporaryLiveViewCleaner::backgroundThreadFunc, this};
}
background_thread_wake_up.notify_one();
}
@ -95,7 +104,7 @@ void TemporaryLiveViewCleaner::backgroundThreadFunc()
{
background_thread_wake_up.wait_until(lock, views.front().time_of_check);
if (background_thread_should_exit)
return;
break;
auto current_time = std::chrono::system_clock::now();
std::vector<StorageID> storages_to_drop;
@ -112,18 +121,22 @@ void TemporaryLiveViewCleaner::backgroundThreadFunc()
continue;
}
++it;
if (current_time < time_of_check)
break; /// It's not the time to check it yet.
auto storage_id = storage->getStorageID();
if (!storage->hasUsers() && DatabaseCatalog::instance().getDependencies(storage_id).empty())
{
/// No users and no dependencies so we can remove the storage.
storages_to_drop.emplace_back(storage_id);
it = views.erase(it);
continue;
}
/// Calculate time of the next check.
time_of_check = current_time + storage->getTimeout();
auto storage_id = storage->getStorageID();
if (storage->hasUsers() || !DatabaseCatalog::instance().getDependencies(storage_id).empty())
continue;
storages_to_drop.emplace_back(storage_id);
++it;
}
lock.unlock();
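The addView() change above keeps a single background thread alive only while there are registered views: once the vector becomes empty the thread exits, and the next addView() must join the finished std::thread object before assigning a new one. A hedged, generic sketch of that join-before-restart pattern follows; the class and member names are stand-ins, not the actual cleaner.

#include <mutex>
#include <thread>
#include <vector>

/// Illustrative stand-in, not the real TemporaryLiveViewCleaner.
class RestartableWorker
{
public:
    void addTask(int task)
    {
        std::lock_guard<std::mutex> lock(mutex);
        if (should_exit)
            return;

        /// The thread is considered running only while the queue is non-empty.
        bool was_running = !tasks.empty();
        tasks.push_back(task);

        if (!was_running)
        {
            /// A previous thread may have drained the queue and finished;
            /// it must be joined before a new std::thread is assigned.
            if (worker.joinable())
                worker.join();
            worker = std::thread([this] { run(); });
        }
    }

    ~RestartableWorker()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            should_exit = true;
        }
        if (worker.joinable())
            worker.join();
    }

private:
    void run()
    {
        std::unique_lock<std::mutex> lock(mutex);
        while (!should_exit && !tasks.empty())
            tasks.pop_back();   /// placeholder for the real per-task work
        /// Returning here lets the next addTask() restart the thread.
    }

    std::mutex mutex;
    std::vector<int> tasks;
    std::thread worker;
    bool should_exit = false;
};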

View File

@ -1823,16 +1823,27 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace(
}
void MergeTreeData::renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction)
bool MergeTreeData::renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction)
{
auto removed = renameTempPartAndReplace(part, increment, out_transaction);
if (!removed.empty())
throw Exception("Added part " + part->name + " covers " + toString(removed.size())
+ " existing part(s) (including " + removed[0]->name + ")", ErrorCodes::LOGICAL_ERROR);
if (out_transaction && &out_transaction->data != this)
throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.",
ErrorCodes::LOGICAL_ERROR);
DataPartsVector covered_parts;
{
auto lock = lockParts();
if (!renameTempPartAndReplace(part, increment, out_transaction, lock, &covered_parts))
return false;
}
if (!covered_parts.empty())
throw Exception("Added part " + part->name + " covers " + toString(covered_parts.size())
+ " existing part(s) (including " + covered_parts[0]->name + ")", ErrorCodes::LOGICAL_ERROR);
return true;
}
void MergeTreeData::renameTempPartAndReplace(
bool MergeTreeData::renameTempPartAndReplace(
MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction,
std::unique_lock<std::mutex> & lock, DataPartsVector * out_covered_parts)
{
@ -1863,7 +1874,7 @@ void MergeTreeData::renameTempPartAndReplace(
part_info.mutation = 0; /// it's equal to min_block by default
part_name = part->getNewName(part_info);
}
else
else /// Parts from ReplicatedMergeTree already have names
part_name = part->name;
LOG_TRACE(log, "Renaming temporary part {} to {}.", part->relative_path, part_name);
@ -1886,7 +1897,7 @@ void MergeTreeData::renameTempPartAndReplace(
if (covering_part)
{
LOG_WARNING(log, "Tried to add obsolete part {} covered by {}", part_name, covering_part->getNameWithState());
return;
return false;
}
/// All checks are passed. Now we can rename the part on disk.
@ -1931,6 +1942,8 @@ void MergeTreeData::renameTempPartAndReplace(
for (DataPartPtr & covered_part : covered_parts)
out_covered_parts->emplace_back(std::move(covered_part));
}
return true;
}
MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
@ -1968,6 +1981,22 @@ void MergeTreeData::removePartsFromWorkingSet(const MergeTreeData::DataPartsVect
}
}
void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove)
{
auto lock = lockParts();
for (const auto & part : remove)
{
auto it_part = data_parts_by_info.find(part->info);
if (it_part == data_parts_by_info.end())
throw Exception("Part " + part->getNameWithState() + " not found in data_parts", ErrorCodes::LOGICAL_ERROR);
modifyPartState(part, IMergeTreeDataPart::State::Temporary);
/// Erase immediately
data_parts_indexes.erase(it_part);
}
}
void MergeTreeData::removePartsFromWorkingSet(const DataPartsVector & remove, bool clear_without_timeout, DataPartsLock * acquired_lock)
{
auto lock = (acquired_lock) ? DataPartsLock() : lockParts();
@ -3087,6 +3116,25 @@ MergeTreeData::DataPartPtr MergeTreeData::getAnyPartInPartition(
return nullptr;
}
void MergeTreeData::Transaction::rollbackPartsToTemporaryState()
{
if (!isEmpty())
{
std::stringstream ss;
ss << " Rollbacking parts state to temporary and removing from working set:";
for (const auto & part : precommitted_parts)
ss << " " << part->relative_path;
ss << ".";
LOG_DEBUG(data.log, "Undoing transaction.{}", ss.str());
data.removePartsFromWorkingSetImmediatelyAndSetTemporaryState(
DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()));
}
clear();
}
void MergeTreeData::Transaction::rollback()
{
if (!isEmpty())
@ -3251,7 +3299,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
}
if (!does_storage_policy_allow_same_disk)
throw Exception(
"Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy", ErrorCodes::BAD_ARGUMENTS);
"Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy",
ErrorCodes::BAD_ARGUMENTS);
String dst_part_name = src_part->getNewName(dst_part_info);
String tmp_dst_part_name = tmp_part_prefix + dst_part_name;

View File

@ -225,6 +225,10 @@ public:
void rollback();
/// Immediately remove parts from table's data_parts set and change part
/// state to temporary. Useful for new parts which are not present in the table.
void rollbackPartsToTemporaryState();
size_t size() const { return precommitted_parts.size(); }
bool isEmpty() const { return precommitted_parts.empty(); }
@ -426,7 +430,8 @@ public:
/// If out_transaction != nullptr, adds the part in the PreCommitted state (the part will be added to the
/// active set later with out_transaction->commit()).
/// Else, commits the part immediately.
void renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr);
/// Returns true if the part was added. Returns false if the part is covered by a bigger part.
bool renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr);
/// The same as renameTempPartAndAdd but the block range of the part can contain existing parts.
/// Returns all parts covered by the added part (in ascending order).
@ -435,10 +440,16 @@ public:
MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr);
/// Low-level version of previous one, doesn't lock mutex
void renameTempPartAndReplace(
bool renameTempPartAndReplace(
MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction, DataPartsLock & lock,
DataPartsVector * out_covered_parts = nullptr);
/// Remove parts from the working set immediately (without waiting for the background
/// process). Transfer part state to temporary. Has very limited usage, only
/// for new parts which are not already present in the table.
void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove);
/// Removes parts from the working set parts.
/// Parts in add must already be in data_parts with PreCommitted, Committed, or Outdated states.
/// If clear_without_timeout is true, the parts will be deleted at once, or during the next call to
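A minimal hedged illustration of the contract documented above, with stubbed types rather than the real MergeTreeData API: the boolean result distinguishes "the part became active" from "the part was obsolete or already written", and only the caller can decide whether the latter is benign.

#include <iostream>
#include <string>

/// Stub that models only the documented return value; the real method also
/// renames the directory on disk and updates the in-memory working set.
bool renameTempPartAndAddSketch(const std::string & part_name, bool covered_by_existing_part)
{
    if (covered_by_existing_part)
        return false;   /// nothing was added, the part is obsolete
    return true;        /// the part is now (pre)committed
}

int main()
{
    const std::string part_name = "all_1_1_0";   /// hypothetical part name

    if (renameTempPartAndAddSketch(part_name, /*covered_by_existing_part=*/ true))
        std::cout << part_name << " added to the working set\n";
    else
        std::cout << part_name << " skipped: covered by a bigger part or written concurrently\n";
}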

View File

@ -583,6 +583,14 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
{
std::atomic<size_t> total_rows {0};
SizeLimits limits;
if (settings.read_overflow_mode == OverflowMode::THROW && settings.max_rows_to_read)
limits = SizeLimits(settings.max_rows_to_read, 0, settings.read_overflow_mode);
SizeLimits leaf_limits;
if (settings.read_overflow_mode_leaf == OverflowMode::THROW && settings.max_rows_to_read_leaf)
leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, 0, settings.read_overflow_mode_leaf);
auto process_part = [&](size_t part_index)
{
auto & part = parts[part_index];
@ -610,18 +618,14 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
if (!ranges.ranges.empty())
{
if (settings.read_overflow_mode == OverflowMode::THROW && settings.max_rows_to_read)
if (limits.max_rows || leaf_limits.max_rows)
{
/// Fail fast if estimated number of rows to read exceeds the limit
auto current_rows_estimate = ranges.getRowsCount();
size_t prev_total_rows_estimate = total_rows.fetch_add(current_rows_estimate);
size_t total_rows_estimate = current_rows_estimate + prev_total_rows_estimate;
if (total_rows_estimate > settings.max_rows_to_read)
throw Exception(
"Limit for rows (controlled by 'max_rows_to_read' setting) exceeded, max rows: "
+ formatReadableQuantity(settings.max_rows_to_read)
+ ", estimated rows to read (at least): " + formatReadableQuantity(total_rows_estimate),
ErrorCodes::TOO_MANY_ROWS);
limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read' setting)", ErrorCodes::TOO_MANY_ROWS);
leaf_limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read_leaf' setting)", ErrorCodes::TOO_MANY_ROWS);
}
parts_with_ranges[part_index] = std::move(ranges);
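The fail-fast check above accumulates per-part row estimates from several worker threads, so it relies on an atomic fetch_add and compares the running total against the configured limits before any rows are actually read. A hedged, self-contained sketch of that accumulation pattern (simplified types, illustrative limit and sizes):

#include <atomic>
#include <cstddef>
#include <iostream>

int main()
{
    const size_t max_rows_to_read = 1000000;   /// illustrative limit
    std::atomic<size_t> total_rows{0};

    /// Called once per data part, possibly from several threads.
    auto exceeds_limit = [&](size_t estimated_rows_in_part)
    {
        /// fetch_add returns the previous value, so each caller obtains
        /// a consistent running total without extra locking.
        size_t prev = total_rows.fetch_add(estimated_rows_in_part);
        return prev + estimated_rows_in_part > max_rows_to_read;
    };

    for (size_t rows : {400000, 400000, 400000})
        if (exceeds_limit(rows))
            std::cout << "would fail fast before reading, estimated "
                      << total_rows.load() << " rows\n";
}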

View File

@ -27,6 +27,9 @@ namespace ErrorCodes
extern const int INSERT_WAS_DEDUPLICATED;
extern const int TIMEOUT_EXCEEDED;
extern const int NO_ACTIVE_REPLICAS;
extern const int DUPLICATE_DATA_PART;
extern const int PART_IS_TEMPORARILY_LOCKED;
extern const int LOGICAL_ERROR;
}
@ -96,7 +99,8 @@ void ReplicatedMergeTreeBlockOutputStream::checkQuorumPrecondition(zkutil::ZooKe
auto quorum_status = quorum_status_future.get();
if (quorum_status.error != Coordination::Error::ZNONODE)
throw Exception("Quorum for previous write has not been satisfied yet. Status: " + quorum_status.data, ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE);
throw Exception("Quorum for previous write has not been satisfied yet. Status: " + quorum_status.data,
ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE);
/// Both checks are implicitly made also later (otherwise there would be a race condition).
@ -116,7 +120,6 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block)
{
last_block_is_duplicate = false;
/// TODO Is it possible to not lock the table structure here?
storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event);
auto zookeeper = storage.getZooKeeper();
@ -214,6 +217,17 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
metadata_snapshot->check(part->getColumns());
assertSessionIsNotExpired(zookeeper);
String temporary_part_relative_path = part->relative_path;
/// There is one case when we need to retry the transaction in a loop.
/// But don't do it too many times - just as a defensive measure.
size_t loop_counter = 0;
constexpr size_t max_iterations = 10;
bool is_already_existing_part = false;
while (true)
{
/// Obtain incremental block number and lock it. The lock holds our intention to add the block to the filesystem.
/// We remove the lock just after renaming the part. In case of exception, block number will be marked as abandoned.
/// Also, make deduplication check. If a duplicate is detected, no nodes are created.
@ -223,16 +237,16 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
String block_id_path = deduplicate_block ? storage.zookeeper_path + "/blocks/" + block_id : "";
auto block_number_lock = storage.allocateBlockNumber(part->info.partition_id, zookeeper, block_id_path);
if (!block_number_lock)
{
LOG_INFO(log, "Block with ID {} already exists; ignoring it.", block_id);
part->is_duplicate = true;
last_block_is_duplicate = true;
ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks);
return;
}
/// Prepare transaction to ZooKeeper
/// It will simultaneously add information about the part to all the necessary places in ZooKeeper and remove block_number_lock.
Coordination::Requests ops;
Int64 block_number = block_number_lock->getNumber();
Int64 block_number = 0;
String existing_part_name;
if (block_number_lock)
{
is_already_existing_part = false;
block_number = block_number_lock->getNumber();
/// Set part attributes according to part_number. Prepare an entry for log.
@ -240,26 +254,19 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
part->info.max_block = block_number;
part->info.level = 0;
String part_name = part->getNewName(part->info);
part->name = part_name;
part->name = part->getNewName(part->info);
/// Will add log entry about new part.
StorageReplicatedMergeTree::LogEntry log_entry;
log_entry.type = StorageReplicatedMergeTree::LogEntry::GET_PART;
log_entry.create_time = time(nullptr);
log_entry.source_replica = storage.replica_name;
log_entry.new_part_name = part_name;
log_entry.new_part_name = part->name;
log_entry.quorum = quorum;
log_entry.block_id = block_id;
log_entry.new_part_type = part->getType();
/// Simultaneously add information about the part to all the necessary places in ZooKeeper and remove block_number_lock.
/// Information about the part.
Coordination::Requests ops;
storage.getCommitPartOps(ops, part, block_id_path);
/// Replication log.
ops.emplace_back(zkutil::makeCreateRequest(
storage.zookeeper_path + "/log/log-",
log_entry.toString(),
@ -268,14 +275,15 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
/// Deletes the information that the block number is used for writing.
block_number_lock->getUnlockOps(ops);
/** If you need a quorum - create a node in which the quorum is monitored.
* (If such a node already exists, then someone has managed to make another quorum record at the same time, but for it the quorum has not yet been reached.
/** If we need a quorum - create a node in which the quorum is monitored.
* (If such a node already exists, then someone has managed to make another quorum record at the same time,
* but for it the quorum has not yet been reached.
* You can not do the next quorum record at this time.)
*/
if (quorum)
{
ReplicatedMergeTreeQuorumEntry quorum_entry;
quorum_entry.part_name = part_name;
quorum_entry.part_name = part->name;
quorum_entry.required_number_of_replicas = quorum;
quorum_entry.replicas.insert(storage.replica_name);
@ -298,7 +306,8 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
storage.replica_path + "/is_active",
quorum_info.is_active_node_version));
/// Unfortunately, just checking the above is not enough, because `is_active` node can be deleted and reappear with the same version.
/// Unfortunately, just checking the above is not enough, because `is_active`
/// node can be deleted and reappear with the same version.
/// But then the `host` value will change. We will check this.
/// It's great that these two nodes change in the same transaction (see MergeTreeRestartingThread).
ops.emplace_back(
@ -306,9 +315,69 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
storage.replica_path + "/host",
quorum_info.host_node_version));
}
}
else
{
is_already_existing_part = true;
/// This block was already written to some replica. Get the part name for it.
/// Note: race condition with DROP PARTITION operation is possible. User will get "No node" exception and it is Ok.
existing_part_name = zookeeper->get(storage.zookeeper_path + "/blocks/" + block_id);
/// If it exists on our replica, ignore it.
if (storage.getActiveContainingPart(existing_part_name))
{
LOG_INFO(log, "Block with ID {} already exists locally as part {}; ignoring it.", block_id, existing_part_name);
part->is_duplicate = true;
last_block_is_duplicate = true;
ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks);
return;
}
LOG_INFO(log, "Block with ID {} already exists on other replicas as part {}; will write it locally with that name.",
block_id, existing_part_name);
/// If it does not exist, we will write a new part with the existing name.
/// Note that it may also appear on filesystem right now in PreCommitted state due to concurrent inserts of the same data.
/// It will be checked when we try to rename the directory.
part->name = existing_part_name;
part->info = MergeTreePartInfo::fromPartName(existing_part_name, storage.format_version);
/// Used only for exception messages.
block_number = part->info.min_block;
/// Do not check for duplicate on commit to ZK.
block_id_path.clear();
}
/// Information about the part.
storage.getCommitPartOps(ops, part, block_id_path);
MergeTreeData::Transaction transaction(storage); /// If you can not add a part to ZK, we'll remove it back from the working set.
storage.renameTempPartAndAdd(part, nullptr, &transaction);
bool renamed = false;
try
{
renamed = storage.renameTempPartAndAdd(part, nullptr, &transaction);
}
catch (const Exception & e)
{
if (e.code() != ErrorCodes::DUPLICATE_DATA_PART
&& e.code() != ErrorCodes::PART_IS_TEMPORARILY_LOCKED)
throw;
}
if (!renamed)
{
if (is_already_existing_part)
{
LOG_INFO(log, "Part {} is duplicate and it is already written by concurrent request or fetched; ignoring it.", part->name);
return;
}
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Part with name {} is already written by concurrent request."
" It should not happen for non-duplicate data parts because unique names are assigned for them. It's a bug",
part->name);
}
Coordination::Responses responses;
Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT
@ -319,6 +388,7 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
storage.merge_selecting_task->schedule();
/// Lock nodes have been already deleted, do not delete them in destructor
if (block_number_lock)
block_number_lock->assumeUnlocked();
}
else if (multi_code == Coordination::Error::ZCONNECTIONLOSS
@ -340,18 +410,30 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
if (multi_code == Coordination::Error::ZNODEEXISTS && deduplicate_block && failed_op_path == block_id_path)
{
/// Block with the same id have just appeared in table (or other replica), rollback the insertion.
LOG_INFO(log, "Block with ID {} already exists; ignoring it (removing part {})", block_id, part->name);
/// Block with the same id has just appeared in the table (or on another replica); roll back the insertion.
LOG_INFO(log, "Block with ID {} already exists (it was just appeared). Renaming part {} back to {}. Will retry write.",
block_id, part->name, temporary_part_relative_path);
part->is_duplicate = true;
transaction.rollback();
last_block_is_duplicate = true;
ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks);
/// We will try to add this part again on the new iteration as it's just a new part.
/// So remove it from storage parts set immediately and transfer state to temporary.
transaction.rollbackPartsToTemporaryState();
part->is_temp = true;
part->renameTo(temporary_part_relative_path, false);
/// If this part appeared on another replica then it's better to try to write it locally one more time. If it's our part
/// then it will be ignored on the next iteration.
++loop_counter;
if (loop_counter == max_iterations)
{
part->is_duplicate = true; /// Part is duplicate, just remove it from local FS
throw Exception("Too many transaction retries - it may indicate an error", ErrorCodes::DUPLICATE_DATA_PART);
}
continue;
}
else if (multi_code == Coordination::Error::ZNODEEXISTS && failed_op_path == quorum_info.status_path)
{
transaction.rollback();
throw Exception("Another quorum insert has been already started", ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE);
}
else
@ -376,8 +458,17 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
+ Coordination::errorMessage(multi_code), ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR);
}
break;
}
if (quorum)
{
if (is_already_existing_part)
{
/// We get duplicate part without fetch
storage.updateQuorum(part->name);
}
/// We are waiting for quorum to be satisfied.
LOG_TRACE(log, "Waiting for quorum");
@ -397,14 +488,15 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(
ReplicatedMergeTreeQuorumEntry quorum_entry(value);
/// If the node has time to disappear, and then appear again for the next insert.
if (quorum_entry.part_name != part_name)
if (quorum_entry.part_name != part->name)
break;
if (!event->tryWait(quorum_timeout_ms))
throw Exception("Timeout while waiting for quorum", ErrorCodes::TIMEOUT_EXCEEDED);
}
/// And what if it is possible that the current replica at this time has ceased to be active and the quorum is marked as failed and deleted?
/// And what if it is possible that the current replica at this time has ceased to be active
/// and the quorum is marked as failed and deleted?
String value;
if (!zookeeper->tryGet(storage.replica_path + "/is_active", value, nullptr)
|| value != quorum_info.is_active_node_value)
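Since the commit path above now retries the whole ZooKeeper transaction when a concurrent replica registers the same block, its overall control flow can be summarized by the bounded-retry sketch below. This is a hedged, self-contained summary: the enum, the stub and the helper names are illustrative, not the real ClickHouse API; only the max_iterations bound and the retry idea come from the patch.

#include <cstddef>
#include <stdexcept>

enum class CommitResult { Ok, IgnoredDuplicate, DuplicateBlockAppeared };

/// Stub for one attempt: allocate (or reuse) a block number, rename the temporary
/// part into the working set and run the ZooKeeper multi-request.
CommitResult tryCommitOnce(size_t attempt)
{
    return attempt < 2 ? CommitResult::DuplicateBlockAppeared : CommitResult::Ok;
}

void commitPartSketch()
{
    constexpr size_t max_iterations = 10;   /// defensive bound, as in the patch
    size_t loop_counter = 0;

    while (true)
    {
        CommitResult result = tryCommitOnce(loop_counter);

        if (result == CommitResult::Ok || result == CommitResult::IgnoredDuplicate)
            break;                          /// committed, or a harmless local duplicate

        /// A concurrent insert registered the same block id in ZooKeeper:
        /// roll the part back to the temporary state, rename its directory back
        /// to the temporary name and retry the whole transaction.
        if (++loop_counter == max_iterations)
            throw std::runtime_error("Too many transaction retries - it may indicate an error");
    }
}

int main() { commitPartSketch(); }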

View File

@ -113,6 +113,7 @@ namespace ErrorCodes
extern const int ALL_REPLICAS_LOST;
extern const int REPLICA_STATUS_CHANGED;
extern const int CANNOT_ASSIGN_ALTER;
extern const int DIRECTORY_ALREADY_EXISTS;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -697,7 +698,9 @@ void StorageReplicatedMergeTree::drop()
if (has_metadata_in_zookeeper)
{
auto zookeeper = tryGetZooKeeper();
/// The table can be shut down and the restarting thread may not be active,
/// so calling StorageReplicatedMergeTree::getZooKeeper() won't suffice.
auto zookeeper = global_context.getZooKeeper();
/// If probably there is metadata in ZooKeeper, we don't allow to drop the table.
if (is_readonly || !zookeeper)
@ -3314,6 +3317,15 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora
part->renameTo("detached/" + part_name, true);
}
}
catch (const Exception & e)
{
/// The same part is being written right now (but probably it's not committed yet).
/// We will check the need for fetch later.
if (e.code() == ErrorCodes::DIRECTORY_ALREADY_EXISTS)
return false;
throw;
}
catch (...)
{
if (!to_detached)
@ -4773,9 +4785,11 @@ void StorageReplicatedMergeTree::fetchPartition(
missing_parts.clear();
for (const String & part : parts_to_fetch)
{
bool fetched = false;
try
{
fetchPart(part, metadata_snapshot, best_replica_path, true, 0, zookeeper);
fetched = fetchPart(part, metadata_snapshot, best_replica_path, true, 0, zookeeper);
}
catch (const DB::Exception & e)
{
@ -4784,8 +4798,10 @@ void StorageReplicatedMergeTree::fetchPartition(
throw;
LOG_INFO(log, e.displayText());
missing_parts.push_back(part);
}
if (!fetched)
missing_parts.push_back(part);
}
++try_no;

View File

@ -0,0 +1,43 @@
#include <Interpreters/Context.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTFunction.h>
#include <Storages/StorageNull.h>
#include <TableFunctions/parseColumnsListForTableFunction.h>
#include <TableFunctions/ITableFunction.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <TableFunctions/TableFunctionNull.h>
#include <Interpreters/evaluateConstantExpression.h>
#include "registerTableFunctions.h"
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
StoragePtr TableFunctionNull::executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const
{
if (const auto * function = ast_function->as<ASTFunction>())
{
auto arguments = function->arguments->children;
if (arguments.size() != 1)
throw Exception("Table function '" + getName() + "' requires 'structure'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
auto structure = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[0], context)->as<ASTLiteral>()->value.safeGet<String>();
ColumnsDescription columns = parseColumnsListFromString(structure, context);
auto res = StorageNull::create(StorageID(getDatabaseName(), table_name), columns, ConstraintsDescription());
res->startup();
return res;
}
throw Exception("Table function '" + getName() + "' requires 'structure'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
}
void registerTableFunctionNull(TableFunctionFactory & factory)
{
factory.registerFunction<TableFunctionNull>();
}
}

View File

@ -0,0 +1,24 @@
#pragma once
#include <TableFunctions/ITableFunction.h>
#include <Core/Types.h>
namespace DB
{
/* null(structure) - creates a temporary null storage
*
* Used for testing purposes, for the convenience of writing tests and demos.
*/
class TableFunctionNull : public ITableFunction
{
public:
static constexpr auto name = "null";
std::string getName() const override { return name; }
private:
StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
const char * getStorageTypeName() const override { return "Null"; }
};
}

View File

@ -11,6 +11,7 @@ void registerTableFunctions()
registerTableFunctionMerge(factory);
registerTableFunctionRemote(factory);
registerTableFunctionNumbers(factory);
registerTableFunctionNull(factory);
registerTableFunctionZeros(factory);
registerTableFunctionFile(factory);
registerTableFunctionURL(factory);

View File

@ -11,6 +11,7 @@ class TableFunctionFactory;
void registerTableFunctionMerge(TableFunctionFactory & factory);
void registerTableFunctionRemote(TableFunctionFactory & factory);
void registerTableFunctionNumbers(TableFunctionFactory & factory);
void registerTableFunctionNull(TableFunctionFactory & factory);
void registerTableFunctionZeros(TableFunctionFactory & factory);
void registerTableFunctionFile(TableFunctionFactory & factory);
void registerTableFunctionURL(TableFunctionFactory & factory);

View File

@ -19,6 +19,7 @@ SRCS(
TableFunctionInput.cpp
TableFunctionMerge.cpp
TableFunctionMySQL.cpp
TableFunctionNull.cpp
TableFunctionNumbers.cpp
TableFunctionRemote.cpp
TableFunctionURL.cpp

View File

@ -431,7 +431,7 @@
},
"Integration tests (release)": {
"required_build_properties": {
"compiler": "clang-11",
"compiler": "gcc-10",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",

View File

@ -10,3 +10,20 @@
(1,1)
(3,2)
(5,2)
-------finalizeAggregation should not be stateful (issue #14847)-------
2 62
3 87
4 112
5 137
SELECT
n,
`finalizeAggregation(s)`
FROM
(
SELECT
n,
finalizeAggregation(s)
FROM test_00808_push_down_with_finalizeAggregation
WHERE (n <= 5) AND (n >= 2)
)
WHERE (n >= 2) AND (n <= 5)

View File

@ -36,3 +36,39 @@ SELECT arrayJoin(arrayMap(x -> x, arraySort(groupArray((ts, n))))) AS k FROM (
DROP TABLE IF EXISTS test_00808;
SELECT '-------finalizeAggregation should not be stateful (issue #14847)-------';
DROP TABLE IF EXISTS test_00808_push_down_with_finalizeAggregation;
CREATE TABLE test_00808_push_down_with_finalizeAggregation ENGINE = AggregatingMergeTree
ORDER BY n AS
SELECT
intDiv(number, 25) AS n,
avgState(number) AS s
FROM numbers(2500)
GROUP BY n;
SET force_primary_key = 1, enable_debug_queries = 1, enable_optimize_predicate_expression = 1;
SELECT *
FROM
(
SELECT
n,
finalizeAggregation(s)
FROM test_00808_push_down_with_finalizeAggregation
)
WHERE (n >= 2) AND (n <= 5);
ANALYZE SELECT *
FROM
(
SELECT
n,
finalizeAggregation(s)
FROM test_00808_push_down_with_finalizeAggregation
)
WHERE (n >= 2) AND (n <= 5);
DROP TABLE IF EXISTS test_00808_push_down_with_finalizeAggregation;

View File

@ -39,7 +39,7 @@
23.0
24.0
=== Try load data from datapage_v2.snappy.parquet
Code: 33. DB::Ex---tion: Error while reading Parquet data: IOError: Not yet implemented: Unsupported encoding.
Code: 33. DB::Ex---tion: Error while reading Parquet data: IOError: Not yet implemented: Unsupported encoding.: data for INSERT was parsed from stdin
=== Try load data from fixed_length_decimal_1.parquet
1.0
@ -168,22 +168,22 @@ Code: 33. DB::Ex---tion: Error while reading Parquet data: IOError: Not yet impl
23 UNITED KINGDOM 3 eans boost carefully special requests. accounts are. carefull
24 UNITED STATES 1 y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be
=== Try load data from nested_lists.snappy.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin
=== Try load data from nested_maps.snappy.parquet
Code: 33. DB::Ex---tion: Error while reading Parquet data: NotImplemented: Reading lists of structs from Parquet files not yet supported: key_value: list<key_value: struct<key: string not null, value: struct<key_value: list<key_value: struct<key: int32 not null, value: bool not null> not null> not null>> not null> not null
Code: 33. DB::Ex---tion: Error while reading Parquet data: NotImplemented: Reading lists of structs from Parquet files not yet supported: key_value: list<key_value: struct<key: string not null, value: struct<key_value: list<key_value: struct<key: int32 not null, value: bool not null> not null> not null>> not null> not null: data for INSERT was parsed from stdin
=== Try load data from nonnullable.impala.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin
=== Try load data from nullable.impala.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin
=== Try load data from nulls.snappy.parquet
Code: 8. DB::Ex---tion: Column "b_c_int" is not presented in input data
Code: 8. DB::Ex---tion: Column "b_c_int" is not presented in input data: data for INSERT was parsed from stdin
=== Try load data from repeated_no_annotation.parquet
Code: 8. DB::Ex---tion: Column "number" is not presented in input data
Code: 8. DB::Ex---tion: Column "number" is not presented in input data: data for INSERT was parsed from stdin
=== Try load data from userdata1.parquet
1454486129 1 Amanda Jordan ajordan0@com.com Female 1.197.201.2 6759521864920116 Indonesia 3/8/1971 49756.53 Internal Auditor 1E+02

View File

@ -0,0 +1,6 @@
Hello, world
---
Hello, world
Hello, world
Hello, world
Hello, world

View File

@ -0,0 +1,25 @@
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS r2;
CREATE TABLE r1 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/r', 'r1') ORDER BY x;
CREATE TABLE r2 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/r', 'r2') ORDER BY x;
SYSTEM STOP REPLICATED SENDS;
INSERT INTO r1 VALUES ('Hello, world');
SELECT * FROM r1;
SELECT * FROM r2;
INSERT INTO r2 VALUES ('Hello, world');
SELECT '---';
SELECT * FROM r1;
SELECT * FROM r2;
SYSTEM START REPLICATED SENDS;
SYSTEM SYNC REPLICA r1;
SYSTEM SYNC REPLICA r2;
SELECT * FROM r1;
SELECT * FROM r2;
DROP TABLE r1;
DROP TABLE r2;

View File

@ -0,0 +1,292 @@
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
1 1.0 1.00 1.000000 1.0000000
8 8.0 8.00 8.000000 8.0000000
27 27.0 27.00 27.000000 27.0000000
64 64.0 64.00 64.000000 64.0000000
125 125.0 125.00 125.000000 125.0000000
216 216.0 216.00 216.000000 216.0000000
343 343.0 343.00 343.000000 343.0000000
512 512.0 512.00 512.000000 512.0000000
729 729.0 729.00 729.000000 729.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
-0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
-0 0.0 0.00 0.000000 0.0000000
-1 -1.0 -1.00 -1.000000 -1.0000000
-4 -4.0 -4.00 -4.000000 -4.0000000
-9 -9.0 -9.00 -9.000000 -9.0000000
-16 -16.0 -16.00 -16.000000 -16.0000000
-25 -25.0 -25.00 -25.000000 -25.0000000
-36 -36.0 -36.00 -36.000000 -36.0000000
-49 -49.0 -49.00 -49.000000 -49.0000000
-64 -64.0 -64.00 -64.000000 -64.0000000
-81 -81.0 -81.00 -81.000000 -81.0000000
0 0 0 0
4294967295 4294967295 4294967295 4294967295
8589934588 8589934588 8589934588 8589934588
12884901879 12884901879 12884901879 12884901879
17179869168 17179869168 17179869168 17179869168
21474836455 21474836455 21474836455 21474836455
25769803740 25769803740 25769803740 25769803740
30064771023 30064771023 30064771023 30064771023
34359738304 34359738304 34359738304 34359738304
38654705583 38654705583 38654705583 38654705583
0 0 0 0
18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
18446744073709551612 18446744073709551612 18446744073709551612 18446744073709551612
18446744073709551607 18446744073709551607 18446744073709551607 18446744073709551607
18446744073709551600 18446744073709551600 18446744073709551600 18446744073709551600
18446744073709551591 18446744073709551591 18446744073709551591 18446744073709551591
18446744073709551580 18446744073709551580 18446744073709551580 18446744073709551580
18446744073709551567 18446744073709551567 18446744073709551567 18446744073709551567
18446744073709551552 18446744073709551552 18446744073709551552 18446744073709551552
18446744073709551535 18446744073709551535 18446744073709551535 18446744073709551535
0 0 0 0
115792089237316195423570985008687907853269984665640564039457584007913129639935 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
115792089237316195423570985008687907853269984665640564039457584007913129639932 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
115792089237316195423570985008687907853269984665640564039457584007913129639927 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
115792089237316195423570985008687907853269984665640564039457584007913129639920 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
115792089237316195423570985008687907853269984665640564039457584007913129639911 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
115792089237316195423570985008687907853269984665640564039457584007913129639900 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
115792089237316195423570985008687907853269984665640564039457584007913129639887 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
115792089237316195423570985008687907853269984665640564039457584007913129639872 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
115792089237316195423570985008687907853269984665640564039457584007913129639855 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
-1 -1 -1 115792089237316195423570985008687907853269984665640564039457584007913129639935
-4 -4 -4 115792089237316195423570985008687907853269984665640564039457584007913129639932
-9 -9 -9 115792089237316195423570985008687907853269984665640564039457584007913129639927
-16 -16 -16 115792089237316195423570985008687907853269984665640564039457584007913129639920
-25 -25 -25 115792089237316195423570985008687907853269984665640564039457584007913129639911
-36 -36 -36 115792089237316195423570985008687907853269984665640564039457584007913129639900
-49 -49 -49 115792089237316195423570985008687907853269984665640564039457584007913129639887
-64 -64 -64 115792089237316195423570985008687907853269984665640564039457584007913129639872
-81 -81 -81 115792089237316195423570985008687907853269984665640564039457584007913129639855
0 0 0 0
1 0 0 0
2 0 0 0
3 0 0 0
4 0 0 0
5 0 0 0
6 0 0 0
7 0 0 0
8 0 0 0
9 0 0 0
0 0 0 0
-1 0 0 0
-2 0 0 0
-3 0 0 0
-4 0 0 0
-5 0 0 0
-6 0 0 0
-7 0 0 0
-8 0 0 0
-9 0 0 0
2499500025000000 2499500025000000 2499500025000000 2499500025000000.00
0 0 0 0.00

View File

@ -0,0 +1,48 @@
SELECT toUInt32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt256(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt128(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt256(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt32(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt256(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt32(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt128(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toInt256(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat32(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toUInt256(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toInt32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toInt64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toInt128(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toInt256(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT toFloat64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number;
SELECT number y, toInt128(number) - y, toInt256(number) - y, toUInt256(number) - y FROM numbers_mt(10) ORDER BY number;
SELECT -number y, toInt128(number) + y, toInt256(number) + y, toUInt256(number) + y FROM numbers_mt(10) ORDER BY number;
SET allow_experimental_bigint_types = 1;
DROP TABLE IF EXISTS t;
CREATE TABLE t (x UInt64, i256 Int256, u256 UInt256, d256 Decimal256(2)) ENGINE = Memory;
INSERT INTO t SELECT number * number * number AS x, x AS i256, x AS u256, x AS d256 FROM numbers(10000);
SELECT sum(x), sum(i256), sum(u256), sum(d256) FROM t;
INSERT INTO t SELECT -number * number * number AS x, x AS i256, x AS u256, x AS d256 FROM numbers(10000);
SELECT sum(x), sum(i256), sum(u256), sum(d256) FROM t;
DROP TABLE t;

View File

@ -0,0 +1,6 @@
100
100
100
100
100000
100000

View File

@ -0,0 +1,29 @@
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=1; -- { serverError 158 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1; -- { serverError 307 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=100;
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1000;
SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=1; -- { serverError 158 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1; -- { serverError 307 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=100;
SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1000;
DROP TABLE IF EXISTS test_local;
DROP TABLE IF EXISTS test_distributed;
CREATE TABLE test_local (date Date, value UInt32) ENGINE = MergeTree(date, date, 8192);
CREATE TABLE test_distributed AS test_local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), test_local, rand());
INSERT INTO test_local SELECT '2000-08-01', number as value from numbers(50000);
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read_leaf = 40000; -- { serverError 158 }
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read_leaf = 40000; -- { serverError 307 }
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read = 60000; -- { serverError 158 }
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read_leaf = 60000;
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read = 100000; -- { serverError 307 }
SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read_leaf = 100000;
DROP TABLE IF EXISTS test_local;
DROP TABLE IF EXISTS test_distributed;

View File

@ -0,0 +1,10 @@
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950

View File

@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
NUM_REPLICAS=10
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
DROP TABLE IF EXISTS r$i;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01459_manual_write_ro_replicas/r', 'r$i') ORDER BY x;
"
done
function thread {
for x in {0..99}; do
$CLICKHOUSE_CLIENT --query "INSERT INTO r$1 SELECT $x % $NUM_REPLICAS = $1 ? $x - 1 : $x" # Make some records duplicates so they will be written by other replicas
done
}
for i in $(seq 1 $NUM_REPLICAS); do
thread $i &
done
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
DROP TABLE IF EXISTS r$i;
"
done

View File

@ -0,0 +1,10 @@
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950
100 0 99 4950

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bash
set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
NUM_REPLICAS=10
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
DROP TABLE IF EXISTS r$i;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01459_manual_write_ro_replicas_quorum/r', 'r$i') ORDER BY x;
"
done
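# insert_quorum=5 requires each INSERT to be confirmed by 5 replicas; while a previous quorum
# insert is still in flight, new inserts are rejected with the error matched below, so the loop
# simply retries until the write goes through.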
function thread {
for x in {0..99}; do
while true; do
$CLICKHOUSE_CLIENT --insert_quorum 5 --query "INSERT INTO r$1 SELECT $x" 2>&1 | grep -qF 'Quorum for previous write has not been satisfied yet' || break
done
done
}
for i in $(seq 1 $NUM_REPLICAS); do
thread $i &
done
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
DROP TABLE IF EXISTS r$i;
"
done

View File

@ -0,0 +1 @@
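-- The null() table function discards whatever is inserted into it, so this presumably only checks
-- that an INSERT SELECT from numbers_mt() into a table function runs to completion without errors.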
INSERT INTO function null('number UInt64') SELECT * FROM numbers_mt(10000);

View File

@ -0,0 +1,15 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS data"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE data (key Int) Engine=Memory()"
${CLICKHOUSE_CLIENT} --input_format_parallel_parsing=0 -q "INSERT INTO data SELECT key FROM input('key Int') FORMAT TSV" <<<10
# With '\n...' after the query, clickhouse-client prefers the data embedded in the query over data from stdin and produces a rather cryptic message:
# Code: 27. DB::Exception: Cannot parse input: expected '\n' before: ' ': (at row 1)
# For TSV that is still tolerable, but for RowBinary it becomes:
# Code: 33. DB::Exception: Cannot read all data. Bytes read: 1. Bytes expected: 4.
# So check that the exception message contains the data source.
${CLICKHOUSE_CLIENT} --input_format_parallel_parsing=0 -q "INSERT INTO data FORMAT TSV " <<<2 |& grep -F -c 'data for INSERT was parsed from query'
${CLICKHOUSE_CLIENT} -q "SELECT * FROM data"

View File

@ -143,3 +143,4 @@
01474_bad_global_join
01473_event_time_microseconds
01461_query_start_time_microseconds
01455_shard_leaf_max_rows_bytes_to_read

View File

@ -1,5 +1,8 @@
v20.8.3.18-stable 2020-09-18
v20.8.2.3-stable 2020-09-08
v20.7.3.7-stable 2020-09-18
v20.7.2.30-stable 2020-08-31
v20.6.7.4-stable 2020-09-18
v20.6.6.7-stable 2020-09-11
v20.6.5.8-stable 2020-09-03
v20.6.4.44-stable 2020-08-20
@ -16,6 +19,7 @@ v20.4.5.36-stable 2020-06-10
v20.4.4.18-stable 2020-05-26
v20.4.3.16-stable 2020-05-23
v20.4.2.9-stable 2020-05-12
v20.3.19.4-lts 2020-09-18
v20.3.18.10-lts 2020-09-08
v20.3.17.173-lts 2020-08-15
v20.3.16.165-lts 2020-08-08

View File

@ -68,8 +68,8 @@ def make_tar_file_for_table(clickhouse_data_path, db_name, table_name,
USAGE_EXAMPLES = '''
examples:
\ts3uploader --dataset-name some_ds --access-key-id XXX --secret-access-key YYY --clickhouse-data-path /opt/clickhouse/ --table-name default.some_tbl --bucket-name some-bucket
\ts3uploader --dataset-name some_ds --access-key-id XXX --secret-access-key YYY --file-name some_ds.tsv.xz --bucket-name some-bucket
\t./s3uploader --dataset-name some_ds --access-key-id XXX --secret-access-key YYY --clickhouse-data-path /opt/clickhouse/ --table-name default.some_tbl --bucket-name some-bucket
\t./s3uploader --dataset-name some_ds --access-key-id XXX --secret-access-key YYY --file-path some_ds.tsv.xz --bucket-name some-bucket --s3-path /path/to/
'''
if __name__ == "__main__":