diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 504e2b7edd4..4fd6303d024 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -96,10 +96,14 @@ endif ()
 
 if (USE_INTERNAL_POCO_LIBRARY)
+    set (save_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+    set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
     set (_save ${ENABLE_TESTS})
     set (ENABLE_TESTS 0)
     set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1)
     add_subdirectory (poco)
     unset (CMAKE_DISABLE_FIND_PACKAGE_ZLIB)
     set (ENABLE_TESTS ${_save})
+    set (CMAKE_CXX_FLAGS ${save_CMAKE_CXX_FLAGS})
+    set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS})
 endif ()
diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake
index e01b66369c4..507e1266c44 100644
--- a/dbms/cmake/version.cmake
+++ b/dbms/cmake/version.cmake
@@ -1,6 +1,6 @@
 # This strings autochanged from release_lib.sh:
-set(VERSION_DESCRIBE v1.1.54332-testing)
-set(VERSION_REVISION 54332)
+set(VERSION_DESCRIBE v1.1.54333-testing)
+set(VERSION_REVISION 54333)
 # end of autochange
 
 set (VERSION_MAJOR 1)
diff --git a/dbms/src/Columns/ColumnAggregateFunction.h b/dbms/src/Columns/ColumnAggregateFunction.h
index 202440ef12c..a9ec5ed486c 100644
--- a/dbms/src/Columns/ColumnAggregateFunction.h
+++ b/dbms/src/Columns/ColumnAggregateFunction.h
@@ -83,7 +83,7 @@ private:
     }
 
     ColumnAggregateFunction(const ColumnAggregateFunction & src_)
-        : arenas(src_.arenas), func(src_.func), src(src_.getPtr())
+        : arenas(src_.arenas), func(src_.func), src(src_.getPtr()), data(src_.data.begin(), src_.data.end())
    {
    }
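
The hunk above fixes a classic hand-written copy-constructor bug: the `data` member was never copied, so copies of the column silently came out empty. A minimal self-contained sketch of the same pitfall (hypothetical `Column` type, not the real ClickHouse class):

#include <cassert>
#include <vector>

struct Column
{
    std::vector<int> data;

    Column() = default;

    /// Before the fix, the initializer list stopped at the other members and
    /// `data` stayed default-initialized in the copy. Copying the element
    /// range explicitly, as the patch does, restores value semantics.
    Column(const Column & src) : data(src.data.begin(), src.data.end()) {}
};

int main()
{
    Column a;
    a.data = {1, 2, 3};
    Column b(a);                /// with the buggy constructor, b.data would be empty
    assert(b.data.size() == 3);
}
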
diff --git a/dbms/src/Common/config_build.cpp.in b/dbms/src/Common/config_build.cpp.in
index 797ed4bd8e2..c9fe56634a6 100644
--- a/dbms/src/Common/config_build.cpp.in
+++ b/dbms/src/Common/config_build.cpp.in
@@ -22,17 +22,21 @@ const char * auto_config_build[]
     "BUILD_COMPILE_DEFINITIONS", "@BUILD_COMPILE_DEFINITIONS@",
     "BUILD_INCLUDE_DIRECTORIES", "@BUILD_INCLUDE_DIRECTORIES@",
     "STATIC", "@USE_STATIC_LIBRARIES@",
-    "USE_CAPNP", "@USE_CAPNP@",
+    "USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@",
+    "USE_INTERNAL_MEMCPY", "@USE_INTERNAL_MEMCPY@",
+    "USE_GLIBC_COMPATIBILITY", "@GLIBC_COMPATIBILITY@",
+    "USE_JEMALLOC", "@USE_JEMALLOC@",
+    "USE_TCMALLOC", "@USE_TCMALLOC@",
+    "USE_UNWIND", "@USE_UNWIND@",
     "USE_ICU", "@USE_ICU@",
     "USE_MYSQL", "@USE_MYSQL@",
     "USE_RE2_ST", "@USE_RE2_ST@",
     "USE_VECTORCLASS", "@USE_VECTORCLASS@",
     "USE_RDKAFKA", "@USE_RDKAFKA@",
     "USE_CAPNP", "@USE_CAPNP@",
-    "USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@",
-    "USE_Poco_DataODBC", "@Poco_DataODBC_FOUND",
-    "USE_Poco_MongoDB", "@Poco_MongoDB_FOUND",
-    "USE_Poco_NetSSL", "@Poco_NetSSL_FOUND",
+    "USE_Poco_DataODBC", "@Poco_DataODBC_FOUND@",
+    "USE_Poco_MongoDB", "@Poco_MongoDB_FOUND@",
+    "USE_Poco_NetSSL", "@Poco_NetSSL_FOUND@",
 
     nullptr, nullptr
 };
diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp b/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp
index b8323eee874..4692dc6df57 100644
--- a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp
+++ b/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp
@@ -67,7 +67,7 @@ const Block & TotalsHavingBlockInputStream::getTotals()
             || totals_mode == TotalsMode::AFTER_HAVING_INCLUSIVE
             || (totals_mode == TotalsMode::AFTER_HAVING_AUTO
                 && static_cast<double>(passed_keys) / total_keys >= auto_include_threshold))
-            addToTotals(current_totals, overflow_aggregates, nullptr);
+            addToTotals(overflow_aggregates, nullptr);
     }
 
     totals = header.cloneWithColumns(std::move(current_totals));
@@ -110,7 +110,7 @@ Block TotalsHavingBlockInputStream::readImpl()
 
         if (filter_column_name.empty())
         {
-            addToTotals(current_totals, block, nullptr);
+            addToTotals(block, nullptr);
         }
         else
         {
@@ -127,9 +127,9 @@ Block TotalsHavingBlockInputStream::readImpl()
             /// Add values to `totals` (if it was not already done).
             if (totals_mode == TotalsMode::BEFORE_HAVING)
-                addToTotals(current_totals, block, nullptr);
+                addToTotals(block, nullptr);
             else
-                addToTotals(current_totals, block, filter_description.data);
+                addToTotals(block, filter_description.data);
 
             /// Filter the block by expression in HAVING.
             size_t columns = finalized.columns();
@@ -155,11 +155,10 @@
 }
 
-void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Block & block, const IColumn::Filter * filter)
+void TotalsHavingBlockInputStream::addToTotals(const Block & block, const IColumn::Filter * filter)
 {
-    bool need_init = totals.empty();
+    bool need_init = !arena;
 
-    ArenaPtr arena;
     if (need_init)
         arena = std::make_shared<Arena>();
@@ -174,7 +173,7 @@ void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Bl
             {
                 MutableColumnPtr new_column = current.type->createColumn();
                 current.type->insertDefaultInto(*new_column);
-                totals.emplace_back(std::move(new_column));
+                current_totals.emplace_back(std::move(new_column));
             }
             continue;
         }
@@ -193,11 +192,11 @@ void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Bl
             function->create(data);
             target->getData().push_back(data);
-            totals.emplace_back(std::move(target));
+            current_totals.emplace_back(std::move(target));
         }
         else
         {
-            auto & target = typeid_cast<ColumnAggregateFunction &>(*totals[i]);
+            auto & target = typeid_cast<ColumnAggregateFunction &>(*current_totals[i]);
             function = target.getAggregateFunction().get();
             data = target.getData()[0];
         }
diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h b/dbms/src/DataStreams/TotalsHavingBlockInputStream.h
index 5c0c3dd7cf2..1cc257983cb 100644
--- a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h
+++ b/dbms/src/DataStreams/TotalsHavingBlockInputStream.h
@@ -51,9 +51,11 @@ private:
     /// Here, total values are accumulated. After the work is finished, they will be placed in IProfilingBlockInputStream::totals.
     MutableColumns current_totals;
 
+    /// Arena for aggregate function states in totals.
+    ArenaPtr arena;
+
     /// If filter == nullptr - add all rows. Otherwise, only the rows that pass the filter (HAVING).
-    void addToTotals(MutableColumns & totals, const Block & block, const IColumn::Filter * filter);
+    void addToTotals(const Block & block, const IColumn::Filter * filter);
 };
 
 }
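
The reason `arena` moves from a local variable in addToTotals() to a class member is lifetime: aggregate-function states appended to `current_totals` keep pointing into the Arena they were allocated from, so the Arena must live at least as long as the accumulated totals, not just for one call. A rough sketch of that ownership rule, with a toy bump allocator standing in for the real Arena (names hypothetical):

#include <cstddef>
#include <cstring>
#include <iostream>
#include <memory>
#include <vector>

/// Toy stand-in for DB::Arena: owns raw buffers and hands out pointers into them.
struct Arena
{
    std::vector<std::unique_ptr<char[]>> chunks;

    char * alloc(std::size_t n)
    {
        chunks.emplace_back(new char[n]);
        return chunks.back().get();
    }
};

using ArenaPtr = std::shared_ptr<Arena>;

class TotalsAccumulator
{
public:
    void add(const char * value)
    {
        if (!arena)                     /// created once, like `need_init = !arena` above
            arena = std::make_shared<Arena>();

        char * copy = arena->alloc(std::strlen(value) + 1);
        std::strcpy(copy, value);
        totals.push_back(copy);         /// `totals` now points into the arena
    }

    void print() const
    {
        for (const char * s : totals)
            std::cout << s << '\n';
    }

private:
    /// A member, not a local in add(): a local ArenaPtr would free the buffers
    /// on return and leave `totals` dangling.
    ArenaPtr arena;
    std::vector<const char *> totals;
};

int main()
{
    TotalsAccumulator acc;
    acc.add("Hello");
    acc.add("World");
    acc.print();
}
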
diff --git a/dbms/src/Storages/StorageDictionary.cpp b/dbms/src/Storages/StorageDictionary.cpp
index c836731986b..2eb96faf733 100644
--- a/dbms/src/Storages/StorageDictionary.cpp
+++ b/dbms/src/Storages/StorageDictionary.cpp
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <Interpreters/evaluateConstantExpression.h>
 #include
 #include
 #include
@@ -97,6 +98,7 @@ void registerStorageDictionary(StorageFactory & factory)
             throw Exception("Storage Dictionary requires single parameter: name of dictionary",
                 ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
 
+        args.engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(args.engine_args[0], args.local_context);
         String dictionary_name = typeid_cast<const ASTLiteral &>(*args.engine_args[0]).value.safeGet<String>();
 
         const auto & dictionary = args.context.getExternalDictionaries().getDictionary(dictionary_name);
diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp
index 076cfd4d520..b97178a750d 100644
--- a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp
+++ b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp
@@ -141,9 +141,9 @@ void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, con
                 columns[j++]->insertDefault();
             }
 
-            columns[j++]->insert(part->getColumnCompressedSize(column.name));
-            columns[j++]->insert(part->getColumnUncompressedSize(column.name));
-            columns[j++]->insert(part->getColumnMrkSize(column.name));
+            columns[j++]->insert(static_cast<UInt64>(part->getColumnCompressedSize(column.name)));
+            columns[j++]->insert(static_cast<UInt64>(part->getColumnUncompressedSize(column.name)));
+            columns[j++]->insert(static_cast<UInt64>(part->getColumnMrkSize(column.name)));
 
             if (has_state_column)
                 columns[j++]->insert(part->stateString());
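
The casts above pin the inserted value to a fixed 64-bit width. A plausible reading (our inference; the patch itself states no rationale): the size getters return a platform-dependent type such as size_t, and passing that to an interface that accepts several integer widths is ambiguous on ABIs where size_t exactly matches none of them. A tiny illustration of that kind of ambiguity:

#include <cstddef>
#include <cstdint>
#include <iostream>

void insert(uint64_t v) { std::cout << "u64: " << v << '\n'; }
void insert(int64_t v)  { std::cout << "i64: " << v << '\n'; }

int main()
{
    std::size_t n = 42;
    // insert(n);                       /// ambiguous where size_t matches neither overload exactly
    insert(static_cast<uint64_t>(n));   /// explicit width: unambiguous everywhere
}
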
diff --git a/dbms/tests/clickhouse-test b/dbms/tests/clickhouse-test
index fd38d065754..1a3f89aafbd 100755
--- a/dbms/tests/clickhouse-test
+++ b/dbms/tests/clickhouse-test
@@ -89,6 +89,8 @@ def main(args):
     base_dir = os.path.abspath(args.queries)
     tmp_dir = os.path.abspath(args.tmp)
 
+    passed_total = 0
+    skipped_total = 0
     failures_total = 0
 
     # Keep same default values as in queries/0_stateless/00000_sh_lib.sh
@@ -114,6 +116,7 @@ def main(args):
             print("\nRunning {} tests.\n".format(suite))
 
             failures = 0
+            failures_chain = 0
             if 'stateful' in suite and not is_data_present():
                 print("Won't run stateful tests because test data wasn't loaded. See README.txt.")
                 continue
@@ -156,9 +159,11 @@ def main(args):
                 if not args.zookeeper and 'zookeeper' in name:
                     report_testcase.append(et.Element("skipped", attrib = {"message": "no zookeeper"}))
                     print(MSG_SKIPPED + " - no zookeeper")
+                    skipped_total += 1
                 elif not args.shard and 'shard' in name:
                     report_testcase.append(et.Element("skipped", attrib = {"message": "no shard"}))
                     print(MSG_SKIPPED + " - no shard")
+                    skipped_total += 1
                 else:
                     disabled_file = os.path.join(suite_dir, name) + '.disabled'
@@ -191,7 +196,7 @@ def main(args):
                         failure = et.Element("failure", attrib = {"message": "Timeout"})
                         report_testcase.append(failure)
 
-                        failures = failures + 1
+                        failures += 1
                         print("{0} - Timeout!".format(MSG_FAIL))
                     else:
                         stdout = open(stdout_file, 'r').read() if os.path.exists(stdout_file) else ''
@@ -207,7 +212,8 @@ def main(args):
                             stdout_element.text = et.CDATA(stdout)
                             report_testcase.append(stdout_element)
 
-                            failures = failures + 1
+                            failures += 1
+                            failures_chain += 1
                             print("{0} - return code {1}".format(MSG_FAIL, proc.returncode))
 
                             if stderr:
@@ -227,7 +233,8 @@ def main(args):
                             stderr_element.text = et.CDATA(stderr)
                             report_testcase.append(stderr_element)
 
-                            failures = failures + 1
+                            failures += 1
+                            failures_chain += 1
                             print("{0} - having stderror:\n{1}".format(MSG_FAIL, stderr.encode('utf-8')))
                         elif 'Exception' in stdout:
                             failure = et.Element("error", attrib = {"message": "having exception"})
@@ -237,7 +244,8 @@ def main(args):
                             stdout_element.text = et.CDATA(stdout)
                             report_testcase.append(stdout_element)
 
-                            failures = failures + 1
+                            failures += 1
+                            failures_chain += 1
                             print("{0} - having exception:\n{1}".format(MSG_FAIL, stdout.encode('utf-8')))
                         elif not os.path.isfile(reference_file):
                             skipped = et.Element("skipped", attrib = {"message": "no reference file"})
@@ -260,9 +268,11 @@ def main(args):
                                 stdout_element.text = et.CDATA(remove_control_characters(diff))
                                 report_testcase.append(stdout_element)
 
-                                failures = failures + 1
+                                failures += 1
                                 print("{0} - result differs with reference:\n{1}".format(MSG_FAIL, diff.encode('utf-8')))
                             else:
+                                passed_total += 1
+                                failures_chain = 0
                                 print(MSG_OK)
                                 if os.path.exists(stdout_file):
                                     os.remove(stdout_file)
@@ -276,25 +286,28 @@ def main(args):
 
                    error = et.Element("error", attrib = {"type": exc_type.__name__, "message": str(exc_value)})
                    report_testcase.append(error)
 
-                    failures = failures + 1
+                    failures += 1
                    print("{0} - Test internal error: {1}\n{2}".format(MSG_FAIL, exc_type.__name__, exc_value))
                 finally:
                     dump_report(args.output, suite, name, report_testcase)
 
+                if failures_chain >= 20:
+                    break
+
             failures_total = failures_total + failures
 
     if failures_total > 0:
-        print(colored("\nHaving {0} errors!".format(failures_total), "red", attrs=["bold"]))
+        print(colored("\nHaving {failures_total} errors! {passed_total} tests passed. {skipped_total} tests skipped.".format(passed_total = passed_total, skipped_total = skipped_total, failures_total = failures_total), "red", attrs=["bold"]))
         sys.exit(1)
     else:
-        print(colored("\nAll tests passed.", "green", attrs=["bold"]))
+        print(colored("\n{passed_total} tests passed. {skipped_total} tests skipped.".format(passed_total = passed_total, skipped_total = skipped_total), "green", attrs=["bold"]))
         sys.exit(0)
 
 
 if __name__ == '__main__':
     parser = ArgumentParser(description = 'ClickHouse functional tests')
-    parser.add_argument('-q', '--queries', default = 'queries', help = 'Path to queries dir')
-    parser.add_argument('--tmp', default = 'queries', help = 'Path to tmp dir')
+    parser.add_argument('-q', '--queries', help = 'Path to queries dir')
+    parser.add_argument('--tmp', help = 'Path to tmp dir')
     parser.add_argument('-b', '--binary', default = 'clickhouse', help = 'Main clickhouse binary')
     parser.add_argument('-c', '--client', help = 'Client program')
     parser.add_argument('--clientconfig', help = 'Client config (if you use not default ports)')
@@ -314,6 +327,16 @@ if __name__ == '__main__':
     group.add_argument('--no-shard', action = 'store_false', default = None, dest = 'shard', help = 'Do not run shard related tests')
 
     args = parser.parse_args()
+
+    if args.queries is None and os.path.isdir('queries'):
+        args.queries = 'queries'
+        if args.tmp is None:
+            args.tmp = args.queries
+    else:
+        args.queries = '/usr/share/clickhouse-test/queries'
+        if args.tmp is None:
+            args.tmp = '/tmp/clickhouse-test'
+
     if args.client is None:
         args.client = args.binary + '-client'
     if args.clientconfig:
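
The new `failures_chain` counter acts as a circuit breaker: hard failures (non-zero return code, stderr output, an exception in stdout) increment it, a passing test resets it to zero, and the suite stops early once 20 such failures occur in a row. A compact sketch of the same policy (C++ for illustration; the runner itself is Python):

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

/// Runs tests in order, giving up after `max_chain` consecutive failures,
/// the policy the runner above applies with `failures_chain >= 20`.
std::size_t run_with_circuit_breaker(const std::vector<std::function<bool()>> & tests, std::size_t max_chain)
{
    std::size_t failures = 0;
    std::size_t chain = 0;

    for (const auto & test : tests)
    {
        if (test())
        {
            chain = 0;                  /// a pass resets the chain
        }
        else
        {
            ++failures;
            if (++chain >= max_chain)   /// too many failures in a row: stop early
            {
                std::cout << "Aborting after " << chain << " consecutive failures.\n";
                break;
            }
        }
    }

    return failures;
}

int main()
{
    std::vector<std::function<bool()>> tests
        = {[] { return true; }, [] { return false; }, [] { return false; }};

    std::cout << run_with_circuit_breaker(tests, 2) << " failures total\n";
}
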
diff --git a/dbms/tests/integration/README.md b/dbms/tests/integration/README.md
index 06e39b47901..94cfd29ef40 100644
--- a/dbms/tests/integration/README.md
+++ b/dbms/tests/integration/README.md
@@ -7,11 +7,18 @@ This directory contains tests that involve several ClickHouse instances, custom
 Prerequisites:
 * Ubuntu 14.04 (Trusty).
 * [docker](https://www.docker.com/community-edition#/download). Minimum required API version: 1.25, check with `docker version`.
+
+You must install the latest Docker from
+https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository
+Don't use Docker from your system repository.
+
 * [pip](https://pypi.python.org/pypi/pip). To install: `sudo apt-get install python-pip`
 * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
 * [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml`
 
 If you want to run the tests under a non-privileged user, you must add this user to the `docker` group: `sudo usermod -aG docker $USER` and re-login.
+(You must close all your sessions; for example, restart your computer.)
+To check that you have access to Docker, run `docker ps`.
 Run the tests with the `pytest` command. To select which tests to run, use: `pytest -k <test name pattern>`
diff --git a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference b/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
index 0f8b6de9166..28e6a775e09 100644
--- a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
+++ b/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
@@ -1,6 +1,3 @@
-*** Without PARTITION BY and ORDER BY ***
-1
-2
 *** Replicated with sampling ***
 1
 *** Replacing with implicit version ***
diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference b/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference
new file mode 100644
index 00000000000..18c1d950992
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference
@@ -0,0 +1,4 @@
+123456789 Hello 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
+234567890 World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
+
+0 World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql b/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql
new file mode 100644
index 00000000000..1b7c9df2d24
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql
@@ -0,0 +1,46 @@
+SET group_by_two_level_threshold = 1, max_threads = 1;
+
+SELECT
+    k,
+    anyLast(s)
+FROM
+(
+    SELECT
+        123456789 AS k,
+        'Hello 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890' AS s
+    UNION ALL
+    SELECT
+        234567890,
+        'World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890'
+)
+GROUP BY k
+    WITH TOTALS
+HAVING length(anyLast(s)) > 0
+ORDER BY k;
+
+/* There was a bug in the implementation of WITH TOTALS.
+ * When there was more than one block after aggregation,
+ * nullptr was passed to IAggregateFunction::merge instead of a pointer to a valid Arena.
+ *
+ * To reproduce, we set 'group_by_two_level_threshold' to a small value to enable two-level aggregation.
+ * Only with two-level aggregation are there many blocks after GROUP BY.
+ *
+ * We also use UNION ALL in the subquery to generate two blocks before GROUP BY,
+ * because two-level aggregation may be triggered only after a block is processed.
+ *
+ * We use large numbers as the key, because for 8- and 16-bit numbers
+ * two-level aggregation is not possible, as a simple aggregation method is used instead.
+ * These numbers happen to hash into different buckets, and thus we have two blocks after GROUP BY.
+ *
+ * We also use long strings (at least 64 bytes) in the aggregation state,
+ * because the aggregate functions min/max/any/anyLast use an Arena only for long enough strings.
+ *
+ * And we use the function 'anyLast' so that IAggregateFunction::merge is called for every new value.
+ *
+ * We use a useless HAVING (that is always true), because in the absence of HAVING,
+ * TOTALS are calculated in a simple way in the same pass during aggregation, not in TotalsHavingBlockInputStream,
+ * and the bug doesn't trigger.
+ *
+ * We use ORDER BY to make the result of the test deterministic.
+ * max_threads = 1 gives a deterministic order of the result in the subquery and of the value of 'anyLast'.
+ */
diff --git a/debian/changelog b/debian/changelog
index ef4a3432476..8e7fd49fcb4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,5 +1,5 @@
-clickhouse (1.1.54332) unstable; urgency=low
+clickhouse (1.1.54333) unstable; urgency=low
 
   * Modified source code
 
- -- Thu, 11 Jan 2018 10:25:25 +0300
+ -- Fri, 12 Jan 2018 12:43:30 +0300
diff --git a/debian/pbuilder-hooks/B00ccache-stat b/debian/pbuilder-hooks/B00ccache-stat
new file mode 100755
index 00000000000..fdf6db1b7e7
--- /dev/null
+++ b/debian/pbuilder-hooks/B00ccache-stat
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+ccache --show-stats
diff --git a/debian/source/options b/debian/source/options
index d3ba057a286..1f78e80e6a6 100644
--- a/debian/source/options
+++ b/debian/source/options
@@ -1,2 +1,3 @@
 tar-ignore
-tar-ignore="contrib/poco/openssl/*"
+tar-ignore="ClickHouse/contrib/poco/openssl/*"
+tar-ignore="ClickHouse/build*"
diff --git a/release b/release
index 849d92994a6..ba645b541b2 100755
--- a/release
+++ b/release
@@ -68,7 +68,7 @@ if [ -z "$THREAD_COUNT" ] ; then
     THREAD_COUNT=`nproc || grep -c ^processor /proc/cpuinfo`
 fi
 
-CMAKE_FLAGS+=" $LIBTCMALLOC_OPTS -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE -DUSE_EMBEDDED_COMPILER=1"
+CMAKE_FLAGS=" $LIBTCMALLOC_OPTS -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE -DUSE_EMBEDDED_COMPILER=1 $CMAKE_FLAGS"
 export CMAKE_FLAGS
 
 REVISION+=$VERSION_POSTFIX
@@ -83,6 +83,7 @@ if [ -z "$USE_PBUILDER" ] ; then
         -b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS}
 else
     export DIST=${DIST:=artful}
+    export SET_BUILDRESULT=${SET_BUILDRESULT:=$CURDIR/..}
     . $CURDIR/debian/.pbuilderrc
     if [[ -n "$FORCE_PBUILDER_CREATE" || ! -e "$BASETGZ" ]] ; then
         sudo --preserve-env pbuilder create --configfile $CURDIR/debian/.pbuilderrc