diff --git a/contrib/librdkafka b/contrib/librdkafka index 3a986afbb97..3401fa1e456 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit 3a986afbb977fa13582991ce8f2c0b2045ffaa33 +Subproject commit 3401fa1e45605b5ae806f94905c92f5f546a607b diff --git a/contrib/zstd b/contrib/zstd index f3a8bd553a8..f4340f46b23 160000 --- a/contrib/zstd +++ b/contrib/zstd @@ -1 +1 @@ -Subproject commit f3a8bd553a865c59f1bd6e1f68bf182cf75a8f00 +Subproject commit f4340f46b2387bc8de7d5320c0b83bb1499933ad diff --git a/copy_headers.sh b/copy_headers.sh index d769044be95..fa5b94fc267 100755 --- a/copy_headers.sh +++ b/copy_headers.sh @@ -63,5 +63,6 @@ done # Even more platform-specific headers for src_file in $(ls -1 $SOURCE_PATH/contrib/libboost/boost_1_65_0/boost/smart_ptr/detail/*); do + mkdir -p "$DST/$(echo $src_file | sed -r -e 's/\/[^/]*$/\//')"; cp "$src_file" "$DST/$src_file"; done diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 395d0b0a1ba..7467adaedf4 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -216,8 +216,9 @@ endif () # also for copy_headers.sh: target_include_directories (clickhouse_common_io BEFORE PRIVATE ${COMMON_INCLUDE_DIR}) +add_subdirectory (tests) + if (ENABLE_TESTS) - add_subdirectory (tests) # attach all dbms gtest sources grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources) add_executable(unit_tests_dbms ${dbms_gtest_sources}) diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index b9cf065e877..f84025775ef 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -1,6 +1,6 @@ # This strings autochanged from release_lib.sh: -set(VERSION_DESCRIBE v1.1.54326-testing) -set(VERSION_REVISION 54326) +set(VERSION_DESCRIBE v1.1.54328-testing) +set(VERSION_REVISION 54328) # end of autochange set (VERSION_MAJOR 1) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h index 
0c30749c076..a5062df436d 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h @@ -3,6 +3,7 @@ #include #include #include +#include // SingleValueDataString used in embedded compiler namespace DB diff --git a/dbms/src/Interpreters/Compiler.cpp b/dbms/src/Interpreters/Compiler.cpp index 73a9b56f850..b63dd438a40 100644 --- a/dbms/src/Interpreters/Compiler.cpp +++ b/dbms/src/Interpreters/Compiler.cpp @@ -229,14 +229,14 @@ void Compiler::compile( /// echo | g++ -x c++ -E -Wp,-v - " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/c++/*" - " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/x86_64-linux-gnu/c++/*" + " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/" CMAKE_LIBRARY_ARCHITECTURE "/c++/*" " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/c++/*/backward" " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/clang/*/include" /// if compiler is clang (from package) " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/local/lib/clang/*/include" /// if clang installed manually - " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/lib/gcc/x86_64-linux-gnu/*/include-fixed" - " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/lib/gcc/x86_64-linux-gnu/*/include" + " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/lib/gcc/" CMAKE_LIBRARY_ARCHITECTURE "/*/include-fixed" + " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/lib/gcc/" CMAKE_LIBRARY_ARCHITECTURE "/*/include" " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/local/include" /// if something installed manually - " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/x86_64-linux-gnu" + " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include/" CMAKE_LIBRARY_ARCHITECTURE " -isystem " INTERNAL_COMPILER_HEADERS_ROOT "/usr/include" #endif " -I " INTERNAL_COMPILER_HEADERS "/dbms/src/" diff --git a/dbms/src/Interpreters/EmbeddedDictionaries.h b/dbms/src/Interpreters/EmbeddedDictionaries.h index 
9345d3a0826..a4f97308401 100644 --- a/dbms/src/Interpreters/EmbeddedDictionaries.h +++ b/dbms/src/Interpreters/EmbeddedDictionaries.h @@ -6,7 +6,7 @@ #include -namespace Poco { class Logger; } +namespace Poco { class Logger; namespace Util { class AbstractConfiguration; } } class RegionsHierarchies; class TechDataHierarchy; diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index 1f12146908d..89bc6d906ec 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -1543,7 +1543,7 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node, const Block & { makeExplicitSet(func, sample_block, true); } - catch (const DB::Exception & e) + catch (const Exception & e) { /// in `sample_block` there are no columns that add `getActions` if (e.code() != ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK) @@ -1672,7 +1672,7 @@ void ExpressionAnalyzer::makeExplicitSet(const ASTFunction * node, const Block & /** NOTE If tuple in left hand side specified non-explicitly * Example: identity((a, b)) IN ((1, 2), (3, 4)) * instead of (a, b)) IN ((1, 2), (3, 4)) - * then set creation of set doesn't work correctly. + * then set creation doesn't work correctly. 
*/ if (left_arg_tuple && left_arg_tuple->name == "tuple") { diff --git a/dbms/src/Interpreters/config_compile.h.in b/dbms/src/Interpreters/config_compile.h.in index 0655cfd95dc..a3e87799571 100644 --- a/dbms/src/Interpreters/config_compile.h.in +++ b/dbms/src/Interpreters/config_compile.h.in @@ -1,5 +1,6 @@ #pragma once +#cmakedefine CMAKE_LIBRARY_ARCHITECTURE "@CMAKE_LIBRARY_ARCHITECTURE@" #cmakedefine PATH_SHARE "@PATH_SHARE@" #cmakedefine INTERNAL_COMPILER_FLAGS "@INTERNAL_COMPILER_FLAGS@" #cmakedefine INTERNAL_COMPILER_EXECUTABLE "@INTERNAL_COMPILER_EXECUTABLE@" diff --git a/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt b/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt index 7a4a42a3142..bf66650975f 100644 --- a/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt +++ b/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt @@ -48,6 +48,7 @@ LLVMSupport #PollyPPCG libtinfo.a -common +${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} +Threads::Threads ) diff --git a/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt b/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt index 11e9c61d8a2..683f9543f1b 100644 --- a/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt +++ b/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt @@ -47,6 +47,7 @@ ${REQUIRED_LLVM_LIBRARIES} #PollyPPCG libtinfo.a -common +${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} +Threads::Threads ) diff --git a/dbms/src/Storages/MergeTree/PKCondition.cpp b/dbms/src/Storages/MergeTree/PKCondition.cpp index 2d5853126af..c7623a8c63a 100644 --- a/dbms/src/Storages/MergeTree/PKCondition.cpp +++ b/dbms/src/Storages/MergeTree/PKCondition.cpp @@ -864,7 +864,7 @@ bool PKCondition::mayBeTrueInRangeImpl(const std::vector & key_ranges, co break; } - /// Compute the function. + /// Apply the function. 
DataTypePtr new_type; if (!key_range_transformed.left.isNull()) applyFunction(func, current_type, key_range_transformed.left, new_type, key_range_transformed.left); diff --git a/dbms/tests/CMakeLists.txt b/dbms/tests/CMakeLists.txt index b3df00b672f..35d0a5b2145 100644 --- a/dbms/tests/CMakeLists.txt +++ b/dbms/tests/CMakeLists.txt @@ -20,4 +20,14 @@ macro(grep_gtest_sources BASE_DIR DST_VAR) file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp") endmacro() +install (PROGRAMS clickhouse-test DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) +install ( + DIRECTORY queries performance external_dictionaries + DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse-test + USE_SOURCE_PERMISSIONS + COMPONENT clickhouse + PATTERN "CMakeLists.txt" EXCLUDE + PATTERN ".gitignore" EXCLUDE +) + add_subdirectory (external_dictionaries) diff --git a/dbms/tests/clickhouse-test b/dbms/tests/clickhouse-test index de9f202bbd4..17b8e7ac43a 100755 --- a/dbms/tests/clickhouse-test +++ b/dbms/tests/clickhouse-test @@ -87,12 +87,14 @@ def main(args): args.shard = True base_dir = os.path.abspath(args.queries) + tmp_dir = os.path.abspath(args.tmp) failures_total = 0 # Keep same default values as in queries/0_stateless/00000_sh_lib.sh os.environ.setdefault("CLICKHOUSE_BINARY", args.binary) os.environ.setdefault("CLICKHOUSE_CLIENT", args.client) + os.environ.setdefault("CLICKHOUSE_TMP", tmp_dir) for suite in sorted(os.listdir(base_dir)): if SERVER_DIED: @@ -102,6 +104,11 @@ def main(args): suite_re_obj = re.search('^[0-9]+_(.*)$', suite) if not suite_re_obj: #skip .gitignore and so on continue + + suite_tmp_dir = os.path.join(tmp_dir, suite) + if not os.path.exists(suite_tmp_dir): + os.makedirs(suite_tmp_dir) + suite = suite_re_obj.group(1) if os.path.isdir(suite_dir): print("\nRunning {} tests.\n".format(suite)) @@ -154,15 +161,15 @@ def main(args): print(MSG_SKIPPED + " - no shard") else: disabled_file = os.path.join(suite_dir, name) + '.disabled' - + if 
os.path.exists(disabled_file) and not args.disabled: message = open(disabled_file, 'r').read() report_testcase.append(et.Element("skipped", attrib = {"message": message})) print(MSG_SKIPPED + " - " + message) else: reference_file = os.path.join(suite_dir, name) + '.reference' - stdout_file = os.path.join(suite_dir, name) + '.stdout' - stderr_file = os.path.join(suite_dir, name) + '.stderr' + stdout_file = os.path.join(suite_tmp_dir, name) + '.stdout' + stderr_file = os.path.join(suite_tmp_dir, name) + '.stderr' if ext == '.sql': command = "{0} --multiquery < {1} > {2} 2> {3}".format(args.client, case_file, stdout_file, stderr_file) @@ -287,9 +294,10 @@ def main(args): if __name__ == '__main__': parser = ArgumentParser(description = 'ClickHouse functional tests') parser.add_argument('-q', '--queries', default = 'queries', help = 'Path to queries dir') + parser.add_argument('--tmp', default = 'queries', help = 'Path to tmp dir') parser.add_argument('-b', '--binary', default = 'clickhouse', help = 'Main clickhouse binary') parser.add_argument('-c', '--client', help = 'Client program') - parser.add_argument('--client_config', help = 'Client config (if you use not default ports)') + parser.add_argument('--clientconfig', help = 'Client config (if you use not default ports)') parser.add_argument('-o', '--output', help = 'Output xUnit compliant test report directory') parser.add_argument('-t', '--timeout', type = int, default = 600, help = 'Timeout for each test case in seconds') parser.add_argument('test', nargs = '?', help = 'Optional test case name regex') @@ -308,6 +316,6 @@ if __name__ == '__main__': args = parser.parse_args() if args.client is None: args.client = args.binary + '-client' - if args.client_config: - args.client += ' -c' + args.client_config + if args.clientconfig: + args.client += ' -c' + args.clientconfig main(args) diff --git a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh 
b/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh index 918fe38b5d8..28b737a62b1 100755 --- a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh +++ b/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh @@ -8,7 +8,7 @@ TABLE_HASH="cityHash64(groupArray(cityHash64(*)))" function pack_unpack_compare() { - local buf_file="test.buf.'.$3" + local buf_file="${CLICKHOUSE_TMP}/test.buf.'.$3" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test.buf" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test.buf_file" @@ -20,12 +20,12 @@ function pack_unpack_compare() local res_db_file=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM test.buf_file") ${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT * FROM test.buf FORMAT $3" > "$buf_file" - local res_ch_local1=$(${CLICKHOUSE_LOCAL} --structure "$2" --file "$buf_file" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" 2>stderr || cat stderr 1>&2) - local res_ch_local2=$(${CLICKHOUSE_LOCAL} --structure "$2" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" < "$buf_file" 2>stderr || cat stderr 1>&2) + local res_ch_local1=$(${CLICKHOUSE_LOCAL} --structure "$2" --file "$buf_file" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" 2>${CLICKHOUSE_TMP}/stderr || cat ${CLICKHOUSE_TMP}/stderr 1>&2) + local res_ch_local2=$(${CLICKHOUSE_LOCAL} --structure "$2" --table "my super table" --input-format "$3" --output-format TabSeparated --query "SELECT $TABLE_HASH FROM \`my super table\`" < "$buf_file" 2>${CLICKHOUSE_TMP}/stderr || cat ${CLICKHOUSE_TMP}/stderr 1>&2) ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test.buf" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test.buf_file" - rm -f 
"$buf_file" "${CLICKHOUSE_TMP}/stderr" echo $((res_orig - res_db_file)) $((res_orig - res_ch_local1)) $((res_orig - res_ch_local2)) } diff --git a/dbms/tests/queries/0_stateless/00415_into_outfile.sh b/dbms/tests/queries/0_stateless/00415_into_outfile.sh index dbc1e0571b0..19641feca50 100755 --- a/dbms/tests/queries/0_stateless/00415_into_outfile.sh +++ b/dbms/tests/queries/0_stateless/00415_into_outfile.sh @@ -11,29 +11,29 @@ function perform() echo "performing test: $test_id" ${CLICKHOUSE_CLIENT} --query "$query" 2>/dev/null if [ "$?" -eq 0 ]; then - cat "./test_into_outfile_$test_id.out" + cat "${CLICKHOUSE_TMP}/test_into_outfile_$test_id.out" else echo "query failed" fi - rm -f "./test_into_outfile_$test_id.out" + rm -f "${CLICKHOUSE_TMP}/test_into_outfile_$test_id.out" } -perform "select" "SELECT 1, 2, 3 INTO OUTFILE './test_into_outfile_select.out'" +perform "select" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_select.out'" -perform "union_all" "SELECT 1, 2 UNION ALL SELECT 3, 4 INTO OUTFILE './test_into_outfile_union_all.out' FORMAT TSV" | sort --numeric-sort +perform "union_all" "SELECT 1, 2 UNION ALL SELECT 3, 4 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_union_all.out' FORMAT TSV" | sort --numeric-sort -perform "bad_union_all" "SELECT 1, 2 INTO OUTFILE './test_into_outfile_bad_union_all.out' UNION ALL SELECT 3, 4" +perform "bad_union_all" "SELECT 1, 2 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_bad_union_all.out' UNION ALL SELECT 3, 4" -perform "describe_table" "DESCRIBE TABLE system.one INTO OUTFILE './test_into_outfile_describe_table.out'" +perform "describe_table" "DESCRIBE TABLE system.one INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_describe_table.out'" echo "performing test: clickhouse-local" -echo -e '1\t2' | ${CLICKHOUSE_LOCAL} --structure 'col1 UInt32, col2 UInt32' --query "SELECT col1 + 1, col2 + 1 FROM table INTO OUTFILE './test_into_outfile_clickhouse-local.out'" 2>/dev/null +echo -e '1\t2' | 
${CLICKHOUSE_LOCAL} --structure 'col1 UInt32, col2 UInt32' --query "SELECT col1 + 1, col2 + 1 FROM table INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_clickhouse-local.out'" 2>/dev/null if [ "$?" -eq 0 ]; then - cat "./test_into_outfile_clickhouse-local.out" + cat "${CLICKHOUSE_TMP}/test_into_outfile_clickhouse-local.out" else echo "query failed" fi -rm -f "./test_into_outfile_clickhouse-local.out" +rm -f "${CLICKHOUSE_TMP}/test_into_outfile_clickhouse-local.out" echo "performing test: http" -echo "SELECT 1, 2 INTO OUTFILE './test_into_outfile_http.out'" | ${CLICKHOUSE_CURL} -s "${CLICKHOUSE_URL}" -d @- --fail || echo "query failed" +echo "SELECT 1, 2 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_http.out'" | ${CLICKHOUSE_CURL} -s "${CLICKHOUSE_URL}" -d @- --fail || echo "query failed" diff --git a/dbms/tests/queries/0_stateless/00429_http_bufferization.sh b/dbms/tests/queries/0_stateless/00429_http_bufferization.sh index 054134a6a20..ab87950d38b 100755 --- a/dbms/tests/queries/0_stateless/00429_http_bufferization.sh +++ b/dbms/tests/queries/0_stateless/00429_http_bufferization.sh @@ -66,11 +66,11 @@ corner_sizes="1048576 `seq 500000 1000000 3500000`" # Check HTTP results with $CLICKHOUSE_CLIENT in normal case function cmp_cli_and_http() { - $CLICKHOUSE_CLIENT -q "`query $1`" > res1 - ch_url "buffer_size=$2&wait_end_of_query=0" "$1" > res2 - ch_url "buffer_size=$2&wait_end_of_query=1" "$1" > res3 - cmp res1 res2 && cmp res1 res3 || echo FAIL - rm -rf res1 res2 res3 + $CLICKHOUSE_CLIENT -q "`query $1`" > ${CLICKHOUSE_TMP}/res1 + ch_url "buffer_size=$2&wait_end_of_query=0" "$1" > ${CLICKHOUSE_TMP}/res2 + ch_url "buffer_size=$2&wait_end_of_query=1" "$1" > ${CLICKHOUSE_TMP}/res3 + cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res2 && cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res3 || echo FAIL + rm -rf ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res2 ${CLICKHOUSE_TMP}/res3 } function check_cli_and_http() { @@ -88,14 +88,14 @@ check_cli_and_http # Check HTTP 
internal compression in normal case function cmp_http_compression() { - $CLICKHOUSE_CLIENT -q "`query $1`" > res0 - ch_url 'compress=1' $1 | clickhouse-compressor --decompress > res1 - ch_url "compress=1&buffer_size=$2&wait_end_of_query=0" $1 | clickhouse-compressor --decompress > res2 - ch_url "compress=1&buffer_size=$2&wait_end_of_query=1" $1 | clickhouse-compressor --decompress > res3 - cmp res0 res1 - cmp res1 res2 - cmp res1 res3 - rm -rf res0 res1 res2 res3 + $CLICKHOUSE_CLIENT -q "`query $1`" > ${CLICKHOUSE_TMP}/res0 + ch_url 'compress=1' $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res1 + ch_url "compress=1&buffer_size=$2&wait_end_of_query=0" $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res2 + ch_url "compress=1&buffer_size=$2&wait_end_of_query=1" $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res3 + cmp ${CLICKHOUSE_TMP}/res0 ${CLICKHOUSE_TMP}/res1 + cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res2 + cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res3 + rm -rf ${CLICKHOUSE_TMP}/res0 ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res2 ${CLICKHOUSE_TMP}/res3 } function check_http_compression() { diff --git a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index bda49706c62..8833cefca45 100755 --- a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -41,10 +41,10 @@ popd > /dev/null #SCRIPTDIR=`dirname "$SCRIPTPATH"` SCRIPTDIR=$SCRIPTPATH -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 --merge_tree_uniform_read_distribution=1 -n 2>&1 > preferred_block_size_bytes.stdout -cmp "$SCRIPTDIR"/00282_merging.reference preferred_block_size_bytes.stdout && echo PASSED || echo FAILED +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 --merge_tree_uniform_read_distribution=1 -n 
2>&1 > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout +cmp "$SCRIPTDIR"/00282_merging.reference ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 --merge_tree_uniform_read_distribution=0 -n 2>&1 > preferred_block_size_bytes.stdout -cmp "$SCRIPTDIR"/00282_merging.reference preferred_block_size_bytes.stdout && echo PASSED || echo FAILED +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 --merge_tree_uniform_read_distribution=0 -n 2>&1 > ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout +cmp "$SCRIPTDIR"/00282_merging.reference ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -rm preferred_block_size_bytes.stdout +rm ${CLICKHOUSE_TMP}/preferred_block_size_bytes.stdout diff --git a/dbms/tests/queries/shell_config.sh b/dbms/tests/queries/shell_config.sh index b4ad921cfc1..31ec03e2579 100644 --- a/dbms/tests/queries/shell_config.sh +++ b/dbms/tests/queries/shell_config.sh @@ -18,3 +18,5 @@ export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:="8443"} export CLICKHOUSE_PORT_HTTP_PROTO=${CLICKHOUSE_PORT_HTTP_PROTO:="http"} export CLICKHOUSE_URL=${CLICKHOUSE_URL:="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/"} export CLICKHOUSE_CURL=${CLICKHOUSE_CURL:="curl --max-time 5"} +export CLICKHOUSE_TMP=${CLICKHOUSE_TMP:="."} +mkdir -p ${CLICKHOUSE_TMP} diff --git a/debian/changelog b/debian/changelog index 734e8981d16..c5231b6548f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (1.1.54326) unstable; urgency=low +clickhouse (1.1.54328) unstable; urgency=low * Modified source code - -- proller Mon, 18 Dec 2017 16:14:46 +0300 + -- Wed, 27 Dec 2017 11:09:36 +0300 diff --git a/debian/clickhouse-client.install b/debian/clickhouse-client.install index 97798ad0976..0c3cb0d000e 100644 --- a/debian/clickhouse-client.install +++ 
b/debian/clickhouse-client.install @@ -1,6 +1,4 @@ /usr/bin/clickhouse-client -/usr/bin/clickhouse-benchmark /usr/bin/clickhouse-local /etc/clickhouse-client/config.xml /usr/bin/clickhouse-extract-from-config -/usr/bin/clickhouse-performance-test diff --git a/debian/clickhouse-test.install b/debian/clickhouse-test.install new file mode 100644 index 00000000000..6c9a2c182fe --- /dev/null +++ b/debian/clickhouse-test.install @@ -0,0 +1,4 @@ +usr/bin/clickhouse-test +usr/share/clickhouse-test/* +usr/bin/clickhouse-performance-test +usr/bin/clickhouse-benchmark diff --git a/debian/control b/debian/control index 88ef46356fc..9b4e464620c 100644 --- a/debian/control +++ b/debian/control @@ -53,3 +53,11 @@ Priority: extra Depends: ${misc:Depends}, clickhouse-server-base (= ${binary:Version}) Description: debugging symbols for clickhouse-server-base This package contains the debugging symbols for clickhouse-server-base. + + +Package: clickhouse-test +Section: Database +Priority: extra +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, clickhouse-compressor, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo +Description: Clickhouse tests diff --git a/debian/pbuilder-test/100_test_run b/debian/pbuilder-test/100_test_run index 38a1e428afc..c2941eb8d3a 100755 --- a/debian/pbuilder-test/100_test_run +++ b/debian/pbuilder-test/100_test_run @@ -1,4 +1,6 @@ #!/bin/bash -#service clickhouse-server start -clickhouse-client -q "SELECT version();" +clickhouse-client -q "SELECT * from system.build_options;" + +# --no-shard because default server listen only :: and 127.0.0.1 +[ -n "$TEST_RUN" ] && clickhouse-test --no-shard --queries /usr/share/clickhouse-test/queries --tmp /tmp/clickhouse-test/ || true diff --git a/docs/en/operations/tips.rst b/docs/en/operations/tips.rst index 12514d71160..9390c7b2557 100644 --- a/docs/en/operations/tips.rst +++ b/docs/en/operations/tips.rst @@ -132,7 +132,7 @@ ZooKeeper Probably you already have 
ZooKeeper for other purposes. It's Ok to use existing ZooKeeper installation if it is not overloaded. -Use recent version of ZooKeeper. At least 3.5 is Ok. Version in your Linux package repository might be outdated. +Use recent version of ZooKeeper. At least 3.4.9 is Ok. Version in your Linux package repository might be outdated. With default settings, ZooKeeper have time bomb: diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md index 81f2aeb87e9..315a8fb07fa 100644 --- a/docs/ru/operations/tips.md +++ b/docs/ru/operations/tips.md @@ -105,7 +105,7 @@ XFS также подходит, но не так тщательно проте Вероятно вы уже используете ZooKeeper для других целей. Можно использовать ту же инсталляцию ZooKeeper, если она не сильно перегружена. -Лучше использовать свежую версию ZooKeeper, как минимум 3.5. Версия в стабильных дистрибутивах Linux может быть устаревшей. +Лучше использовать свежую версию ZooKeeper, как минимум 3.4.9. Версия в стабильных дистрибутивах Linux может быть устаревшей. С настройками по умолчанию, ZooKeeper является бомбой замедленного действия: diff --git a/libs/libcommon/include/common/StringRef.h b/libs/libcommon/include/common/StringRef.h index d7cc4d9e6f3..a32a1dccc8a 100644 --- a/libs/libcommon/include/common/StringRef.h +++ b/libs/libcommon/include/common/StringRef.h @@ -14,6 +14,10 @@ #include #endif +#if __SSE4_2__ + #include + #include +#endif /// The thing to avoid creating strings to find substrings in the hash table. @@ -172,8 +176,6 @@ struct StringRefHash64 #if __SSE4_2__ -#include - /// Parts are taken from CityHash. inline UInt64 hashLen16(UInt64 u, UInt64 v) diff --git a/website/benchmark.html b/website/benchmark.html index eb107c0a6c0..d122741f28d 100644 --- a/website/benchmark.html +++ b/website/benchmark.html @@ -2157,6 +2157,16 @@ try { var yaCounter18343495 = new Ya.Metrika({id:18343495,
+

Most results are for single server setup. The server is: two socket Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz; 128 GiB RAM; md RAID-5 on 8 6TB SATA HDD; ext4.

+ +

Some additional results (marked as x2, x3, x6) are for clustered setup for comparison. These results are contributed from independent teams and hardware specification may differ.

+ +

Disclaimer: some results are significantly outdated. Results for MonetDB was obtained at 2013 for version v11.15.11 (Feb2013-SP3). Results for MemSQL was obtained at 2015 for version 3.2. Results for Vertica are from 2015 for version 7.1.1.

+ +
+ +
+

Relative query processing time (lower is better):