diff --git a/contrib/zookeeper-cmake/CMakeLists.txt b/contrib/zookeeper-cmake/CMakeLists.txt
deleted file mode 100644
index df8859ffbeb..00000000000
--- a/contrib/zookeeper-cmake/CMakeLists.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cmake_minimum_required(VERSION 3.0)
-
-# Modified version of CMakeLists.txt for ClickHouse. Doesn't link the library to libm.
-# Otherwise we have extra dependency when compiling with the most fresh libc version.
-# How to check:
-# readelf -s ./clickhouse | grep -F 2.23
-
-SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/zookeeper/src/c)
-
-project(zookeeper VERSION 3.5.3)
-set(email user@zookeeper.apache.org)
-set(description "zookeeper C client")
-
-# general options
-if(UNIX)
-  add_compile_options(-Wall -fPIC)
-elseif(WIN32)
-  add_compile_options(/W3)
-endif()
-add_definitions(-DUSE_STATIC_LIB)
-
-# TODO: Enable /WX and /W4 on Windows. Currently there are ~1000 warnings.
-# TODO: Add Solaris support.
-# TODO: Add a shared library option.
-# TODO: Specify symbols to export.
-# TODO: Generate doxygen documentation.
-
-# Sync API option
-option(WANT_SYNCAPI "Enables Sync API support" ON)
-if(WANT_SYNCAPI)
-  add_definitions(-DTHREADED)
-  if(WIN32)
-    # Note that the generator expression ensures that `/MTd` is used when Debug
-    # configurations are built.
-    add_compile_options(/MT$<$<CONFIG:Debug>:d>)
-  endif()
-endif()
-
-# CppUnit option
-if(WIN32 OR APPLE)
-  # The tests do not yet compile on Windows or macOS,
-  # so we set this to off by default.
-  #
-  # Note that CMake does not have expressions except in conditionals,
-  # so we're left with this if/else/endif pattern.
-  set(DEFAULT_WANT_CPPUNIT OFF)
-else()
-  set(DEFAULT_WANT_CPPUNIT ON)
-endif()
-option(WANT_CPPUNIT "Enables CppUnit and tests" ${DEFAULT_WANT_CPPUNIT})
-
-# The function `to_have(in out)` converts a header name like `arpa/inet.h`
-# into an Autotools style preprocessor definition `HAVE_ARPA_INET_H`.
-# This is then set or unset in `configure_file()` step.
-#
-# Note that CMake functions do not have return values; instead an "out"
-# variable must be passed, and explicitly set with parent scope.
-function(to_have in out)
-  string(TOUPPER ${in} str)
-  string(REGEX REPLACE "/|\\." "_" str ${str})
-  set(${out} "HAVE_${str}" PARENT_SCOPE)
-endfunction()
-
-# include file checks
-foreach(f generated/zookeeper.jute.h generated/zookeeper.jute.c)
-  if(EXISTS "${LIBRARY_DIR}/${f}")
-    to_have(${f} name)
-    set(${name} 1)
-  else()
-    message(FATAL_ERROR
-      "jute files are missing!\n"
-      "Please run 'ant compile_jute' while in the ZooKeeper top level directory.")
-  endif()
-endforeach()
-
-# header checks
-include(CheckIncludeFile)
-set(check_headers
-  arpa/inet.h
-  dlfcn.h
-  fcntl.h
-  inttypes.h
-  memory.h
-  netdb.h
-  netinet/in.h
-  stdint.h
-  stdlib.h
-  string.h
-  strings.h
-  sys/socket.h
-  sys/stat.h
-  sys/time.h
-  sys/types.h
-  unistd.h
-  sys/utsname.h)
-
-foreach(f ${check_headers})
-  to_have(${f} name)
-  check_include_file(${f} ${name})
-endforeach()
-
-# function checks
-include(CheckFunctionExists)
-set(check_functions
-  getcwd
-  gethostbyname
-  gethostname
-  getlogin
-  getpwuid_r
-  gettimeofday
-  getuid
-  memmove
-  memset
-  poll
-  socket
-  strchr
-  strdup
-  strerror
-  strtol)
-
-foreach(fn ${check_functions})
-  to_have(${fn} name)
-  check_function_exists(${fn} ${name})
-endforeach()
-
-# library checks
-set(check_libraries rt m pthread)
-foreach(lib ${check_libraries})
-  to_have("lib${lib}" name)
-  find_library(${name} ${lib})
-endforeach()
-
-# IPv6 check
-include(CheckStructHasMember)
-check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h" ZOO_IPV6_ENABLED)
-
-# configure
-configure_file(${LIBRARY_DIR}/cmake_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/config.h)
-
-# hashtable library
-set(hashtable_sources ${LIBRARY_DIR}/src/hashtable/hashtable_itr.c ${LIBRARY_DIR}/src/hashtable/hashtable.c)
-add_library(hashtable STATIC ${hashtable_sources})
-
-# zookeeper library
-set(zookeeper_sources
-  ${LIBRARY_DIR}/src/zookeeper.c
-  ${LIBRARY_DIR}/src/recordio.c
-  ${LIBRARY_DIR}/generated/zookeeper.jute.c
-  ${LIBRARY_DIR}/src/zk_log.c
-  ${LIBRARY_DIR}/src/zk_hashtable.c
-)
-# src/addrvec.c
-
-if(WANT_SYNCAPI)
-  list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/mt_adaptor.c)
-else()
-  list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/st_adaptor.c)
-endif()
-
-if(WIN32)
-  list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/winport.c)
-endif()
-
-add_library(zookeeper STATIC ${zookeeper_sources})
-target_include_directories(zookeeper BEFORE PUBLIC ${LIBRARY_DIR}/include ${CMAKE_CURRENT_BINARY_DIR}/include ${LIBRARY_DIR}/generated)
-target_link_libraries(zookeeper PUBLIC
-  hashtable
-  $<$<PLATFORM_ID:Linux>:rt> # clock_gettime
-  $<$<PLATFORM_ID:Windows>:ws2_32>) # Winsock 2.0
-
-if(WANT_SYNCAPI AND NOT WIN32)
-  find_package(Threads REQUIRED)
-  target_link_libraries(zookeeper PUBLIC Threads::Threads)
-endif()
-
-# cli executable
-add_executable(cli ${LIBRARY_DIR}/src/cli.c)
-target_link_libraries(cli zookeeper)
-target_link_libraries(cli $<$<PLATFORM_ID:Linux>:m>)
-
-# load_gen executable
-if(WANT_SYNCAPI AND NOT WIN32)
-  add_executable(load_gen ${LIBRARY_DIR}/src/load_gen.c)
-  target_link_libraries(load_gen zookeeper)
-  target_link_libraries(load_gen $<$<PLATFORM_ID:Linux>:m>)
-endif()
diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake
index 990c638bf46..23bc9eabf29 100644
--- a/dbms/cmake/version.cmake
+++ b/dbms/cmake/version.cmake
@@ -1,6 +1,6 @@
 # This strings autochanged from release_lib.sh:
-set(VERSION_DESCRIBE v1.1.54371-testing)
-set(VERSION_REVISION 54371)
+set(VERSION_DESCRIBE v1.1.54372-testing)
+set(VERSION_REVISION 54372)
 # end of autochange
 
 set (VERSION_MAJOR 1)
diff --git a/dbms/src/Client/MultiplexedConnections.cpp b/dbms/src/Client/MultiplexedConnections.cpp
index fb2baa105a6..8fe27ecf7fa 100644
--- a/dbms/src/Client/MultiplexedConnections.cpp
+++ b/dbms/src/Client/MultiplexedConnections.cpp
@@ -280,8 +280,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
         read_list.push_back(*connection->socket);
     }
 
-    /// If no data was found, then we check if there are any connections
-    /// ready for reading.
+    /// If no data was found, then we check if there are any connections ready for reading.
     if (read_list.empty())
     {
         Poco::Net::Socket::SocketList write_list;
@@ -300,6 +299,9 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
             throw Exception("Timeout exceeded while reading from " + dumpAddressesUnlocked(), ErrorCodes::TIMEOUT_EXCEEDED);
     }
 
+    /// TODO Absolutely wrong code: read_list could be empty; rand() is not thread safe and has low quality; motivation of rand is unclear.
+    /// This code path is disabled by default.
+
     auto & socket = read_list[rand() % read_list.size()];
     if (fd_to_replica_state_idx.empty())
     {
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp
index 0280992a8b6..e4cda7e05fb 100644
--- a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp
+++ b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp
@@ -195,6 +195,9 @@ int32_t ZooKeeper::getChildrenImpl(const std::string & path, Strings & res,
 
     impl->list(path, callback, watch_callback);
     event.wait();
+
+    ProfileEvents::increment(ProfileEvents::ZooKeeperGetChildren);
+    ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions);
     return code;
 }
 
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/dbms/src/Common/ZooKeeper/ZooKeeper.h
index ede8c255a77..dfea3bbfce7 100644
--- a/dbms/src/Common/ZooKeeper/ZooKeeper.h
+++ b/dbms/src/Common/ZooKeeper/ZooKeeper.h
@@ -280,7 +280,7 @@ public:
         {
             zookeeper.tryRemove(path);
         }
-        catch (const KeeperException & e)
+        catch (...)
         {
             ProfileEvents::increment(ProfileEvents::CannotRemoveEphemeralNode);
             DB::tryLogCurrentException(__PRETTY_FUNCTION__);
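The new TODO in `MultiplexedConnections::getReplicaForReading` points out that `read_list` can be empty and that `rand()` is neither thread safe nor of good quality. A hedged sketch of the kind of fix the comment asks for (not the project's code) could look like this:

```cpp
// Sketch only: pick a ready socket without rand(). A thread_local engine avoids the
// data race on rand()'s hidden state, and the empty-list case is checked explicitly.
#include <random>
#include <stdexcept>
#include <vector>

template <typename Socket>
Socket & pickReadySocket(std::vector<Socket> & read_list)
{
    if (read_list.empty())
        throw std::logic_error("No sockets are ready for reading");

    thread_local std::mt19937 generator{std::random_device{}()};
    std::uniform_int_distribution<size_t> dist(0, read_list.size() - 1);
    return read_list[dist(generator)];
}
```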
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp
index 2e21804026b..3f999a85384 100644
--- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp
+++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp
@@ -737,7 +737,10 @@ void addRootPath(String & path, const String & root_path)
     if (root_path.empty())
         return;
 
-    path = root_path + path;
+    if (path.size() == 1)   /// "/"
+        path = root_path;
+    else
+        path = root_path + path;
 }
 
 void removeRootPath(String & path, const String & root_path)
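The `addRootPath` hunk above special-cases `path == "/"` so that a chroot-style root does not produce a znode name with a trailing slash. A standalone illustration (using `std::string` instead of the project's `String` alias):

```cpp
// Illustration of the fix: with root "/clickhouse/test", the old code turned a request
// for "/" into "/clickhouse/test/", which is not a valid znode name; the new code maps
// "/" to the root itself.
#include <cassert>
#include <string>

static void addRootPath(std::string & path, const std::string & root_path)
{
    if (root_path.empty())
        return;

    if (path.size() == 1)   /// path == "/"
        path = root_path;
    else
        path = root_path + path;
}

int main()
{
    const std::string root = "/clickhouse/test";

    std::string a = "/";
    addRootPath(a, root);
    assert(a == "/clickhouse/test");   /// old behaviour appended a trailing "/"

    std::string b = "/table/replicas";
    addRootPath(b, root);
    assert(b == "/clickhouse/test/table/replicas");
    return 0;
}
```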
diff --git a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp
index 306afb5955b..f7558dbf3eb 100644
--- a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp
+++ b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp
@@ -42,7 +42,7 @@ Block IProfilingBlockInputStream::read()
     if (isCancelledOrThrowIfKilled())
         return res;
 
-    if (!checkTimeLimits())
+    if (!checkTimeLimit())
         limit_exceeded_need_break = true;
 
     if (!limit_exceeded_need_break)
@@ -191,7 +191,7 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co
 };
 
 
-bool IProfilingBlockInputStream::checkTimeLimits()
+bool IProfilingBlockInputStream::checkTimeLimit()
 {
     if (limits.max_execution_time != 0
         && info.total_stopwatch.elapsed() > static_cast<UInt64>(limits.max_execution_time.totalMicroseconds()) * 1000)
diff --git a/dbms/src/DataStreams/IProfilingBlockInputStream.h b/dbms/src/DataStreams/IProfilingBlockInputStream.h
index 442c451faa3..0bed471f245 100644
--- a/dbms/src/DataStreams/IProfilingBlockInputStream.h
+++ b/dbms/src/DataStreams/IProfilingBlockInputStream.h
@@ -234,10 +234,10 @@ private:
 
     void updateExtremes(Block & block);
 
-    /** Check constraints and quotas.
-      * But only those that can be tested within each separate source.
+    /** Check limits and quotas.
+      * But only those that can be checked within each separate stream.
       */
-    bool checkTimeLimits();
+    bool checkTimeLimit();
 
     void checkQuota(Block & block);
 
diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
index ec6b9395701..7ed250e9036 100644
--- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
@@ -687,19 +687,26 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline
             stream->addTableLock(table_lock);
         });
 
-    /** Set the limits and quota for reading data, the speed and time of the query.
-      * Such restrictions are checked on the initiating server of the request, and not on remote servers.
-      * Because the initiating server has a summary of the execution of the request on all servers.
-      */
-    if (to_stage == QueryProcessingStage::Complete)
+    /// Set the limits and quota for reading data, the speed and time of the query.
     {
         IProfilingBlockInputStream::LocalLimits limits;
         limits.mode = IProfilingBlockInputStream::LIMITS_TOTAL;
         limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode);
         limits.max_execution_time = settings.max_execution_time;
         limits.timeout_overflow_mode = settings.timeout_overflow_mode;
-        limits.min_execution_speed = settings.min_execution_speed;
-        limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed;
+
+        /** Quota and minimal speed restrictions are checked on the initiating server of the request, and not on remote servers,
+          *  because the initiating server has a summary of the execution of the request on all servers.
+          *
+          * But limits on data size to read and maximum execution time are reasonable to check both on initiator and
+          *  additionally on each remote server, because these limits are checked per block of data processed,
+          *  and remote servers may process way more blocks of data than are received by initiator.
+          */
+        if (to_stage == QueryProcessingStage::Complete)
+        {
+            limits.min_execution_speed = settings.min_execution_speed;
+            limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed;
+        }
 
         QuotaForIntervals & quota = context.getQuota();
 
@@ -708,7 +715,9 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline
             if (IProfilingBlockInputStream * p_stream = dynamic_cast<IProfilingBlockInputStream *>(stream.get()))
             {
                 p_stream->setLimits(limits);
-                p_stream->setQuota(quota);
+
+                if (to_stage == QueryProcessingStage::Complete)
+                    p_stream->setQuota(quota);
             }
         });
     }
diff --git a/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference
new file mode 100644
index 00000000000..7193c3d3f3d
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference
@@ -0,0 +1 @@
+Still alive
diff --git a/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference
new file mode 100644
index 00000000000..7193c3d3f3d
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference
@@ -0,0 +1 @@
+Still alive
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
new file mode 100644
index 00000000000..8b1acc12b63
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
@@ -0,0 +1,10 @@
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
new file mode 100644
index 00000000000..dcd227bd2cf
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
@@ -0,0 +1,2 @@
+SET max_execution_time = 1, timeout_overflow_mode = 'break';
+SELECT DISTINCT * FROM remote('127.0.0.{2,3}', system.numbers) WHERE number < 10;
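The new `00613_shard_distributed_max_execution_time` test relies on `timeout_overflow_mode = 'break'`: when `max_execution_time` is exceeded the stream stops returning blocks instead of throwing, and with the `InterpreterSelectQuery` change above the time limit is now also applied on remote servers. A simplified, hedged sketch of the throw-vs-break distinction follows; the real logic lives in `checkTimeLimit()` and `handleOverflowMode()`, and the enum and exception type here are placeholders.

```cpp
// Simplified sketch of throw-vs-break semantics for max_execution_time.
#include <chrono>
#include <stdexcept>

enum class OverflowMode { THROW, BREAK };

/// Returns true if reading may continue, false if the stream should stop quietly
/// and return whatever rows were already produced (mode == BREAK).
bool checkTimeLimit(std::chrono::steady_clock::time_point start,
                    std::chrono::milliseconds max_execution_time,
                    OverflowMode timeout_overflow_mode)
{
    if (max_execution_time.count() == 0)
        return true;   /// 0 means "no limit"

    if (std::chrono::steady_clock::now() - start <= max_execution_time)
        return true;

    if (timeout_overflow_mode == OverflowMode::THROW)
        throw std::runtime_error("Timeout exceeded: maximum execution time exceeded");

    return false;   /// BREAK: caller sets limit_exceeded_need_break and stops reading
}
```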
diff --git a/debian/changelog b/debian/changelog
index 2ff28b59456..11e474be4f7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,5 +1,5 @@
-clickhouse (1.1.54371) unstable; urgency=low
+clickhouse (1.1.54372) unstable; urgency=low
 
   * Modified source code
 
- -- Fri, 23 Mar 2018 01:11:42 +0300
+ -- Mon, 02 Apr 2018 22:13:54 +0300
diff --git a/debian/control b/debian/control
index 46b743636a5..8f57ae258f4 100644
--- a/debian/control
+++ b/debian/control
@@ -50,11 +50,14 @@ Description: Server binary for clickhouse
  .
  This package provides clickhouse common configuration files
 
-Package: clickhouse-common-dbg
+Package: clickhouse-common-static-dbg
 Architecture: any
 Section: debug
 Priority: extra
 Depends: ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
+Replaces: clickhouse-common-dbg
+Provides: clickhouse-common-dbg
+Conflicts: clickhouse-common-dbg
 Description: debugging symbols for clickhouse-common-static
  This package contains the debugging symbols for clickhouse-common.
 
diff --git a/debian/rules b/debian/rules
index fe53eac4643..fb21adf9984 100755
--- a/debian/rules
+++ b/debian/rules
@@ -70,7 +70,7 @@ override_dh_clean:
 	dh_clean
 
 override_dh_strip:
-	dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-dbg
+	dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
 
 override_dh_install:
 	# Making docs
diff --git a/docs/ru/table_engines/mergetree.md b/docs/ru/table_engines/mergetree.md
index 714d368909d..b11e00f83d1 100644
--- a/docs/ru/table_engines/mergetree.md
+++ b/docs/ru/table_engines/mergetree.md
@@ -38,7 +38,7 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
 Для столбцов также пишутся «засечки» каждую index_granularity строку, чтобы данные можно было читать в определённом диапазоне.
 
 При чтении из таблицы, запрос SELECT анализируется на предмет того, можно ли использовать индексы.
-Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN над столбцами, входящими в первичный ключ / дату, а также логические связки над ними.
+Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN или LIKE с фиксированным префиксом, над столбцами или выражениями, входящими в первичный ключ или ключ партиционирования, либо над некоторыми частично монотонными функциями от этих столбцов, а также логические связки над такими выражениями.
 
 Таким образом, обеспечивается возможность быстро выполнять запросы по одному или многим диапазонам первичного ключа. Например, в указанном примере будут быстро работать запросы для конкретного счётчика; для конкретного счётчика и диапазона дат; для конкретного счётчика и даты, для нескольких счётчиков и диапазона дат и т. п.
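The documentation change above (in the Russian docs) adds `LIKE` with a fixed prefix to the conditions that can use the MergeTree primary key index. The usual way to exploit a fixed prefix is to turn it into a half-open key range; the sketch below shows only the idea and is not ClickHouse's `KeyCondition` code.

```cpp
// Sketch: "s LIKE 'prefix%'" is equivalent to the half-open range
// [prefix, prefix-with-last-byte-incremented). A prefix ending in 0xFF cannot be
// incremented, so the right end falls back to "unbounded".
#include <optional>
#include <string>
#include <utility>

/// Returns {left, right} such that: s matches 'prefix%'  <=>  left <= s && (!right || s < *right).
std::pair<std::string, std::optional<std::string>> prefixToRange(std::string prefix)
{
    std::string left = prefix;

    while (!prefix.empty() && static_cast<unsigned char>(prefix.back()) == 0xFF)
        prefix.pop_back();   /// cannot increment 0xFF, widen the range instead

    if (prefix.empty())
        return {left, std::nullopt};   /// right end unbounded

    prefix.back() = static_cast<char>(static_cast<unsigned char>(prefix.back()) + 1);
    return {left, prefix};
}
```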