diff --git a/cmake/find_poco.cmake b/cmake/find_poco.cmake index 315e27b798f..c0504b0df27 100644 --- a/cmake/find_poco.cmake +++ b/cmake/find_poco.cmake @@ -9,7 +9,7 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/CMakeLists.txt") endif () if (NOT USE_INTERNAL_POCO_LIBRARY) - find_package (Poco COMPONENTS Net NetSSL XML Data Crypto DataODBC MongoDB) + find_package (Poco COMPONENTS Net NetSSL XML SQL Data Crypto DataODBC MongoDB) endif () if (Poco_INCLUDE_DIRS AND Poco_Foundation_LIBRARY) @@ -24,6 +24,15 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) set (ENABLE_DATA_SQLITE 0 CACHE BOOL "") set (ENABLE_DATA_MYSQL 0 CACHE BOOL "") set (ENABLE_DATA_POSTGRESQL 0 CACHE BOOL "") + # new after 2.0.0: + set (POCO_ENABLE_ZIP 0 CACHE BOOL "") + set (POCO_ENABLE_PAGECOMPILER 0 CACHE BOOL "") + set (POCO_ENABLE_PAGECOMPILER_FILE2PAGE 0 CACHE BOOL "") + set (POCO_ENABLE_REDIS 0 CACHE BOOL "") + set (POCO_ENABLE_SQL_SQLITE 0 CACHE BOOL "") + set (POCO_ENABLE_SQL_MYSQL 0 CACHE BOOL "") + set (POCO_ENABLE_SQL_POSTGRESQL 0 CACHE BOOL "") + set (POCO_UNBUNDLED 1 CACHE BOOL "") set (POCO_UNBUNDLED_PCRE 0 CACHE BOOL "") set (POCO_UNBUNDLED_EXPAT 0 CACHE BOOL "") @@ -44,9 +53,25 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) endif () if (ODBC_FOUND) - set (Poco_DataODBC_FOUND 1) - set (Poco_DataODBC_LIBRARY PocoDataODBC ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) - set (Poco_DataODBC_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/") + if (EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/") + set (Poco_SQL_FOUND 1) + set (Poco_SQLODBC_FOUND 1) + set (Poco_SQL_INCLUDE_DIRS + "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/include" + "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include" + ) + set (Poco_SQLODBC_INCLUDE_DIRS + "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/" + "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" + ) + set (Poco_SQL_LIBRARY PocoSQL) + set (Poco_SQLODBC_LIBRARY PocoSQLODBC ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) + else () + set (Poco_DataODBC_FOUND 1) + set (Poco_DataODBC_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include") + set (Poco_Data_LIBRARY PocoData) + set (Poco_DataODBC_LIBRARY PocoDataODBC ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) + endif () endif () # TODO! fix internal ssl @@ -66,7 +91,6 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) set (Poco_Foundation_LIBRARY PocoFoundation) set (Poco_Util_LIBRARY PocoUtil) set (Poco_Net_LIBRARY PocoNet) - set (Poco_Data_LIBRARY PocoData) set (Poco_XML_LIBRARY PocoXML) endif () diff --git a/contrib/zookeeper-cmake/CMakeLists.txt b/contrib/zookeeper-cmake/CMakeLists.txt deleted file mode 100644 index df8859ffbeb..00000000000 --- a/contrib/zookeeper-cmake/CMakeLists.txt +++ /dev/null @@ -1,202 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -cmake_minimum_required(VERSION 3.0) - -# Modified version of CMakeLists.txt for ClickHouse. Doesn't link the library to libm. -# Otherwise we have extra dependency when compiling with the most fresh libc version. -# How to check: -# readelf -s ./clickhouse | grep -F 2.23 - -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/zookeeper/src/c) - -project(zookeeper VERSION 3.5.3) -set(email user@zookeeper.apache.org) -set(description "zookeeper C client") - -# general options -if(UNIX) - add_compile_options(-Wall -fPIC) -elseif(WIN32) - add_compile_options(/W3) -endif() -add_definitions(-DUSE_STATIC_LIB) - -# TODO: Enable /WX and /W4 on Windows. Currently there are ~1000 warnings. -# TODO: Add Solaris support. -# TODO: Add a shared library option. -# TODO: Specify symbols to export. -# TODO: Generate doxygen documentation. - -# Sync API option -option(WANT_SYNCAPI "Enables Sync API support" ON) -if(WANT_SYNCAPI) - add_definitions(-DTHREADED) - if(WIN32) - # Note that the generator expression ensures that `/MTd` is used when Debug - # configurations are built. - add_compile_options(/MT$<$:d>) - endif() -endif() - -# CppUnit option -if(WIN32 OR APPLE) - # The tests do not yet compile on Windows or macOS, - # so we set this to off by default. - # - # Note that CMake does not have expressions except in conditionals, - # so we're left with this if/else/endif pattern. - set(DEFAULT_WANT_CPPUNIT OFF) -else() - set(DEFAULT_WANT_CPPUNIT ON) -endif() -option(WANT_CPPUNIT "Enables CppUnit and tests" ${DEFAULT_WANT_CPPUNIT}) - -# The function `to_have(in out)` converts a header name like `arpa/inet.h` -# into an Autotools style preprocessor definition `HAVE_ARPA_INET_H`. -# This is then set or unset in `configure_file()` step. -# -# Note that CMake functions do not have return values; instead an "out" -# variable must be passed, and explicitly set with parent scope. -function(to_have in out) - string(TOUPPER ${in} str) - string(REGEX REPLACE "/|\\." 
"_" str ${str}) - set(${out} "HAVE_${str}" PARENT_SCOPE) -endfunction() - -# include file checks -foreach(f generated/zookeeper.jute.h generated/zookeeper.jute.c) - if(EXISTS "${LIBRARY_DIR}/${f}") - to_have(${f} name) - set(${name} 1) - else() - message(FATAL_ERROR - "jute files are missing!\n" - "Please run 'ant compile_jute' while in the ZooKeeper top level directory.") - endif() -endforeach() - -# header checks -include(CheckIncludeFile) -set(check_headers - arpa/inet.h - dlfcn.h - fcntl.h - inttypes.h - memory.h - netdb.h - netinet/in.h - stdint.h - stdlib.h - string.h - strings.h - sys/socket.h - sys/stat.h - sys/time.h - sys/types.h - unistd.h - sys/utsname.h) - -foreach(f ${check_headers}) - to_have(${f} name) - check_include_file(${f} ${name}) -endforeach() - -# function checks -include(CheckFunctionExists) -set(check_functions - getcwd - gethostbyname - gethostname - getlogin - getpwuid_r - gettimeofday - getuid - memmove - memset - poll - socket - strchr - strdup - strerror - strtol) - -foreach(fn ${check_functions}) - to_have(${fn} name) - check_function_exists(${fn} ${name}) -endforeach() - -# library checks -set(check_libraries rt m pthread) -foreach(lib ${check_libraries}) - to_have("lib${lib}" name) - find_library(${name} ${lib}) -endforeach() - -# IPv6 check -include(CheckStructHasMember) -check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h" ZOO_IPV6_ENABLED) - -# configure -configure_file(${LIBRARY_DIR}/cmake_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/config.h) - -# hashtable library -set(hashtable_sources ${LIBRARY_DIR}/src/hashtable/hashtable_itr.c ${LIBRARY_DIR}/src/hashtable/hashtable.c) -add_library(hashtable STATIC ${hashtable_sources}) - -# zookeeper library -set(zookeeper_sources - ${LIBRARY_DIR}/src/zookeeper.c - ${LIBRARY_DIR}/src/recordio.c - ${LIBRARY_DIR}/generated/zookeeper.jute.c - ${LIBRARY_DIR}/src/zk_log.c - ${LIBRARY_DIR}/src/zk_hashtable.c -) -# src/addrvec.c - -if(WANT_SYNCAPI) - list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/mt_adaptor.c) -else() - list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/st_adaptor.c) -endif() - -if(WIN32) - list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/winport.c) -endif() - -add_library(zookeeper STATIC ${zookeeper_sources}) -target_include_directories(zookeeper BEFORE PUBLIC ${LIBRARY_DIR}/include ${CMAKE_CURRENT_BINARY_DIR}/include ${LIBRARY_DIR}/generated) -target_link_libraries(zookeeper PUBLIC - hashtable - $<$:rt> # clock_gettime - $<$:ws2_32>) # Winsock 2.0 - -if(WANT_SYNCAPI AND NOT WIN32) - find_package(Threads REQUIRED) - target_link_libraries(zookeeper PUBLIC Threads::Threads) -endif() - -# cli executable -add_executable(cli ${LIBRARY_DIR}/src/cli.c) -target_link_libraries(cli zookeeper) -target_link_libraries(cli $<$:m>) - -# load_gen executable -if(WANT_SYNCAPI AND NOT WIN32) - add_executable(load_gen ${LIBRARY_DIR}/src/load_gen.c) - target_link_libraries(load_gen zookeeper) - target_link_libraries(load_gen $<$:m>) -endif() diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index d740ce5a45c..e45a53427cb 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -161,11 +161,20 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY) target_include_directories (clickhouse_common_io BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) endif () -if (Poco_DataODBC_FOUND) - target_link_libraries (dbms ${Poco_DataODBC_LIBRARY}) - target_include_directories (dbms PRIVATE ${ODBC_INCLUDE_DIRECTORIES}) +if (Poco_SQLODBC_FOUND) + target_link_libraries (clickhouse_common_io ${Poco_SQL_LIBRARY}) + 
target_include_directories (clickhouse_common_io PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQL_INCLUDE_DIRS}) + target_link_libraries (dbms ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) + target_include_directories (dbms PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQLODBC_INCLUDE_DIRS} PUBLIC ${Poco_SQL_INCLUDE_DIRS}) endif() +if (Poco_DataODBC_FOUND) + target_link_libraries (clickhouse_common_io ${Poco_Data_LIBRARY}) + target_link_libraries (dbms ${Poco_DataODBC_LIBRARY}) + target_include_directories (dbms PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_DataODBC_INCLUDE_DIRS}) +endif() + + if (Poco_MongoDB_FOUND) target_link_libraries (dbms ${Poco_MongoDB_LIBRARY}) endif() @@ -212,6 +221,7 @@ endif () target_include_directories (dbms PUBLIC ${DBMS_INCLUDE_DIR}) target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) target_include_directories (clickhouse_common_io PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) +target_include_directories (clickhouse_common_io PUBLIC ${Poco_DataODBC_INCLUDE_DIRS}) target_include_directories (clickhouse_common_io BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) # also for copy_headers.sh: diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index 990c638bf46..23bc9eabf29 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -1,6 +1,6 @@ # This strings autochanged from release_lib.sh: -set(VERSION_DESCRIBE v1.1.54371-testing) -set(VERSION_REVISION 54371) +set(VERSION_DESCRIBE v1.1.54372-testing) +set(VERSION_REVISION 54372) # end of autochange set (VERSION_MAJOR 1) diff --git a/dbms/src/Client/MultiplexedConnections.cpp b/dbms/src/Client/MultiplexedConnections.cpp index fb2baa105a6..8fe27ecf7fa 100644 --- a/dbms/src/Client/MultiplexedConnections.cpp +++ b/dbms/src/Client/MultiplexedConnections.cpp @@ -280,8 +280,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead read_list.push_back(*connection->socket); } - /// If no data was found, then we check if there are any connections - /// ready for reading. + /// If no data was found, then we check if there are any connections ready for reading. if (read_list.empty()) { Poco::Net::Socket::SocketList write_list; @@ -300,6 +299,9 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead throw Exception("Timeout exceeded while reading from " + dumpAddressesUnlocked(), ErrorCodes::TIMEOUT_EXCEEDED); } + /// TODO Absolutely wrong code: read_list could be empty; rand() is not thread safe and has low quality; motivation of rand is unclear. + /// This code path is disabled by default. 
+ auto & socket = read_list[rand() % read_list.size()]; if (fd_to_replica_state_idx.empty()) { diff --git a/dbms/src/Common/Config/ConfigProcessor.cpp b/dbms/src/Common/Config/ConfigProcessor.cpp index 6100173c318..e303b580ba7 100644 --- a/dbms/src/Common/Config/ConfigProcessor.cpp +++ b/dbms/src/Common/Config/ConfigProcessor.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #define PREPROCESSED_SUFFIX "-preprocessed" @@ -382,15 +383,17 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string & Poco::File merge_dir(merge_dir_name); if (!merge_dir.exists() || !merge_dir.isDirectory()) continue; + for (Poco::DirectoryIterator it(merge_dir_name); it != Poco::DirectoryIterator(); ++it) { Poco::File & file = *it; - if (file.isFile() - && (endsWith(file.path(), ".xml") || endsWith(file.path(), ".conf")) - && !startsWith(file.path(), ".")) // skip temporary files - { + Poco::Path path(file.path()); + std::string extension = path.getExtension(); + std::string base_name = path.getBaseName(); + + // Skip non-config and temporary files + if (file.isFile() && (extension == "xml" || extension == "conf") && !startsWith(base_name, ".")) files.push_back(file.path()); - } } } diff --git a/dbms/src/Common/Config/ConfigReloader.cpp b/dbms/src/Common/Config/ConfigReloader.cpp index e06fca6316a..3c6311ee021 100644 --- a/dbms/src/Common/Config/ConfigReloader.cpp +++ b/dbms/src/Common/Config/ConfigReloader.cpp @@ -67,6 +67,7 @@ void ConfigReloader::run() catch (...) { tryLogCurrentException(log, __PRETTY_FUNCTION__); + std::this_thread::sleep_for(reload_interval); } } } diff --git a/dbms/src/Common/CurrentMetrics.cpp b/dbms/src/Common/CurrentMetrics.cpp index ead086e2b67..af29cb4912f 100644 --- a/dbms/src/Common/CurrentMetrics.cpp +++ b/dbms/src/Common/CurrentMetrics.cpp @@ -28,7 +28,9 @@ M(MemoryTrackingForMerges) \ M(LeaderElection) \ M(EphemeralNode) \ + M(ZooKeeperSession) \ M(ZooKeeperWatch) \ + M(ZooKeeperRequest) \ M(DelayedInserts) \ M(ContextLockWait) \ M(StorageBufferRows) \ diff --git a/dbms/src/Common/ProfileEvents.cpp b/dbms/src/Common/ProfileEvents.cpp index 71bf37c1a3a..027f52b2f37 100644 --- a/dbms/src/Common/ProfileEvents.cpp +++ b/dbms/src/Common/ProfileEvents.cpp @@ -56,14 +56,20 @@ \ M(ZooKeeperInit) \ M(ZooKeeperTransactions) \ - M(ZooKeeperGetChildren) \ + M(ZooKeeperList) \ M(ZooKeeperCreate) \ M(ZooKeeperRemove) \ M(ZooKeeperExists) \ M(ZooKeeperGet) \ M(ZooKeeperSet) \ M(ZooKeeperMulti) \ + M(ZooKeeperCheck) \ + M(ZooKeeperClose) \ + M(ZooKeeperWatchResponse) \ M(ZooKeeperExceptions) \ + M(ZooKeeperWaitMicroseconds) \ + M(ZooKeeperBytesSent) \ + M(ZooKeeperBytesReceived) \ \ M(DistributedConnectionFailTry) \ M(DistributedConnectionMissingTable) \ diff --git a/dbms/src/Common/ZooKeeper/KeeperException.h b/dbms/src/Common/ZooKeeper/KeeperException.h index d310cb8dc1d..9afe8d46873 100644 --- a/dbms/src/Common/ZooKeeper/KeeperException.h +++ b/dbms/src/Common/ZooKeeper/KeeperException.h @@ -1,21 +1,6 @@ #pragma once -#include + #include "Types.h" -#include - - -namespace DB -{ - namespace ErrorCodes - { - extern const int KEEPER_EXCEPTION; - } -} - -namespace ProfileEvents -{ - extern const Event ZooKeeperExceptions; -} namespace zkutil @@ -43,42 +28,7 @@ inline bool isUserError(int32_t zk_return_code) } -class KeeperException : public DB::Exception -{ -private: - /// delegate constructor, used to minimize repetition; last parameter used for overload resolution - KeeperException(const std::string & msg, const int32_t code, int) - : DB::Exception(msg, 
DB::ErrorCodes::KEEPER_EXCEPTION), code(code) { incrementEventCounter(); } - -public: - KeeperException(const std::string & msg, const int32_t code) - : KeeperException(msg + " (" + ZooKeeperImpl::ZooKeeper::errorMessage(code) + ")", code, 0) {} - explicit KeeperException(const int32_t code) : KeeperException(ZooKeeperImpl::ZooKeeper::errorMessage(code), code, 0) {} - KeeperException(const int32_t code, const std::string & path) - : KeeperException(std::string{ZooKeeperImpl::ZooKeeper::errorMessage(code)} + ", path: " + path, code, 0) {} - - KeeperException(const KeeperException & exc) : DB::Exception(exc), code(exc.code) { incrementEventCounter(); } - - const char * name() const throw() override { return "zkutil::KeeperException"; } - const char * className() const throw() override { return "zkutil::KeeperException"; } - KeeperException * clone() const override { return new KeeperException(*this); } - - /// Any error related with network or master election - /// In case of these errors you should reinitialize ZooKeeper session. - bool isHardwareError() const - { - return zkutil::isHardwareError(code); - } - - const int32_t code; - -private: - static void incrementEventCounter() - { - ProfileEvents::increment(ProfileEvents::ZooKeeperExceptions); - } - -}; +using KeeperException = ZooKeeperImpl::Exception; class KeeperMultiException : public KeeperException diff --git a/dbms/src/Common/ZooKeeper/Lock.cpp b/dbms/src/Common/ZooKeeper/Lock.cpp index f7e2881f9d3..826e5b742aa 100644 --- a/dbms/src/Common/ZooKeeper/Lock.cpp +++ b/dbms/src/Common/ZooKeeper/Lock.cpp @@ -1,5 +1,7 @@ +#include "KeeperException.h" #include "Lock.h" + using namespace zkutil; bool Lock::tryLock() diff --git a/dbms/src/Common/ZooKeeper/Lock.h b/dbms/src/Common/ZooKeeper/Lock.h index 17ded48d26b..683470cf5a5 100644 --- a/dbms/src/Common/ZooKeeper/Lock.h +++ b/dbms/src/Common/ZooKeeper/Lock.h @@ -40,7 +40,7 @@ namespace zkutil { unlock(); } - catch (const zkutil::KeeperException & e) + catch (...) { DB::tryLogCurrentException(__PRETTY_FUNCTION__); } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp index 0280992a8b6..800bbc04a73 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp @@ -1,4 +1,5 @@ #include "ZooKeeper.h" +#include "KeeperException.h" #include #include @@ -6,7 +7,6 @@ #include #include -#include #include #include #include @@ -15,25 +15,6 @@ #define ZOOKEEPER_OPERATION_TIMEOUT_MS 1000 -namespace ProfileEvents -{ - extern const Event ZooKeeperInit; - extern const Event ZooKeeperTransactions; - extern const Event ZooKeeperCreate; - extern const Event ZooKeeperRemove; - extern const Event ZooKeeperExists; - extern const Event ZooKeeperMulti; - extern const Event ZooKeeperGet; - extern const Event ZooKeeperSet; - extern const Event ZooKeeperGetChildren; -} - -namespace CurrentMetrics -{ - extern const Metric ZooKeeperWatch; -} - - namespace DB { namespace ErrorCodes @@ -84,8 +65,6 @@ void ZooKeeper::init(const std::string & hosts_, const std::string & identity_, Poco::Timespan(0, ZOOKEEPER_CONNECTION_TIMEOUT_MS * 1000), Poco::Timespan(0, ZOOKEEPER_OPERATION_TIMEOUT_MS * 1000)); - ProfileEvents::increment(ProfileEvents::ZooKeeperInit); - LOG_TRACE(log, "initialized, hosts: " << hosts << (chroot.empty() ? 
"" : ", chroot: " + chroot)); if (!chroot.empty() && !exists("/")) @@ -232,9 +211,6 @@ int32_t ZooKeeper::createImpl(const std::string & path, const std::string & data impl->create(path, data, mode & 1, mode & 2, {}, callback); /// TODO better mode event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperCreate); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -302,9 +278,6 @@ int32_t ZooKeeper::removeImpl(const std::string & path, int32_t version) impl->remove(path, version, callback); event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperRemove); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -339,9 +312,6 @@ int32_t ZooKeeper::existsImpl(const std::string & path, Stat * stat, WatchCallba impl->exists(path, callback, watch_callback); event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperExists); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -380,9 +350,6 @@ int32_t ZooKeeper::getImpl(const std::string & path, std::string & res, Stat * s impl->get(path, callback, watch_callback); event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperGet); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -431,9 +398,6 @@ int32_t ZooKeeper::setImpl(const std::string & path, const std::string & data, impl->set(path, data, version, callback); event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperSet); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -483,9 +447,6 @@ int32_t ZooKeeper::multiImpl(const Requests & requests, Responses & responses) impl->multi(requests, callback); event.wait(); - - ProfileEvents::increment(ProfileEvents::ZooKeeperMulti); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return code; } @@ -592,9 +553,6 @@ void ZooKeeper::waitForDisappear(const std::string & path) impl->exists(path, callback, watch); event.wait(); - ProfileEvents::increment(ProfileEvents::ZooKeeperExists); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); - if (code == ZooKeeperImpl::ZooKeeper::ZNONODE) return; @@ -643,9 +601,6 @@ std::future ZooKeeper::asyncGet(const std }; impl->get(path, std::move(callback), {}); - - ProfileEvents::increment(ProfileEvents::ZooKeeperGet); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -664,9 +619,6 @@ std::future ZooKeeper::asyncTryGet(const }; impl->get(path, std::move(callback), {}); - - ProfileEvents::increment(ProfileEvents::ZooKeeperGet); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -684,9 +636,6 @@ std::future ZooKeeper::asyncExists(con }; impl->exists(path, std::move(callback), {}); - - ProfileEvents::increment(ProfileEvents::ZooKeeperExists); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -705,9 +654,6 @@ std::future ZooKeeper::asyncGetChildren( }; impl->list(path, std::move(callback), {}); - - ProfileEvents::increment(ProfileEvents::ZooKeeperGetChildren); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -725,9 +671,6 @@ std::future ZooKeeper::asyncRemove(con }; impl->remove(path, version, std::move(callback)); - - ProfileEvents::increment(ProfileEvents::ZooKeeperRemove); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -745,9 +688,6 @@ std::future ZooKeeper::asyncTryRemove( }; impl->remove(path, version, 
std::move(callback)); - - ProfileEvents::increment(ProfileEvents::ZooKeeperRemove); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -762,9 +702,6 @@ std::future ZooKeeper::tryAsyncMulti(co }; impl->multi(ops, std::move(callback)); - - ProfileEvents::increment(ProfileEvents::ZooKeeperMulti); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } @@ -782,9 +719,6 @@ std::future ZooKeeper::asyncMulti(const }; impl->multi(ops, std::move(callback)); - - ProfileEvents::increment(ProfileEvents::ZooKeeperMulti); - ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); return future; } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/dbms/src/Common/ZooKeeper/ZooKeeper.h index ede8c255a77..25b6f4a993a 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.h @@ -1,7 +1,6 @@ #pragma once #include "Types.h" -#include "KeeperException.h" #include #include #include @@ -280,7 +279,7 @@ public: { zookeeper.tryRemove(path); } - catch (const KeeperException & e) + catch (...) { ProfileEvents::increment(ProfileEvents::CannotRemoveEphemeralNode); DB::tryLogCurrentException(__PRETTY_FUNCTION__); diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 2e21804026b..202d320fea2 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1,9 +1,11 @@ #include #include +#include #include #include #include +#include #include #include @@ -11,7 +13,40 @@ #include -//#include + +namespace DB +{ + namespace ErrorCodes + { + extern const int KEEPER_EXCEPTION; + } +} + +namespace ProfileEvents +{ + extern const Event ZooKeeperExceptions; + extern const Event ZooKeeperInit; + extern const Event ZooKeeperTransactions; + extern const Event ZooKeeperCreate; + extern const Event ZooKeeperRemove; + extern const Event ZooKeeperExists; + extern const Event ZooKeeperMulti; + extern const Event ZooKeeperGet; + extern const Event ZooKeeperSet; + extern const Event ZooKeeperList; + extern const Event ZooKeeperCheck; + extern const Event ZooKeeperClose; + extern const Event ZooKeeperWaitMicroseconds; + extern const Event ZooKeeperBytesSent; + extern const Event ZooKeeperBytesReceived; + extern const Event ZooKeeperWatchResponse; +} + +namespace CurrentMetrics +{ + extern const Metric ZooKeeperRequest; + extern const Metric ZooKeeperWatch; +} /** ZooKeeper wire protocol. 
@@ -228,6 +263,33 @@ after: namespace ZooKeeperImpl { +Exception::Exception(const std::string & msg, const int32_t code, int) + : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code) +{ + ProfileEvents::increment(ProfileEvents::ZooKeeperExceptions); +} + +Exception::Exception(const std::string & msg, const int32_t code) + : Exception(msg + " (" + ZooKeeperImpl::ZooKeeper::errorMessage(code) + ")", code, 0) +{ +} + +Exception::Exception(const int32_t code) + : Exception(ZooKeeperImpl::ZooKeeper::errorMessage(code), code, 0) +{ +} + +Exception::Exception(const int32_t code, const std::string & path) + : Exception(std::string{ZooKeeperImpl::ZooKeeper::errorMessage(code)} + ", path: " + path, code, 0) +{ +} + +Exception::Exception(const Exception & exc) + : DB::Exception(exc), code(exc.code) +{ +} + + using namespace DB; @@ -304,10 +366,10 @@ void read(String & s, ReadBuffer & in) static constexpr int32_t max_string_size = 1 << 20; int32_t size = 0; read(size, in); - if (size < 0) - throw Exception("Negative size"); /// TODO Actually it means that zookeeper node have NULL value. Maybe better to treat it like empty string. + if (size < 0) /// TODO Actually it means that zookeeper node has NULL value. Maybe better to treat it like empty string. + throw Exception("Negative size while reading string from ZooKeeper", ZooKeeper::ZMARSHALLINGERROR); if (size > max_string_size) - throw Exception("Too large string size"); /// TODO error code + throw Exception("Too large string size while reading from ZooKeeper", ZooKeeper::ZMARSHALLINGERROR); s.resize(size); in.read(&s[0], size); } @@ -317,7 +379,7 @@ template void read(std::array & s, ReadBuffer & in) int32_t size = 0; read(size, in); if (size != N) - throw Exception("Unexpected array size"); /// TODO error code + throw Exception("Unexpected array size while reading from ZooKeeper", ZooKeeper::ZMARSHALLINGERROR); in.read(&s[0], N); } @@ -347,9 +409,9 @@ template void read(std::vector & arr, ReadBuffer & in) int32_t size = 0; read(size, in); if (size < 0) - throw Exception("Negative size"); + throw Exception("Negative size while reading array from ZooKeeper", ZooKeeper::ZMARSHALLINGERROR); if (size > max_array_size) - throw Exception("Too large array size"); /// TODO error code + throw Exception("Too large array size while reading from ZooKeeper", ZooKeeper::ZMARSHALLINGERROR); arr.resize(size); for (auto & elem : arr) read(elem, in); @@ -478,6 +540,8 @@ ZooKeeper::ZooKeeper( send_thread = std::thread([this] { sendThread(); }); receive_thread = std::thread([this] { receiveThread(); }); + + ProfileEvents::increment(ProfileEvents::ZooKeeperInit); } @@ -488,6 +552,7 @@ void ZooKeeper::connect( static constexpr size_t num_tries = 3; bool connected = false; + WriteBufferFromOwnString fail_reasons; for (size_t try_no = 0; try_no < num_tries; ++try_no) { for (const auto & address : addresses) @@ -500,10 +565,11 @@ void ZooKeeper::connect( } catch (const Poco::Net::NetException & e) { - /// TODO log exception + fail_reasons << "\n" << getCurrentExceptionMessage(false); } catch (const Poco::TimeoutException & e) { + fail_reasons << "\n" << getCurrentExceptionMessage(false); } } @@ -512,7 +578,22 @@ void ZooKeeper::connect( } if (!connected) - throw Exception("All connection tries failed"); /// TODO more info; error code + { + WriteBufferFromOwnString out; + out << "All connection tries failed while connecting to ZooKeeper. 
Addresses: "; + bool first = true; + for (const auto & address : addresses) + { + if (first) + first = false; + else + out << ", "; + out << address.toString(); + } + + out << fail_reasons.str(); + throw Exception(out.str(), ZCONNECTIONLOSS); + } socket.setReceiveTimeout(operation_timeout); socket.setSendTimeout(operation_timeout); @@ -553,15 +634,15 @@ void ZooKeeper::receiveHandshake() read(handshake_length); if (handshake_length != 36) - throw Exception("Unexpected handshake length received: " + toString(handshake_length)); + throw Exception("Unexpected handshake length received: " + toString(handshake_length), ZMARSHALLINGERROR); read(protocol_version_read); if (protocol_version_read != protocol_version) - throw Exception("Unexpected protocol version: " + toString(protocol_version_read)); + throw Exception("Unexpected protocol version: " + toString(protocol_version_read), ZMARSHALLINGERROR); read(timeout); if (timeout != session_timeout.totalMilliseconds()) - throw Exception("Received different session timeout from server: " + toString(timeout)); + throw Exception("Received different session timeout from server: " + toString(timeout), ZMARSHALLINGERROR); read(session_id); read(passwd); @@ -588,14 +669,17 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data) read(err); if (xid != auth_xid) - throw Exception("Unexpected event recievent in reply to auth request: " + toString(xid)); + throw Exception("Unexpected event recieved in reply to auth request: " + toString(xid), + ZMARSHALLINGERROR); int32_t actual_length = in->count() - count_before_event; if (length != actual_length) - throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length)); + throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length), + ZMARSHALLINGERROR); if (err) - throw Exception("Error received in reply to auth request. Code: " + toString(err) + ". Message: " + String(errorMessage(err))); + throw Exception("Error received in reply to auth request. Code: " + toString(err) + ". Message: " + String(errorMessage(err)), + ZMARSHALLINGERROR); } @@ -607,6 +691,8 @@ void ZooKeeper::sendThread() { while (!expired) { + auto prev_bytes_sent = out->count(); + auto now = clock::now(); auto next_heartbeat_time = prev_heartbeat_time + std::chrono::milliseconds(session_timeout.totalMilliseconds() / 3); @@ -635,6 +721,8 @@ void ZooKeeper::sendThread() request.xid = ping_xid; request.write(*out); } + + ProfileEvents::increment(ProfileEvents::ZooKeeperBytesSent, out->count() - prev_bytes_sent); } } catch (...) @@ -657,19 +745,21 @@ void ZooKeeper::receiveThread() Int64 waited = 0; while (!expired) { + auto prev_bytes_received = in->count(); + clock::time_point now = clock::now(); UInt64 max_wait = operation_timeout.totalMicroseconds(); - bool has_operations = false; + std::optional earliest_operation; { std::lock_guard lock(operations_mutex); if (!operations.empty()) { /// Operations are ordered by xid (and consequently, by time). 
- has_operations = true; - auto earliest_operation_deadline = operations.begin()->second.time + std::chrono::microseconds(operation_timeout.totalMicroseconds()); + earliest_operation = operations.begin()->second; + auto earliest_operation_deadline = earliest_operation->time + std::chrono::microseconds(operation_timeout.totalMicroseconds()); if (now > earliest_operation_deadline) - throw Exception("Operation timeout"); + throw Exception("Operation timeout (deadline already expired) for path: " + earliest_operation->request->getPath(), ZOPERATIONTIMEOUT); max_wait = std::chrono::duration_cast(earliest_operation_deadline - now).count(); } } @@ -684,13 +774,15 @@ void ZooKeeper::receiveThread() } else { - if (has_operations) - throw Exception("Operation timeout"); + if (earliest_operation) + throw Exception("Operation timeout (no response) for path: " + earliest_operation->request->getPath(), ZOPERATIONTIMEOUT); waited += max_wait; if (waited > session_timeout.totalMicroseconds()) - throw Exception("Nothing is received in session timeout"); + throw Exception("Nothing is received in session timeout", ZOPERATIONTIMEOUT); } + + ProfileEvents::increment(ProfileEvents::ZooKeeperBytesReceived, in->count() - prev_bytes_received); } } catch (...) @@ -729,15 +821,18 @@ ZooKeeper::ResponsePtr ZooKeeper::CloseRequest::makeResponse() const { return st void addRootPath(String & path, const String & root_path) { if (path.empty()) - throw Exception("Path cannot be empty"); + throw Exception("Path cannot be empty", ZooKeeper::ZBADARGUMENTS); if (path[0] != '/') - throw Exception("Path must begin with /"); + throw Exception("Path must begin with /", ZooKeeper::ZBADARGUMENTS); if (root_path.empty()) return; - path = root_path + path; + if (path.size() == 1) /// "/" + path = root_path; + else + path = root_path + path; } void removeRootPath(String & path, const String & root_path) @@ -746,7 +841,7 @@ void removeRootPath(String & path, const String & root_path) return; if (path.size() <= root_path.size()) - throw Exception("Received path is not longer than root_path"); + throw Exception("Received path is not longer than root_path", ZooKeeper::ZDATAINCONSISTENCY); path = path.substr(root_path.size()); } @@ -795,14 +890,13 @@ void ZooKeeper::receiveEvent() if (xid == ping_xid) { if (err) - throw Exception("Received error in heartbeat response: " + String(errorMessage(err))); + throw Exception("Received error in heartbeat response: " + String(errorMessage(err)), ZRUNTIMEINCONSISTENCY); response = std::make_shared(); - -// std::cerr << "Received heartbeat\n"; } else if (xid == watch_xid) { + ProfileEvents::increment(ProfileEvents::ZooKeeperWatchResponse); response = std::make_shared(); request_info.callback = [this](const Response & response) @@ -829,11 +923,10 @@ void ZooKeeper::receiveEvent() if (callback) callback(watch_response); /// NOTE We may process callbacks not under mutex. 
+ CurrentMetrics::sub(CurrentMetrics::ZooKeeperWatch, it->second.size()); watches.erase(it); } }; - -// std::cerr << "Received watch\n"; } else { @@ -842,14 +935,13 @@ void ZooKeeper::receiveEvent() auto it = operations.find(xid); if (it == operations.end()) - throw Exception("Received response for unknown xid"); + throw Exception("Received response for unknown xid", ZRUNTIMEINCONSISTENCY); request_info = std::move(it->second); operations.erase(it); + CurrentMetrics::sub(CurrentMetrics::ZooKeeperRequest); } -// std::cerr << "Received response: " << request_info.request->getOpNum() << "\n"; - response = request_info.request->makeResponse(); } @@ -863,7 +955,10 @@ void ZooKeeper::receiveEvent() int32_t actual_length = in->count() - count_before_event; if (length != actual_length) - throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length)); + throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length), ZMARSHALLINGERROR); + + auto elapsed_microseconds = std::chrono::duration_cast(clock::now() - request_info.time).count(); + ProfileEvents::increment(ProfileEvents::ZooKeeperWaitMicroseconds, elapsed_microseconds); if (request_info.callback) request_info.callback(*response); @@ -902,6 +997,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) request_info.callback(*response); } + CurrentMetrics::sub(CurrentMetrics::ZooKeeperRequest, operations.size()); operations.clear(); } @@ -920,6 +1016,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) callback(response); } + CurrentMetrics::sub(CurrentMetrics::ZooKeeperWatch, watches.size()); watches.clear(); } } @@ -1055,12 +1152,13 @@ void ZooKeeper::ErrorResponse::readImpl(ReadBuffer & in) ZooKeeperImpl::read(read_error, in); if (read_error != error) - throw Exception("Error code in ErrorResponse (" + toString(read_error) + ") doesn't match error code in header (" + toString(error) + ")"); + throw Exception("Error code in ErrorResponse (" + toString(read_error) + ") doesn't match error code in header (" + toString(error) + ")", + ZMARSHALLINGERROR); } void ZooKeeper::CloseResponse::readImpl(ReadBuffer &) { - throw Exception("Received response for close request"); + throw Exception("Received response for close request", ZRUNTIMEINCONSISTENCY); } ZooKeeper::MultiResponse::MultiResponse(const Requests & requests) @@ -1083,10 +1181,8 @@ void ZooKeeper::MultiResponse::readImpl(ReadBuffer & in) ZooKeeperImpl::read(done, in); ZooKeeperImpl::read(op_error, in); -// std::cerr << "Received result for multi: " << op_num << "\n"; - if (done) - throw Exception("Not enough results received for multi transaction"); + throw Exception("Not enough results received for multi transaction", ZMARSHALLINGERROR); /// op_num == -1 is special for multi transaction. /// For unknown reason, error code is duplicated in header and in response body. 
@@ -1120,11 +1216,11 @@ void ZooKeeper::MultiResponse::readImpl(ReadBuffer & in) ZooKeeperImpl::read(error, in); if (!done) - throw Exception("Too many results received for multi transaction"); + throw Exception("Too many results received for multi transaction", ZMARSHALLINGERROR); if (op_num != -1) - throw Exception("Unexpected op_num received at the end of results for multi transaction"); + throw Exception("Unexpected op_num received at the end of results for multi transaction", ZMARSHALLINGERROR); if (error != -1) - throw Exception("Unexpected error value received at the end of results for multi transaction"); + throw Exception("Unexpected error value received at the end of results for multi transaction", ZMARSHALLINGERROR); } } @@ -1133,7 +1229,7 @@ void ZooKeeper::pushRequest(RequestInfo && info) { /// If the request is close request, we push it even after session is expired - because it will signal sending thread to stop. if (expired && info.request->xid != close_xid) - throw Exception("Session expired"); + throw Exception("Session expired", ZSESSIONEXPIRED); info.request->addRootPath(root_path); @@ -1143,10 +1239,13 @@ void ZooKeeper::pushRequest(RequestInfo && info) { info.request->xid = xid.fetch_add(1); if (info.request->xid < 0) - throw Exception("XID overflow"); + throw Exception("XID overflow", ZSESSIONEXPIRED); } + ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions); + { + CurrentMetrics::add(CurrentMetrics::ZooKeeperRequest); std::lock_guard lock(operations_mutex); operations[info.request->xid] = info; } @@ -1154,12 +1253,13 @@ void ZooKeeper::pushRequest(RequestInfo && info) if (info.watch) { info.request->has_watch = true; + CurrentMetrics::add(CurrentMetrics::ZooKeeperWatch); std::lock_guard lock(watches_mutex); watches[info.request->getPath()].emplace_back(std::move(info.watch)); } - if (!requests.tryPush(info.request, session_timeout.totalMilliseconds())) - throw Exception("Cannot push request to queue within session timeout"); + if (!requests.tryPush(info.request, operation_timeout.totalMilliseconds())) + throw Exception("Cannot push request to queue within operation timeout", ZOPERATIONTIMEOUT); } @@ -1183,6 +1283,7 @@ void ZooKeeper::create( request_info.callback = [callback](const Response & response) { callback(typeid_cast(response)); }; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperCreate); } @@ -1200,6 +1301,7 @@ void ZooKeeper::remove( request_info.callback = [callback](const Response & response) { callback(typeid_cast(response)); }; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperRemove); } @@ -1217,6 +1319,7 @@ void ZooKeeper::exists( request_info.watch = watch; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperExists); } @@ -1234,6 +1337,7 @@ void ZooKeeper::get( request_info.watch = watch; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperGet); } @@ -1253,6 +1357,7 @@ void ZooKeeper::set( request_info.callback = [callback](const Response & response) { callback(typeid_cast(response)); }; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperSet); } @@ -1270,6 +1375,7 @@ void ZooKeeper::list( request_info.watch = watch; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperList); } @@ -1287,6 +1393,7 @@ void ZooKeeper::check( request_info.callback = [callback](const Response & response) { callback(typeid_cast(response)); 
}; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperCheck); } @@ -1307,6 +1414,7 @@ void ZooKeeper::multi( request_info.callback = [callback](const Response & response) { callback(typeid_cast(response)); }; pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperMulti); } @@ -1319,6 +1427,7 @@ void ZooKeeper::close() request_info.request = std::make_shared(std::move(request)); pushRequest(std::move(request_info)); + ProfileEvents::increment(ProfileEvents::ZooKeeperClose); } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h index 512e6a9a421..b3d8a057c49 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -23,12 +24,38 @@ #include +namespace CurrentMetrics +{ + extern const Metric ZooKeeperSession; +} + + namespace ZooKeeperImpl { using namespace DB; +class Exception : public DB::Exception +{ +private: + /// Delegate constructor, used to minimize repetition; last parameter used for overload resolution. + Exception(const std::string & msg, const int32_t code, int); + +public: + explicit Exception(const int32_t code); + Exception(const std::string & msg, const int32_t code); + Exception(const int32_t code, const std::string & path); + Exception(const Exception & exc); + + const char * name() const throw() override { return "ZooKeeperImpl::Exception"; } + const char * className() const throw() override { return "ZooKeeperImpl::Exception"; } + Exception * clone() const override { return new Exception(*this); } + + const int32_t code; +}; + + /** Usage scenario: * - create an object and issue commands; * - you provide callbacks for your commands; callbacks are invoked in internal thread and must be cheap: @@ -543,6 +570,8 @@ private: template void read(T &); + + CurrentMetrics::Increment metric_increment{CurrentMetrics::ZooKeeperSession}; }; }; diff --git a/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp b/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp index 1a564854ebf..edb51147fd8 100644 --- a/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp +++ b/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp b/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp index 28bd45dfe33..c2c34952968 100644 --- a/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp +++ b/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp b/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp index 670a7bcb75f..78817734cd4 100644 --- a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp +++ b/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/dbms/src/Common/config.h.in b/dbms/src/Common/config.h.in index aa7f75182d0..2531d9ced70 100644 --- a/dbms/src/Common/config.h.in +++ b/dbms/src/Common/config.h.in @@ -9,6 +9,7 @@ #cmakedefine01 USE_RDKAFKA #cmakedefine01 USE_CAPNP #cmakedefine01 USE_EMBEDDED_COMPILER +#cmakedefine01 Poco_SQLODBC_FOUND #cmakedefine01 Poco_DataODBC_FOUND #cmakedefine01 Poco_MongoDB_FOUND #cmakedefine01 
Poco_NetSSL_FOUND diff --git a/dbms/src/Common/config_build.cpp.in b/dbms/src/Common/config_build.cpp.in index c9fe56634a6..6d3a8e21959 100644 --- a/dbms/src/Common/config_build.cpp.in +++ b/dbms/src/Common/config_build.cpp.in @@ -34,6 +34,7 @@ const char * auto_config_build[] "USE_VECTORCLASS", "@USE_VECTORCLASS@", "USE_RDKAFKA", "@USE_RDKAFKA@", "USE_CAPNP", "@USE_CAPNP@", + "USE_Poco_SQLODBC", "@Poco_SQLODBC_FOUND@", "USE_Poco_DataODBC", "@Poco_DataODBC_FOUND@", "USE_Poco_MongoDB", "@Poco_MongoDB_FOUND@", "USE_Poco_NetSSL", "@Poco_NetSSL_FOUND@", diff --git a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp index 306afb5955b..f7558dbf3eb 100644 --- a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp +++ b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp @@ -42,7 +42,7 @@ Block IProfilingBlockInputStream::read() if (isCancelledOrThrowIfKilled()) return res; - if (!checkTimeLimits()) + if (!checkTimeLimit()) limit_exceeded_need_break = true; if (!limit_exceeded_need_break) @@ -191,7 +191,7 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co }; -bool IProfilingBlockInputStream::checkTimeLimits() +bool IProfilingBlockInputStream::checkTimeLimit() { if (limits.max_execution_time != 0 && info.total_stopwatch.elapsed() > static_cast(limits.max_execution_time.totalMicroseconds()) * 1000) diff --git a/dbms/src/DataStreams/IProfilingBlockInputStream.h b/dbms/src/DataStreams/IProfilingBlockInputStream.h index 442c451faa3..0bed471f245 100644 --- a/dbms/src/DataStreams/IProfilingBlockInputStream.h +++ b/dbms/src/DataStreams/IProfilingBlockInputStream.h @@ -234,10 +234,10 @@ private: void updateExtremes(Block & block); - /** Check constraints and quotas. - * But only those that can be tested within each separate source. + /** Check limits and quotas. + * But only those that can be checked within each separate stream. 
*/ - bool checkTimeLimits(); + bool checkTimeLimit(); void checkQuota(Block & block); diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.cpp b/dbms/src/Dictionaries/DictionarySourceFactory.cpp index 463cbee3ac7..6c9c355893d 100644 --- a/dbms/src/Dictionaries/DictionarySourceFactory.cpp +++ b/dbms/src/Dictionaries/DictionarySourceFactory.cpp @@ -19,7 +19,7 @@ #if Poco_MongoDB_FOUND #include #endif -#if Poco_DataODBC_FOUND +#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #include @@ -89,7 +89,7 @@ Block createSampleBlock(const DictionaryStructure & dict_struct) DictionarySourceFactory::DictionarySourceFactory() : log(&Poco::Logger::get("DictionarySourceFactory")) { -#if Poco_DataODBC_FOUND +#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND Poco::Data::ODBC::Connector::registerConnector(); #endif } @@ -154,7 +154,7 @@ DictionarySourcePtr DictionarySourceFactory::create( } else if ("odbc" == source_type) { -#if Poco_DataODBC_FOUND +#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND return std::make_unique(dict_struct, config, config_prefix + ".odbc", sample_block, context); #else throw Exception{"Dictionary source of type `odbc` is disabled because poco library was built without ODBC support.", diff --git a/dbms/src/Dictionaries/ODBCDictionarySource.h b/dbms/src/Dictionaries/ODBCDictionarySource.h index 174541ddfe9..0f10dbd94ff 100644 --- a/dbms/src/Dictionaries/ODBCDictionarySource.h +++ b/dbms/src/Dictionaries/ODBCDictionarySource.h @@ -1,17 +1,15 @@ #pragma once +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include +#pragma GCC diagnostic pop #include #include #include - namespace Poco { - namespace Data - { - class SessionPool; - } - namespace Util { class AbstractConfiguration; diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp index 3e8e07bad3f..b0bf8f6f441 100644 --- a/dbms/src/Interpreters/Context.cpp +++ b/dbms/src/Interpreters/Context.cpp @@ -515,12 +515,6 @@ void Context::setConfig(const ConfigurationPtr & config) shared->config = config; } -ConfigurationPtr Context::getConfig() const -{ - auto lock = getLock(); - return shared->config; -} - Poco::Util::AbstractConfiguration & Context::getConfigRef() const { auto lock = getLock(); @@ -689,10 +683,10 @@ void Context::assertTableExists(const String & database_name, const String & tab Databases::const_iterator it = shared->databases.find(db); if (shared->databases.end() == it) - throw Exception("Database " + db + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); + throw Exception("Database " + backQuoteIfNeed(db) + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); if (!it->second->isTableExist(*this, table_name)) - throw Exception("Table " + db + "." + table_name + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE); + throw Exception("Table " + backQuoteIfNeed(db) + "." + backQuoteIfNeed(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE); } @@ -706,7 +700,7 @@ void Context::assertTableDoesntExist(const String & database_name, const String Databases::const_iterator it = shared->databases.find(db); if (shared->databases.end() != it && it->second->isTableExist(*this, table_name)) - throw Exception("Table " + db + "." + table_name + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS); + throw Exception("Table " + backQuoteIfNeed(db) + "." 
+ backQuoteIfNeed(table_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS); } @@ -719,7 +713,7 @@ void Context::assertDatabaseExists(const String & database_name, bool check_data checkDatabaseAccessRights(db); if (shared->databases.end() == shared->databases.find(db)) - throw Exception("Database " + db + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); + throw Exception("Database " + backQuoteIfNeed(db) + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); } @@ -731,7 +725,7 @@ void Context::assertDatabaseDoesntExist(const String & database_name) const checkDatabaseAccessRights(db); if (shared->databases.end() != shared->databases.find(db)) - throw Exception("Database " + db + " already exists.", ErrorCodes::DATABASE_ALREADY_EXISTS); + throw Exception("Database " + backQuoteIfNeed(db) + " already exists.", ErrorCodes::DATABASE_ALREADY_EXISTS); } @@ -801,7 +795,7 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta if (shared->databases.end() == it) { if (exception) - *exception = Exception("Database " + db + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); + *exception = Exception("Database " + backQuoteIfNeed(db) + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE); return {}; } @@ -809,7 +803,7 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta if (!table) { if (exception) - *exception = Exception("Table " + db + "." + table_name + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE); + *exception = Exception("Table " + backQuoteIfNeed(db) + "." + backQuoteIfNeed(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE); return {}; } @@ -820,7 +814,7 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta void Context::addExternalTable(const String & table_name, const StoragePtr & storage, const ASTPtr & ast) { if (external_tables.end() != external_tables.find(table_name)) - throw Exception("Temporary table " + table_name + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS); + throw Exception("Temporary table " + backQuoteIfNeed(table_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS); external_tables[table_name] = std::pair(storage, ast); } @@ -926,7 +920,7 @@ ASTPtr Context::getCreateExternalTableQuery(const String & table_name) const { TableAndCreateASTs::const_iterator jt = external_tables.find(table_name); if (external_tables.end() == jt) - throw Exception("Temporary Table" + table_name + " doesn't exist", ErrorCodes::UNKNOWN_TABLE); + throw Exception("Temporary table " + backQuoteIfNeed(table_name) + " doesn't exist", ErrorCodes::UNKNOWN_TABLE); return jt->second.second; } @@ -1326,21 +1320,13 @@ DDLWorker & Context::getDDLWorker() const return *shared->ddl_worker; } -void Context::setZooKeeper(zkutil::ZooKeeperPtr zookeeper) -{ - std::lock_guard lock(shared->zookeeper_mutex); - - if (shared->zookeeper) - throw Exception("ZooKeeper client has already been set.", ErrorCodes::LOGICAL_ERROR); - - shared->zookeeper = std::move(zookeeper); -} - zkutil::ZooKeeperPtr Context::getZooKeeper() const { std::lock_guard lock(shared->zookeeper_mutex); - if (shared->zookeeper && shared->zookeeper->expired()) + if (!shared->zookeeper) + shared->zookeeper = std::make_shared(getConfigRef(), "zookeeper"); + else if (shared->zookeeper->expired()) shared->zookeeper = shared->zookeeper->startNewSession(); return shared->zookeeper; diff --git a/dbms/src/Interpreters/Context.h b/dbms/src/Interpreters/Context.h index 73ddb5dce64..670bda401bf 100644 --- a/dbms/src/Interpreters/Context.h +++ 
b/dbms/src/Interpreters/Context.h @@ -140,7 +140,6 @@ public: /// Global application configuration settings. void setConfig(const ConfigurationPtr & config); - ConfigurationPtr getConfig() const; Poco::Util::AbstractConfiguration & getConfigRef() const; /** Take the list of users, quotas and configuration profiles from this config. @@ -300,7 +299,6 @@ public: MergeList & getMergeList(); const MergeList & getMergeList() const; - void setZooKeeper(std::shared_ptr zookeeper); /// If the current session is expired at the time of the call, synchronously creates and returns a new session with the startNewSession() call. std::shared_ptr getZooKeeper() const; /// Has ready or expired ZooKeeper diff --git a/dbms/src/Interpreters/DDLWorker.cpp b/dbms/src/Interpreters/DDLWorker.cpp index ea176fb998b..5a820ff7334 100644 --- a/dbms/src/Interpreters/DDLWorker.cpp +++ b/dbms/src/Interpreters/DDLWorker.cpp @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -858,7 +859,7 @@ void DDLWorker::run() } catch (const zkutil::KeeperException & e) { - if (!e.isHardwareError()) + if (!zkutil::isHardwareError(e.code)) throw; } } @@ -886,7 +887,7 @@ void DDLWorker::run() } catch (zkutil::KeeperException & e) { - if (e.isHardwareError()) + if (zkutil::isHardwareError(e.code)) { LOG_DEBUG(log, "Recovering ZooKeeper session after: " << getCurrentExceptionMessage(false)); diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index 500e4cd775d..2b84a413778 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -1519,7 +1519,9 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node, const Block & } else { - ExpressionActionsPtr temp_actions = std::make_shared(source_columns, settings); + NamesAndTypesList temp_columns = source_columns; + temp_columns.insert(temp_columns.end(), aggregated_columns.begin(), aggregated_columns.end()); + ExpressionActionsPtr temp_actions = std::make_shared(temp_columns, settings); getRootActions(func->arguments->children.at(0), true, false, temp_actions); Block sample_block_with_calculated_columns = temp_actions->getSampleBlock(); diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index ec6b9395701..7ed250e9036 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -687,19 +687,26 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline stream->addTableLock(table_lock); }); - /** Set the limits and quota for reading data, the speed and time of the query. - * Such restrictions are checked on the initiating server of the request, and not on remote servers. - * Because the initiating server has a summary of the execution of the request on all servers. - */ - if (to_stage == QueryProcessingStage::Complete) + /// Set the limits and quota for reading data, the speed and time of the query. 
{ IProfilingBlockInputStream::LocalLimits limits; limits.mode = IProfilingBlockInputStream::LIMITS_TOTAL; limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode); limits.max_execution_time = settings.max_execution_time; limits.timeout_overflow_mode = settings.timeout_overflow_mode; - limits.min_execution_speed = settings.min_execution_speed; - limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed; + + /** Quota and minimal speed restrictions are checked on the initiating server of the request, and not on remote servers, + * because the initiating server has a summary of the execution of the request on all servers. + * + * But limits on data size to read and maximum execution time are reasonable to check both on initiator and + * additionally on each remote server, because these limits are checked per block of data processed, + * and remote servers may process way more blocks of data than are received by initiator. + */ + if (to_stage == QueryProcessingStage::Complete) + { + limits.min_execution_speed = settings.min_execution_speed; + limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed; + } QuotaForIntervals & quota = context.getQuota(); @@ -708,7 +715,9 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline if (IProfilingBlockInputStream * p_stream = dynamic_cast(stream.get())) { p_stream->setLimits(limits); - p_stream->setQuota(quota); + + if (to_stage == QueryProcessingStage::Complete) + p_stream->setQuota(quota); } }); } diff --git a/dbms/src/Server/ClusterCopier.cpp b/dbms/src/Server/ClusterCopier.cpp index 61fe2234124..625e687f3dc 100644 --- a/dbms/src/Server/ClusterCopier.cpp +++ b/dbms/src/Server/ClusterCopier.cpp @@ -17,10 +17,19 @@ #include #include +#include +#include +#include + #include #include +#include #include #include +#include +#include +#include +#include #include #include #include @@ -30,12 +39,6 @@ #include #include #include - -#include -#include -#include -#include -#include #include #include #include @@ -61,8 +64,6 @@ #include #include #include -#include -#include namespace DB @@ -715,13 +716,11 @@ class ClusterCopier { public: - ClusterCopier(const ConfigurationPtr & zookeeper_config_, - const String & task_path_, + ClusterCopier(const String & task_path_, const String & host_id_, const String & proxy_database_name_, Context & context_) : - zookeeper_config(zookeeper_config_), task_zookeeper_path(task_path_), host_id(host_id_), working_database_name(proxy_database_name_), @@ -732,7 +731,7 @@ public: void init() { - auto zookeeper = getZooKeeper(); + auto zookeeper = context.getZooKeeper(); task_description_watch_callback = [this] (const ZooKeeperImpl::ZooKeeper::WatchResponse &) { @@ -762,8 +761,8 @@ public: /// Do not initialize tables, will make deferred initialization in process() - getZooKeeper()->createAncestors(getWorkersPathVersion() + "/"); - getZooKeeper()->createAncestors(getWorkersPath() + "/"); + zookeeper->createAncestors(getWorkersPathVersion() + "/"); + zookeeper->createAncestors(getWorkersPath() + "/"); } template @@ -890,7 +889,7 @@ public: void reloadTaskDescription() { - auto zookeeper = getZooKeeper(); + auto zookeeper = context.getZooKeeper(); task_description_watch_zookeeper = zookeeper; String task_config_str; @@ -1087,7 +1086,7 @@ protected: { LOG_DEBUG(log, "Check that all shards processed partition " << partition_name << " successfully"); - auto zookeeper = 
diff --git a/dbms/src/Server/ClusterCopier.cpp b/dbms/src/Server/ClusterCopier.cpp
index 61fe2234124..625e687f3dc 100644
--- a/dbms/src/Server/ClusterCopier.cpp
+++ b/dbms/src/Server/ClusterCopier.cpp
@@ -17,10 +17,19 @@
 #include
 #include
+#include
+#include
+#include
+
 #include
 #include
+#include
 #include
 #include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -30,12 +39,6 @@
 #include
 #include
 #include
-
-#include
-#include
-#include
-#include
-#include
 #include
 #include
 #include
@@ -61,8 +64,6 @@
 #include
 #include
 #include
-#include
-#include
 
 
 namespace DB
@@ -715,13 +716,11 @@ class ClusterCopier
 {
 public:
 
-    ClusterCopier(const ConfigurationPtr & zookeeper_config_,
-                  const String & task_path_,
+    ClusterCopier(const String & task_path_,
                   const String & host_id_,
                   const String & proxy_database_name_,
                   Context & context_)
     :
-        zookeeper_config(zookeeper_config_),
         task_zookeeper_path(task_path_),
         host_id(host_id_),
         working_database_name(proxy_database_name_),
@@ -732,7 +731,7 @@ public:
 
     void init()
     {
-        auto zookeeper = getZooKeeper();
+        auto zookeeper = context.getZooKeeper();
 
         task_description_watch_callback = [this] (const ZooKeeperImpl::ZooKeeper::WatchResponse &)
         {
@@ -762,8 +761,8 @@ public:
 
         /// Do not initialize tables, will make deferred initialization in process()
 
-        getZooKeeper()->createAncestors(getWorkersPathVersion() + "/");
-        getZooKeeper()->createAncestors(getWorkersPath() + "/");
+        zookeeper->createAncestors(getWorkersPathVersion() + "/");
+        zookeeper->createAncestors(getWorkersPath() + "/");
     }
 
     template
@@ -890,7 +889,7 @@ public:
 
     void reloadTaskDescription()
     {
-        auto zookeeper = getZooKeeper();
+        auto zookeeper = context.getZooKeeper();
         task_description_watch_zookeeper = zookeeper;
 
         String task_config_str;
@@ -1087,7 +1086,7 @@ protected:
     {
         LOG_DEBUG(log, "Check that all shards processed partition " << partition_name << " successfully");
 
-        auto zookeeper = getZooKeeper();
+        auto zookeeper = context.getZooKeeper();
 
         Strings status_paths;
         for (auto & shard : shards_with_partition)
@@ -1213,7 +1212,7 @@
         {
             cleaner_holder = zkutil::EphemeralNodeHolder::create(dirt_cleaner_path, *zookeeper, host_id);
         }
-        catch (zkutil::KeeperException & e)
+        catch (const zkutil::KeeperException & e)
         {
             if (e.code == ZooKeeperImpl::ZooKeeper::ZNODEEXISTS)
             {
@@ -1459,7 +1458,7 @@
         TaskTable & task_table = task_shard.task_table;
         ClusterPartition & cluster_partition = task_table.getClusterPartition(task_partition.name);
 
-        auto zookeeper = getZooKeeper();
+        auto zookeeper = context.getZooKeeper();
 
         String is_dirty_flag_path = task_partition.getCommonPartitionIsDirtyPath();
         String current_task_is_active_path = task_partition.getActiveWorkerPath();
@@ -1693,7 +1692,7 @@
             status = future_is_dirty_checker->get();
             future_is_dirty_checker.reset();
         }
-        catch (zkutil::KeeperException & e)
+        catch (const zkutil::KeeperException & e)
         {
             future_is_dirty_checker.reset();
             throw;
@@ -1995,21 +1994,7 @@
         return successful_shards;
     }
 
-    zkutil::ZooKeeperPtr getZooKeeper()
-    {
-        auto zookeeper = context.getZooKeeper();
-
-        if (!zookeeper)
-        {
-            context.setZooKeeper(std::make_shared<zkutil::ZooKeeper>(*zookeeper_config, "zookeeper"));
-            zookeeper = context.getZooKeeper();
-        }
-
-        return zookeeper;
-    }
-
 private:
-    ConfigurationPtr zookeeper_config;
     String task_zookeeper_path;
     String task_description_path;
     String host_id;
@@ -2152,6 +2137,7 @@ void ClusterCopierApp::mainImpl()
     auto context = std::make_unique<Context>(Context::createGlobal());
     SCOPE_EXIT(context->shutdown());
 
+    context->setConfig(zookeeper_configuration);
     context->setGlobalContext(*context);
     context->setApplicationType(Context::ApplicationType::LOCAL);
     context->setPath(process_path);
@@ -2165,8 +2151,7 @@ void ClusterCopierApp::mainImpl()
     context->addDatabase(default_database, std::make_shared<DatabaseMemory>(default_database));
     context->setCurrentDatabase(default_database);
 
-    std::unique_ptr<ClusterCopier> copier(new ClusterCopier(
-        zookeeper_configuration, task_path, host_id, default_database, *context));
+    std::unique_ptr<ClusterCopier> copier = std::make_unique<ClusterCopier>(task_path, host_id, default_database, *context);
     copier->setSafeMode(is_safe_mode);
     copier->setCopyFaultProbability(copy_fault_probability);
diff --git a/dbms/src/Server/Server.cpp b/dbms/src/Server/Server.cpp
index 5c692462058..6133d4be2bf 100644
--- a/dbms/src/Server/Server.cpp
+++ b/dbms/src/Server/Server.cpp
@@ -102,12 +102,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
     global_context->setGlobalContext(*global_context);
     global_context->setApplicationType(Context::ApplicationType::SERVER);
 
-    bool has_zookeeper = false;
-    if (config().has("zookeeper"))
-    {
-        global_context->setZooKeeper(std::make_shared<zkutil::ZooKeeper>(config(), "zookeeper"));
-        has_zookeeper = true;
-    }
+    bool has_zookeeper = config().has("zookeeper");
 
     zkutil::ZooKeeperNodeCache main_config_zk_node_cache([&] { return global_context->getZooKeeper(); });
     if (loaded_config.has_zk_includes)
diff --git a/dbms/src/Server/config.d/zookeeper.xml b/dbms/src/Server/config.d/zookeeper.xml
new file mode 100644
index 00000000000..d390a935107
--- /dev/null
+++ b/dbms/src/Server/config.d/zookeeper.xml
@@ -0,0 +1,8 @@
+<yandex>
+    <zookeeper>
+        <node>
+            <host>localhost</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+</yandex>
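The config fragment above wires a default ZooKeeper into the server configuration (the host and port shown are just localhost defaults), and the server and copier now build the session lazily from that configuration instead of an explicit setZooKeeper() call. A hedged way to verify the session from a client once such a config is loaded:

```sql
-- Sketch: assumes a ZooKeeper instance is reachable at the configured host/port.
-- system.zookeeper requires an explicit condition on 'path' in the WHERE clause.
SELECT name, value, numChildren FROM system.zookeeper WHERE path = '/';
```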
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index 5b5a6b87c8e..19daa71d3e3 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -2189,18 +2189,17 @@ bool MergeTreeData::isPrimaryKeyColumnPossiblyWrappedInFunctions(const ASTPtr &
 
 bool MergeTreeData::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand) const
 {
-    /// Make sure that the left side of the IN operator is part of the primary key.
-    /// If there is a tuple on the left side of the IN operator, each item of the tuple must be part of the primary key.
+    /// Make sure that the left side of the IN operator contains part of the primary key.
+    /// If there is a tuple on the left side of the IN operator, at least one item of the tuple must be part of the primary key (probably wrapped by a chain of some acceptable functions).
     const ASTFunction * left_in_operand_tuple = typeid_cast<const ASTFunction *>(left_in_operand.get());
     if (left_in_operand_tuple && left_in_operand_tuple->name == "tuple")
     {
         for (const auto & item : left_in_operand_tuple->arguments->children)
-            if (!isPrimaryKeyColumnPossiblyWrappedInFunctions(item))
-                /// The tuple itself may be part of the primary key, so check that as a last resort.
-                return isPrimaryKeyColumnPossiblyWrappedInFunctions(left_in_operand);
+            if (isPrimaryKeyColumnPossiblyWrappedInFunctions(item))
+                return true;
 
-        /// tuple() is invalid but can still be found here since this method may be called before the arguments are validated.
-        return !left_in_operand_tuple->arguments->children.empty();
+        /// The tuple itself may be part of the primary key, so check that as a last resort.
+        return isPrimaryKeyColumnPossiblyWrappedInFunctions(left_in_operand);
     }
     else
     {
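This changes mayBenefitFromIndexForIn() so that a tuple on the left of IN only needs one component belonging to the primary key, which is what lets queries like the 00612_pk_in_tuple test added later in this patch use the index; roughly:

```sql
-- Sketch distilled from the 00612_pk_in_tuple test below: 'key' is in the primary
-- key, 'val' is not, yet (key, val) IN (...) can now still benefit from the index.
CREATE TABLE test.tab (key UInt64, arr Array(UInt64)) ENGINE = MergeTree ORDER BY key;
INSERT INTO test.tab VALUES (1, [1]), (2, [2]);
SELECT key, arrayJoin(arr) AS val FROM test.tab WHERE (key, val) IN ((1, 1), (2, 2)) ORDER BY key;
```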
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp
index 3c20b0a2069..3b9099f23eb 100644
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp
@@ -1,5 +1,6 @@
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
index 9fafd5f2e9d..8aca9fe4f2e 100644
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
@@ -5,8 +5,10 @@
 #include
 #include
 #include
+#include
 #include
+
 
 namespace ProfileEvents
 {
     extern const Event DuplicatedInsertedBlocks;
@@ -25,6 +27,7 @@ namespace ErrorCodes
     extern const int READONLY;
     extern const int UNKNOWN_STATUS_OF_INSERT;
     extern const int INSERT_WAS_DEDUPLICATED;
+    extern const int KEEPER_EXCEPTION;
 }
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
index 97fcb1523a8..6b20b5c86c1 100644
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 #include
diff --git a/dbms/src/Storages/StorageODBC.h b/dbms/src/Storages/StorageODBC.h
index 5632fe38282..605d35b0202 100644
--- a/dbms/src/Storages/StorageODBC.h
+++ b/dbms/src/Storages/StorageODBC.h
@@ -1,17 +1,13 @@
 #pragma once
 
 #include
-
 #include
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+    #include
+#pragma GCC diagnostic pop
 
-namespace Poco
-{
-    namespace Data
-    {
-        class SessionPool;
-    }
-}
 
 namespace DB
diff --git a/dbms/src/Storages/registerStorages.cpp b/dbms/src/Storages/registerStorages.cpp
index ae10de9c916..6f140d92562 100644
--- a/dbms/src/Storages/registerStorages.cpp
+++ b/dbms/src/Storages/registerStorages.cpp
@@ -23,7 +23,7 @@ void registerStorageJoin(StorageFactory & factory);
 void registerStorageView(StorageFactory & factory);
 void registerStorageMaterializedView(StorageFactory & factory);
 
-#if Poco_DataODBC_FOUND
+#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
 void registerStorageODBC(StorageFactory & factory);
 #endif
 
@@ -56,7 +56,7 @@ void registerStorages()
     registerStorageView(factory);
     registerStorageMaterializedView(factory);
 
-    #if Poco_DataODBC_FOUND
+    #if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
     registerStorageODBC(factory);
     #endif
 
diff --git a/dbms/src/TableFunctions/CMakeLists.txt b/dbms/src/TableFunctions/CMakeLists.txt
index e717359090e..4708ed9b602 100644
--- a/dbms/src/TableFunctions/CMakeLists.txt
+++ b/dbms/src/TableFunctions/CMakeLists.txt
@@ -6,7 +6,13 @@ list(REMOVE_ITEM clickhouse_table_functions_headers ITableFunction.h TableFunctionFactory.h)
 
 add_library(clickhouse_table_functions ${clickhouse_table_functions_sources})
 target_link_libraries(clickhouse_table_functions dbms clickhouse_storages_system ${Poco_Foundation_LIBRARY})
+
+if (Poco_SQLODBC_FOUND)
+    target_link_libraries (clickhouse_table_functions ${Poco_SQLODBC_LIBRARY})
+    target_include_directories (clickhouse_table_functions PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQLODBC_INCLUDE_DIRS})
+endif ()
+
 if (Poco_DataODBC_FOUND)
     target_link_libraries (clickhouse_table_functions ${Poco_DataODBC_LIBRARY})
-    target_include_directories (clickhouse_table_functions PRIVATE ${ODBC_INCLUDE_DIRECTORIES})
+    target_include_directories (clickhouse_table_functions PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_DataODBC_INCLUDE_DIRS})
 endif ()
diff --git a/dbms/src/TableFunctions/TableFunctionODBC.cpp b/dbms/src/TableFunctions/TableFunctionODBC.cpp
index b75e48de054..c9cb78479a9 100644
--- a/dbms/src/TableFunctions/TableFunctionODBC.cpp
+++ b/dbms/src/TableFunctions/TableFunctionODBC.cpp
@@ -1,6 +1,6 @@
 #include
 
-#if Poco_DataODBC_FOUND
+#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
 
 #include
 #include
diff --git a/dbms/src/TableFunctions/TableFunctionODBC.h b/dbms/src/TableFunctions/TableFunctionODBC.h
index b0f81749647..eb06a8c5097 100644
--- a/dbms/src/TableFunctions/TableFunctionODBC.h
+++ b/dbms/src/TableFunctions/TableFunctionODBC.h
@@ -1,7 +1,7 @@
 #pragma once
 
 #include
-#if Poco_DataODBC_FOUND
+#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
 
 #include
diff --git a/dbms/src/TableFunctions/registerTableFunctions.cpp b/dbms/src/TableFunctions/registerTableFunctions.cpp
index af069a5fcf6..94ac0a79fa6 100644
--- a/dbms/src/TableFunctions/registerTableFunctions.cpp
+++ b/dbms/src/TableFunctions/registerTableFunctions.cpp
@@ -11,7 +11,7 @@ void registerTableFunctionRemote(TableFunctionFactory & factory);
 void registerTableFunctionShardByHash(TableFunctionFactory & factory);
 void registerTableFunctionNumbers(TableFunctionFactory & factory);
 void registerTableFunctionCatBoostPool(TableFunctionFactory & factory);
-#if Poco_DataODBC_FOUND
+#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
 void registerTableFunctionODBC(TableFunctionFactory & factory);
 #endif
 
@@ -30,7 +30,7 @@ void registerTableFunctions()
     registerTableFunctionNumbers(factory);
     registerTableFunctionCatBoostPool(factory);
 
-#if Poco_DataODBC_FOUND
+#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND
     registerTableFunctionODBC(factory);
 #endif
 
diff --git a/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference
new file mode 100644
index 00000000000..7193c3d3f3d
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments8.reference
@@ -0,0 +1 @@
+Still alive
diff --git a/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference
new file mode 100644
index 00000000000..7193c3d3f3d
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00534_long_functions_bad_arguments9.reference
@@ -0,0 +1 @@
+Still alive
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
new file mode 100644
index 00000000000..de69348862a
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
@@ -0,0 +1,13 @@
+all
+1 [1]
+2 [2]
+key, arrayJoin(arr) in (1, 1)
+1 1
+key, arrayJoin(arr) in ((1, 1), (2, 2))
+1 1
+2 2
+(key, left array join arr) in (1, 1)
+1
+(key, left array join arr) in ((1, 1), (2, 2))
+1
+2
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
new file mode 100644
index 00000000000..f6e34909cae
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
@@ -0,0 +1,16 @@
+create database if not exists test;
+drop table if exists test.tab;
+create table test.tab (key UInt64, arr Array(UInt64)) Engine = MergeTree order by key;
+insert into test.tab values (1, [1]);
+insert into test.tab values (2, [2]);
+select 'all';
+select * from test.tab order by key;
+select 'key, arrayJoin(arr) in (1, 1)';
+select key, arrayJoin(arr) as val from test.tab where (key, val) in (1, 1);
+select 'key, arrayJoin(arr) in ((1, 1), (2, 2))';
+select key, arrayJoin(arr) as val from test.tab where (key, val) in ((1, 1), (2, 2)) order by key;
+select '(key, left array join arr) in (1, 1)';
+select key from test.tab left array join arr as val where (key, val) in (1, 1);
+select '(key, left array join arr) in ((1, 1), (2, 2))';
+select key from test.tab left array join arr as val where (key, val) in ((1, 1), (2, 2)) order by key;
+
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
new file mode 100644
index 00000000000..8b1acc12b63
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
@@ -0,0 +1,10 @@
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
new file mode 100644
index 00000000000..dcd227bd2cf
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
@@ -0,0 +1,2 @@
+SET max_execution_time = 1, timeout_overflow_mode = 'break';
+SELECT DISTINCT * FROM remote('127.0.0.{2,3}', system.numbers) WHERE number < 10;
diff --git a/debian/changelog b/debian/changelog
index 2ff28b59456..11e474be4f7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,5 +1,5 @@
-clickhouse (1.1.54371) unstable; urgency=low
+clickhouse (1.1.54372) unstable; urgency=low
 
   * Modified source code
 
- -- Fri, 23 Mar 2018 01:11:42 +0300
+ -- Mon, 02 Apr 2018 22:13:54 +0300
diff --git a/debian/control b/debian/control
index 46b743636a5..8f57ae258f4 100644
--- a/debian/control
+++ b/debian/control
@@ -50,11 +50,14 @@ Description: Server binary for clickhouse
 .
 This package provides clickhouse common configuration files
 
-Package: clickhouse-common-dbg
+Package: clickhouse-common-static-dbg
 Architecture: any
 Section: debug
 Priority: extra
 Depends: ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
+Replaces: clickhouse-common-dbg
+Provides: clickhouse-common-dbg
+Conflicts: clickhouse-common-dbg
 Description: debugging symbols for clickhouse-common-static
  This package contains the debugging symbols for clickhouse-common.
 
diff --git a/debian/rules b/debian/rules
index fe53eac4643..fb21adf9984 100755
--- a/debian/rules
+++ b/debian/rules
@@ -70,7 +70,7 @@ override_dh_clean:
 	dh_clean
 
 override_dh_strip:
-	dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-dbg
+	dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
 
 override_dh_install:
 	# Making docs
diff --git a/docs/en/getting_started/index.md b/docs/en/getting_started/index.md
index 10ed75040d8..d3e9ea03915 100755
--- a/docs/en/getting_started/index.md
+++ b/docs/en/getting_started/index.md
@@ -31,7 +31,7 @@ Then run:
 ```bash
 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
 sudo apt-get update
-sudo apt-get install clickhouse-client clickhouse-server
+sudo apt-get install clickhouse-client clickhouse-server-common
 ```
 
 You can also download and install packages manually from here:
diff --git a/docs/en/interfaces/third-party_client_libraries.md b/docs/en/interfaces/third-party_client_libraries.md
index 8437be23b99..10ef1e62b49 100755
--- a/docs/en/interfaces/third-party_client_libraries.md
+++ b/docs/en/interfaces/third-party_client_libraries.md
@@ -33,6 +33,9 @@ There are libraries for working with ClickHouse for:
     - [ClickHouse-Net](https://github.com/killwort/ClickHouse-Net)
 - C++
     - [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp/)
+- Elixir
+    - [clickhousex](https://github.com/appodeal/clickhousex/)
+    - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
 
 We have not tested these libraries. They are listed in random order.
diff --git a/docs/ru/getting_started/index.md b/docs/ru/getting_started/index.md
index 2198ab2bc7d..3847663b3d5 100644
--- a/docs/ru/getting_started/index.md
+++ b/docs/ru/getting_started/index.md
@@ -31,7 +31,7 @@ deb http://repo.yandex.ru/clickhouse/deb/stable/ main/
 ```bash
 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
 sudo apt-get update
-sudo apt-get install clickhouse-client clickhouse-server
+sudo apt-get install clickhouse-client clickhouse-server-common
 ```
 
 Также можно скачать и установить пакеты вручную, отсюда: .
diff --git a/docs/ru/interfaces/third-party_client_libraries.md b/docs/ru/interfaces/third-party_client_libraries.md
index 3a081b9b6ce..c416d9ee859 100644
--- a/docs/ru/interfaces/third-party_client_libraries.md
+++ b/docs/ru/interfaces/third-party_client_libraries.md
@@ -33,5 +33,8 @@
     - [ClickHouse-Net](https://github.com/killwort/ClickHouse-Net)
 - C++
     - [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp/)
+- Elixir
+    - [clickhousex](https://github.com/appodeal/clickhousex/)
+    - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
 
 Библиотеки не тестировались нами. Порядок перечисления произвольный.
diff --git a/docs/ru/table_engines/mergetree.md b/docs/ru/table_engines/mergetree.md
index 714d368909d..b11e00f83d1 100644
--- a/docs/ru/table_engines/mergetree.md
+++ b/docs/ru/table_engines/mergetree.md
@@ -38,7 +38,7 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
 Для столбцов также пишутся «засечки» каждую index_granularity строку, чтобы данные можно было читать в определённом диапазоне.
 
 При чтении из таблицы, запрос SELECT анализируется на предмет того, можно ли использовать индексы.
-Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN над столбцами, входящими в первичный ключ / дату, а также логические связки над ними.
+Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN или LIKE с фиксированным префиксом, над столбцами или выражениями, входящими в первичный ключ или ключ партиционирования, либо над некоторыми частично монотонными функциями от этих столбцов, а также логические связки над такими выражениями.
 
 Таким образом, обеспечивается возможность быстро выполнять запросы по одному или многим диапазонам первичного ключа. Например, в указанном примере будут быстро работать запросы для конкретного счётчика; для конкретного счётчика и диапазона дат; для конкретного счётчика и даты, для нескольких счётчиков и диапазона дат и т. п.
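In English, the amended sentence says the index can be used when WHERE/PREWHERE contains, as the whole condition or as one conjunct, comparisons (equality or inequality), IN, or LIKE with a fixed prefix over columns or expressions of the primary key or partition key, possibly wrapped in certain partially monotonic functions, plus logical connectives over such expressions. A hedged illustration (the table and column names are assumed for the example, not taken from the patch):

```sql
-- Sketch: 'hits' is assumed ordered by (CounterID, EventDate), 'hits_by_url' by URL.
-- Each predicate below can use the primary key index: equality, a date range,
-- IN over a key column, and LIKE with a fixed prefix over a key String column.
SELECT count() FROM hits WHERE CounterID = 34 AND EventDate >= '2018-04-01';
SELECT count() FROM hits WHERE CounterID IN (34, 62) AND EventDate = today();
SELECT count() FROM hits_by_url WHERE URL LIKE 'https://example.com/%';
```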
diff --git a/utils/zookeeper-cli/zookeeper-cli.cpp b/utils/zookeeper-cli/zookeeper-cli.cpp
index 266bf5bb34d..6da1a7c02a3 100644
--- a/utils/zookeeper-cli/zookeeper-cli.cpp
+++ b/utils/zookeeper-cli/zookeeper-cli.cpp
@@ -206,13 +206,13 @@ int main(int argc, char ** argv)
             }
         }
-        catch (zkutil::KeeperException & e)
+        catch (const zkutil::KeeperException & e)
         {
             std::cerr << "KeeperException: " << e.displayText() << std::endl;
         }
     }
 }
-catch (zkutil::KeeperException & e)
+catch (const zkutil::KeeperException & e)
 {
     std::cerr << "KeeperException: " << e.displayText() << std::endl;
     return 1;
diff --git a/utils/zookeeper-dump-tree/main.cpp b/utils/zookeeper-dump-tree/main.cpp
index 1d614216a24..a0b0f1554ea 100644
--- a/utils/zookeeper-dump-tree/main.cpp
+++ b/utils/zookeeper-dump-tree/main.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 
 
 int main(int argc, char ** argv)
diff --git a/website/deprecated/reference_en.html b/website/deprecated/reference_en.html
index e6e4dee6227..728c9622087 100644
--- a/website/deprecated/reference_en.html
+++ b/website/deprecated/reference_en.html
@@ -439,7 +439,7 @@ Then run:
 %%
 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
 sudo apt-get update
-sudo apt-get install -y clickhouse-client clickhouse-server
+sudo apt-get install clickhouse-client clickhouse-server-common
 %%
 
 You can also download and install packages manually from here:
@@ -709,7 +709,7 @@ echo 'DROP TABLE t' | POST 'http://localhost:8123/'
 
 For successful requests that don't return a data table, an empty response body is returned.
 
-You can use compression when transmitting data. The compressed data has a non-standard format, and you will need to use a special clickhouse-compressor program to work with it (%%sudo apt-get install clickhouse-utils%%).
+You can use compression when transmitting data. The compressed data has a non-standard format, and you will need to use a special compressor program to work with it (%%sudo apt-get install clickhouse-compressor%%).
 
 If you specified 'compress=1' in the URL, the server will compress the data it sends you.
 If you specified 'decompress=1' in the URL, the server will decompress the same data that you pass in the POST method.
diff --git a/website/deprecated/reference_ru.html b/website/deprecated/reference_ru.html
index 2965054a737..c7b4126a167 100644
--- a/website/deprecated/reference_ru.html
+++ b/website/deprecated/reference_ru.html
@@ -449,7 +449,7 @@ deb http://repo.yandex.ru/clickhouse/trusty stable main
 %%
 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
 sudo apt-get update
-sudo apt-get install -y clickhouse-client clickhouse-server
+sudo apt-get install clickhouse-client clickhouse-server-common
 %%
 
 Также можно скачать и установить пакеты вручную, отсюда:
@@ -725,7 +725,7 @@ echo 'DROP TABLE t' | POST 'http://localhost:8123/'
 
 Для запросов, которые не возвращают таблицу с данными, в случае успеха, выдаётся пустое тело ответа.
 
-Вы можете использовать сжатие при передаче данных. Формат сжатых данных нестандартный, и вам придётся использовать для работы с ним специальную программу clickhouse-compressor (%%sudo apt-get install clickhouse-utils%%).
+Вы можете использовать сжатие при передаче данных. Формат сжатых данных нестандартный, и вам придётся использовать для работы с ним специальную программу compressor (%%sudo apt-get install clickhouse-compressor%%).
 
 Если вы указали в URL compress=1, то сервер будет сжимать отправляемые вам данные.
 Если вы указали в URL decompress=1, то сервер будет разжимать те данные, которые вы передаёте ему POST-ом.
diff --git a/website/index.html b/website/index.html
index 78b89d3b07b..e315b78199d 100644
--- a/website/index.html
+++ b/website/index.html
@@ -393,7 +393,7 @@
 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
 sudo apt-add-repository "deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
 sudo apt-get update
-sudo apt-get install -y clickhouse-server clickhouse-client
+sudo apt-get install clickhouse-server-common clickhouse-client -y
 sudo service clickhouse-server start
 clickhouse-client
diff --git a/website/tutorial.html b/website/tutorial.html
index 558d9a0d0fe..0472bef268d 100644
--- a/website/tutorial.html
+++ b/website/tutorial.html
@@ -51,7 +51,7 @@

                     clickhouse-client package contains clickhouse-client application —
-                        interactive ClickHouse client. clickhouse-common contains a clickhouse-server binary file. clickhouse-server
+                        interactive ClickHouse client. clickhouse-server-base contains a clickhouse-server binary file. clickhouse-server-common
                         — contains config files for the clickhouse-server.

Server config files are located in /etc/clickhouse-server/. Before getting to work please notice the path