Merge remote-tracking branch 'upstream/master' into fix3

This commit is contained in:
proller 2018-04-03 12:50:35 +03:00
commit 9fa97cf19b
17 changed files with 58 additions and 226 deletions

View File

@ -1,202 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.0)

# Modified version of CMakeLists.txt for ClickHouse. Doesn't link the library to libm.
# Otherwise we have extra dependency when compiling with the most fresh libc version.
# How to check:
#   readelf -s ./clickhouse | grep -F 2.23

project(zookeeper VERSION 3.5.3)

# Root of the bundled ZooKeeper C client sources inside the ClickHouse tree.
# Lowercase `set` for consistency with the rest of this file; the expansion is
# quoted so a path containing spaces cannot split into multiple arguments.
# NOTE(review): relies on ClickHouse_SOURCE_DIR being defined by the parent
# project that add_subdirectory()'s this file.
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zookeeper/src/c")

set(email user@zookeeper.apache.org)
set(description "zookeeper C client")
# General compiler options. These are directory-scoped on purpose: they must
# apply to every target defined below (hashtable, zookeeper, cli, load_gen),
# and target-scoped commands cannot be used before the targets exist.
if(UNIX)
add_compile_options(-Wall -fPIC)
elseif(WIN32)
add_compile_options(/W3)
endif()
# Compile against the static-library API of the client.
# NOTE(review): presumably controls dllimport/dllexport decoration in the
# ZooKeeper headers on Windows — verify against winconfig.h/zookeeper headers.
add_definitions(-DUSE_STATIC_LIB)
# TODO: Enable /WX and /W4 on Windows. Currently there are ~1000 warnings.
# TODO: Add Solaris support.
# TODO: Add a shared library option.
# TODO: Specify symbols to export.
# TODO: Generate doxygen documentation.
# Sync API option: when ON, the threaded (mt_adaptor) build is used and the
# THREADED define is visible to all sources in this directory.
option(WANT_SYNCAPI "Enables Sync API support" ON)
if(WANT_SYNCAPI)
add_definitions(-DTHREADED)
if(WIN32)
# Use the static MSVC runtime. The generator expression appends `d` so that
# `/MTd` is used when Debug configurations are built.
add_compile_options(/MT$<$<CONFIG:Debug>:d>)
endif()
endif()
# CppUnit / test-suite option.
# The tests do not yet compile on Windows or macOS, so the option defaults to
# OFF there and to ON everywhere else. CMake evaluates boolean expressions
# only inside if(), hence the explicit branch to compute the default value.
if(NOT (WIN32 OR APPLE))
set(DEFAULT_WANT_CPPUNIT ON)
else()
set(DEFAULT_WANT_CPPUNIT OFF)
endif()
option(WANT_CPPUNIT "Enables CppUnit and tests" ${DEFAULT_WANT_CPPUNIT})
# to_have(in out): convert a header/function/library name such as
# `arpa/inet.h` into an Autotools-style preprocessor symbol, e.g.
# `HAVE_ARPA_INET_H`, by upper-casing it and replacing `/` and `.` with `_`.
#
# CMake functions have no return values, so the result is written into the
# variable named by `out` with PARENT_SCOPE.
function(to_have in out)
# Quote the expansions: unquoted, an empty or semicolon-containing `in`
# would split into the wrong number of arguments and abort the configure.
string(TOUPPER "${in}" str)
string(REGEX REPLACE "/|\\." "_" str "${str}")
set(${out} "HAVE_${str}" PARENT_SCOPE)
endfunction()
# Generated jute files must already be present in the source tree: they are
# produced by `ant compile_jute`, not by this build. Fail fast if absent,
# otherwise record HAVE_* symbols for the config.h template below.
foreach(jute_file generated/zookeeper.jute.h generated/zookeeper.jute.c)
if(NOT EXISTS "${LIBRARY_DIR}/${jute_file}")
message(FATAL_ERROR
"jute files are missing!\n"
"Please run 'ant compile_jute' while in the ZooKeeper top level directory.")
endif()
to_have(${jute_file} have_symbol)
set(${have_symbol} 1)
endforeach()
# Header availability checks: for each header found, check_include_file()
# caches a HAVE_<NAME>_H variable consumed by the configure_file() step below.
include(CheckIncludeFile)
foreach(hdr
arpa/inet.h
dlfcn.h
fcntl.h
inttypes.h
memory.h
netdb.h
netinet/in.h
stdint.h
stdlib.h
string.h
strings.h
sys/socket.h
sys/stat.h
sys/time.h
sys/types.h
unistd.h
sys/utsname.h)
to_have(${hdr} have_symbol)
check_include_file(${hdr} ${have_symbol})
endforeach()
# Function availability checks: check_function_exists() caches a HAVE_<FN>
# variable for each libc function, consumed by the configure_file() step below.
include(CheckFunctionExists)
foreach(func
getcwd
gethostbyname
gethostname
getlogin
getpwuid_r
gettimeofday
getuid
memmove
memset
poll
socket
strchr
strdup
strerror
strtol)
to_have(${func} have_symbol)
check_function_exists(${func} ${have_symbol})
endforeach()
# Library availability checks: find_library() caches the discovered path (or
# NOTFOUND) under HAVE_LIB<NAME>.
# NOTE(review): presumably consumed by cmake_config.h.in — verify there.
foreach(lib rt m pthread)
to_have("lib${lib}" have_symbol)
find_library(${have_symbol} ${lib})
endforeach()
# IPv6 support is detected by probing struct sockaddr_in6 for sin6_addr.
include(CheckStructHasMember)
check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h" ZOO_IPV6_ENABLED)
# Materialize config.h from the feature-check results gathered above.
configure_file(${LIBRARY_DIR}/cmake_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/config.h)

# Bundled hashtable helper library (sources listed inline; they are not
# referenced anywhere else).
add_library(hashtable STATIC
${LIBRARY_DIR}/src/hashtable/hashtable_itr.c
${LIBRARY_DIR}/src/hashtable/hashtable.c)

# Core zookeeper client sources. Platform- and threading-specific sources are
# appended to this list further down before add_library(zookeeper ...).
set(zookeeper_sources
${LIBRARY_DIR}/src/zookeeper.c
${LIBRARY_DIR}/src/recordio.c
${LIBRARY_DIR}/generated/zookeeper.jute.c
${LIBRARY_DIR}/src/zk_log.c
${LIBRARY_DIR}/src/zk_hashtable.c
)
# NOTE(review): src/addrvec.c is mentioned here but never compiled —
# presumably intentionally excluded from this bundled copy; confirm it is
# not required by the sources below.
if(WANT_SYNCAPI)
# Multi-threaded adaptor backs the sync API (goes with -DTHREADED above).
list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/mt_adaptor.c)
else()
# Single-threaded adaptor when the sync API is disabled.
list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/st_adaptor.c)
endif()
if(WIN32)
# Extra source compiled only on Windows.
list(APPEND zookeeper_sources ${LIBRARY_DIR}/src/winport.c)
endif()
add_library(zookeeper STATIC ${zookeeper_sources})
# PUBLIC: consumers inherit these include dirs. BEFORE prepends them so the
# bundled headers shadow any system-installed zookeeper headers.
target_include_directories(zookeeper BEFORE PUBLIC ${LIBRARY_DIR}/include ${CMAKE_CURRENT_BINARY_DIR}/include ${LIBRARY_DIR}/generated)
target_link_libraries(zookeeper PUBLIC
hashtable
$<$<PLATFORM_ID:Linux>:rt> # clock_gettime
$<$<PLATFORM_ID:Windows>:ws2_32>) # Winsock 2.0
if(WANT_SYNCAPI AND NOT WIN32)
# The threaded sync API needs a threads library on POSIX platforms.
find_package(Threads REQUIRED)
target_link_libraries(zookeeper PUBLIC Threads::Threads)
endif()
# cli executable: command-line client shipped with the C bindings.
add_executable(cli ${LIBRARY_DIR}/src/cli.c)
# Use the keyword signature of target_link_libraries: the keyword-less form is
# legacy and must never be mixed with the keyword form on the same target.
# PRIVATE is correct for executables — they have no consumers. libm is linked
# only on Linux, keeping the zookeeper library itself free of the libm
# dependency (see the note at the top of this file).
target_link_libraries(cli PRIVATE zookeeper $<$<PLATFORM_ID:Linux>:m>)

# load_gen executable: requires the sync (threaded) API; not built on Windows.
if(WANT_SYNCAPI AND NOT WIN32)
add_executable(load_gen ${LIBRARY_DIR}/src/load_gen.c)
target_link_libraries(load_gen PRIVATE zookeeper $<$<PLATFORM_ID:Linux>:m>)
endif()

View File

@ -1,6 +1,6 @@
# This strings autochanged from release_lib.sh: # This strings autochanged from release_lib.sh:
set(VERSION_DESCRIBE v1.1.54371-testing) set(VERSION_DESCRIBE v1.1.54372-testing)
set(VERSION_REVISION 54371) set(VERSION_REVISION 54372)
# end of autochange # end of autochange
set (VERSION_MAJOR 1) set (VERSION_MAJOR 1)

View File

@ -280,8 +280,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
read_list.push_back(*connection->socket); read_list.push_back(*connection->socket);
} }
/// If no data was found, then we check if there are any connections /// If no data was found, then we check if there are any connections ready for reading.
/// ready for reading.
if (read_list.empty()) if (read_list.empty())
{ {
Poco::Net::Socket::SocketList write_list; Poco::Net::Socket::SocketList write_list;
@ -300,6 +299,9 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
throw Exception("Timeout exceeded while reading from " + dumpAddressesUnlocked(), ErrorCodes::TIMEOUT_EXCEEDED); throw Exception("Timeout exceeded while reading from " + dumpAddressesUnlocked(), ErrorCodes::TIMEOUT_EXCEEDED);
} }
/// TODO Absolutely wrong code: read_list could be empty; rand() is not thread safe and has low quality; motivation of rand is unclear.
/// This code path is disabled by default.
auto & socket = read_list[rand() % read_list.size()]; auto & socket = read_list[rand() % read_list.size()];
if (fd_to_replica_state_idx.empty()) if (fd_to_replica_state_idx.empty())
{ {

View File

@ -195,6 +195,9 @@ int32_t ZooKeeper::getChildrenImpl(const std::string & path, Strings & res,
impl->list(path, callback, watch_callback); impl->list(path, callback, watch_callback);
event.wait(); event.wait();
ProfileEvents::increment(ProfileEvents::ZooKeeperGetChildren);
ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions);
return code; return code;
} }

View File

@ -280,7 +280,7 @@ public:
{ {
zookeeper.tryRemove(path); zookeeper.tryRemove(path);
} }
catch (const KeeperException & e) catch (...)
{ {
ProfileEvents::increment(ProfileEvents::CannotRemoveEphemeralNode); ProfileEvents::increment(ProfileEvents::CannotRemoveEphemeralNode);
DB::tryLogCurrentException(__PRETTY_FUNCTION__); DB::tryLogCurrentException(__PRETTY_FUNCTION__);

View File

@ -737,7 +737,10 @@ void addRootPath(String & path, const String & root_path)
if (root_path.empty()) if (root_path.empty())
return; return;
path = root_path + path; if (path.size() == 1) /// "/"
path = root_path;
else
path = root_path + path;
} }
void removeRootPath(String & path, const String & root_path) void removeRootPath(String & path, const String & root_path)

View File

@ -42,7 +42,7 @@ Block IProfilingBlockInputStream::read()
if (isCancelledOrThrowIfKilled()) if (isCancelledOrThrowIfKilled())
return res; return res;
if (!checkTimeLimits()) if (!checkTimeLimit())
limit_exceeded_need_break = true; limit_exceeded_need_break = true;
if (!limit_exceeded_need_break) if (!limit_exceeded_need_break)
@ -191,7 +191,7 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co
}; };
bool IProfilingBlockInputStream::checkTimeLimits() bool IProfilingBlockInputStream::checkTimeLimit()
{ {
if (limits.max_execution_time != 0 if (limits.max_execution_time != 0
&& info.total_stopwatch.elapsed() > static_cast<UInt64>(limits.max_execution_time.totalMicroseconds()) * 1000) && info.total_stopwatch.elapsed() > static_cast<UInt64>(limits.max_execution_time.totalMicroseconds()) * 1000)

View File

@ -234,10 +234,10 @@ private:
void updateExtremes(Block & block); void updateExtremes(Block & block);
/** Check constraints and quotas. /** Check limits and quotas.
* But only those that can be tested within each separate source. * But only those that can be checked within each separate stream.
*/ */
bool checkTimeLimits(); bool checkTimeLimit();
void checkQuota(Block & block); void checkQuota(Block & block);

View File

@ -687,19 +687,26 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline
stream->addTableLock(table_lock); stream->addTableLock(table_lock);
}); });
/** Set the limits and quota for reading data, the speed and time of the query. /// Set the limits and quota for reading data, the speed and time of the query.
* Such restrictions are checked on the initiating server of the request, and not on remote servers.
* Because the initiating server has a summary of the execution of the request on all servers.
*/
if (to_stage == QueryProcessingStage::Complete)
{ {
IProfilingBlockInputStream::LocalLimits limits; IProfilingBlockInputStream::LocalLimits limits;
limits.mode = IProfilingBlockInputStream::LIMITS_TOTAL; limits.mode = IProfilingBlockInputStream::LIMITS_TOTAL;
limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode); limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode);
limits.max_execution_time = settings.max_execution_time; limits.max_execution_time = settings.max_execution_time;
limits.timeout_overflow_mode = settings.timeout_overflow_mode; limits.timeout_overflow_mode = settings.timeout_overflow_mode;
limits.min_execution_speed = settings.min_execution_speed;
limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed; /** Quota and minimal speed restrictions are checked on the initiating server of the request, and not on remote servers,
* because the initiating server has a summary of the execution of the request on all servers.
*
* But limits on data size to read and maximum execution time are reasonable to check both on initiator and
* additionally on each remote server, because these limits are checked per block of data processed,
* and remote servers may process way more blocks of data than are received by initiator.
*/
if (to_stage == QueryProcessingStage::Complete)
{
limits.min_execution_speed = settings.min_execution_speed;
limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed;
}
QuotaForIntervals & quota = context.getQuota(); QuotaForIntervals & quota = context.getQuota();
@ -708,7 +715,9 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns(Pipeline
if (IProfilingBlockInputStream * p_stream = dynamic_cast<IProfilingBlockInputStream *>(stream.get())) if (IProfilingBlockInputStream * p_stream = dynamic_cast<IProfilingBlockInputStream *>(stream.get()))
{ {
p_stream->setLimits(limits); p_stream->setLimits(limits);
p_stream->setQuota(quota);
if (to_stage == QueryProcessingStage::Complete)
p_stream->setQuota(quota);
} }
}); });
} }

View File

@ -0,0 +1 @@
Still alive

View File

@ -0,0 +1 @@
Still alive

View File

@ -0,0 +1,10 @@
0
1
2
3
4
5
6
7
8
9

View File

@ -0,0 +1,2 @@
SET max_execution_time = 1, timeout_overflow_mode = 'break';
SELECT DISTINCT * FROM remote('127.0.0.{2,3}', system.numbers) WHERE number < 10;

4
debian/changelog vendored
View File

@ -1,5 +1,5 @@
clickhouse (1.1.54371) unstable; urgency=low clickhouse (1.1.54372) unstable; urgency=low
* Modified source code * Modified source code
-- <robot-metrika-test@yandex-team.ru> Fri, 23 Mar 2018 01:11:42 +0300 -- <robot-metrika-test@yandex-team.ru> Mon, 02 Apr 2018 22:13:54 +0300

5
debian/control vendored
View File

@ -50,11 +50,14 @@ Description: Server binary for clickhouse
. .
This package provides clickhouse common configuration files This package provides clickhouse common configuration files
Package: clickhouse-common-dbg Package: clickhouse-common-static-dbg
Architecture: any Architecture: any
Section: debug Section: debug
Priority: extra Priority: extra
Depends: ${misc:Depends}, clickhouse-common-static (= ${binary:Version}) Depends: ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
Replaces: clickhouse-common-dbg
Provides: clickhouse-common-dbg
Conflicts: clickhouse-common-dbg
Description: debugging symbols for clickhouse-common-static Description: debugging symbols for clickhouse-common-static
This package contains the debugging symbols for clickhouse-common. This package contains the debugging symbols for clickhouse-common.

2
debian/rules vendored
View File

@ -70,7 +70,7 @@ override_dh_clean:
dh_clean dh_clean
override_dh_strip: override_dh_strip:
dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-dbg dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
override_dh_install: override_dh_install:
# Making docs # Making docs

View File

@ -38,7 +38,7 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
Для столбцов также пишутся «засечки» каждую index_granularity строку, чтобы данные можно было читать в определённом диапазоне. Для столбцов также пишутся «засечки» каждую index_granularity строку, чтобы данные можно было читать в определённом диапазоне.
При чтении из таблицы, запрос SELECT анализируется на предмет того, можно ли использовать индексы. При чтении из таблицы, запрос SELECT анализируется на предмет того, можно ли использовать индексы.
Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN над столбцами, входящими в первичный ключ / дату, а также логические связки над ними. Индекс может использоваться, если в секции WHERE/PREWHERE, в качестве одного из элементов конъюнкции, или целиком, есть выражение, представляющее операции сравнения на равенства, неравенства, а также IN или LIKE с фиксированным префиксом, над столбцами или выражениями, входящими в первичный ключ или ключ партиционирования, либо над некоторыми частично монотонными функциями от этих столбцов, а также логические связки над такими выражениями.
Таким образом, обеспечивается возможность быстро выполнять запросы по одному или многим диапазонам первичного ключа. Например, в указанном примере будут быстро работать запросы для конкретного счётчика; для конкретного счётчика и диапазона дат; для конкретного счётчика и даты, для нескольких счётчиков и диапазона дат и т. п. Таким образом, обеспечивается возможность быстро выполнять запросы по одному или многим диапазонам первичного ключа. Например, в указанном примере будут быстро работать запросы для конкретного счётчика; для конкретного счётчика и диапазона дат; для конкретного счётчика и даты, для нескольких счётчиков и диапазона дат и т. п.