Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-28 02:21:59 +00:00

Commit 092f839aaa: Merge branch 'master' of https://github.com/ClickHouse/ClickHouse
.gitignore (vendored): 6 changed lines
@@ -13,12 +13,14 @@
 /build_*
 /build-*
 /docs/build
+/docs/publish
 /docs/edit
 /docs/tools/venv/
-/docs/en/development/build/
-/docs/ru/development/build/
 /docs/en/single.md
 /docs/ru/single.md
+/docs/zh/single.md
+/docs/ja/single.md
+/docs/fa/single.md
 
 # callgrind files
 callgrind.out.*
README.md
@@ -15,3 +15,4 @@ ClickHouse is an open-source column-oriented database management system that all
 ## Upcoming Events
 
 * [ClickHouse Meetup in San Francisco](https://www.eventbrite.com/e/clickhouse-february-meetup-registration-88496227599) on February 5.
+* [ClickHouse Meetup in New York](https://www.meetup.com/Uber-Engineering-Events-New-York/events/268328663/) on February 11.
contrib/CMakeLists.txt (vendored): 2 changed lines
@@ -306,7 +306,7 @@ if (USE_INTERNAL_AWS_S3_LIBRARY)
     # The library is large - avoid bloat.
     target_compile_options (aws_s3 PRIVATE -g0)
     target_compile_options (aws_s3_checksums PRIVATE -g0)
-    target_compile_options (libcurl PRIVATE -g0)
+    target_compile_options (curl PRIVATE -g0)
 endif ()
 
 if (USE_BASE64)
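The hunk above keeps the existing `-g0` pattern: vendored libraries are compiled without debug information so they do not bloat the resulting binaries; only the target name changes, from `libcurl` to `curl`. A minimal sketch of the same pattern, where `vendored_lib` and `vendored.c` are placeholders rather than names from this commit:

    # Sketch: compile a vendored dependency without debug info (GCC/Clang).
    # `vendored_lib` and `vendored.c` are illustrative placeholders.
    add_library (vendored_lib STATIC vendored.c)
    target_compile_options (vendored_lib PRIVATE -g0)  # emit no debug information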
@@ -102,4 +102,4 @@ if (OPENSSL_FOUND)
     target_link_libraries(aws_s3 PRIVATE ${OPENSSL_LIBRARIES})
 endif()
 
-target_link_libraries(aws_s3 PRIVATE aws_s3_checksums libcurl)
+target_link_libraries(aws_s3 PRIVATE aws_s3_checksums curl)
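The rename also has to follow the target wherever it is consumed, as in the `target_link_libraries` call above. `PRIVATE` matters here: curl is an implementation detail of aws_s3 and is not propagated to aws_s3's own consumers. A small sketch of that distinction, with hypothetical `app`/`net` targets:

    # Sketch: PRIVATE linking does not propagate to downstream consumers.
    # `app` and `net` are illustrative targets, not part of this commit.
    add_library (net STATIC net.c)
    add_library (app STATIC app.c)
    target_link_libraries (app PRIVATE net)  # consumers of `app` do not also link `net`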
Deleted file: CMake/CurlSymbolHiding.cmake
@@ -1,61 +0,0 @@
-include(CheckCSourceCompiles)
-
-option(CURL_HIDDEN_SYMBOLS "Set to ON to hide libcurl internal symbols (=hide all symbols that aren't officially external)." ON)
-mark_as_advanced(CURL_HIDDEN_SYMBOLS)
-
-if(CURL_HIDDEN_SYMBOLS)
-  set(SUPPORTS_SYMBOL_HIDING FALSE)
-
-  if(CMAKE_C_COMPILER_ID MATCHES "Clang")
-    set(SUPPORTS_SYMBOL_HIDING TRUE)
-    set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
-    set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
-  elseif(CMAKE_COMPILER_IS_GNUCC)
-    if(NOT CMAKE_VERSION VERSION_LESS 2.8.10)
-      set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION})
-    else()
-      execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion
-        OUTPUT_VARIABLE GCC_VERSION)
-    endif()
-    if(NOT GCC_VERSION VERSION_LESS 3.4)
-      # note: this is considered buggy prior to 4.0 but the autotools don't care, so let's ignore that fact
-      set(SUPPORTS_SYMBOL_HIDING TRUE)
-      set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
-      set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
-    endif()
-  elseif(CMAKE_C_COMPILER_ID MATCHES "SunPro" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 8.0)
-    set(SUPPORTS_SYMBOL_HIDING TRUE)
-    set(_SYMBOL_EXTERN "__global")
-    set(_CFLAG_SYMBOLS_HIDE "-xldscope=hidden")
-  elseif(CMAKE_C_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 9.0)
-    # note: this should probably just check for version 9.1.045 but I'm not 100% sure
-    # so let's do it the same way autotools do.
-    set(SUPPORTS_SYMBOL_HIDING TRUE)
-    set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
-    set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
-    check_c_source_compiles("#include <stdio.h>
-        int main (void) { printf(\"icc fvisibility bug test\"); return 0; }" _no_bug)
-    if(NOT _no_bug)
-      set(SUPPORTS_SYMBOL_HIDING FALSE)
-      set(_SYMBOL_EXTERN "")
-      set(_CFLAG_SYMBOLS_HIDE "")
-    endif()
-  elseif(MSVC)
-    set(SUPPORTS_SYMBOL_HIDING TRUE)
-  endif()
-
-  set(HIDES_CURL_PRIVATE_SYMBOLS ${SUPPORTS_SYMBOL_HIDING})
-elseif(MSVC)
-  if(NOT CMAKE_VERSION VERSION_LESS 3.7)
-    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) #present since 3.4.3 but broken
-    set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
-  else()
-    message(WARNING "Hiding private symbols regardless CURL_HIDDEN_SYMBOLS being disabled.")
-    set(HIDES_CURL_PRIVATE_SYMBOLS TRUE)
-  endif()
-else()
-  set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
-endif()
-
-set(CURL_CFLAG_SYMBOLS_HIDE ${_CFLAG_SYMBOLS_HIDE})
-set(CURL_EXTERN_SYMBOL ${_SYMBOL_EXTERN})
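The deleted module above probes each compiler for its symbol-hiding syntax (`-fvisibility=hidden` on GCC/Clang/ICC, `-xldscope=hidden` on SunPro, special handling for MSVC). On CMake 3.x the usual shortcut for the same effect is the visibility target properties. A minimal sketch, assuming some target named `curl`; this is the idiomatic equivalent, not the replacement ClickHouse committed:

    # Sketch: per-target symbol hiding without compiler probing (CMake >= 3.0).
    add_library (curl STATIC lib/url.c)  # illustrative source list
    set_target_properties (curl PROPERTIES
        C_VISIBILITY_PRESET hidden          # roughly equivalent to -fvisibility=hidden
        VISIBILITY_INLINES_HIDDEN ON)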
Deleted file: CMake/CurlTests.c
@@ -1,617 +0,0 @@
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-#ifdef TIME_WITH_SYS_TIME
-/* Time with sys/time test */
-
-#include <sys/types.h>
-#include <sys/time.h>
-#include <time.h>
-
-int
-main ()
-{
-  if ((struct tm *) 0)
-    return 0;
-  ;
-  return 0;
-}
-
-#endif
-
-#ifdef HAVE_FCNTL_O_NONBLOCK
-
-/* headers for FCNTL_O_NONBLOCK test */
-#include <sys/types.h>
-#include <unistd.h>
-#include <fcntl.h>
-/* */
-#if defined(sun) || defined(__sun__) || \
-    defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-# if defined(__SVR4) || defined(__srv4__)
-#  define PLATFORM_SOLARIS
-# else
-#  define PLATFORM_SUNOS4
-# endif
-#endif
-#if (defined(_AIX) || defined(__xlC__)) && !defined(_AIX41)
-# define PLATFORM_AIX_V3
-#endif
-/* */
-#if defined(PLATFORM_SUNOS4) || defined(PLATFORM_AIX_V3) || defined(__BEOS__)
-#error "O_NONBLOCK does not work on this platform"
-#endif
-
-int
-main ()
-{
-  /* O_NONBLOCK source test */
-  int flags = 0;
-  if(0 != fcntl(0, F_SETFL, flags | O_NONBLOCK))
-    return 1;
-  return 0;
-}
-#endif
-
-/* tests for gethostbyaddr_r or gethostbyname_r */
-#if defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
-    defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
-    defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
-    defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) || \
-    defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
-    defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
-#  define _REENTRANT
-   /* no idea whether _REENTRANT is always set, just invent a new flag */
-#  define TEST_GETHOSTBYFOO_REENTRANT
-#endif
-#if defined(HAVE_GETHOSTBYADDR_R_5) || \
-    defined(HAVE_GETHOSTBYADDR_R_7) || \
-    defined(HAVE_GETHOSTBYADDR_R_8) || \
-    defined(HAVE_GETHOSTBYNAME_R_3) || \
-    defined(HAVE_GETHOSTBYNAME_R_5) || \
-    defined(HAVE_GETHOSTBYNAME_R_6) || \
-    defined(TEST_GETHOSTBYFOO_REENTRANT)
-#include <sys/types.h>
-#include <netdb.h>
-int main(void)
-{
-  char *address = "example.com";
-  int length = 0;
-  int type = 0;
-  struct hostent h;
-  int rc = 0;
-#if defined(HAVE_GETHOSTBYADDR_R_5) || \
-    defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
-    \
-    defined(HAVE_GETHOSTBYNAME_R_3) || \
-    defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
-  struct hostent_data hdata;
-#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
-      defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
-      defined(HAVE_GETHOSTBYADDR_R_8) || \
-      defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
-      \
-      defined(HAVE_GETHOSTBYNAME_R_5) || \
-      defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
-      defined(HAVE_GETHOSTBYNAME_R_6) || \
-      defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
-  char buffer[8192];
-  int h_errnop;
-  struct hostent *hp;
-#endif
-
-#ifndef gethostbyaddr_r
-  (void)gethostbyaddr_r;
-#endif
-
-#if defined(HAVE_GETHOSTBYADDR_R_5) || \
-    defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT)
-  rc = gethostbyaddr_r(address, length, type, &h, &hdata);
-  (void)rc;
-#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
-      defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT)
-  hp = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &h_errnop);
-  (void)hp;
-#elif defined(HAVE_GETHOSTBYADDR_R_8) || \
-      defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT)
-  rc = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &hp, &h_errnop);
-  (void)rc;
-#endif
-
-#if defined(HAVE_GETHOSTBYNAME_R_3) || \
-    defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
-  rc = gethostbyname_r(address, &h, &hdata);
-#elif defined(HAVE_GETHOSTBYNAME_R_5) || \
-      defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT)
-  rc = gethostbyname_r(address, &h, buffer, 8192, &h_errnop);
-  (void)hp; /* not used for test */
-#elif defined(HAVE_GETHOSTBYNAME_R_6) || \
-      defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
-  rc = gethostbyname_r(address, &h, buffer, 8192, &hp, &h_errnop);
-#endif
-
-  (void)length;
-  (void)type;
-  (void)rc;
-  return 0;
-}
-#endif
-
-#ifdef HAVE_SOCKLEN_T
-#ifdef _WIN32
-#include <ws2tcpip.h>
-#else
-#include <sys/types.h>
-#include <sys/socket.h>
-#endif
-int
-main ()
-{
-  if ((socklen_t *) 0)
-    return 0;
-  if (sizeof (socklen_t))
-    return 0;
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_IN_ADDR_T
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <arpa/inet.h>
-
-int
-main ()
-{
-  if ((in_addr_t *) 0)
-    return 0;
-  if (sizeof (in_addr_t))
-    return 0;
-  ;
-  return 0;
-}
-#endif
-
-#ifdef HAVE_BOOL_T
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-#ifdef HAVE_STDBOOL_H
-#include <stdbool.h>
-#endif
-int
-main ()
-{
-  if (sizeof (bool *) )
-    return 0;
-  ;
-  return 0;
-}
-#endif
-
-#ifdef STDC_HEADERS
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-int main() { return 0; }
-#endif
-#ifdef RETSIGTYPE_TEST
-#include <sys/types.h>
-#include <signal.h>
-#ifdef signal
-# undef signal
-#endif
-#ifdef __cplusplus
-extern "C" void (*signal (int, void (*)(int)))(int);
-#else
-void (*signal ()) ();
-#endif
-
-int
-main ()
-{
-  return 0;
-}
-#endif
-#ifdef HAVE_INET_NTOA_R_DECL
-#include <arpa/inet.h>
-
-typedef void (*func_type)();
-
-int main()
-{
-#ifndef inet_ntoa_r
-  func_type func;
-  func = (func_type)inet_ntoa_r;
-  (void)func;
-#endif
-  return 0;
-}
-#endif
-#ifdef HAVE_INET_NTOA_R_DECL_REENTRANT
-#define _REENTRANT
-#include <arpa/inet.h>
-
-typedef void (*func_type)();
-
-int main()
-{
-#ifndef inet_ntoa_r
-  func_type func;
-  func = (func_type)&inet_ntoa_r;
-  (void)func;
-#endif
-  return 0;
-}
-#endif
-#ifdef HAVE_GETADDRINFO
-#include <netdb.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-
-int main(void) {
-  struct addrinfo hints, *ai;
-  int error;
-
-  memset(&hints, 0, sizeof(hints));
-  hints.ai_family = AF_UNSPEC;
-  hints.ai_socktype = SOCK_STREAM;
-#ifndef getaddrinfo
-  (void)getaddrinfo;
-#endif
-  error = getaddrinfo("127.0.0.1", "8080", &hints, &ai);
-  if (error) {
-    return 1;
-  }
-  return 0;
-}
-#endif
-#ifdef HAVE_FILE_OFFSET_BITS
-#ifdef _FILE_OFFSET_BITS
-#undef _FILE_OFFSET_BITS
-#endif
-#define _FILE_OFFSET_BITS 64
-#include <sys/types.h>
-/* Check that off_t can represent 2**63 - 1 correctly.
-   We can't simply define LARGE_OFF_T to be 9223372036854775807,
-   since some C++ compilers masquerading as C compilers
-   incorrectly reject 9223372036854775807. */
-#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
-int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
-                    && LARGE_OFF_T % 2147483647 == 1)
-                   ? 1 : -1];
-int main () { ; return 0; }
-#endif
-#ifdef HAVE_IOCTLSOCKET
-/* includes start */
-#ifdef HAVE_WINDOWS_H
-#  ifndef WIN32_LEAN_AND_MEAN
-#    define WIN32_LEAN_AND_MEAN
-#  endif
-#  include <windows.h>
-#  ifdef HAVE_WINSOCK2_H
-#    include <winsock2.h>
-#  else
-#    ifdef HAVE_WINSOCK_H
-#      include <winsock.h>
-#    endif
-#  endif
-#endif
-
-int
-main ()
-{
-
-  /* ioctlsocket source code */
-  int socket;
-  unsigned long flags = ioctlsocket(socket, FIONBIO, &flags);
-
-  ;
-  return 0;
-}
-
-#endif
-#ifdef HAVE_IOCTLSOCKET_CAMEL
-/* includes start */
-#ifdef HAVE_WINDOWS_H
-#  ifndef WIN32_LEAN_AND_MEAN
-#    define WIN32_LEAN_AND_MEAN
-#  endif
-#  include <windows.h>
-#  ifdef HAVE_WINSOCK2_H
-#    include <winsock2.h>
-#  else
-#    ifdef HAVE_WINSOCK_H
-#      include <winsock.h>
-#    endif
-#  endif
-#endif
-
-int
-main ()
-{
-
-  /* IoctlSocket source code */
-  if(0 != IoctlSocket(0, 0, 0))
-    return 1;
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_IOCTLSOCKET_CAMEL_FIONBIO
-/* includes start */
-#ifdef HAVE_WINDOWS_H
-#  ifndef WIN32_LEAN_AND_MEAN
-#    define WIN32_LEAN_AND_MEAN
-#  endif
-#  include <windows.h>
-#  ifdef HAVE_WINSOCK2_H
-#    include <winsock2.h>
-#  else
-#    ifdef HAVE_WINSOCK_H
-#      include <winsock.h>
-#    endif
-#  endif
-#endif
-
-int
-main ()
-{
-
-  /* IoctlSocket source code */
-  long flags = 0;
-  if(0 != ioctlsocket(0, FIONBIO, &flags))
-    return 1;
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_IOCTLSOCKET_FIONBIO
-/* includes start */
-#ifdef HAVE_WINDOWS_H
-#  ifndef WIN32_LEAN_AND_MEAN
-#    define WIN32_LEAN_AND_MEAN
-#  endif
-#  include <windows.h>
-#  ifdef HAVE_WINSOCK2_H
-#    include <winsock2.h>
-#  else
-#    ifdef HAVE_WINSOCK_H
-#      include <winsock.h>
-#    endif
-#  endif
-#endif
-
-int
-main ()
-{
-
-  int flags = 0;
-  if(0 != ioctlsocket(0, FIONBIO, &flags))
-    return 1;
-
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_IOCTL_FIONBIO
-/* headers for FIONBIO test */
-/* includes start */
-#ifdef HAVE_SYS_TYPES_H
-#  include <sys/types.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#  include <unistd.h>
-#endif
-#ifdef HAVE_SYS_SOCKET_H
-#  include <sys/socket.h>
-#endif
-#ifdef HAVE_SYS_IOCTL_H
-#  include <sys/ioctl.h>
-#endif
-#ifdef HAVE_STROPTS_H
-#  include <stropts.h>
-#endif
-
-int
-main ()
-{
-
-  int flags = 0;
-  if(0 != ioctl(0, FIONBIO, &flags))
-    return 1;
-
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_IOCTL_SIOCGIFADDR
-/* headers for FIONBIO test */
-/* includes start */
-#ifdef HAVE_SYS_TYPES_H
-#  include <sys/types.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#  include <unistd.h>
-#endif
-#ifdef HAVE_SYS_SOCKET_H
-#  include <sys/socket.h>
-#endif
-#ifdef HAVE_SYS_IOCTL_H
-#  include <sys/ioctl.h>
-#endif
-#ifdef HAVE_STROPTS_H
-#  include <stropts.h>
-#endif
-#include <net/if.h>
-
-int
-main ()
-{
-  struct ifreq ifr;
-  if(0 != ioctl(0, SIOCGIFADDR, &ifr))
-    return 1;
-
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_SETSOCKOPT_SO_NONBLOCK
-/* includes start */
-#ifdef HAVE_WINDOWS_H
-#  ifndef WIN32_LEAN_AND_MEAN
-#    define WIN32_LEAN_AND_MEAN
-#  endif
-#  include <windows.h>
-#  ifdef HAVE_WINSOCK2_H
-#    include <winsock2.h>
-#  else
-#    ifdef HAVE_WINSOCK_H
-#      include <winsock.h>
-#    endif
-#  endif
-#endif
-/* includes start */
-#ifdef HAVE_SYS_TYPES_H
-#  include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_SOCKET_H
-#  include <sys/socket.h>
-#endif
-/* includes end */
-
-int
-main ()
-{
-  if(0 != setsockopt(0, SOL_SOCKET, SO_NONBLOCK, 0, 0))
-    return 1;
-  ;
-  return 0;
-}
-#endif
-#ifdef HAVE_GLIBC_STRERROR_R
-#include <string.h>
-#include <errno.h>
-
-void check(char c) {}
-
-int
-main () {
-  char buffer[1024];
-  /* This will not compile if strerror_r does not return a char* */
-  check(strerror_r(EACCES, buffer, sizeof(buffer))[0]);
-  return 0;
-}
-#endif
-#ifdef HAVE_POSIX_STRERROR_R
-#include <string.h>
-#include <errno.h>
-
-/* float, because a pointer can't be implicitly cast to float */
-void check(float f) {}
-
-int
-main () {
-  char buffer[1024];
-  /* This will not compile if strerror_r does not return an int */
-  check(strerror_r(EACCES, buffer, sizeof(buffer)));
-  return 0;
-}
-#endif
-#ifdef HAVE_FSETXATTR_6
-#include <sys/xattr.h> /* header from libc, not from libattr */
-int
-main() {
-  fsetxattr(0, 0, 0, 0, 0, 0);
-  return 0;
-}
-#endif
-#ifdef HAVE_FSETXATTR_5
-#include <sys/xattr.h> /* header from libc, not from libattr */
-int
-main() {
-  fsetxattr(0, 0, 0, 0, 0);
-  return 0;
-}
-#endif
-#ifdef HAVE_CLOCK_GETTIME_MONOTONIC
-#include <time.h>
-int
-main() {
-  struct timespec ts = {0, 0};
-  clock_gettime(CLOCK_MONOTONIC, &ts);
-  return 0;
-}
-#endif
-#ifdef HAVE_BUILTIN_AVAILABLE
-int
-main() {
-  if(__builtin_available(macOS 10.12, *)) {}
-  return 0;
-}
-#endif
-#ifdef HAVE_VARIADIC_MACROS_C99
-#define c99_vmacro3(first, ...) fun3(first, __VA_ARGS__)
-#define c99_vmacro2(first, ...) fun2(first, __VA_ARGS__)
-
-int fun3(int arg1, int arg2, int arg3);
-int fun2(int arg1, int arg2);
-
-int fun3(int arg1, int arg2, int arg3) {
-  return arg1 + arg2 + arg3;
-}
-int fun2(int arg1, int arg2) {
-  return arg1 + arg2;
-}
-
-int
-main() {
-  int res3 = c99_vmacro3(1, 2, 3);
-  int res2 = c99_vmacro2(1, 2);
-  (void)res3;
-  (void)res2;
-  return 0;
-}
-#endif
-#ifdef HAVE_VARIADIC_MACROS_GCC
-#define gcc_vmacro3(first, args...) fun3(first, args)
-#define gcc_vmacro2(first, args...) fun2(first, args)
-
-int fun3(int arg1, int arg2, int arg3);
-int fun2(int arg1, int arg2);
-
-int fun3(int arg1, int arg2, int arg3) {
-  return arg1 + arg2 + arg3;
-}
-int fun2(int arg1, int arg2) {
-  return arg1 + arg2;
-}
-
-int
-main() {
-  int res3 = gcc_vmacro3(1, 2, 3);
-  int res2 = gcc_vmacro2(1, 2);
-  (void)res3;
-  (void)res2;
-  return 0;
-}
-#endif
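CurlTests.c above is not one program but a bundle of probe programs, each selected by a guard macro; the build compiles the file once per `-D<GUARD>` and turns compile success or failure into a feature flag (the repository's driver for this is `curl_internal_test` in the Macros.cmake hunk below). A standalone sketch of the same idea using `check_c_source_compiles`; the variable names here are illustrative:

    include (CheckCSourceCompiles)
    # Select one probe from CurlTests.c by defining its guard macro.
    set (CMAKE_REQUIRED_DEFINITIONS -DHAVE_POSIX_STRERROR_R)
    file (READ "${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c" _probe_source)
    # The probe compiles only if strerror_r has the POSIX int-returning signature.
    check_c_source_compiles ("${_probe_source}" HAVE_POSIX_STRERROR_R)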
Deleted file: CMake/Macros.cmake
@@ -1,84 +0,0 @@
-#File defines convenience macros for available feature testing
-
-# This macro checks if the symbol exists in the library and if it
-# does, it prepends library to the list. It is intended to be called
-# multiple times with a sequence of possibly dependent libraries in
-# order of least-to-most-dependent. Some libraries depend on others
-# to link correctly.
-macro(check_library_exists_concat LIBRARY SYMBOL VARIABLE)
-  check_library_exists("${LIBRARY};${CURL_LIBS}" ${SYMBOL} "${CMAKE_LIBRARY_PATH}"
-    ${VARIABLE})
-  if(${VARIABLE})
-    set(CURL_LIBS ${LIBRARY} ${CURL_LIBS})
-  endif()
-endmacro()
-
-# Check if header file exists and add it to the list.
-# This macro is intended to be called multiple times with a sequence of
-# possibly dependent header files. Some headers depend on others to be
-# compiled correctly.
-macro(check_include_file_concat FILE VARIABLE)
-  check_include_files("${CURL_INCLUDES};${FILE}" ${VARIABLE})
-  if(${VARIABLE})
-    set(CURL_INCLUDES ${CURL_INCLUDES} ${FILE})
-    set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -D${VARIABLE}")
-  endif()
-endmacro()
-
-# For other curl specific tests, use this macro.
-macro(curl_internal_test CURL_TEST)
-  if(NOT DEFINED "${CURL_TEST}")
-    set(MACRO_CHECK_FUNCTION_DEFINITIONS
-      "-D${CURL_TEST} ${CURL_TEST_DEFINES} ${CMAKE_REQUIRED_FLAGS}")
-    if(CMAKE_REQUIRED_LIBRARIES)
-      set(CURL_TEST_ADD_LIBRARIES
-        "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
-    endif()
-
-    try_compile(${CURL_TEST}
-      ${CMAKE_BINARY_DIR}
-      ${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c
-      CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
-      "${CURL_TEST_ADD_LIBRARIES}"
-      OUTPUT_VARIABLE OUTPUT)
-    if(${CURL_TEST})
-      set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}")
-      file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
-        "Performing Curl Test ${CURL_TEST} passed with the following output:\n"
-        "${OUTPUT}\n")
-    else()
-      set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}")
-      file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
-        "Performing Curl Test ${CURL_TEST} failed with the following output:\n"
-        "${OUTPUT}\n")
-    endif()
-  endif()
-endmacro()
-
-macro(curl_nroff_check)
-  find_program(NROFF NAMES gnroff nroff)
-  if(NROFF)
-    # Need a way to write to stdin, this will do
-    file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt" "test")
-    # Tests for a valid nroff option to generate a manpage
-    foreach(_MANOPT "-man" "-mandoc")
-      execute_process(COMMAND "${NROFF}" ${_MANOPT}
-        OUTPUT_VARIABLE NROFF_MANOPT_OUTPUT
-        INPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt"
-        ERROR_QUIET)
-      # Save the option if it was valid
-      if(NROFF_MANOPT_OUTPUT)
-        set(NROFF_MANOPT ${_MANOPT})
-        set(NROFF_USEFUL ON)
-        break()
-      endif()
-    endforeach()
-    # No need for the temporary file
-    file(REMOVE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt")
-    if(NOT NROFF_USEFUL)
-      message(WARNING "Found no *nroff option to get plaintext from man pages")
-    endif()
-  else()
-    message(WARNING "Found no *nroff program")
-  endif()
-endmacro()
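The `*_concat` macros above are accumulative: every header that is found is appended to `CURL_INCLUDES` (and a matching `-D` flag to `CURL_TEST_DEFINES`), so each later probe compiles against everything discovered so far; that is what lets `sys/socket.h` be detected on platforms where it only compiles after `sys/types.h`. A short usage sketch based on calls that appear in the rewritten CMakeLists hunk further below:

    # Sketch: order matters; earlier headers become context for later checks.
    check_include_file_concat ("sys/types.h"  HAVE_SYS_TYPES_H)
    check_include_file_concat ("sys/socket.h" HAVE_SYS_SOCKET_H)  # compiled after sys/types.h
    curl_internal_test (HAVE_GLIBC_STRERROR_R)  # builds CMake/CurlTests.c with -DHAVE_GLIBC_STRERROR_R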
Deleted file: CMake/OtherTests.cmake
@@ -1,260 +0,0 @@
-include(CheckCSourceCompiles)
-# The begin of the sources (macros and includes)
-set(_source_epilogue "#undef inline")
-
-macro(add_header_include check header)
-  if(${check})
-    set(_source_epilogue "${_source_epilogue}\n#include <${header}>")
-  endif()
-endmacro()
-
-set(signature_call_conv)
-if(HAVE_WINDOWS_H)
-  add_header_include(HAVE_WINSOCK2_H "winsock2.h")
-  add_header_include(HAVE_WINDOWS_H "windows.h")
-  add_header_include(HAVE_WINSOCK_H "winsock.h")
-  set(_source_epilogue
-    "${_source_epilogue}\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif")
-  set(signature_call_conv "PASCAL")
-  if(HAVE_LIBWS2_32)
-    set(CMAKE_REQUIRED_LIBRARIES ws2_32)
-  endif()
-else()
-  add_header_include(HAVE_SYS_TYPES_H "sys/types.h")
-  add_header_include(HAVE_SYS_SOCKET_H "sys/socket.h")
-endif()
-
-set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
-
-check_c_source_compiles("${_source_epilogue}
-int main(void) {
-  recv(0, 0, 0, 0);
-  return 0;
-}" curl_cv_recv)
-if(curl_cv_recv)
-  if(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
-    foreach(recv_retv "int" "ssize_t" )
-      foreach(recv_arg1 "SOCKET" "int" )
-        foreach(recv_arg2 "char *" "void *" )
-          foreach(recv_arg3 "int" "size_t" "socklen_t" "unsigned int")
-            foreach(recv_arg4 "int" "unsigned int")
-              if(NOT curl_cv_func_recv_done)
-                unset(curl_cv_func_recv_test CACHE)
-                check_c_source_compiles("
-                  ${_source_epilogue}
-                  extern ${recv_retv} ${signature_call_conv}
-                  recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4});
-                  int main(void) {
-                    ${recv_arg1} s=0;
-                    ${recv_arg2} buf=0;
-                    ${recv_arg3} len=0;
-                    ${recv_arg4} flags=0;
-                    ${recv_retv} res = recv(s, buf, len, flags);
-                    (void) res;
-                    return 0;
-                  }"
-                  curl_cv_func_recv_test)
-                if(curl_cv_func_recv_test)
-                  set(curl_cv_func_recv_args
-                    "${recv_arg1},${recv_arg2},${recv_arg3},${recv_arg4},${recv_retv}")
-                  set(RECV_TYPE_ARG1 "${recv_arg1}")
-                  set(RECV_TYPE_ARG2 "${recv_arg2}")
-                  set(RECV_TYPE_ARG3 "${recv_arg3}")
-                  set(RECV_TYPE_ARG4 "${recv_arg4}")
-                  set(RECV_TYPE_RETV "${recv_retv}")
-                  set(HAVE_RECV 1)
-                  set(curl_cv_func_recv_done 1)
-                endif()
-              endif()
-            endforeach()
-          endforeach()
-        endforeach()
-      endforeach()
-    endforeach()
-  else()
-    string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG1 "${curl_cv_func_recv_args}")
-    string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG2 "${curl_cv_func_recv_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG3 "${curl_cv_func_recv_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" RECV_TYPE_ARG4 "${curl_cv_func_recv_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" RECV_TYPE_RETV "${curl_cv_func_recv_args}")
-  endif()
-
-  if("${curl_cv_func_recv_args}" STREQUAL "unknown")
-    message(FATAL_ERROR "Cannot find proper types to use for recv args")
-  endif()
-else()
-  message(FATAL_ERROR "Unable to link function recv")
-endif()
-set(curl_cv_func_recv_args "${curl_cv_func_recv_args}" CACHE INTERNAL "Arguments for recv")
-set(HAVE_RECV 1)
-
-check_c_source_compiles("${_source_epilogue}
-int main(void) {
-  send(0, 0, 0, 0);
-  return 0;
-}" curl_cv_send)
-if(curl_cv_send)
-  if(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
-    foreach(send_retv "int" "ssize_t" )
-      foreach(send_arg1 "SOCKET" "int" "ssize_t" )
-        foreach(send_arg2 "const char *" "const void *" "void *" "char *")
-          foreach(send_arg3 "int" "size_t" "socklen_t" "unsigned int")
-            foreach(send_arg4 "int" "unsigned int")
-              if(NOT curl_cv_func_send_done)
-                unset(curl_cv_func_send_test CACHE)
-                check_c_source_compiles("
-                  ${_source_epilogue}
-                  extern ${send_retv} ${signature_call_conv}
-                  send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4});
-                  int main(void) {
-                    ${send_arg1} s=0;
-                    ${send_arg2} buf=0;
-                    ${send_arg3} len=0;
-                    ${send_arg4} flags=0;
-                    ${send_retv} res = send(s, buf, len, flags);
-                    (void) res;
-                    return 0;
-                  }"
-                  curl_cv_func_send_test)
-                if(curl_cv_func_send_test)
-                  string(REGEX REPLACE "(const) .*" "\\1" send_qual_arg2 "${send_arg2}")
-                  string(REGEX REPLACE "const (.*)" "\\1" send_arg2 "${send_arg2}")
-                  set(curl_cv_func_send_args
-                    "${send_arg1},${send_arg2},${send_arg3},${send_arg4},${send_retv},${send_qual_arg2}")
-                  set(SEND_TYPE_ARG1 "${send_arg1}")
-                  set(SEND_TYPE_ARG2 "${send_arg2}")
-                  set(SEND_TYPE_ARG3 "${send_arg3}")
-                  set(SEND_TYPE_ARG4 "${send_arg4}")
-                  set(SEND_TYPE_RETV "${send_retv}")
-                  set(HAVE_SEND 1)
-                  set(curl_cv_func_send_done 1)
-                endif()
-              endif()
-            endforeach()
-          endforeach()
-        endforeach()
-      endforeach()
-    endforeach()
-  else()
-    string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG1 "${curl_cv_func_send_args}")
-    string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG2 "${curl_cv_func_send_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG3 "${curl_cv_func_send_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG4 "${curl_cv_func_send_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" SEND_TYPE_RETV "${curl_cv_func_send_args}")
-    string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" SEND_QUAL_ARG2 "${curl_cv_func_send_args}")
-  endif()
-
-  if("${curl_cv_func_send_args}" STREQUAL "unknown")
-    message(FATAL_ERROR "Cannot find proper types to use for send args")
-  endif()
-  set(SEND_QUAL_ARG2 "const")
-else()
-  message(FATAL_ERROR "Unable to link function send")
-endif()
-set(curl_cv_func_send_args "${curl_cv_func_send_args}" CACHE INTERNAL "Arguments for send")
-set(HAVE_SEND 1)
-
-check_c_source_compiles("${_source_epilogue}
-int main(void) {
-  int flag = MSG_NOSIGNAL;
-  (void)flag;
-  return 0;
-}" HAVE_MSG_NOSIGNAL)
-
-if(NOT HAVE_WINDOWS_H)
-  add_header_include(HAVE_SYS_TIME_H "sys/time.h")
-  add_header_include(TIME_WITH_SYS_TIME "time.h")
-  add_header_include(HAVE_TIME_H "time.h")
-endif()
-check_c_source_compiles("${_source_epilogue}
-int main(void) {
-  struct timeval ts;
-  ts.tv_sec = 0;
-  ts.tv_usec = 0;
-  (void)ts;
-  return 0;
-}" HAVE_STRUCT_TIMEVAL)
-
-set(HAVE_SIG_ATOMIC_T 1)
-set(CMAKE_REQUIRED_FLAGS)
-if(HAVE_SIGNAL_H)
-  set(CMAKE_REQUIRED_FLAGS "-DHAVE_SIGNAL_H")
-  set(CMAKE_EXTRA_INCLUDE_FILES "signal.h")
-endif()
-check_type_size("sig_atomic_t" SIZEOF_SIG_ATOMIC_T)
-if(HAVE_SIZEOF_SIG_ATOMIC_T)
-  check_c_source_compiles("
-    #ifdef HAVE_SIGNAL_H
-    #  include <signal.h>
-    #endif
-    int main(void) {
-      static volatile sig_atomic_t dummy = 0;
-      (void)dummy;
-      return 0;
-    }" HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
-  if(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
-    set(HAVE_SIG_ATOMIC_T_VOLATILE 1)
-  endif()
-endif()
-
-if(HAVE_WINDOWS_H)
-  set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h)
-else()
-  set(CMAKE_EXTRA_INCLUDE_FILES)
-  if(HAVE_SYS_SOCKET_H)
-    set(CMAKE_EXTRA_INCLUDE_FILES sys/socket.h)
-  endif()
-endif()
-
-check_type_size("struct sockaddr_storage" SIZEOF_STRUCT_SOCKADDR_STORAGE)
-if(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE)
-  set(HAVE_STRUCT_SOCKADDR_STORAGE 1)
-endif()
-
-unset(CMAKE_TRY_COMPILE_TARGET_TYPE)
-
-if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
-  # if not cross-compilation...
-  include(CheckCSourceRuns)
-  set(CMAKE_REQUIRED_FLAGS "")
-  if(HAVE_SYS_POLL_H)
-    set(CMAKE_REQUIRED_FLAGS "-DHAVE_SYS_POLL_H")
-  elseif(HAVE_POLL_H)
-    set(CMAKE_REQUIRED_FLAGS "-DHAVE_POLL_H")
-  endif()
-  check_c_source_runs("
-    #include <stdlib.h>
-    #include <sys/time.h>
-
-    #ifdef HAVE_SYS_POLL_H
-    #  include <sys/poll.h>
-    #elif HAVE_POLL_H
-    #  include <poll.h>
-    #endif
-
-    int main(void)
-    {
-      if(0 != poll(0, 0, 10)) {
-        return 1; /* fail */
-      }
-      else {
-        /* detect the 10.12 poll() breakage */
-        struct timeval before, after;
-        int rc;
-        size_t us;
-
-        gettimeofday(&before, NULL);
-        rc = poll(NULL, 0, 500);
-        gettimeofday(&after, NULL);
-
-        us = (after.tv_sec - before.tv_sec) * 1000000 +
-          (after.tv_usec - before.tv_usec);
-
-        if(us < 400000) {
-          return 1;
-        }
-      }
-      return 0;
-    }" HAVE_POLL_FINE)
-endif()
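Note the two kinds of probes in the deleted module above: `check_c_source_compiles` only needs a working compiler, but the `HAVE_POLL_FINE` test uses `check_c_source_runs`, which must execute the probe on the build machine; that is why it sits behind the `CMAKE_TOOLCHAIN_FILE` guard and is skipped when cross-compiling. A minimal sketch of that guard pattern (the probe body and result variable here are trivial stand-ins):

    # Sketch: run-time probes are only meaningful when not cross-compiling.
    if (NOT DEFINED CMAKE_TOOLCHAIN_FILE)
        include (CheckCSourceRuns)
        check_c_source_runs ("int main(void) { return 0; }" PROBE_CAN_RUN)
    endif ()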
Deleted file: the notice accompanying the reused curl CMake files
@@ -1,28 +0,0 @@
-#***************************************************************************
-#                                  _   _ ____  _
-#  Project                     ___| | | |  _ \| |
-#                             / __| | | | |_) | |
-#                            | (__| |_| |  _ <| |___
-#                             \___|\___/|_| \_\_____|
-#
-# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at https://curl.haxx.se/docs/copyright.html.
-#
-# You may opt to use, copy, modify, merge, publish, distribute and/or sell
-# copies of the Software, and permit persons to whom the Software is
-# furnished to do so, under the terms of the COPYING file.
-#
-# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
-# KIND, either express or implied.
-#
-###########################################################################
-
-CMake files under this directory were reused from project curl.
-Here are links to original source files:
-https://github.com/curl/curl/blob/master/CMake/CurlSymbolHiding.cmake
-https://github.com/curl/curl/blob/master/CMake/CurlTests.c
-https://github.com/curl/curl/blob/master/CMake/Macros.cmake
-https://github.com/curl/curl/blob/master/CMake/OtherTests.cmake
[One file diff suppressed because it is too large.]
@ -1,605 +1,148 @@
|
|||||||
#***************************************************************************
|
set (CURL_DIR ${ClickHouse_SOURCE_DIR}/contrib/curl)
|
||||||
# _ _ ____ _
|
|
||||||
# Project ___| | | | _ \| |
|
set (SRCS
|
||||||
# / __| | | | |_) | |
|
${CURL_DIR}/lib/file.c
|
||||||
# | (__| |_| | _ <| |___
|
${CURL_DIR}/lib/timeval.c
|
||||||
# \___|\___/|_| \_\_____|
|
${CURL_DIR}/lib/base64.c
|
||||||
#
|
${CURL_DIR}/lib/hostip.c
|
||||||
# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
|
${CURL_DIR}/lib/progress.c
|
||||||
#
|
${CURL_DIR}/lib/formdata.c
|
||||||
# This software is licensed as described in the file COPYING, which
|
${CURL_DIR}/lib/cookie.c
|
||||||
# you should have received as part of this distribution. The terms
|
${CURL_DIR}/lib/http.c
|
||||||
# are also available at https://curl.haxx.se/docs/copyright.html.
|
${CURL_DIR}/lib/sendf.c
|
||||||
#
|
${CURL_DIR}/lib/url.c
|
||||||
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
${CURL_DIR}/lib/dict.c
|
||||||
# copies of the Software, and permit persons to whom the Software is
|
${CURL_DIR}/lib/if2ip.c
|
||||||
# furnished to do so, under the terms of the COPYING file.
|
${CURL_DIR}/lib/speedcheck.c
|
||||||
#
|
${CURL_DIR}/lib/ldap.c
|
||||||
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
${CURL_DIR}/lib/version.c
|
||||||
# KIND, either express or implied.
|
${CURL_DIR}/lib/getenv.c
|
||||||
#
|
${CURL_DIR}/lib/escape.c
|
||||||
###########################################################################
|
${CURL_DIR}/lib/mprintf.c
|
||||||
|
${CURL_DIR}/lib/telnet.c
|
||||||
# NOTE:
|
${CURL_DIR}/lib/netrc.c
|
||||||
# This file is shrinked and reworked version of original curl CMakeLists.txt
|
${CURL_DIR}/lib/getinfo.c
|
||||||
# Original file link https://github.com/curl/curl/blob/3b8bbbbd1609c638a3d3d0acb148a33dedb67be3/CMakeLists.txt
|
${CURL_DIR}/lib/transfer.c
|
||||||
# If you need to update curl building you can find patch file in this directory
|
${CURL_DIR}/lib/strcase.c
|
||||||
# and apply it to fresh original CMakeLists.txt file.
|
${CURL_DIR}/lib/easy.c
|
||||||
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
|
${CURL_DIR}/lib/security.c
|
||||||
|
${CURL_DIR}/lib/curl_fnmatch.c
|
||||||
SET(CURL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/curl)
|
${CURL_DIR}/lib/fileinfo.c
|
||||||
SET(CURL_LIBRARY_DIR ${CURL_SOURCE_DIR}/lib)
|
${CURL_DIR}/lib/wildcard.c
|
||||||
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}")
|
${CURL_DIR}/lib/krb5.c
|
||||||
# Disable status messages when perform checks
|
${CURL_DIR}/lib/memdebug.c
|
||||||
set(CMAKE_REQUIRED_QUIET TRUE)
|
${CURL_DIR}/lib/http_chunks.c
|
||||||
|
${CURL_DIR}/lib/strtok.c
|
||||||
include(Macros)
|
${CURL_DIR}/lib/connect.c
|
||||||
include(CMakeDependentOption)
|
${CURL_DIR}/lib/llist.c
|
||||||
include(CheckCCompilerFlag)
|
${CURL_DIR}/lib/hash.c
|
||||||
|
${CURL_DIR}/lib/multi.c
|
||||||
file(READ ${CURL_SOURCE_DIR}/include/curl/curlver.h CURL_VERSION_H_CONTENTS)
|
${CURL_DIR}/lib/content_encoding.c
|
||||||
string(REGEX MATCH "#define LIBCURL_VERSION \"[^\"]*"
|
${CURL_DIR}/lib/share.c
|
||||||
CURL_VERSION ${CURL_VERSION_H_CONTENTS})
|
${CURL_DIR}/lib/http_digest.c
|
||||||
string(REGEX REPLACE "[^\"]+\"" "" CURL_VERSION ${CURL_VERSION})
|
${CURL_DIR}/lib/md4.c
|
||||||
string(REGEX MATCH "#define LIBCURL_VERSION_NUM 0x[0-9a-fA-F]+"
|
${CURL_DIR}/lib/md5.c
|
||||||
CURL_VERSION_NUM ${CURL_VERSION_H_CONTENTS})
|
${CURL_DIR}/lib/http_negotiate.c
|
||||||
string(REGEX REPLACE "[^0]+0x" "" CURL_VERSION_NUM ${CURL_VERSION_NUM})
|
${CURL_DIR}/lib/inet_pton.c
|
||||||
|
${CURL_DIR}/lib/strtoofft.c
|
||||||
message(STATUS "Use curl version=[${CURL_VERSION}]")
|
${CURL_DIR}/lib/strerror.c
|
||||||
set(OPERATING_SYSTEM "${CMAKE_SYSTEM_NAME}")
|
${CURL_DIR}/lib/amigaos.c
|
||||||
set(OS "\"${CMAKE_SYSTEM_NAME}\"")
|
${CURL_DIR}/lib/hostasyn.c
|
||||||
|
${CURL_DIR}/lib/hostip4.c
|
||||||
option(PICKY_COMPILER "Enable picky compiler options" ON)
|
${CURL_DIR}/lib/hostip6.c
|
||||||
option(ENABLE_THREADED_RESOLVER "Set to ON to enable threaded DNS lookup" ON)
|
${CURL_DIR}/lib/hostsyn.c
|
||||||
|
${CURL_DIR}/lib/inet_ntop.c
|
||||||
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG)
|
${CURL_DIR}/lib/parsedate.c
|
||||||
if(PICKY_COMPILER)
|
${CURL_DIR}/lib/select.c
|
||||||
foreach(_CCOPT -pedantic -Wall -W -Wpointer-arith -Wwrite-strings -Wunused -Wshadow -Winline -Wnested-externs -Wmissing-declarations -Wmissing-prototypes -Wno-long-long -Wfloat-equal -Wno-multichar -Wsign-compare -Wundef -Wno-format-nonliteral -Wendif-labels -Wstrict-prototypes -Wdeclaration-after-statement -Wstrict-aliasing=3 -Wcast-align -Wtype-limits -Wold-style-declaration -Wmissing-parameter-type -Wempty-body -Wclobbered -Wignored-qualifiers -Wconversion -Wno-sign-conversion -Wvla -Wdouble-promotion -Wno-system-headers -Wno-pedantic-ms-format)
|
${CURL_DIR}/lib/splay.c
|
||||||
# surprisingly, CHECK_C_COMPILER_FLAG needs a new variable to store each new
|
${CURL_DIR}/lib/strdup.c
|
||||||
# test result in.
|
${CURL_DIR}/lib/socks.c
|
||||||
check_c_compiler_flag(${_CCOPT} OPT${_CCOPT})
|
${CURL_DIR}/lib/curl_addrinfo.c
|
||||||
if(OPT${_CCOPT})
|
${CURL_DIR}/lib/socks_gssapi.c
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_CCOPT}")
|
${CURL_DIR}/lib/socks_sspi.c
|
||||||
endif()
|
${CURL_DIR}/lib/curl_sspi.c
|
||||||
endforeach()
|
${CURL_DIR}/lib/slist.c
|
||||||
endif()
|
${CURL_DIR}/lib/nonblock.c
|
||||||
endif()
|
${CURL_DIR}/lib/curl_memrchr.c
|
||||||
|
${CURL_DIR}/lib/imap.c
|
||||||
# initialize CURL_LIBS
|
${CURL_DIR}/lib/pop3.c
|
||||||
set(CURL_LIBS "")
|
${CURL_DIR}/lib/smtp.c
|
||||||
|
${CURL_DIR}/lib/pingpong.c
|
||||||
include(CurlSymbolHiding)
|
${CURL_DIR}/lib/rtsp.c
|
||||||
|
${CURL_DIR}/lib/curl_threads.c
|
||||||
# Http only
|
${CURL_DIR}/lib/warnless.c
|
||||||
set(CURL_DISABLE_FTP ON)
|
${CURL_DIR}/lib/hmac.c
|
||||||
set(CURL_DISABLE_LDAP ON)
|
${CURL_DIR}/lib/curl_rtmp.c
|
||||||
set(CURL_DISABLE_LDAPS ON)
|
${CURL_DIR}/lib/openldap.c
|
||||||
set(CURL_DISABLE_TELNET ON)
|
${CURL_DIR}/lib/curl_gethostname.c
|
||||||
set(CURL_DISABLE_DICT ON)
|
${CURL_DIR}/lib/gopher.c
|
||||||
set(CURL_DISABLE_FILE ON)
|
${CURL_DIR}/lib/idn_win32.c
|
||||||
set(CURL_DISABLE_TFTP ON)
|
${CURL_DIR}/lib/http_proxy.c
|
||||||
set(CURL_DISABLE_RTSP ON)
|
${CURL_DIR}/lib/non-ascii.c
|
||||||
set(CURL_DISABLE_POP3 ON)
|
${CURL_DIR}/lib/asyn-thread.c
|
||||||
set(CURL_DISABLE_IMAP ON)
|
${CURL_DIR}/lib/curl_gssapi.c
|
||||||
set(CURL_DISABLE_SMTP ON)
|
${CURL_DIR}/lib/http_ntlm.c
|
||||||
set(CURL_DISABLE_GOPHER ON)
|
${CURL_DIR}/lib/curl_ntlm_wb.c
|
||||||
|
${CURL_DIR}/lib/curl_ntlm_core.c
|
||||||
option(CURL_DISABLE_COOKIES "to disable cookies support" OFF)
|
${CURL_DIR}/lib/curl_sasl.c
|
||||||
mark_as_advanced(CURL_DISABLE_COOKIES)
|
${CURL_DIR}/lib/rand.c
|
||||||
|
${CURL_DIR}/lib/curl_multibyte.c
|
||||||
option(CURL_DISABLE_CRYPTO_AUTH "to disable cryptographic authentication" OFF)
|
${CURL_DIR}/lib/hostcheck.c
|
||||||
mark_as_advanced(CURL_DISABLE_CRYPTO_AUTH)
|
${CURL_DIR}/lib/conncache.c
|
||||||
|
${CURL_DIR}/lib/dotdot.c
|
||||||
option(CURL_DISABLE_VERBOSE_STRINGS "to disable verbose strings" OFF)
|
${CURL_DIR}/lib/x509asn1.c
|
||||||
mark_as_advanced(CURL_DISABLE_VERBOSE_STRINGS)
|
${CURL_DIR}/lib/http2.c
|
||||||
|
${CURL_DIR}/lib/smb.c
|
||||||
option(ENABLE_IPV6 "Define if you want to enable IPv6 support" ON)
|
${CURL_DIR}/lib/curl_endian.c
|
||||||
mark_as_advanced(ENABLE_IPV6)
|
${CURL_DIR}/lib/curl_des.c
|
||||||
|
${CURL_DIR}/lib/system_win32.c
|
||||||
if(ENABLE_IPV6 AND NOT WIN32)
|
${CURL_DIR}/lib/mime.c
|
||||||
include(CheckStructHasMember)
|
${CURL_DIR}/lib/sha256.c
|
||||||
check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h"
|
${CURL_DIR}/lib/setopt.c
|
||||||
HAVE_SOCKADDR_IN6_SIN6_ADDR)
|
${CURL_DIR}/lib/curl_path.c
|
||||||
check_struct_has_member("struct sockaddr_in6" sin6_scope_id "netinet/in.h"
|
${CURL_DIR}/lib/curl_ctype.c
|
||||||
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID)
|
${CURL_DIR}/lib/curl_range.c
|
||||||
if(NOT HAVE_SOCKADDR_IN6_SIN6_ADDR)
|
${CURL_DIR}/lib/psl.c
|
||||||
message(WARNING "struct sockaddr_in6 not available, disabling IPv6 support")
|
${CURL_DIR}/lib/doh.c
|
||||||
# Force the feature off as this name is used as guard macro...
|
${CURL_DIR}/lib/urlapi.c
|
||||||
set(ENABLE_IPV6 OFF
|
${CURL_DIR}/lib/curl_get_line.c
|
||||||
CACHE BOOL "Define if you want to enable IPv6 support" FORCE)
|
${CURL_DIR}/lib/altsvc.c
|
||||||
endif()
|
${CURL_DIR}/lib/socketpair.c
|
||||||
endif()
|
${CURL_DIR}/lib/vauth/vauth.c
|
||||||
|
${CURL_DIR}/lib/vauth/cleartext.c
|
||||||
# We need ansi c-flags, especially on HP
|
${CURL_DIR}/lib/vauth/cram.c
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}")
|
${CURL_DIR}/lib/vauth/digest.c
|
||||||
set(CMAKE_REQUIRED_FLAGS ${CMAKE_ANSI_CFLAGS})
|
${CURL_DIR}/lib/vauth/digest_sspi.c
|
||||||
|
${CURL_DIR}/lib/vauth/krb5_gssapi.c
|
||||||
# Include all the necessary files for macros
|
${CURL_DIR}/lib/vauth/krb5_sspi.c
|
||||||
include(CheckFunctionExists)
|
${CURL_DIR}/lib/vauth/ntlm.c
|
||||||
include(CheckIncludeFile)
|
${CURL_DIR}/lib/vauth/ntlm_sspi.c
|
||||||
include(CheckIncludeFiles)
|
${CURL_DIR}/lib/vauth/oauth2.c
|
||||||
include(CheckLibraryExists)
|
${CURL_DIR}/lib/vauth/spnego_gssapi.c
|
||||||
include(CheckSymbolExists)
|
${CURL_DIR}/lib/vauth/spnego_sspi.c
|
||||||
include(CheckTypeSize)
|
${CURL_DIR}/lib/vtls/openssl.c
|
||||||
include(CheckCSourceCompiles)
|
${CURL_DIR}/lib/vtls/gtls.c
|
||||||
|
${CURL_DIR}/lib/vtls/vtls.c
|
||||||
if(ENABLE_THREADED_RESOLVER)
|
${CURL_DIR}/lib/vtls/nss.c
|
||||||
find_package(Threads REQUIRED)
|
${CURL_DIR}/lib/vtls/polarssl.c
|
||||||
set(USE_THREADS_POSIX ${CMAKE_USE_PTHREADS_INIT})
|
${CURL_DIR}/lib/vtls/polarssl_threadlock.c
|
||||||
set(HAVE_PTHREAD_H ${CMAKE_USE_PTHREADS_INIT})
|
${CURL_DIR}/lib/vtls/wolfssl.c
|
||||||
set(CURL_LIBS ${CURL_LIBS} ${CMAKE_THREAD_LIBS_INIT})
|
${CURL_DIR}/lib/vtls/schannel.c
|
||||||
endif()
|
${CURL_DIR}/lib/vtls/schannel_verify.c
|
||||||
|
${CURL_DIR}/lib/vtls/sectransp.c
|
||||||
# Check for all needed libraries
|
${CURL_DIR}/lib/vtls/gskit.c
|
||||||
|
${CURL_DIR}/lib/vtls/mbedtls.c
|
||||||
# We don't want any plugin loading at runtime. It is harmful.
|
${CURL_DIR}/lib/vtls/mesalink.c
|
||||||
#check_library_exists_concat("${CMAKE_DL_LIBS}" dlopen HAVE_LIBDL)
|
${CURL_DIR}/lib/vtls/bearssl.c
|
||||||
|
${CURL_DIR}/lib/vquic/ngtcp2.c
|
||||||
# This is unneeded.
|
${CURL_DIR}/lib/vquic/quiche.c
|
||||||
#check_library_exists_concat("socket" connect HAVE_LIBSOCKET)
|
${CURL_DIR}/lib/vssh/libssh2.c
|
||||||
|
${CURL_DIR}/lib/vssh/libssh.c
|
||||||
set (NOT_NEED_LIBNSL 1)
|
|
||||||
set (gethostname HAVE_GETHOSTNAME 1)
|
|
||||||
|
|
||||||
# From cmake/find/ssl.cmake
|
|
||||||
if (OPENSSL_FOUND)
|
|
||||||
set(SSL_ENABLED ON)
|
|
||||||
set(USE_OPENSSL ON)
|
|
||||||
|
|
||||||
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
|
|
||||||
check_include_file("openssl/crypto.h" HAVE_OPENSSL_CRYPTO_H)
|
|
||||||
check_include_file("openssl/err.h" HAVE_OPENSSL_ERR_H)
|
|
||||||
check_include_file("openssl/pem.h" HAVE_OPENSSL_PEM_H)
|
|
||||||
check_include_file("openssl/rsa.h" HAVE_OPENSSL_RSA_H)
|
|
||||||
check_include_file("openssl/ssl.h" HAVE_OPENSSL_SSL_H)
|
|
||||||
check_include_file("openssl/x509.h" HAVE_OPENSSL_X509_H)
|
|
||||||
check_include_file("openssl/rand.h" HAVE_OPENSSL_RAND_H)
|
|
||||||
check_symbol_exists(RAND_status "${CURL_INCLUDES}" HAVE_RAND_STATUS)
|
|
||||||
check_symbol_exists(RAND_screen "${CURL_INCLUDES}" HAVE_RAND_SCREEN)
|
|
||||||
check_symbol_exists(RAND_egd "${CURL_INCLUDES}" HAVE_RAND_EGD)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# Check for idn
|
|
||||||
# No, we don't need that.
|
|
||||||
# check_library_exists_concat("idn2" idn2_lookup_ul HAVE_LIBIDN2)
|
|
||||||
|
|
||||||
# Check for symbol dlopen (same as HAVE_LIBDL)
|
|
||||||
# We don't want any plugin loading at runtime. It is harmful.
|
|
||||||
# check_library_exists("${CURL_LIBS}" dlopen "" HAVE_DLOPEN)
|
|
||||||
|
|
||||||
# From /cmake/find/zlib.cmake
|
|
||||||
if (ZLIB_FOUND)
|
|
||||||
set(HAVE_ZLIB_H ON)
|
|
||||||
set(HAVE_LIBZ ON)
|
|
||||||
set(USE_ZLIB ON)
|
|
||||||
|
|
||||||
list(APPEND CURL_LIBS ${ZLIB_LIBRARIES})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
option(ENABLE_UNIX_SOCKETS "Define if you want Unix domain sockets support" OFF)
|
|
||||||
if(ENABLE_UNIX_SOCKETS)
|
|
||||||
include(CheckStructHasMember)
|
|
||||||
check_struct_has_member("struct sockaddr_un" sun_path "sys/un.h" USE_UNIX_SOCKETS)
|
|
||||||
else()
|
|
||||||
unset(USE_UNIX_SOCKETS CACHE)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# CA handling
|
|
||||||
# Explicitly set to most common case
|
|
||||||
if (OPENSSL_FOUND)
|
|
||||||
set(CURL_CA_BUNDLE "/etc/ssl/certs/ca-certificates.crt")
|
|
||||||
set(CURL_CA_BUNDLE_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
|
|
||||||
set(CURL_CA_PATH "/etc/ssl/certs")
|
|
||||||
set(CURL_CA_PATH_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
check_include_file_concat("stdio.h" HAVE_STDIO_H)
|
|
||||||
check_include_file_concat("inttypes.h" HAVE_INTTYPES_H)
|
|
||||||
check_include_file_concat("sys/filio.h" HAVE_SYS_FILIO_H)
|
|
||||||
check_include_file_concat("sys/ioctl.h" HAVE_SYS_IOCTL_H)
|
|
||||||
check_include_file_concat("sys/param.h" HAVE_SYS_PARAM_H)
|
|
||||||
check_include_file_concat("sys/poll.h" HAVE_SYS_POLL_H)
|
|
||||||
check_include_file_concat("sys/resource.h" HAVE_SYS_RESOURCE_H)
|
|
||||||
check_include_file_concat("sys/select.h" HAVE_SYS_SELECT_H)
|
|
||||||
check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)
|
|
||||||
check_include_file_concat("sys/sockio.h" HAVE_SYS_SOCKIO_H)
|
|
||||||
check_include_file_concat("sys/stat.h" HAVE_SYS_STAT_H)
|
|
||||||
check_include_file_concat("sys/time.h" HAVE_SYS_TIME_H)
|
|
||||||
check_include_file_concat("sys/types.h" HAVE_SYS_TYPES_H)
|
|
||||||
check_include_file_concat("sys/uio.h" HAVE_SYS_UIO_H)
|
|
||||||
check_include_file_concat("sys/un.h" HAVE_SYS_UN_H)
|
|
||||||
check_include_file_concat("sys/utime.h" HAVE_SYS_UTIME_H)
|
|
||||||
check_include_file_concat("sys/xattr.h" HAVE_SYS_XATTR_H)
|
|
||||||
check_include_file_concat("alloca.h" HAVE_ALLOCA_H)
|
|
||||||
check_include_file_concat("arpa/inet.h" HAVE_ARPA_INET_H)
|
|
||||||
#check_include_file_concat("arpa/tftp.h" HAVE_ARPA_TFTP_H)
|
|
||||||
check_include_file_concat("assert.h" HAVE_ASSERT_H)
|
|
||||||
check_include_file_concat("crypto.h" HAVE_CRYPTO_H)
|
|
||||||
check_include_file_concat("des.h" HAVE_DES_H)
|
|
||||||
check_include_file_concat("err.h" HAVE_ERR_H)
|
|
||||||
check_include_file_concat("errno.h" HAVE_ERRNO_H)
|
|
||||||
check_include_file_concat("fcntl.h" HAVE_FCNTL_H)
|
|
||||||
#check_include_file_concat("idn2.h" HAVE_IDN2_H)
|
|
||||||
check_include_file_concat("ifaddrs.h" HAVE_IFADDRS_H)
|
|
||||||
check_include_file_concat("io.h" HAVE_IO_H)
|
|
||||||
check_include_file_concat("krb.h" HAVE_KRB_H)
|
|
||||||
check_include_file_concat("libgen.h" HAVE_LIBGEN_H)
|
|
||||||
check_include_file_concat("locale.h" HAVE_LOCALE_H)
|
|
||||||
check_include_file_concat("net/if.h" HAVE_NET_IF_H)
|
|
||||||
check_include_file_concat("netdb.h" HAVE_NETDB_H)
|
|
||||||
check_include_file_concat("netinet/in.h" HAVE_NETINET_IN_H)
|
|
||||||
check_include_file_concat("netinet/tcp.h" HAVE_NETINET_TCP_H)
|
|
||||||
|
|
||||||
check_include_file_concat("pem.h" HAVE_PEM_H)
|
|
||||||
check_include_file_concat("poll.h" HAVE_POLL_H)
|
|
||||||
check_include_file_concat("pwd.h" HAVE_PWD_H)
|
|
||||||
check_include_file_concat("rsa.h" HAVE_RSA_H)
|
|
||||||
check_include_file_concat("setjmp.h" HAVE_SETJMP_H)
|
|
||||||
check_include_file_concat("sgtty.h" HAVE_SGTTY_H)
|
|
||||||
check_include_file_concat("signal.h" HAVE_SIGNAL_H)
|
|
||||||
check_include_file_concat("ssl.h" HAVE_SSL_H)
|
|
||||||
check_include_file_concat("stdbool.h" HAVE_STDBOOL_H)
|
|
||||||
check_include_file_concat("stdint.h" HAVE_STDINT_H)
|
|
||||||
check_include_file_concat("stdio.h" HAVE_STDIO_H)
|
|
||||||
check_include_file_concat("stdlib.h" HAVE_STDLIB_H)
|
|
||||||
check_include_file_concat("string.h" HAVE_STRING_H)
|
|
||||||
check_include_file_concat("strings.h" HAVE_STRINGS_H)
|
|
||||||
check_include_file_concat("stropts.h" HAVE_STROPTS_H)
|
|
||||||
check_include_file_concat("termio.h" HAVE_TERMIO_H)
|
|
||||||
check_include_file_concat("termios.h" HAVE_TERMIOS_H)
|
|
||||||
check_include_file_concat("time.h" HAVE_TIME_H)
|
|
||||||
check_include_file_concat("unistd.h" HAVE_UNISTD_H)
|
|
||||||
check_include_file_concat("utime.h" HAVE_UTIME_H)
|
|
||||||
check_include_file_concat("x509.h" HAVE_X509_H)
|
|
||||||
|
|
||||||
check_include_file_concat("process.h" HAVE_PROCESS_H)
|
|
||||||
check_include_file_concat("stddef.h" HAVE_STDDEF_H)
|
|
||||||
#check_include_file_concat("dlfcn.h" HAVE_DLFCN_H)
|
|
||||||
check_include_file_concat("malloc.h" HAVE_MALLOC_H)
|
|
||||||
check_include_file_concat("memory.h" HAVE_MEMORY_H)
|
|
||||||
check_include_file_concat("netinet/if_ether.h" HAVE_NETINET_IF_ETHER_H)
|
|
||||||
check_include_file_concat("stdint.h" HAVE_STDINT_H)
|
|
||||||
check_include_file_concat("sockio.h" HAVE_SOCKIO_H)
|
|
||||||
check_include_file_concat("sys/utsname.h" HAVE_SYS_UTSNAME_H)
|
|
||||||
|
|
||||||
check_type_size(size_t SIZEOF_SIZE_T)
|
|
||||||
check_type_size(ssize_t SIZEOF_SSIZE_T)
|
|
||||||
check_type_size("long long" SIZEOF_LONG_LONG)
|
|
||||||
check_type_size("long" SIZEOF_LONG)
|
|
||||||
check_type_size("short" SIZEOF_SHORT)
|
|
||||||
check_type_size("int" SIZEOF_INT)
|
|
||||||
check_type_size("__int64" SIZEOF___INT64)
|
|
||||||
check_type_size("long double" SIZEOF_LONG_DOUBLE)
|
|
||||||
check_type_size("time_t" SIZEOF_TIME_T)
|
|
||||||
|
|
||||||
set(HAVE_LONGLONG 1)
|
|
||||||
set(HAVE_LL 1)
|
|
||||||
|
|
||||||
set(RANDOM_FILE /dev/urandom)
|
|
||||||
|
|
||||||
-
-check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
-check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
-check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
-check_symbol_exists(poll "${CURL_INCLUDES}" HAVE_POLL)
-check_symbol_exists(strdup "${CURL_INCLUDES}" HAVE_STRDUP)
-check_symbol_exists(strstr "${CURL_INCLUDES}" HAVE_STRSTR)
-check_symbol_exists(strtok_r "${CURL_INCLUDES}" HAVE_STRTOK_R)
-check_symbol_exists(strftime "${CURL_INCLUDES}" HAVE_STRFTIME)
-check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
-check_symbol_exists(strcasecmp "${CURL_INCLUDES}" HAVE_STRCASECMP)
-#check_symbol_exists(stricmp "${CURL_INCLUDES}" HAVE_STRICMP)
-#check_symbol_exists(strcmpi "${CURL_INCLUDES}" HAVE_STRCMPI)
-#check_symbol_exists(strncmpi "${CURL_INCLUDES}" HAVE_STRNCMPI)
-check_symbol_exists(alarm "${CURL_INCLUDES}" HAVE_ALARM)
-#check_symbol_exists(gethostbyaddr "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR)
-check_symbol_exists(gethostbyaddr_r "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR_R)
-check_symbol_exists(gettimeofday "${CURL_INCLUDES}" HAVE_GETTIMEOFDAY)
-check_symbol_exists(inet_addr "${CURL_INCLUDES}" HAVE_INET_ADDR)
-#check_symbol_exists(inet_ntoa "${CURL_INCLUDES}" HAVE_INET_NTOA)
-check_symbol_exists(inet_ntoa_r "${CURL_INCLUDES}" HAVE_INET_NTOA_R)
-check_symbol_exists(tcsetattr "${CURL_INCLUDES}" HAVE_TCSETATTR)
-check_symbol_exists(tcgetattr "${CURL_INCLUDES}" HAVE_TCGETATTR)
-check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
-check_symbol_exists(closesocket "${CURL_INCLUDES}" HAVE_CLOSESOCKET)
-check_symbol_exists(setvbuf "${CURL_INCLUDES}" HAVE_SETVBUF)
-check_symbol_exists(sigsetjmp "${CURL_INCLUDES}" HAVE_SIGSETJMP)
-check_symbol_exists(getpass_r "${CURL_INCLUDES}" HAVE_GETPASS_R)
-#check_symbol_exists(strlcat "${CURL_INCLUDES}" HAVE_STRLCAT)
-#check_symbol_exists(getpwuid "${CURL_INCLUDES}" HAVE_GETPWUID)
-check_symbol_exists(getpwuid_r "${CURL_INCLUDES}" HAVE_GETPWUID_R)
-check_symbol_exists(geteuid "${CURL_INCLUDES}" HAVE_GETEUID)
-check_symbol_exists(usleep "${CURL_INCLUDES}" HAVE_USLEEP)
-check_symbol_exists(utime "${CURL_INCLUDES}" HAVE_UTIME)
-check_symbol_exists(gmtime_r "${CURL_INCLUDES}" HAVE_GMTIME_R)
-check_symbol_exists(localtime_r "${CURL_INCLUDES}" HAVE_LOCALTIME_R)
-
-#check_symbol_exists(gethostbyname "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME)
-check_symbol_exists(gethostbyname_r "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME_R)
-
-check_symbol_exists(signal "${CURL_INCLUDES}" HAVE_SIGNAL_FUNC)
-check_symbol_exists(SIGALRM "${CURL_INCLUDES}" HAVE_SIGNAL_MACRO)
-set(HAVE_SIGNAL 1)
-check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
-check_symbol_exists(strtoll "${CURL_INCLUDES}" HAVE_STRTOLL)
-#check_symbol_exists(_strtoi64 "${CURL_INCLUDES}" HAVE__STRTOI64)
-check_symbol_exists(strerror_r "${CURL_INCLUDES}" HAVE_STRERROR_R)
-check_symbol_exists(siginterrupt "${CURL_INCLUDES}" HAVE_SIGINTERRUPT)
-check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
-check_symbol_exists(fork "${CURL_INCLUDES}" HAVE_FORK)
-check_symbol_exists(getaddrinfo "${CURL_INCLUDES}" HAVE_GETADDRINFO)
-check_symbol_exists(freeaddrinfo "${CURL_INCLUDES}" HAVE_FREEADDRINFO)
-check_symbol_exists(freeifaddrs "${CURL_INCLUDES}" HAVE_FREEIFADDRS)
-check_symbol_exists(pipe "${CURL_INCLUDES}" HAVE_PIPE)
-check_symbol_exists(ftruncate "${CURL_INCLUDES}" HAVE_FTRUNCATE)
-check_symbol_exists(getprotobyname "${CURL_INCLUDES}" HAVE_GETPROTOBYNAME)
-check_symbol_exists(getpeername "${CURL_INCLUDES}" HAVE_GETPEERNAME)
-check_symbol_exists(getsockname "${CURL_INCLUDES}" HAVE_GETSOCKNAME)
-check_symbol_exists(if_nametoindex "${CURL_INCLUDES}" HAVE_IF_NAMETOINDEX)
-check_symbol_exists(getrlimit "${CURL_INCLUDES}" HAVE_GETRLIMIT)
-check_symbol_exists(setlocale "${CURL_INCLUDES}" HAVE_SETLOCALE)
-check_symbol_exists(setmode "${CURL_INCLUDES}" HAVE_SETMODE)
-check_symbol_exists(setrlimit "${CURL_INCLUDES}" HAVE_SETRLIMIT)
-check_symbol_exists(fcntl "${CURL_INCLUDES}" HAVE_FCNTL)
-check_symbol_exists(ioctl "${CURL_INCLUDES}" HAVE_IOCTL)
-check_symbol_exists(setsockopt "${CURL_INCLUDES}" HAVE_SETSOCKOPT)
-check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME)
-
-check_symbol_exists(fsetxattr "${CURL_INCLUDES}" HAVE_FSETXATTR)
-if(HAVE_FSETXATTR)
-  foreach(CURL_TEST HAVE_FSETXATTR_5 HAVE_FSETXATTR_6)
-    curl_internal_test(${CURL_TEST})
-  endforeach()
-endif()
-
-# sigaction and sigsetjmp are special. Use special mechanism for
-# detecting those, but only if previous attempt failed.
-if(HAVE_SIGNAL_H)
-  check_symbol_exists(sigaction "signal.h" HAVE_SIGACTION)
-endif()
-
-if(NOT HAVE_SIGSETJMP)
-  if(HAVE_SETJMP_H)
-    check_symbol_exists(sigsetjmp "setjmp.h" HAVE_MACRO_SIGSETJMP)
-    if(HAVE_MACRO_SIGSETJMP)
-      set(HAVE_SIGSETJMP 1)
-    endif()
-  endif()
-endif()
-
-# If there is no stricmp(), do not allow LDAP to parse URLs
-if(NOT HAVE_STRICMP)
-  set(HAVE_LDAP_URL_PARSE 1)
-endif()
-
-# Do curl specific tests
-foreach(CURL_TEST
-    HAVE_FCNTL_O_NONBLOCK
-    HAVE_IOCTLSOCKET
-    HAVE_IOCTLSOCKET_CAMEL
-    HAVE_IOCTLSOCKET_CAMEL_FIONBIO
-    HAVE_IOCTLSOCKET_FIONBIO
-    HAVE_IOCTL_FIONBIO
-    HAVE_IOCTL_SIOCGIFADDR
-    HAVE_SETSOCKOPT_SO_NONBLOCK
-    HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID
-    TIME_WITH_SYS_TIME
-    HAVE_O_NONBLOCK
-    HAVE_GETHOSTBYADDR_R_5
-    HAVE_GETHOSTBYADDR_R_7
-    HAVE_GETHOSTBYADDR_R_8
-    HAVE_GETHOSTBYADDR_R_5_REENTRANT
-    HAVE_GETHOSTBYADDR_R_7_REENTRANT
-    HAVE_GETHOSTBYADDR_R_8_REENTRANT
-    HAVE_GETHOSTBYNAME_R_3
-    HAVE_GETHOSTBYNAME_R_5
-    HAVE_GETHOSTBYNAME_R_6
-    HAVE_GETHOSTBYNAME_R_3_REENTRANT
-    HAVE_GETHOSTBYNAME_R_5_REENTRANT
-    HAVE_GETHOSTBYNAME_R_6_REENTRANT
-    HAVE_IN_ADDR_T
-    HAVE_BOOL_T
-    STDC_HEADERS
-    RETSIGTYPE_TEST
-    HAVE_INET_NTOA_R_DECL
-    HAVE_INET_NTOA_R_DECL_REENTRANT
-    HAVE_GETADDRINFO
-    HAVE_FILE_OFFSET_BITS
-    HAVE_VARIADIC_MACROS_C99
-    HAVE_VARIADIC_MACROS_GCC
-    )
-  curl_internal_test(${CURL_TEST})
-endforeach()
-
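Each name in the loop above selects a small probe program (from curl's CMake/CurlTests.c) that curl_internal_test() tries to compile, setting the variable on success. Paraphrased rather than verbatim, the HAVE_FCNTL_O_NONBLOCK probe amounts to:

    #include <fcntl.h>
    #include <unistd.h>

    int main()
    {
        /* Feature test: does fcntl() accept O_NONBLOCK on this platform? */
        int flags = fcntl(0, F_GETFL, 0);
        return fcntl(0, F_SETFL, flags | O_NONBLOCK) == -1;
    }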
-if(HAVE_FILE_OFFSET_BITS)
-  set(_FILE_OFFSET_BITS 64)
-  set(CMAKE_REQUIRED_FLAGS "-D_FILE_OFFSET_BITS=64")
-endif()
-check_type_size("off_t" SIZEOF_OFF_T)
-
-# include this header to get the type
-set(CMAKE_REQUIRED_INCLUDES "${CURL_SOURCE_DIR}/include")
-set(CMAKE_EXTRA_INCLUDE_FILES "curl/system.h")
-check_type_size("curl_off_t" SIZEOF_CURL_OFF_T)
-set(CMAKE_EXTRA_INCLUDE_FILES "")
-
-foreach(CURL_TEST
-    HAVE_GLIBC_STRERROR_R
-    HAVE_POSIX_STRERROR_R
-    )
-  curl_internal_test(${CURL_TEST})
-endforeach()
-
-# Check for reentrant
-foreach(CURL_TEST
-    HAVE_GETHOSTBYADDR_R_5
-    HAVE_GETHOSTBYADDR_R_7
-    HAVE_GETHOSTBYADDR_R_8
-    HAVE_GETHOSTBYNAME_R_3
-    HAVE_GETHOSTBYNAME_R_5
-    HAVE_GETHOSTBYNAME_R_6
-    HAVE_INET_NTOA_R_DECL_REENTRANT)
-  if(NOT ${CURL_TEST})
-    if(${CURL_TEST}_REENTRANT)
-      set(NEED_REENTRANT 1)
-    endif()
-  endif()
-endforeach()
-
-if(NEED_REENTRANT)
-  foreach(CURL_TEST
-      HAVE_GETHOSTBYADDR_R_5
-      HAVE_GETHOSTBYADDR_R_7
-      HAVE_GETHOSTBYADDR_R_8
-      HAVE_GETHOSTBYNAME_R_3
-      HAVE_GETHOSTBYNAME_R_5
-      HAVE_GETHOSTBYNAME_R_6)
-    set(${CURL_TEST} 0)
-    if(${CURL_TEST}_REENTRANT)
-      set(${CURL_TEST} 1)
-    endif()
-  endforeach()
-endif()
-
-if(HAVE_INET_NTOA_R_DECL_REENTRANT)
-  set(HAVE_INET_NTOA_R_DECL 1)
-  set(NEED_REENTRANT 1)
-endif()
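Background on these probes: gethostbyname_r() and gethostbyaddr_r() have incompatible signatures across platforms, and the numeric suffix records which argument count compiled; the _REENTRANT variants retry the same probe with -D_REENTRANT defined. A minimal usage sketch of the six-argument form that HAVE_GETHOSTBYNAME_R_6 detects, assuming a glibc target:

    #include <netdb.h>
    #include <cstdio>

    int main()
    {
        hostent host;
        hostent * result = nullptr;
        char buf[2048];
        int err = 0;
        /* glibc's six-argument form: the result arrives through an out-parameter. */
        if (gethostbyname_r("localhost", &host, buf, sizeof(buf), &result, &err) == 0 && result)
            std::printf("resolved %s\n", result->h_name);
        return 0;
    }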
-
-# Check clock_gettime(CLOCK_MONOTONIC, x) support
-curl_internal_test(HAVE_CLOCK_GETTIME_MONOTONIC)
-
-# Check compiler support of __builtin_available()
-curl_internal_test(HAVE_BUILTIN_AVAILABLE)
-
-# Some other minor tests
-
-if(NOT HAVE_IN_ADDR_T)
-  set(in_addr_t "unsigned long")
-endif()
-
-# Check for nonblocking
-set(HAVE_DISABLED_NONBLOCKING 1)
-if(HAVE_FIONBIO OR
-   HAVE_IOCTLSOCKET OR
-   HAVE_IOCTLSOCKET_CASE OR
-   HAVE_O_NONBLOCK)
-  set(HAVE_DISABLED_NONBLOCKING)
-endif()
-
-set(CURL_PULL_SYS_TYPES_H ${HAVE_SYS_TYPES_H})
-set(CURL_PULL_SYS_SOCKET_H ${HAVE_SYS_SOCKET_H})
-set(CURL_PULL_SYS_POLL_H ${HAVE_SYS_POLL_H})
-set(CURL_PULL_STDINT_H ${HAVE_STDINT_H})
-set(CURL_PULL_INTTYPES_H ${HAVE_INTTYPES_H})
-
-include(CMake/OtherTests.cmake)
-
-SET(LIB_VAUTH_CFILES
-    "${CURL_LIBRARY_DIR}/vauth/vauth.c" "${CURL_LIBRARY_DIR}/vauth/cleartext.c" "${CURL_LIBRARY_DIR}/vauth/cram.c"
-    "${CURL_LIBRARY_DIR}/vauth/digest.c" "${CURL_LIBRARY_DIR}/vauth/digest_sspi.c" "${CURL_LIBRARY_DIR}/vauth/krb5_gssapi.c"
-    "${CURL_LIBRARY_DIR}/vauth/krb5_sspi.c" "${CURL_LIBRARY_DIR}/vauth/ntlm.c" "${CURL_LIBRARY_DIR}/vauth/ntlm_sspi.c" "${CURL_LIBRARY_DIR}/vauth/oauth2.c"
-    "${CURL_LIBRARY_DIR}/vauth/spnego_gssapi.c" "${CURL_LIBRARY_DIR}/vauth/spnego_sspi.c")
-
-SET(LIB_VAUTH_HFILES "${CURL_LIBRARY_DIR}/vauth/vauth.h" "${CURL_LIBRARY_DIR}/vauth/digest.h" "${CURL_LIBRARY_DIR}/vauth/ntlm.h")
-
-SET(LIB_VTLS_CFILES "${CURL_LIBRARY_DIR}/vtls/openssl.c" "${CURL_LIBRARY_DIR}/vtls/gtls.c" "${CURL_LIBRARY_DIR}/vtls/vtls.c" "${CURL_LIBRARY_DIR}/vtls/nss.c"
-    "${CURL_LIBRARY_DIR}/vtls/polarssl.c" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.c"
-    "${CURL_LIBRARY_DIR}/vtls/wolfssl.c" "${CURL_LIBRARY_DIR}/vtls/schannel.c" "${CURL_LIBRARY_DIR}/vtls/schannel_verify.c"
-    "${CURL_LIBRARY_DIR}/vtls/sectransp.c" "${CURL_LIBRARY_DIR}/vtls/gskit.c" "${CURL_LIBRARY_DIR}/vtls/mbedtls.c" "${CURL_LIBRARY_DIR}/vtls/mesalink.c"
-    "${CURL_LIBRARY_DIR}/vtls/bearssl.c")
-
-SET(LIB_VTLS_HFILES "${CURL_LIBRARY_DIR}/vtls/openssl.h" "${CURL_LIBRARY_DIR}/vtls/vtls.h" "${CURL_LIBRARY_DIR}/vtls/gtls.h"
-    "${CURL_LIBRARY_DIR}/vtls/nssg.h" "${CURL_LIBRARY_DIR}/vtls/polarssl.h" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.h"
-    "${CURL_LIBRARY_DIR}/vtls/wolfssl.h" "${CURL_LIBRARY_DIR}/vtls/schannel.h" "${CURL_LIBRARY_DIR}/vtls/sectransp.h" "${CURL_LIBRARY_DIR}/vtls/gskit.h"
-    "${CURL_LIBRARY_DIR}/vtls/mbedtls.h" "${CURL_LIBRARY_DIR}/vtls/mesalink.h" "${CURL_LIBRARY_DIR}/vtls/bearssl.h")
-
-SET(LIB_VQUIC_CFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.c" "${CURL_LIBRARY_DIR}/vquic/quiche.c")
-
-SET(LIB_VQUIC_HFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.h" "${CURL_LIBRARY_DIR}/vquic/quiche.h")
-
-SET(LIB_VSSH_CFILES "${CURL_LIBRARY_DIR}/vssh/libssh2.c" "${CURL_LIBRARY_DIR}/vssh/libssh.c")
-
-SET(LIB_VSSH_HFILES "${CURL_LIBRARY_DIR}/vssh/ssh.h")
-
-SET(LIB_CFILES "${CURL_LIBRARY_DIR}/file.c"
-    "${CURL_LIBRARY_DIR}/timeval.c" "${CURL_LIBRARY_DIR}/base64.c" "${CURL_LIBRARY_DIR}/hostip.c" "${CURL_LIBRARY_DIR}/progress.c" "${CURL_LIBRARY_DIR}/formdata.c"
-    "${CURL_LIBRARY_DIR}/cookie.c" "${CURL_LIBRARY_DIR}/http.c" "${CURL_LIBRARY_DIR}/sendf.c" "${CURL_LIBRARY_DIR}/url.c" "${CURL_LIBRARY_DIR}/dict.c" "${CURL_LIBRARY_DIR}/if2ip.c" "${CURL_LIBRARY_DIR}/speedcheck.c"
-    "${CURL_LIBRARY_DIR}/ldap.c" "${CURL_LIBRARY_DIR}/version.c" "${CURL_LIBRARY_DIR}/getenv.c" "${CURL_LIBRARY_DIR}/escape.c" "${CURL_LIBRARY_DIR}/mprintf.c" "${CURL_LIBRARY_DIR}/telnet.c" "${CURL_LIBRARY_DIR}/netrc.c"
-    "${CURL_LIBRARY_DIR}/getinfo.c" "${CURL_LIBRARY_DIR}/transfer.c" "${CURL_LIBRARY_DIR}/strcase.c" "${CURL_LIBRARY_DIR}/easy.c" "${CURL_LIBRARY_DIR}/security.c" "${CURL_LIBRARY_DIR}/curl_fnmatch.c"
-    "${CURL_LIBRARY_DIR}/fileinfo.c" "${CURL_LIBRARY_DIR}/wildcard.c" "${CURL_LIBRARY_DIR}/krb5.c" "${CURL_LIBRARY_DIR}/memdebug.c" "${CURL_LIBRARY_DIR}/http_chunks.c"
-    "${CURL_LIBRARY_DIR}/strtok.c" "${CURL_LIBRARY_DIR}/connect.c" "${CURL_LIBRARY_DIR}/llist.c" "${CURL_LIBRARY_DIR}/hash.c" "${CURL_LIBRARY_DIR}/multi.c" "${CURL_LIBRARY_DIR}/content_encoding.c" "${CURL_LIBRARY_DIR}/share.c"
-    "${CURL_LIBRARY_DIR}/http_digest.c" "${CURL_LIBRARY_DIR}/md4.c" "${CURL_LIBRARY_DIR}/md5.c" "${CURL_LIBRARY_DIR}/http_negotiate.c" "${CURL_LIBRARY_DIR}/inet_pton.c" "${CURL_LIBRARY_DIR}/strtoofft.c"
-    "${CURL_LIBRARY_DIR}/strerror.c" "${CURL_LIBRARY_DIR}/amigaos.c" "${CURL_LIBRARY_DIR}/hostasyn.c" "${CURL_LIBRARY_DIR}/hostip4.c" "${CURL_LIBRARY_DIR}/hostip6.c" "${CURL_LIBRARY_DIR}/hostsyn.c"
-    "${CURL_LIBRARY_DIR}/inet_ntop.c" "${CURL_LIBRARY_DIR}/parsedate.c" "${CURL_LIBRARY_DIR}/select.c" "${CURL_LIBRARY_DIR}/splay.c" "${CURL_LIBRARY_DIR}/strdup.c" "${CURL_LIBRARY_DIR}/socks.c"
-    "${CURL_LIBRARY_DIR}/curl_addrinfo.c" "${CURL_LIBRARY_DIR}/socks_gssapi.c" "${CURL_LIBRARY_DIR}/socks_sspi.c"
-    "${CURL_LIBRARY_DIR}/curl_sspi.c" "${CURL_LIBRARY_DIR}/slist.c" "${CURL_LIBRARY_DIR}/nonblock.c" "${CURL_LIBRARY_DIR}/curl_memrchr.c" "${CURL_LIBRARY_DIR}/imap.c" "${CURL_LIBRARY_DIR}/pop3.c" "${CURL_LIBRARY_DIR}/smtp.c"
-    "${CURL_LIBRARY_DIR}/pingpong.c" "${CURL_LIBRARY_DIR}/rtsp.c" "${CURL_LIBRARY_DIR}/curl_threads.c" "${CURL_LIBRARY_DIR}/warnless.c" "${CURL_LIBRARY_DIR}/hmac.c" "${CURL_LIBRARY_DIR}/curl_rtmp.c"
-    "${CURL_LIBRARY_DIR}/openldap.c" "${CURL_LIBRARY_DIR}/curl_gethostname.c" "${CURL_LIBRARY_DIR}/gopher.c" "${CURL_LIBRARY_DIR}/idn_win32.c"
-    "${CURL_LIBRARY_DIR}/http_proxy.c" "${CURL_LIBRARY_DIR}/non-ascii.c" "${CURL_LIBRARY_DIR}/asyn-ares.c" "${CURL_LIBRARY_DIR}/asyn-thread.c" "${CURL_LIBRARY_DIR}/curl_gssapi.c"
-    "${CURL_LIBRARY_DIR}/http_ntlm.c" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.c" "${CURL_LIBRARY_DIR}/curl_ntlm_core.c" "${CURL_LIBRARY_DIR}/curl_sasl.c" "${CURL_LIBRARY_DIR}/rand.c"
-    "${CURL_LIBRARY_DIR}/curl_multibyte.c" "${CURL_LIBRARY_DIR}/hostcheck.c" "${CURL_LIBRARY_DIR}/conncache.c" "${CURL_LIBRARY_DIR}/dotdot.c"
-    "${CURL_LIBRARY_DIR}/x509asn1.c" "${CURL_LIBRARY_DIR}/http2.c" "${CURL_LIBRARY_DIR}/smb.c" "${CURL_LIBRARY_DIR}/curl_endian.c" "${CURL_LIBRARY_DIR}/curl_des.c" "${CURL_LIBRARY_DIR}/system_win32.c"
-    "${CURL_LIBRARY_DIR}/mime.c" "${CURL_LIBRARY_DIR}/sha256.c" "${CURL_LIBRARY_DIR}/setopt.c" "${CURL_LIBRARY_DIR}/curl_path.c" "${CURL_LIBRARY_DIR}/curl_ctype.c" "${CURL_LIBRARY_DIR}/curl_range.c" "${CURL_LIBRARY_DIR}/psl.c"
-    "${CURL_LIBRARY_DIR}/doh.c" "${CURL_LIBRARY_DIR}/urlapi.c" "${CURL_LIBRARY_DIR}/curl_get_line.c" "${CURL_LIBRARY_DIR}/altsvc.c" "${CURL_LIBRARY_DIR}/socketpair.c")
-
-SET(LIB_HFILES "${CURL_LIBRARY_DIR}/arpa_telnet.h" "${CURL_LIBRARY_DIR}/netrc.h" "${CURL_LIBRARY_DIR}/file.h" "${CURL_LIBRARY_DIR}/timeval.h" "${CURL_LIBRARY_DIR}/hostip.h" "${CURL_LIBRARY_DIR}/progress.h"
-    "${CURL_LIBRARY_DIR}/formdata.h" "${CURL_LIBRARY_DIR}/cookie.h" "${CURL_LIBRARY_DIR}/http.h" "${CURL_LIBRARY_DIR}/sendf.h" "${CURL_LIBRARY_DIR}/url.h" "${CURL_LIBRARY_DIR}/dict.h" "${CURL_LIBRARY_DIR}/if2ip.h"
-    "${CURL_LIBRARY_DIR}/speedcheck.h" "${CURL_LIBRARY_DIR}/urldata.h" "${CURL_LIBRARY_DIR}/curl_ldap.h" "${CURL_LIBRARY_DIR}/escape.h" "${CURL_LIBRARY_DIR}/telnet.h" "${CURL_LIBRARY_DIR}/getinfo.h"
-    "${CURL_LIBRARY_DIR}/strcase.h" "${CURL_LIBRARY_DIR}/curl_sec.h" "${CURL_LIBRARY_DIR}/memdebug.h" "${CURL_LIBRARY_DIR}/http_chunks.h" "${CURL_LIBRARY_DIR}/curl_fnmatch.h"
-    "${CURL_LIBRARY_DIR}/wildcard.h" "${CURL_LIBRARY_DIR}/fileinfo.h" "${CURL_LIBRARY_DIR}/strtok.h" "${CURL_LIBRARY_DIR}/connect.h" "${CURL_LIBRARY_DIR}/llist.h"
-    "${CURL_LIBRARY_DIR}/hash.h" "${CURL_LIBRARY_DIR}/content_encoding.h" "${CURL_LIBRARY_DIR}/share.h" "${CURL_LIBRARY_DIR}/curl_md4.h" "${CURL_LIBRARY_DIR}/curl_md5.h" "${CURL_LIBRARY_DIR}/http_digest.h"
-    "${CURL_LIBRARY_DIR}/http_negotiate.h" "${CURL_LIBRARY_DIR}/inet_pton.h" "${CURL_LIBRARY_DIR}/amigaos.h" "${CURL_LIBRARY_DIR}/strtoofft.h" "${CURL_LIBRARY_DIR}/strerror.h"
-    "${CURL_LIBRARY_DIR}/inet_ntop.h" "${CURL_LIBRARY_DIR}/curlx.h" "${CURL_LIBRARY_DIR}/curl_memory.h" "${CURL_LIBRARY_DIR}/curl_setup.h" "${CURL_LIBRARY_DIR}/transfer.h" "${CURL_LIBRARY_DIR}/select.h"
-    "${CURL_LIBRARY_DIR}/easyif.h" "${CURL_LIBRARY_DIR}/multiif.h" "${CURL_LIBRARY_DIR}/parsedate.h" "${CURL_LIBRARY_DIR}/sockaddr.h" "${CURL_LIBRARY_DIR}/splay.h" "${CURL_LIBRARY_DIR}/strdup.h"
-    "${CURL_LIBRARY_DIR}/socks.h" "${CURL_LIBRARY_DIR}/curl_base64.h" "${CURL_LIBRARY_DIR}/curl_addrinfo.h" "${CURL_LIBRARY_DIR}/curl_sspi.h"
-    "${CURL_LIBRARY_DIR}/slist.h" "${CURL_LIBRARY_DIR}/nonblock.h" "${CURL_LIBRARY_DIR}/curl_memrchr.h" "${CURL_LIBRARY_DIR}/imap.h" "${CURL_LIBRARY_DIR}/pop3.h" "${CURL_LIBRARY_DIR}/smtp.h" "${CURL_LIBRARY_DIR}/pingpong.h"
-    "${CURL_LIBRARY_DIR}/rtsp.h" "${CURL_LIBRARY_DIR}/curl_threads.h" "${CURL_LIBRARY_DIR}/warnless.h" "${CURL_LIBRARY_DIR}/curl_hmac.h" "${CURL_LIBRARY_DIR}/curl_rtmp.h"
-    "${CURL_LIBRARY_DIR}/curl_gethostname.h" "${CURL_LIBRARY_DIR}/gopher.h" "${CURL_LIBRARY_DIR}/http_proxy.h" "${CURL_LIBRARY_DIR}/non-ascii.h" "${CURL_LIBRARY_DIR}/asyn.h"
-    "${CURL_LIBRARY_DIR}/http_ntlm.h" "${CURL_LIBRARY_DIR}/curl_gssapi.h" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.h" "${CURL_LIBRARY_DIR}/curl_ntlm_core.h"
-    "${CURL_LIBRARY_DIR}/curl_sasl.h" "${CURL_LIBRARY_DIR}/curl_multibyte.h" "${CURL_LIBRARY_DIR}/hostcheck.h" "${CURL_LIBRARY_DIR}/conncache.h"
-    "${CURL_LIBRARY_DIR}/multihandle.h" "${CURL_LIBRARY_DIR}/setup-vms.h" "${CURL_LIBRARY_DIR}/dotdot.h"
-    "${CURL_LIBRARY_DIR}/x509asn1.h" "${CURL_LIBRARY_DIR}/http2.h" "${CURL_LIBRARY_DIR}/sigpipe.h" "${CURL_LIBRARY_DIR}/smb.h" "${CURL_LIBRARY_DIR}/curl_endian.h" "${CURL_LIBRARY_DIR}/curl_des.h"
-    "${CURL_LIBRARY_DIR}/curl_printf.h" "${CURL_LIBRARY_DIR}/system_win32.h" "${CURL_LIBRARY_DIR}/rand.h" "${CURL_LIBRARY_DIR}/mime.h" "${CURL_LIBRARY_DIR}/curl_sha256.h" "${CURL_LIBRARY_DIR}/setopt.h"
-    "${CURL_LIBRARY_DIR}/curl_path.h" "${CURL_LIBRARY_DIR}/curl_ctype.h" "${CURL_LIBRARY_DIR}/curl_range.h" "${CURL_LIBRARY_DIR}/psl.h" "${CURL_LIBRARY_DIR}/doh.h" "${CURL_LIBRARY_DIR}/urlapi-int.h"
-    "${CURL_LIBRARY_DIR}/curl_get_line.h" "${CURL_LIBRARY_DIR}/altsvc.h" "${CURL_LIBRARY_DIR}/quic.h" "${CURL_LIBRARY_DIR}/socketpair.h")
-
-SET(LIB_RCFILES "${CURL_LIBRARY_DIR}/libcurl.rc")
-
-SET(CSOURCES ${LIB_CFILES} ${LIB_VAUTH_CFILES} ${LIB_VTLS_CFILES}
-    ${LIB_VQUIC_CFILES} ${LIB_VSSH_CFILES})
-SET(HHEADERS ${LIB_HFILES} ${LIB_VAUTH_HFILES} ${LIB_VTLS_HFILES}
-    ${LIB_VQUIC_HFILES} ${LIB_VSSH_HFILES})
-
-configure_file(${CURL_SOURCE_DIR}/lib/curl_config.h.cmake
-    ${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h)
-
-list(APPEND HHEADERS
-    ${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h
-)
-
-add_library(libcurl ${HHEADERS} ${CSOURCES})
+add_library (curl ${SRCS})

-if(NOT BUILD_SHARED_LIBS)
-    set_target_properties(libcurl PROPERTIES INTERFACE_COMPILE_DEFINITIONS CURL_STATICLIB)
-endif()
-
-if(HIDES_CURL_PRIVATE_SYMBOLS)
-    set_property(TARGET libcurl APPEND PROPERTY COMPILE_DEFINITIONS "CURL_HIDDEN_SYMBOLS")
-    set_property(TARGET libcurl APPEND PROPERTY COMPILE_FLAGS ${CURL_CFLAG_SYMBOLS_HIDE})
-endif()
-
-if(OPENSSL_FOUND)
-    target_include_directories(libcurl PUBLIC ${OPENSSL_INCLUDE_DIR})
-    message("-- Including openssl ${OPENSSL_INCLUDE_DIR} to curl")
-endif()
-
-if(ZLIB_FOUND)
-    target_include_directories(libcurl PUBLIC ${ZLIB_INCLUDE_DIRS}})
-    message("-- Including zlib ${ZLIB_INCLUDE_DIRS} to curl")
-endif()
-
-target_compile_definitions(libcurl PUBLIC -DHAVE_CONFIG_H)
-target_compile_definitions(libcurl PUBLIC -DBUILDING_LIBCURL)
-target_include_directories(libcurl PUBLIC "${CURL_SOURCE_DIR}/include" "${CURL_LIBRARY_DIR}" "${CMAKE_CURRENT_BINARY_DIR}/curl")
-
-target_link_libraries(libcurl ${CURL_LIBS})
+target_compile_definitions(curl PRIVATE HAVE_CONFIG_H BUILDING_LIBCURL CURL_HIDDEN_SYMBOLS libcurl_EXPORTS)
+target_include_directories(curl PUBLIC ${CURL_DIR}/include ${CURL_DIR}/lib .)
+target_compile_definitions(curl PRIVATE OS="${CMAKE_SYSTEM_NAME}")
38 contrib/curl-cmake/curl_config.h Normal file
@@ -0,0 +1,38 @@
+#define CURL_DISABLE_FTP
+#define CURL_DISABLE_TFTP
+#define CURL_DISABLE_LDAP
+#define CURL_EXTERN_SYMBOL __attribute__ ((__visibility__ ("default")))
+
+#define SIZEOF_SHORT 2
+#define SIZEOF_INT 4
+#define SIZEOF_LONG 8
+#define SIZEOF_CURL_OFF_T 8
+#define SIZEOF_SIZE_T 8
+
+#define HAVE_FCNTL_O_NONBLOCK
+#define HAVE_LONGLONG
+#define HAVE_POLL_FINE
+#define HAVE_SOCKET
+#define HAVE_STRUCT_TIMEVAL
+
+#define HAVE_RECV
+#define RECV_TYPE_ARG1 int
+#define RECV_TYPE_ARG2 void*
+#define RECV_TYPE_ARG3 size_t
+#define RECV_TYPE_ARG4 int
+#define RECV_TYPE_RETV ssize_t
+
+#define HAVE_SEND
+#define SEND_TYPE_ARG1 int
+#define SEND_TYPE_ARG2 void*
+#define SEND_QUAL_ARG2 const
+#define SEND_TYPE_ARG3 size_t
+#define SEND_TYPE_ARG4 int
+#define SEND_TYPE_RETV ssize_t
+
+#define HAVE_ARPA_INET_H
+#define HAVE_ERRNO_H
+#define HAVE_FCNTL_H
+#define HAVE_NETDB_H
+#define HAVE_SYS_STAT_H
+#define HAVE_UNISTD_H
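A note on the RECV_/SEND_ families in this generated header: they exist because platforms disagree on the precise prototypes of recv() and send() (socket handle type, buffer type and constness, return type). A sketch of how such probed fragments are typically reassembled into a portable wrapper; illustrative only, not curl's actual plumbing:

    #include <sys/socket.h>

    #define RECV_TYPE_ARG1 int
    #define RECV_TYPE_ARG2 void *
    #define RECV_TYPE_ARG3 size_t
    #define RECV_TYPE_ARG4 int
    #define RECV_TYPE_RETV ssize_t

    /* The probed types line up with POSIX recv() on this platform. */
    static RECV_TYPE_RETV portable_recv(RECV_TYPE_ARG1 fd, RECV_TYPE_ARG2 buf,
                                        RECV_TYPE_ARG3 len, RECV_TYPE_ARG4 flags)
    {
        return recv(fd, buf, len, flags);
    }

    int main()
    {
        char buf[16];
        /* recv on an invalid descriptor simply fails; enough to exercise the wrapper. */
        return portable_recv(-1, buf, sizeof(buf), 0) == -1 ? 0 : 1;
    }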
@@ -83,6 +83,8 @@ AggregateFunctionPtr createAggregateFunctionSumMap(const std::string & name, con
     AggregateFunctionPtr res(createWithNumericBasedType<Function>(*keys_type, keys_type, values_types, arguments));
     if (!res)
         res.reset(createWithDecimalType<Function>(*keys_type, keys_type, values_types, arguments));
+    if (!res)
+        res.reset(createWithStringType<Function>(*keys_type, keys_type, values_types, arguments));
     if (!res)
         throw Exception("Illegal type of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
@@ -106,6 +108,8 @@ AggregateFunctionPtr createAggregateFunctionSumMapFiltered(const std::string & n
     AggregateFunctionPtr res(createWithNumericBasedType<Function>(*keys_type, keys_type, values_types, keys_to_keep, arguments, params));
     if (!res)
         res.reset(createWithDecimalType<Function>(*keys_type, keys_type, values_types, keys_to_keep, arguments, params));
+    if (!res)
+        res.reset(createWithStringType<Function>(*keys_type, keys_type, values_types, keys_to_keep, arguments, params));
     if (!res)
         throw Exception("Illegal type of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
@@ -10,6 +10,7 @@
 #include <Columns/ColumnTuple.h>
 #include <Columns/ColumnVector.h>
 #include <Columns/ColumnDecimal.h>
+#include <Columns/ColumnString.h>

 #include <Common/FieldVisitors.h>
 #include <Common/assert_cast.h>
@@ -56,8 +57,6 @@ class AggregateFunctionSumMapBase : public IAggregateFunctionDataHelper<
     AggregateFunctionSumMapData<NearestFieldType<T>>, Derived>
 {
 private:
-    using ColVecType = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;
-
     DataTypePtr keys_type;
     DataTypes values_types;
@@ -84,9 +83,10 @@ public:
     void add(AggregateDataPtr place, const IColumn ** columns, const size_t row_num, Arena *) const override
     {
         // Column 0 contains array of keys of known type
+        Field key_field;
         const ColumnArray & array_column0 = assert_cast<const ColumnArray &>(*columns[0]);
         const IColumn::Offsets & offsets0 = array_column0.getOffsets();
-        const auto & keys_vec = static_cast<const ColVecType &>(array_column0.getData());
+        const IColumn & key_column = array_column0.getData();
         const size_t keys_vec_offset = offsets0[row_num - 1];
         const size_t keys_vec_size = (offsets0[row_num] - keys_vec_offset);
@@ -111,7 +111,8 @@ public:
         using IteratorType = typename MapType::iterator;

         array_column.getData().get(values_vec_offset + i, value);
-        const auto & key = keys_vec.getElement(keys_vec_offset + i);
+        key_column.get(keys_vec_offset + i, key_field);
+        auto && key = key_field.get<T>();

         if (!keepKey(key))
         {
@@ -121,7 +122,7 @@ public:
             IteratorType it;
             if constexpr (IsDecimalNumber<T>)
             {
-                UInt32 scale = keys_vec.getData().getScale();
+                UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getData().getScale();
                 it = merged_maps.find(DecimalField<T>(key, scale));
             }
             else
@@ -139,7 +140,7 @@ public:

             if constexpr (IsDecimalNumber<T>)
             {
-                UInt32 scale = keys_vec.getData().getScale();
+                UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getData().getScale();
                 merged_maps.emplace(DecimalField<T>(key, scale), std::move(new_values));
             }
             else
@@ -149,4 +149,13 @@ static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_ty
     return nullptr;
 }

+template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
+static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
+{
+    WhichDataType which(argument_type);
+    if (which.idx == TypeIndex::String) return new AggregateFunctionTemplate<String>(std::forward<TArgs>(args)...);
+    if (which.idx == TypeIndex::FixedString) return new AggregateFunctionTemplate<String>(std::forward<TArgs>(args)...);
+    return nullptr;
+}
+
 }
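For orientation, createWithStringType follows the same dispatch-by-type-tag idiom as the numeric helpers: a runtime TypeIndex selects the template instantiation, and FixedString keys are folded into the String instantiation. A self-contained miniature of the idiom (all names hypothetical, not ClickHouse's API):

    #include <memory>
    #include <string>

    enum class TypeIndex { Int64, String, FixedString };

    struct IFunc { virtual ~IFunc() = default; };

    template <typename Key>
    struct SumMapFunc : IFunc {};

    // Hypothetical factory: picks a template instantiation from a runtime type tag.
    template <template <typename> class FuncTemplate>
    static IFunc * createWithStringType(TypeIndex idx)
    {
        // FixedString is folded into the String instantiation, as in the hunk above.
        if (idx == TypeIndex::String || idx == TypeIndex::FixedString)
            return new FuncTemplate<std::string>();
        return nullptr; // the caller falls through to the next create* helper
    }

    int main()
    {
        std::unique_ptr<IFunc> f(createWithStringType<SumMapFunc>(TypeIndex::FixedString));
        return f ? 0 : 1;
    }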
@@ -1,28 +0,0 @@
-#include <DataStreams/AddingMissedBlockInputStream.h>
-#include <Interpreters/addMissingDefaults.h>
-
-
-namespace DB
-{
-
-AddingMissedBlockInputStream::AddingMissedBlockInputStream(
-    const BlockInputStreamPtr & input_,
-    const Block & header_,
-    const ColumnDefaults & column_defaults_,
-    const Context & context_)
-    : input(input_), header(header_),
-      column_defaults(column_defaults_), context(context_)
-{
-    children.emplace_back(input);
-}
-
-Block AddingMissedBlockInputStream::readImpl()
-{
-    Block src = children.back()->read();
-    if (!src)
-        return src;
-
-    return addMissingDefaults(src, header.getNamesAndTypesList(), column_defaults, context);
-}
-
-}
@@ -247,10 +247,11 @@ struct LLVMContext
 };


-template <typename... Ts, typename F>
-static bool castToEither(IColumn * column, F && f)
+template <typename... Ts>
+static bool castToEitherWithNullable(IColumn * column)
 {
-    return ((typeid_cast<Ts *>(column) ? f(*typeid_cast<Ts *>(column)) : false) || ...);
+    return ((typeid_cast<Ts *>(column)
+        || (typeid_cast<ColumnNullable *>(column) && typeid_cast<Ts *>(&(typeid_cast<ColumnNullable *>(column)->getNestedColumn())))) || ...);
 }

 class LLVMExecutableFunction : public IExecutableFunctionImpl
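castToEitherWithNullable is a C++17 fold expression over the candidate column types; the new version also peeks through ColumnNullable to its nested column instead of applying a callback. A self-contained sketch of the same fold-over-cast idiom, using plain dynamic_cast in place of ClickHouse's typeid_cast:

    #include <iostream>

    struct IColumn { virtual ~IColumn() = default; };
    struct ColumnInt64 : IColumn {};
    struct ColumnFloat64 : IColumn {};
    struct ColumnNullable : IColumn
    {
        IColumn * nested;
        explicit ColumnNullable(IColumn * nested_) : nested(nested_) {}
        IColumn & getNestedColumn() { return *nested; }
    };

    // Fold expression: true if the column is one of Ts, directly or wrapped in Nullable.
    template <typename... Ts>
    static bool castToEitherWithNullable(IColumn * column)
    {
        return ((dynamic_cast<Ts *>(column)
            || (dynamic_cast<ColumnNullable *>(column)
                && dynamic_cast<Ts *>(&dynamic_cast<ColumnNullable *>(column)->getNestedColumn()))) || ...);
    }

    int main()
    {
        ColumnInt64 nested;
        ColumnNullable col(&nested);
        std::cout << castToEitherWithNullable<ColumnInt64, ColumnFloat64>(&col) << '\n'; // prints 1
    }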
@@ -280,12 +281,12 @@ public:

         if (block_size)
         {
-            if (!castToEither<
+            if (!castToEitherWithNullable<
                 ColumnUInt8, ColumnUInt16, ColumnUInt32, ColumnUInt64,
                 ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64,
-                ColumnFloat32, ColumnFloat64>(col_res.get(), [block_size](auto & col) { col.getData().resize(block_size); return true; }))
+                ColumnFloat32, ColumnFloat64>(col_res.get()))
                 throw Exception("Unexpected column in LLVMExecutableFunction: " + col_res->getName(), ErrorCodes::LOGICAL_ERROR);
+            col_res = col_res->cloneResized(block_size);
             std::vector<ColumnData> columns(arguments.size() + 1);
             for (size_t i = 0; i < arguments.size(); ++i)
             {
@@ -95,6 +95,7 @@
 #include <DataTypes/DataTypeAggregateFunction.h>
 #include <DataStreams/materializeBlock.h>
 #include <Processors/Pipe.h>
+#include <Processors/Executors/TreeExecutorBlockInputStream.h>


 namespace DB
@@ -164,7 +165,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     const Context & context_,
     const SelectQueryOptions & options_,
     const Names & required_result_column_names_)
-    : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options_, required_result_column_names_)
+    : InterpreterSelectQuery(query_ptr_, context_, nullptr, std::nullopt, nullptr, options_, required_result_column_names_)
 {
 }
@@ -173,7 +174,15 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     const Context & context_,
     const BlockInputStreamPtr & input_,
     const SelectQueryOptions & options_)
-    : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options_.copy().noSubquery())
+    : InterpreterSelectQuery(query_ptr_, context_, input_, std::nullopt, nullptr, options_.copy().noSubquery())
+{}
+
+InterpreterSelectQuery::InterpreterSelectQuery(
+    const ASTPtr & query_ptr_,
+    const Context & context_,
+    Pipe input_pipe_,
+    const SelectQueryOptions & options_)
+    : InterpreterSelectQuery(query_ptr_, context_, nullptr, std::move(input_pipe_), nullptr, options_.copy().noSubquery())
 {}

 InterpreterSelectQuery::InterpreterSelectQuery(
|
|||||||
const Context & context_,
|
const Context & context_,
|
||||||
const StoragePtr & storage_,
|
const StoragePtr & storage_,
|
||||||
const SelectQueryOptions & options_)
|
const SelectQueryOptions & options_)
|
||||||
: InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options_.copy().noSubquery())
|
: InterpreterSelectQuery(query_ptr_, context_, nullptr, std::nullopt, storage_, options_.copy().noSubquery())
|
||||||
{}
|
{}
|
||||||
|
|
||||||
InterpreterSelectQuery::~InterpreterSelectQuery() = default;
|
InterpreterSelectQuery::~InterpreterSelectQuery() = default;
|
||||||
@@ -217,6 +226,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     const ASTPtr & query_ptr_,
     const Context & context_,
     const BlockInputStreamPtr & input_,
+    std::optional<Pipe> input_pipe_,
     const StoragePtr & storage_,
     const SelectQueryOptions & options_,
     const Names & required_result_column_names)
@@ -226,6 +236,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     , context(std::make_shared<Context>(context_))
     , storage(storage_)
     , input(input_)
+    , input_pipe(std::move(input_pipe_))
     , log(&Logger::get("InterpreterSelectQuery"))
 {
     checkStackSize();
@@ -261,6 +272,11 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         /// Read from prepared input.
        source_header = input->getHeader();
     }
+    else if (input_pipe)
+    {
+        /// Read from prepared input.
+        source_header = input_pipe->getHeader();
+    }
     else if (is_subquery)
     {
         /// Read from subquery.
@@ -326,14 +342,14 @@ InterpreterSelectQuery::InterpreterSelectQuery(

     if (!options.only_analyze)
     {
-        if (query.sample_size() && (input || !storage || !storage->supportsSampling()))
+        if (query.sample_size() && (input || input_pipe || !storage || !storage->supportsSampling()))
             throw Exception("Illegal SAMPLE: table doesn't support sampling", ErrorCodes::SAMPLING_NOT_SUPPORTED);

-        if (query.final() && (input || !storage || !storage->supportsFinal()))
-            throw Exception((!input && storage) ? "Storage " + storage->getName() + " doesn't support FINAL" : "Illegal FINAL", ErrorCodes::ILLEGAL_FINAL);
+        if (query.final() && (input || input_pipe || !storage || !storage->supportsFinal()))
+            throw Exception((!input && !input_pipe && storage) ? "Storage " + storage->getName() + " doesn't support FINAL" : "Illegal FINAL", ErrorCodes::ILLEGAL_FINAL);

-        if (query.prewhere() && (input || !storage || !storage->supportsPrewhere()))
-            throw Exception((!input && storage) ? "Storage " + storage->getName() + " doesn't support PREWHERE" : "Illegal PREWHERE", ErrorCodes::ILLEGAL_PREWHERE);
+        if (query.prewhere() && (input || input_pipe || !storage || !storage->supportsPrewhere()))
+            throw Exception((!input && !input_pipe && storage) ? "Storage " + storage->getName() + " doesn't support PREWHERE" : "Illegal PREWHERE", ErrorCodes::ILLEGAL_PREWHERE);

         /// Save the new temporary tables in the query context
         for (const auto & it : query_analyzer->getExternalTables())
@@ -464,7 +480,7 @@ BlockIO InterpreterSelectQuery::execute()
 {
     Pipeline pipeline;
     BlockIO res;
-    executeImpl(pipeline, input, res.pipeline);
+    executeImpl(pipeline, input, std::move(input_pipe), res.pipeline);
     executeUnion(pipeline, getSampleBlock());

     res.in = pipeline.firstStream();
@@ -477,7 +493,7 @@ BlockInputStreams InterpreterSelectQuery::executeWithMultipleStreams(QueryPipeli
 {
     ///FIXME pipeline must be alive until query is finished
     Pipeline pipeline;
-    executeImpl(pipeline, input, parent_pipeline);
+    executeImpl(pipeline, input, std::move(input_pipe), parent_pipeline);
     unifyStreams(pipeline, getSampleBlock());
     parent_pipeline.addInterpreterContext(context);
     parent_pipeline.addStorageHolder(storage);
@@ -487,7 +503,7 @@ BlockInputStreams InterpreterSelectQuery::executeWithMultipleStreams(QueryPipeli
 QueryPipeline InterpreterSelectQuery::executeWithProcessors()
 {
     QueryPipeline query_pipeline;
-    executeImpl(query_pipeline, input, query_pipeline);
+    executeImpl(query_pipeline, input, std::move(input_pipe), query_pipeline);
     query_pipeline.setMaxThreads(max_streams);
     query_pipeline.addInterpreterContext(context);
     query_pipeline.addStorageHolder(storage);
|
|||||||
|
|
||||||
|
|
||||||
template <typename TPipeline>
|
template <typename TPipeline>
|
||||||
void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputStreamPtr & prepared_input, QueryPipeline & save_context_and_storage)
|
void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputStreamPtr & prepared_input, std::optional<Pipe> prepared_pipe, QueryPipeline & save_context_and_storage)
|
||||||
{
|
{
|
||||||
/** Streams of data. When the query is executed in parallel, we have several data streams.
|
/** Streams of data. When the query is executed in parallel, we have several data streams.
|
||||||
* If there is no GROUP BY, then perform all operations before ORDER BY and LIMIT in parallel, then
|
* If there is no GROUP BY, then perform all operations before ORDER BY and LIMIT in parallel, then
|
||||||
@@ -1024,6 +1040,13 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
         else
             pipeline.streams.push_back(prepared_input);
     }
+    else if (prepared_pipe)
+    {
+        if constexpr (pipeline_with_processors)
+            pipeline.init(std::move(*prepared_pipe));
+        else
+            pipeline.streams.push_back(std::make_shared<TreeExecutorBlockInputStream>(std::move(*prepared_pipe)));
+    }

     if (from_stage == QueryProcessingStage::WithMergeableState &&
         options.to_stage == QueryProcessingStage::WithMergeableState)
@@ -58,6 +58,13 @@ public:
         const BlockInputStreamPtr & input_,
         const SelectQueryOptions & = {});

+    /// Read data not from the table specified in the query, but from the prepared pipe `input`.
+    InterpreterSelectQuery(
+        const ASTPtr & query_ptr_,
+        const Context & context_,
+        Pipe input_pipe_,
+        const SelectQueryOptions & = {});
+
     /// Read data not from the table specified in the query, but from the specified `storage_`.
     InterpreterSelectQuery(
         const ASTPtr & query_ptr_,
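The new overload threads an optional prepared pipe through a single delegating constructor instead of multiplying constructor bodies. A self-contained miniature of that delegation pattern follows; every name in it is hypothetical, it only mirrors the shape of the change:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>

    struct Pipe { std::string header; };

    class Interpreter
    {
    public:
        // Public overloads delegate to one private constructor, passing std::nullopt
        // where no prepared pipe is supplied, mirroring the patch above.
        explicit Interpreter(const std::string & query) : Interpreter(query, std::nullopt) {}
        Interpreter(const std::string & query, Pipe pipe) : Interpreter(query, std::optional<Pipe>(std::move(pipe))) {}

        const std::string & sourceHeader() const { return source_header; }

    private:
        Interpreter(const std::string & query, std::optional<Pipe> pipe_)
            : input_pipe(std::move(pipe_))
        {
            source_header = input_pipe ? input_pipe->header : "table:" + query;
        }

        std::optional<Pipe> input_pipe;
        std::string source_header;
    };

    int main()
    {
        std::cout << Interpreter("SELECT 1").sourceHeader() << '\n';               // table:SELECT 1
        std::cout << Interpreter("q", Pipe{"pipe-header"}).sourceHeader() << '\n'; // pipe-header
    }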
@@ -90,6 +97,7 @@ private:
         const ASTPtr & query_ptr_,
         const Context & context_,
         const BlockInputStreamPtr & input_,
+        std::optional<Pipe> input_pipe,
         const StoragePtr & storage_,
         const SelectQueryOptions &,
         const Names & required_result_column_names = {});
@@ -142,7 +150,7 @@ private:
     };

     template <typename TPipeline>
-    void executeImpl(TPipeline & pipeline, const BlockInputStreamPtr & prepared_input, QueryPipeline & save_context_and_storage);
+    void executeImpl(TPipeline & pipeline, const BlockInputStreamPtr & prepared_input, std::optional<Pipe> prepared_pipe, QueryPipeline & save_context_and_storage);

     struct AnalysisResult
     {
@@ -301,6 +309,7 @@ private:

     /// Used when we read from prepared input, not table or subquery.
     BlockInputStreamPtr input;
+    std::optional<Pipe> input_pipe;

     Poco::Logger * log;
 };
@@ -481,7 +481,7 @@ MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<K
   * 1: the intersection of the set and the range is non-empty
   * 2: the range contains elements not in the set
   */
-BoolMask MergeTreeSetIndex::mayBeTrueInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types)
+BoolMask MergeTreeSetIndex::checkInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types)
 {
     size_t tuple_size = indexes_mapping.size();
@@ -181,7 +181,7 @@ using Sets = std::vector<SetPtr>;
 class IFunction;
 using FunctionPtr = std::shared_ptr<IFunction>;

-/// Class for mayBeTrueInRange function.
+/// Class for checkInRange function.
 class MergeTreeSetIndex
 {
 public:
@@ -199,7 +199,7 @@ public:

     size_t size() const { return ordered_set.at(0)->size(); }

-    BoolMask mayBeTrueInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types);
+    BoolMask checkInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types);

 private:
     Columns ordered_set;
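The BoolMask returned by checkInRange answers two independent questions used for index pruning: can the condition "key in set" be true somewhere in the range, and can it be false. A self-contained miniature over a sorted integer set (an illustration of the contract only, not the real implementation, which works over tuples of key ranges):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct BoolMask { bool can_be_true; bool can_be_false; };

    // Miniature of the checkInRange idea for a sorted set and a closed range [left, right].
    static BoolMask checkInRange(const std::vector<int> & ordered_set, int left, int right)
    {
        auto lo = std::lower_bound(ordered_set.begin(), ordered_set.end(), left);
        bool intersects = lo != ordered_set.end() && *lo <= right;            // case 1
        bool has_outsiders = static_cast<long>(right) - left + 1 >            // case 2
            std::distance(lo, std::upper_bound(lo, ordered_set.end(), right));
        return {intersects, has_outsiders};
    }

    int main()
    {
        std::vector<int> set{2, 5, 9};
        BoolMask m = checkInRange(set, 3, 5);
        std::cout << m.can_be_true << ' ' << m.can_be_false << '\n'; // 1 1: [3,5] hits 5, misses 3 and 4
    }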
@@ -18,8 +18,18 @@ public:
     /// * processors form a tree
     /// * all processors are attainable from root
     /// * there is no other connected processors
-    explicit TreeExecutorBlockInputStream(Pipe pipe) : output_port(pipe.getPort()), processors(std::move(pipe).detachProcessors())
+    explicit TreeExecutorBlockInputStream(Pipe pipe) : output_port(pipe.getPort())
     {
+        for (auto & table_lock : pipe.getTableLocks())
+            addTableLock(table_lock);
+
+        for (auto & storage : pipe.getStorageHolders())
+            storage_holders.emplace_back(storage);
+
+        for (auto & context : pipe.getContexts())
+            interpreter_context.emplace_back(context);
+
+        processors = std::move(pipe).detachProcessors();
         init();
     }
@@ -51,6 +61,10 @@ private:
     void init();
     /// Execute tree step-by-step until root returns next chunk or execution is finished.
     void execute();
+
+    /// Moved from pipe.
+    std::vector<std::shared_ptr<Context>> interpreter_context;
+    std::vector<StoragePtr> storage_holders;
 };

 }
@@ -216,6 +216,18 @@ void JSONEachRowRowInputFormat::readNestedData(const String & name, MutableColum

 bool JSONEachRowRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & ext)
 {
+    /// Set flag data_in_square_brackets if data starts with '['.
+    if (!in.eof() && parsing_stage == ParsingStage::START)
+    {
+        parsing_stage = ParsingStage::PROCESS;
+        skipWhitespaceIfAny(in);
+        if (*in.position() == '[')
+        {
+            data_in_square_brackets = true;
+            ++in.position();
+        }
+    }
+
     skipWhitespaceIfAny(in);

     /// We consume ;, or \n before scanning a new row, instead scanning to next row at the end.
@@ -227,9 +239,23 @@ bool JSONEachRowRowInputFormat::readRow(MutableColumns & columns, RowReadExtensi
     if (!in.eof() && (*in.position() == ',' || *in.position() == ';'))
         ++in.position();

+    /// Finish reading rows if data is in square brackets and ']' received.
     skipWhitespaceIfAny(in);
-    if (in.eof())
+    if (!in.eof() && *in.position() == ']' && data_in_square_brackets)
+    {
+        data_in_square_brackets = false;
+        parsing_stage = ParsingStage::FINISH;
+        ++in.position();
         return false;
+    }
+
+    skipWhitespaceIfAny(in);
+    if (in.eof() || parsing_stage == ParsingStage::FINISH)
+    {
+        if (data_in_square_brackets)
+            throw Exception("Unexpected end of data: received end of stream instead of ']'.", ErrorCodes::INCORRECT_DATA);
+        return false;
+    }

     size_t num_columns = columns.size();
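The effect of the readRow() hunks above: besides the usual newline-delimited objects, the format now accepts one whole JSON array and stops cleanly at its closing bracket. Both sample payloads below (illustrative data, not taken from the patch) parse to the same two rows:

    {"key": "a", "value": 1}
    {"key": "b", "value": 2}

    [{"key": "a", "value": 1}, {"key": "b", "value": 2}]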
@@ -39,8 +39,6 @@ private:
     void readJSONObject(MutableColumns & columns);
     void readNestedData(const String & name, MutableColumns & columns);

-private:
-
     const FormatSettings format_settings;

     /// Buffer for the read from the stream field name. Used when you have to copy it.
@@ -69,6 +67,19 @@ private:

     /// Cached search results for previous row (keyed as index in JSON object) - used as a hint.
     std::vector<NameMap::LookupResult> prev_positions;
+
+    /// This flag is needed to know if data is in square brackets.
+    bool data_in_square_brackets = false;
+
+    /// This is needed to know the stage of parsing.
+    enum class ParsingStage
+    {
+        START,
+        PROCESS,
+        FINISH
+    };
+
+    ParsingStage parsing_stage = ParsingStage::START;
 };

 }
@@ -61,6 +61,11 @@ Pipe::Pipe(ProcessorPtr source)
     processors.emplace_back(std::move(source));
 }

+Pipe::Pipe(Processors processors_, OutputPort * output_port_, OutputPort * totals_)
+    : processors(std::move(processors_)), output_port(output_port_), totals(totals_)
+{
+}
+
 Pipe::Pipe(Pipes && pipes, ProcessorPtr transform)
 {
     checkSingleOutput(*transform);
@@ -8,6 +8,9 @@ namespace DB
 class Pipe;
 using Pipes = std::vector<Pipe>;

+class IStorage;
+using StoragePtr = std::shared_ptr<IStorage>;
+
 /// Pipe is a set of processors which represents the part of pipeline with single output.
 /// All processors in pipe are connected. All ports are connected except the output one.
 class Pipe
@@ -47,10 +50,38 @@ public:
     void setTotalsPort(OutputPort * totals_) { totals = totals_; }
     OutputPort * getTotalsPort() const { return totals; }

+    /// Do not allow to change the table while the processors of pipe are alive.
+    /// TODO: move it to pipeline.
+    void addTableLock(const TableStructureReadLockHolder & lock) { table_locks.push_back(lock); }
+    /// These methods are from QueryPipeline. Needed to make conversion from pipeline to pipe possible.
+    void addInterpreterContext(std::shared_ptr<Context> context) { interpreter_context.emplace_back(std::move(context)); }
+    void addStorageHolder(StoragePtr storage) { storage_holders.emplace_back(std::move(storage)); }
+
+    const std::vector<TableStructureReadLockHolder> & getTableLocks() const { return table_locks; }
+    const std::vector<std::shared_ptr<Context>> & getContexts() const { return interpreter_context; }
+    const std::vector<StoragePtr> & getStorageHolders() const { return storage_holders; }
+
 private:
     Processors processors;
     OutputPort * output_port = nullptr;
     OutputPort * totals = nullptr;

+    std::vector<TableStructureReadLockHolder> table_locks;
+
+    /// Some processors may implicitly use Context or temporary Storage created by Interpreter.
+    /// But lifetime of Streams is not nested in lifetime of Interpreters, so we have to store it here,
+    /// because QueryPipeline is alive until query is finished.
+    std::vector<std::shared_ptr<Context>> interpreter_context;
+    std::vector<StoragePtr> storage_holders;
+
+    /// This private constructor is used only from QueryPipeline.
+    /// It is not public, because QueryPipeline checks that processors are connected and have single output,
+    /// and therefore we can skip those checks.
+    /// Note that Pipe represents a tree if it was created using public interface. But this constructor can't assert it.
+    /// So, it's possible that TreeExecutorBlockInputStream could be unable to convert such Pipe to IBlockInputStream.
+    explicit Pipe(Processors processors_, OutputPort * output_port, OutputPort * totals);
+
+    friend class QueryPipeline;
 };

 }
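The interpreter_context and storage_holders vectors are pure lifetime pins: copying a shared_ptr into the pipe keeps the interpreter's Context or a temporary Storage alive for as long as the processors might touch it. A self-contained miniature of the idiom, with all names hypothetical:

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Context { ~Context() { std::cout << "Context destroyed\n"; } };

    struct Pipe
    {
        // Copying the shared_ptr pins the Context until the pipe itself dies.
        void addInterpreterContext(std::shared_ptr<Context> ctx) { holders.emplace_back(std::move(ctx)); }
        std::vector<std::shared_ptr<Context>> holders;
    };

    int main()
    {
        Pipe pipe;
        {
            auto ctx = std::make_shared<Context>();
            pipe.addInterpreterContext(ctx);
        }   // interpreter scope ends here, but the Context survives...
        std::cout << "pipe still alive\n";
    }   // ...until the pipe is destroyed: "Context destroyed" prints last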
@@ -63,6 +63,19 @@ void QueryPipeline::init(Pipes pipes)
     if (pipes.empty())
         throw Exception("Can't initialize pipeline with empty pipes list.", ErrorCodes::LOGICAL_ERROR);

+    /// Move locks from pipes to pipeline class.
+    for (auto & pipe : pipes)
+    {
+        for (auto & lock : pipe.getTableLocks())
+            table_locks.emplace_back(lock);
+
+        for (auto & context : pipe.getContexts())
+            interpreter_context.emplace_back(context);
+
+        for (auto & storage : pipe.getStorageHolders())
+            storage_holders.emplace_back(storage);
+    }
+
     std::vector<OutputPort *> totals;

     for (auto & pipe : pipes)
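
Editor's note: a standalone sketch of why init() copies the holders out of every pipe. The pipeline typically outlives both the pipes and the interpreters that created them, so shared ownership must travel with it. Context, Storage, PipeLike and Pipeline below are stand-in types for illustration, not the real ClickHouse classes.

#include <memory>
#include <vector>

struct Context {};
struct Storage {};

struct PipeLike
{
    std::shared_ptr<Context> context = std::make_shared<Context>();
    std::shared_ptr<Storage> storage = std::make_shared<Storage>();
};

struct Pipeline
{
    std::vector<std::shared_ptr<Context>> interpreter_context;
    std::vector<std::shared_ptr<Storage>> storage_holders;
};

void init(Pipeline & pipeline, std::vector<PipeLike> pipes)
{
    for (auto & pipe : pipes)
    {
        // keep the Context / Storage alive for as long as the pipeline runs
        pipeline.interpreter_context.emplace_back(pipe.context);
        pipeline.storage_holders.emplace_back(pipe.storage);
    }
    // `pipes` is destroyed here, but the shared_ptrs copied above
    // keep the referenced objects alive together with the pipeline.
}
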
@@ -477,7 +490,7 @@ void QueryPipeline::unitePipelines(

     table_locks.insert(table_locks.end(), std::make_move_iterator(pipeline.table_locks.begin()), std::make_move_iterator(pipeline.table_locks.end()));
     interpreter_context.insert(interpreter_context.end(), pipeline.interpreter_context.begin(), pipeline.interpreter_context.end());
-    storage_holder.insert(storage_holder.end(), pipeline.storage_holder.begin(), pipeline.storage_holder.end());
+    storage_holders.insert(storage_holders.end(), pipeline.storage_holders.begin(), pipeline.storage_holders.end());

     max_threads = std::max(max_threads, pipeline.max_threads);
 }
@@ -625,6 +638,23 @@ void QueryPipeline::calcRowsBeforeLimit()
     output_format->setRowsBeforeLimit(has_partial_sorting ? rows_before_limit : rows_before_limit_at_least);
 }

+Pipe QueryPipeline::getPipe() &&
+{
+    resize(1);
+    Pipe pipe(std::move(processors), streams.at(0), totals_having_port);
+
+    for (auto & lock : table_locks)
+        pipe.addTableLock(lock);
+
+    for (auto & context : interpreter_context)
+        pipe.addInterpreterContext(context);
+
+    for (auto & storage : storage_holders)
+        pipe.addStorageHolder(storage);
+
+    return pipe;
+}
+
 PipelineExecutorPtr QueryPipeline::execute()
 {
     checkInitialized();
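
Editor's note: getPipe() is declared with an && ref-qualifier, so it can only be called on an rvalue; converting a pipeline into a pipe consumes it. A minimal sketch of that idiom with toy types (QueryPipelineLike is hypothetical, not the real class):

#include <utility>

struct Pipe {};

struct QueryPipelineLike
{
    Pipe getPipe() && { return Pipe{}; }       // callable on rvalues only
};

int main()
{
    QueryPipelineLike pipeline;
    // Pipe p = pipeline.getPipe();            // would not compile: pipeline is an lvalue
    Pipe p = std::move(pipeline).getPipe();    // ok: the caller opts into the consumption
    (void)p;
}
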
@@ -81,7 +81,7 @@ public:

     void addTableLock(const TableStructureReadLockHolder & lock) { table_locks.push_back(lock); }
     void addInterpreterContext(std::shared_ptr<Context> context) { interpreter_context.emplace_back(std::move(context)); }
-    void addStorageHolder(StoragePtr storage) { storage_holder.emplace_back(std::move(storage)); }
+    void addStorageHolder(StoragePtr storage) { storage_holders.emplace_back(std::move(storage)); }

     /// For compatibility with IBlockInputStream.
     void setProgressCallback(const ProgressCallback & callback);
@@ -93,6 +93,9 @@ public:
     void setMaxThreads(size_t max_threads_) { max_threads = max_threads_; }
     size_t getMaxThreads() const { return max_threads; }

+    /// Convert query pipeline to single pipe.
+    Pipe getPipe() &&;
+
 private:

     /// All added processors.
@@ -117,7 +120,7 @@ private:
     /// But lifetime of Streams is not nested in lifetime of Interpreters, so we have to store it here,
     /// because QueryPipeline is alive until query is finished.
     std::vector<std::shared_ptr<Context>> interpreter_context;
-    std::vector<StoragePtr> storage_holder;
+    std::vector<StoragePtr> storage_holders;

     IOutputFormat * output_format = nullptr;

@@ -1,14 +1,14 @@
 #pragma once
-#include <Processors/ISource.h>
+#include <Processors/Sources/SourceWithProgress.h>


 namespace DB
 {

-class SourceFromSingleChunk : public ISource
+class SourceFromSingleChunk : public SourceWithProgress
 {
 public:
-    explicit SourceFromSingleChunk(Block header, Chunk chunk_) : ISource(std::move(header)), chunk(std::move(chunk_)) {}
+    explicit SourceFromSingleChunk(Block header, Chunk chunk_) : SourceWithProgress(std::move(header)), chunk(std::move(chunk_)) {}
     String getName() const override { return "SourceFromSingleChunk"; }

 protected:
@@ -18,13 +18,22 @@ void SourceWithProgress::work()
     if (!limits.speed_limits.checkTimeLimit(total_stopwatch.elapsed(), limits.timeout_overflow_mode))
         cancel();
     else
+    {
+        was_progress_called = false;
+
         ISourceWithProgress::work();
+
+        if (!was_progress_called && has_input)
+            progress({ current_chunk.chunk.getNumRows(), current_chunk.chunk.bytes() });
+    }
 }

 /// Aggregated copy-paste from IBlockInputStream::progressImpl.
 /// Most of this must be done in PipelineExecutor outside. Now it's done for compatibility with IBlockInputStream.
 void SourceWithProgress::progress(const Progress & value)
 {
+    was_progress_called = true;
+
     if (total_rows_approx != 0)
     {
         Progress total_rows_progress = {0, 0, total_rows_approx};
@@ -72,6 +72,10 @@ private:
     Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; /// Time with waiting time.
     /// According to total_stopwatch in microseconds.
     UInt64 last_profile_events_update_time = 0;
+
+    /// This flag indicates whether progress() was manually called during the generate() call.
+    /// If not, it will be called for the chunk after generate() has finished.
+    bool was_progress_called = false;
 };

 }
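
Editor's note: the was_progress_called flag implements a fallback. If generate() reported progress itself, work() adds nothing; otherwise work() reports default rows/bytes for the produced chunk. A compilable sketch of the pattern with hypothetical names (ProgressFallback and ChunkStats are not part of the real interface):

#include <cstddef>

struct ChunkStats { std::size_t rows = 0; std::size_t bytes = 0; };

struct ProgressFallback
{
    bool was_progress_called = false;

    void progress(std::size_t /*rows*/, std::size_t /*bytes*/)
    {
        was_progress_called = true;
        // ... a real implementation would account rows/bytes against quotas and limits ...
    }

    template <typename Work>
    void workWithFallback(Work && work, const ChunkStats & produced)
    {
        was_progress_called = false;
        work();                                       // may or may not call progress() itself
        if (!was_progress_called)
            progress(produced.rows, produced.bytes);  // default accounting for the produced chunk
    }
};
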
dbms/src/Processors/Transforms/AddingMissedTransform.cpp (new file, 27 lines)
@@ -0,0 +1,27 @@
+#include <Processors/Transforms/AddingMissedTransform.h>
+#include <Interpreters/addMissingDefaults.h>
+
+
+namespace DB
+{
+
+AddingMissedTransform::AddingMissedTransform(
+    Block header_,
+    Block result_header_,
+    const ColumnDefaults & column_defaults_,
+    const Context & context_)
+    : ISimpleTransform(std::move(header_), std::move(result_header_), false)
+    , column_defaults(column_defaults_), context(context_)
+{
+}
+
+void AddingMissedTransform::transform(Chunk & chunk)
+{
+    auto num_rows = chunk.getNumRows();
+    Block src = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns());
+
+    auto res = addMissingDefaults(src, getOutputPort().getHeader().getNamesAndTypesList(), column_defaults, context);
+    chunk.setColumns(res.getColumns(), num_rows);
+}
+
+}
@@ -1,6 +1,6 @@
 #pragma once

-#include <DataStreams/IBlockInputStream.h>
+#include <Processors/ISimpleTransform.h>
 #include <Storages/ColumnDefault.h>


@@ -14,24 +14,20 @@ namespace DB
   * 3. Columns that are materialized from other columns (materialized columns)
   * All three types of columns are materialized (not constants).
   */
-class AddingMissedBlockInputStream : public IBlockInputStream
+class AddingMissedTransform : public ISimpleTransform
 {
 public:
-    AddingMissedBlockInputStream(
-        const BlockInputStreamPtr & input_,
-        const Block & header_,
+    AddingMissedTransform(
+        Block header_,
+        Block result_header_,
         const ColumnDefaults & column_defaults_,
         const Context & context_);

     String getName() const override { return "AddingMissed"; }
-    Block getHeader() const override { return header; }

 private:
-    Block readImpl() override;
+    void transform(Chunk &) override;

-    BlockInputStreamPtr input;
-    /// Blocks after this stream should have this structure
-    const Block header;
     const ColumnDefaults column_defaults;
     const Context & context;
 };
@@ -52,16 +52,21 @@ void KafkaBlockInputStream::readPrefixImpl()
     if (!buffer)
         return;

-    buffer->subscribe(storage.getTopics());
+    buffer->subscribe();

     broken = true;
 }

 Block KafkaBlockInputStream::readImpl()
 {
-    if (!buffer)
+    if (!buffer || finished)
         return Block();

+    finished = true;
+    // now it's a one-time-usage InputStream:
+    // one block of the needed size (or with the desired flush timeout) is formed in one internal iteration,
+    // otherwise external iteration would reuse it and the logic would become even more fuzzy.

     MutableColumns result_columns = non_virtual_header.cloneEmptyColumns();
     MutableColumns virtual_columns = virtual_header.cloneEmptyColumns();

@@ -126,6 +131,8 @@ Block KafkaBlockInputStream::readImpl()

         auto new_rows = read_kafka_message();

+        buffer->storeLastReadMessageOffset();
+
         auto _topic = buffer->currentTopic();
         auto _key = buffer->currentKey();
         auto _offset = buffer->currentOffset();
@@ -151,11 +158,18 @@ Block KafkaBlockInputStream::readImpl()

         total_rows = total_rows + new_rows;
         buffer->allowNext();
-        if (!new_rows || total_rows >= max_block_size || !checkTimeLimit())
+
+        if (buffer->hasMorePolledMessages())
+        {
+            continue;
+        }
+
+        if (total_rows >= max_block_size || !checkTimeLimit())
         {
             break;
         }
+    }

-    if (total_rows == 0)
+    if (buffer->rebalanceHappened() || total_rows == 0)
         return Block();

     /// MATERIALIZED columns can be added here, but I think
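
Editor's note: a toy model of the loop control above, under the assumption (stated in the diff comments) that a polled batch should never be split across blocks: already-polled messages are drained first, and only then are the block-size and time limits checked. ToyBuffer and readBlock are illustrative names only.

#include <cstddef>

struct ToyBuffer
{
    std::size_t polled_left = 3;               // messages already fetched from the broker
    bool hasMorePolledMessages() const { return polled_left > 0; }
    bool readOne() { if (polled_left == 0) return false; --polled_left; return true; }
};

std::size_t readBlock(ToyBuffer & buffer, std::size_t max_block_size)
{
    std::size_t total_rows = 0;
    while (buffer.readOne())
    {
        ++total_rows;                          // stands in for parsing one message into rows

        if (buffer.hasMorePolledMessages())
            continue;                          // finish the already-polled batch first

        if (total_rows >= max_block_size)      // only then apply the usual limits
            break;                             // (the time limit check is elided here)
    }
    return total_rows;
}
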
@@ -33,7 +33,8 @@ private:
     UInt64 max_block_size;

     ConsumerBufferPtr buffer;
-    bool broken = true, claimed = false, commit_in_suffix;
+    bool broken = true, finished = false, claimed = false, commit_in_suffix;

     const Block non_virtual_header, virtual_header;
 };

@@ -3,11 +3,14 @@
 #include <common/logger_useful.h>

 #include <cppkafka/cppkafka.h>
+#include <boost/algorithm/string/join.hpp>

 namespace DB
 {

 using namespace std::chrono_literals;
+const auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000;


 ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
     ConsumerPtr consumer_,
@@ -15,7 +18,8 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
     size_t max_batch_size,
     size_t poll_timeout_,
     bool intermediate_commit_,
-    const std::atomic<bool> & stopped_)
+    const std::atomic<bool> & stopped_,
+    const Names & _topics)
     : ReadBuffer(nullptr, 0)
     , consumer(consumer_)
     , log(log_)
@@ -24,7 +28,51 @@ ReadBufferFromKafkaConsumer::ReadBufferFromKafkaConsumer(
     , intermediate_commit(intermediate_commit_)
     , stopped(stopped_)
     , current(messages.begin())
+    , topics(_topics)
 {
+    // called (synchronously, during poll) when we enter the consumer group
+    consumer->set_assignment_callback([this](const cppkafka::TopicPartitionList & topic_partitions)
+    {
+        LOG_TRACE(log, "Topics/partitions assigned: " << topic_partitions);
+        assignment = topic_partitions;
+    });
+
+    // called (synchronously, during poll) when we leave the consumer group
+    consumer->set_revocation_callback([this](const cppkafka::TopicPartitionList & topic_partitions)
+    {
+        // Rebalance is happening now, and now we have a chance to finish the work
+        // with topics/partitions we were working with before rebalance
+        LOG_TRACE(log, "Rebalance initiated. Revoking partitions: " << topic_partitions);
+
+        // we can not flush data to target from that point (it is pulled, not pushed),
+        // so the best we can do now is to
+        // 1) repeat the last commit in sync mode (async could still be in the queue; we need to be sure it is properly committed before rebalance)
+        // 2) stop / break the current reading:
+        //    * clean buffered non-committed messages
+        //    * set flag / flush
+
+        messages.clear();
+        current = messages.begin();
+        BufferBase::set(nullptr, 0, 0);
+
+        rebalance_happened = true;
+        assignment.clear();
+
+        // for now we use the slower (but reliable) sync commit in the main loop, so no need to repeat
+        // try
+        // {
+        //     consumer->commit();
+        // }
+        // catch (cppkafka::HandleException & e)
+        // {
+        //     LOG_WARNING(log, "Commit error: " << e.what());
+        // }
+    });
+
+    consumer->set_rebalance_error_callback([this](cppkafka::Error err)
+    {
+        LOG_ERROR(log, "Rebalance error: " << err);
+    });
 }

 ReadBufferFromKafkaConsumer::~ReadBufferFromKafkaConsumer()
@@ -32,7 +80,7 @@ ReadBufferFromKafkaConsumer::~ReadBufferFromKafkaConsumer()
     /// NOTE: see https://github.com/edenhill/librdkafka/issues/2077
     consumer->unsubscribe();
     consumer->unassign();
-    while (consumer->get_consumer_queue().next_event(1s));
+    while (consumer->get_consumer_queue().next_event(100ms));
 }

 void ReadBufferFromKafkaConsumer::commit()
@@ -72,55 +120,60 @@ void ReadBufferFromKafkaConsumer::commit()

     PrintOffsets("Polled offset", consumer->get_offsets_position(consumer->get_assignment()));

-    consumer->async_commit();
+    if (hasMorePolledMessages())
+    {
+        LOG_WARNING(log, "Logical error. Not all polled messages were processed.");
+    }
+
+    if (offsets_stored > 0)
+    {
+        // if we did an async commit here (which is faster),
+        // we might need to repeat the commit in sync mode in the revocation callback,
+        // but it seems like the existing API doesn't allow us to do that
+        // in a controlled manner (i.e. we don't know the offsets to commit then)
+        consumer->commit();
+    }
+    else
+    {
+        LOG_TRACE(log, "Nothing to commit.");
+    }

     PrintOffsets("Committed offset", consumer->get_offsets_committed(consumer->get_assignment()));
+    offsets_stored = 0;

     stalled = false;
 }

-void ReadBufferFromKafkaConsumer::subscribe(const Names & topics)
+void ReadBufferFromKafkaConsumer::subscribe()
 {
-    {
-        String message = "Already subscribed to topics:";
-        for (const auto & topic : consumer->get_subscription())
-            message += " " + topic;
-        LOG_TRACE(log, message);
-    }
+    LOG_TRACE(log, "Already subscribed to topics: [ "
+        << boost::algorithm::join(consumer->get_subscription(), ", ")
+        << " ]");

-    {
-        String message = "Already assigned to topics:";
-        for (const auto & toppar : consumer->get_assignment())
-            message += " " + toppar.get_topic();
-        LOG_TRACE(log, message);
-    }
+    LOG_TRACE(log, "Already assigned to: " << assignment);

-    // While we wait for an assignment after subscription, we'll poll zero messages anyway.
-    // If we're doing a manual select then it's better to get something after a wait than immediate nothing.
-    // But due to the nature of async pause/resume/subscribe we can't guarantee any persistent state:
-    // see https://github.com/edenhill/librdkafka/issues/2455
+    size_t max_retries = 5;

     while (consumer->get_subscription().empty())
     {
-        stalled = false;
+        --max_retries;

         try
         {
             consumer->subscribe(topics);
-            if (nextImpl())
-                break;

             // FIXME: if we failed to receive "subscribe" response while polling and destroy consumer now, then we may hang up.
             // see https://github.com/edenhill/librdkafka/issues/2077
         }
         catch (cppkafka::HandleException & e)
         {
-            if (e.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT)
+            if (max_retries > 0 && e.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT)
                 continue;
             throw;
         }
     }

     stalled = false;
+    rebalance_happened = false;
+    offsets_stored = 0;
 }

 void ReadBufferFromKafkaConsumer::unsubscribe()
@@ -134,13 +187,33 @@ void ReadBufferFromKafkaConsumer::unsubscribe()
     consumer->unsubscribe();
 }

+bool ReadBufferFromKafkaConsumer::hasMorePolledMessages() const
+{
+    return (!stalled) && (current != messages.end());
+}
+
+void ReadBufferFromKafkaConsumer::resetToLastCommitted(const char * msg)
+{
+    if (assignment.empty())
+    {
+        LOG_TRACE(log, "Not assigned. Can't reset to last committed position.");
+        return;
+    }
+    auto committed_offset = consumer->get_offsets_committed(consumer->get_assignment());
+    consumer->assign(committed_offset);
+    LOG_TRACE(log, msg << " Returned to committed position: " << committed_offset);
+}

 /// Do commit messages implicitly after we processed the previous batch.
 bool ReadBufferFromKafkaConsumer::nextImpl()
 {
     /// NOTE: ReadBuffer was implemented with an immutable underlying contents in mind.
     /// If we failed to poll any message once - don't try again.
     /// Otherwise, the |poll_timeout| expectations get flawed.
-    if (stalled || stopped || !allowed)
+    if (stalled || stopped || !allowed || rebalance_happened)
         return false;

     if (current == messages.end())
@@ -148,18 +221,60 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
         if (intermediate_commit)
             commit();

+        size_t waited_for_assignment = 0;
+        while (1)
+        {
             /// Don't drop old messages immediately, since we may need them for virtual columns.
             auto new_messages = consumer->poll_batch(batch_size, std::chrono::milliseconds(poll_timeout));

+            if (rebalance_happened)
+            {
+                if (!new_messages.empty())
+                {
+                    // we have polled something just after rebalance.
+                    // we will not use the current batch, so we need to return to the last committed position,
+                    // otherwise we will continue polling from that position
+                    resetToLastCommitted("Rewind last poll after rebalance.");
+                }
+
+                offsets_stored = 0;
+                return false;
+            }
+
             if (new_messages.empty())
             {
+                // While we wait for an assignment after subscription, we'll poll zero messages anyway.
+                // If we're doing a manual select then it's better to get something after a wait than immediate nothing.
+                if (assignment.empty())
+                {
+                    waited_for_assignment += poll_timeout; // slightly inaccurate, but a rough calculation is ok.
+                    if (waited_for_assignment < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS)
+                    {
+                        continue;
+                    }
+                    else
+                    {
+                        LOG_TRACE(log, "Can't get assignment");
+                        stalled = true;
+                        return false;
+                    }
+                }
+                else
+                {
                     LOG_TRACE(log, "Stalled");
                     stalled = true;
                     return false;
+                }
             }
+            else
+            {
                 messages = std::move(new_messages);
                 current = messages.begin();
-                LOG_TRACE(log, "Polled batch of " << messages.size() << " messages");
+                LOG_TRACE(log, "Polled batch of " << messages.size() << " messages. Offset position: " << consumer->get_offsets_position(consumer->get_assignment()));
+                break;
+            }
+        }
     }

     if (auto err = current->get_error())
@@ -176,12 +291,18 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
     BufferBase::set(new_position, current->get_payload().get_size(), 0);
     allowed = false;

-    /// Since we can poll more messages than we already processed - commit only processed messages.
-    consumer->store_offset(*current);
-
     ++current;

     return true;
 }

+void ReadBufferFromKafkaConsumer::storeLastReadMessageOffset()
+{
+    if (!stalled && !rebalance_happened)
+    {
+        consumer->store_offset(*(current - 1));
+        ++offsets_stored;
+    }
+}

 }
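
Editor's note: the key invariant of the new consumer code is that an offset is stored only after the caller has fully processed a message, and never across a rebalance. nextImpl() advances `current` past the message it hands out, so storeLastReadMessageOffset() stores the offset of *(current - 1). A stripped-down model of that handshake using plain containers (no cppkafka; ToyKafkaReader is hypothetical):

#include <cstddef>
#include <string>
#include <vector>

struct ToyKafkaReader
{
    std::vector<std::string> messages{"m0", "m1", "m2"};
    std::vector<std::string>::const_iterator current = messages.begin();
    std::size_t offsets_stored = 0;
    bool rebalance_happened = false;

    const std::string * next()                 // nextImpl(): hand out one message
    {
        if (rebalance_happened || current == messages.end())
            return nullptr;
        return &*current++;                    // `current` now points past the message
    }

    void storeLastReadMessageOffset()          // called only after successful processing
    {
        if (!rebalance_happened)
        {
            // the real code runs consumer->store_offset(*(current - 1)) here
            ++offsets_stored;
        }
    }

    void commitIfNeeded()                      // commit(): sync-commit only stored offsets
    {
        if (offsets_stored > 0)
            offsets_stored = 0;                // stands in for consumer->commit()
    }
};
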
@@ -25,16 +25,24 @@ public:
     size_t max_batch_size,
     size_t poll_timeout_,
     bool intermediate_commit_,
-    const std::atomic<bool> & stopped_);
+    const std::atomic<bool> & stopped_,
+    const Names & _topics);
     ~ReadBufferFromKafkaConsumer() override;

     void allowNext() { allowed = true; } // Allow to read next message.
     void commit(); // Commit all processed messages.
-    void subscribe(const Names & topics); // Subscribe internal consumer to topics.
+    void subscribe(); // Subscribe internal consumer to topics.
     void unsubscribe(); // Unsubscribe internal consumer in case of failure.

     auto pollTimeout() const { return poll_timeout; }

+    bool hasMorePolledMessages() const;
+    auto rebalanceHappened() const { return rebalance_happened; }
+
+    void storeLastReadMessageOffset();
+    void resetToLastCommitted(const char * msg);
+
     // Return values for the message that's being read.
     String currentTopic() const { return current[-1].get_topic(); }
     String currentKey() const { return current[-1].get_key(); }
@@ -49,6 +57,7 @@ private:
     Poco::Logger * log;
     const size_t batch_size = 1;
     const size_t poll_timeout = 0;
+    size_t offsets_stored = 0;
     bool stalled = false;
     bool intermediate_commit = true;
     bool allowed = true;
@@ -58,6 +67,10 @@ private:
     Messages messages;
     Messages::const_iterator current;

+    bool rebalance_happened = false;
+    cppkafka::TopicPartitionList assignment;
+    const Names topics;
+
     bool nextImpl() override;
 };

@@ -272,7 +272,7 @@ ConsumerBufferPtr StorageKafka::createReadBuffer()
     size_t poll_timeout = settings.stream_poll_timeout_ms.totalMilliseconds();

     /// NOTE: we pass |stream_cancelled| by reference here, so the buffers should not outlive the storage.
-    return std::make_shared<ReadBufferFromKafkaConsumer>(consumer, log, batch_size, poll_timeout, intermediate_commit, stream_cancelled);
+    return std::make_shared<ReadBufferFromKafkaConsumer>(consumer, log, batch_size, poll_timeout, intermediate_commit, stream_cancelled, getTopics());
 }

dbms/src/Storages/MergeTree/BoolMask.cpp (new file, 5 lines)
@@ -0,0 +1,5 @@
+#include "BoolMask.h"
+
+
+const BoolMask BoolMask::consider_only_can_be_true(false, true);
+const BoolMask BoolMask::consider_only_can_be_false(true, false);
@@ -21,4 +21,17 @@ struct BoolMask
     {
         return BoolMask(can_be_false, can_be_true);
     }

+    /// If mask is (true, true), then it can no longer change under operation |.
+    /// We use this condition to early-exit KeyCondition::check{InRange,After} methods.
+    bool isComplete() const
+    {
+        return can_be_false && can_be_true;
+    }
+
+    /// These special constants are used to implement KeyCondition::mayBeTrue{InRange,After} via KeyCondition::check{InRange,After}.
+    /// When used as an initial_mask argument in KeyCondition::check{InRange,After} methods, they effectively prevent
+    /// calculation of the discarded BoolMask component as it is already set to true.
+    static const BoolMask consider_only_can_be_true;
+    static const BoolMask consider_only_can_be_false;
 };

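
Editor's note: to make the early-exit reasoning concrete, here is a self-contained restatement of the BoolMask algebra (a sketch mirroring the header above, not a drop-in replacement). Under |, components can only turn true, so a (true, true) mask is absorbing; a sentinel whose unwanted component is pre-set to true therefore completes as soon as the wanted component becomes true.

#include <cassert>
#include <vector>

struct Mask   // stand-in for BoolMask, constructor order (can_be_true, can_be_false)
{
    bool can_be_true;
    bool can_be_false;

    Mask(bool t, bool f) : can_be_true(t), can_be_false(f) {}

    Mask operator|(const Mask & m) const
    {
        return Mask(can_be_true || m.can_be_true, can_be_false || m.can_be_false);
    }

    bool isComplete() const { return can_be_true && can_be_false; }
};

Mask combine(const std::vector<Mask> & atoms, Mask initial)
{
    Mask result = initial;
    for (const auto & atom : atoms)
    {
        result = result | atom;
        if (result.isComplete())   // (true, true) is absorbing under |
            break;                 // so the remaining atoms can be skipped
    }
    return result;
}

int main()
{
    // The sentinel pre-saturates can_be_false, so the very first atom that
    // may be true completes the mask and stops the traversal.
    Mask consider_only_can_be_true(false, true);
    assert((combine({Mask(true, false), Mask(false, false)}, consider_only_can_be_true).isComplete()));
}
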
@@ -888,7 +888,7 @@ String KeyCondition::toString() const
   */

 template <typename F>
-static bool forAnyParallelogram(
+static BoolMask forAnyParallelogram(
     size_t key_size,
     const Field * key_left,
     const Field * key_right,
@@ -896,6 +896,7 @@ static bool forAnyParallelogram(
     bool right_bounded,
     std::vector<Range> & parallelogram,
     size_t prefix_size,
+    BoolMask initial_mask,
     F && callback)
 {
     if (!left_bounded && !right_bounded)
@@ -944,16 +945,25 @@ static bool forAnyParallelogram(
     for (size_t i = prefix_size + 1; i < key_size; ++i)
         parallelogram[i] = Range();

-    if (callback(parallelogram))
-        return true;
+    BoolMask result = initial_mask;
+    result = result | callback(parallelogram);
+
+    /// There are several early-exit conditions (like the one below) hereinafter.
+    /// They are important; in particular, if initial_mask == BoolMask::consider_only_can_be_true
+    /// (which happens when this routine is called from KeyCondition::mayBeTrueXXX),
+    /// they provide significant speedup, which may be observed on merge_tree_huge_pk performance test.
+    if (result.isComplete())
+        return result;

     /// [x1] x [y1 .. +inf)

     if (left_bounded)
     {
         parallelogram[prefix_size] = Range(key_left[prefix_size]);
-        if (forAnyParallelogram(key_size, key_left, key_right, true, false, parallelogram, prefix_size + 1, callback))
-            return true;
+        result = result | forAnyParallelogram(key_size, key_left, key_right, true, false, parallelogram, prefix_size + 1, initial_mask, callback);
+        if (result.isComplete())
+            return result;
     }

     /// [x2] x (-inf .. y2]
@@ -961,20 +971,22 @@ static bool forAnyParallelogram(
     if (right_bounded)
     {
         parallelogram[prefix_size] = Range(key_right[prefix_size]);
-        if (forAnyParallelogram(key_size, key_left, key_right, false, true, parallelogram, prefix_size + 1, callback))
-            return true;
+        result = result | forAnyParallelogram(key_size, key_left, key_right, false, true, parallelogram, prefix_size + 1, initial_mask, callback);
+        if (result.isComplete())
+            return result;
     }

-    return false;
+    return result;
 }


-bool KeyCondition::mayBeTrueInRange(
+BoolMask KeyCondition::checkInRange(
     size_t used_key_size,
     const Field * left_key,
     const Field * right_key,
     const DataTypes & data_types,
-    bool right_bounded) const
+    bool right_bounded,
+    BoolMask initial_mask) const
 {
     std::vector<Range> key_ranges(used_key_size, Range());

@@ -992,10 +1004,10 @@ bool KeyCondition::mayBeTrueInRange(
     else
         std::cerr << "+inf)\n";*/

-    return forAnyParallelogram(used_key_size, left_key, right_key, true, right_bounded, key_ranges, 0,
+    return forAnyParallelogram(used_key_size, left_key, right_key, true, right_bounded, key_ranges, 0, initial_mask,
         [&] (const std::vector<Range> & key_ranges_parallelogram)
     {
-        auto res = mayBeTrueInParallelogram(key_ranges_parallelogram, data_types);
+        auto res = checkInParallelogram(key_ranges_parallelogram, data_types);

         /* std::cerr << "Parallelogram: ";
         for (size_t i = 0, size = key_ranges.size(); i != size; ++i)
@@ -1006,11 +1018,11 @@ bool KeyCondition::mayBeTrueInRange(
     });
 }


 std::optional<Range> KeyCondition::applyMonotonicFunctionsChainToRange(
     Range key_range,
     MonotonicFunctionsChain & functions,
-    DataTypePtr current_type
-)
+    DataTypePtr current_type)
 {
     for (auto & func : functions)
     {
@@ -1043,7 +1055,9 @@ std::optional<Range> KeyCondition::applyMonotonicFunctionsChainToRange(
     return key_range;
 }

-bool KeyCondition::mayBeTrueInParallelogram(const std::vector<Range> & parallelogram, const DataTypes & data_types) const
+BoolMask KeyCondition::checkInParallelogram(
+    const std::vector<Range> & parallelogram,
+    const DataTypes & data_types) const
 {
     std::vector<BoolMask> rpn_stack;
     for (size_t i = 0; i < rpn.size(); ++i)
@@ -1091,7 +1105,7 @@ BoolMask KeyCondition::checkInParallelogram(
             if (!element.set_index)
                 throw Exception("Set for IN is not created yet", ErrorCodes::LOGICAL_ERROR);

-            rpn_stack.emplace_back(element.set_index->mayBeTrueInRange(parallelogram, data_types));
+            rpn_stack.emplace_back(element.set_index->checkInRange(parallelogram, data_types));
             if (element.function == RPNElement::FUNCTION_NOT_IN_SET)
                 rpn_stack.back() = !rpn_stack.back();
         }
@@ -1132,22 +1146,49 @@ BoolMask KeyCondition::checkInParallelogram(
     }

     if (rpn_stack.size() != 1)
-        throw Exception("Unexpected stack size in KeyCondition::mayBeTrueInParallelogram", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Unexpected stack size in KeyCondition::checkInRange", ErrorCodes::LOGICAL_ERROR);

-    return rpn_stack[0].can_be_true;
+    return rpn_stack[0];
+}
+
+BoolMask KeyCondition::checkInRange(
+    size_t used_key_size,
+    const Field * left_key,
+    const Field * right_key,
+    const DataTypes & data_types,
+    BoolMask initial_mask) const
+{
+    return checkInRange(used_key_size, left_key, right_key, data_types, true, initial_mask);
 }

 bool KeyCondition::mayBeTrueInRange(
-    size_t used_key_size, const Field * left_key, const Field * right_key, const DataTypes & data_types) const
+    size_t used_key_size,
+    const Field * left_key,
+    const Field * right_key,
+    const DataTypes & data_types) const
 {
-    return mayBeTrueInRange(used_key_size, left_key, right_key, data_types, true);
+    return checkInRange(used_key_size, left_key, right_key, data_types, true, BoolMask::consider_only_can_be_true).can_be_true;
 }

-bool KeyCondition::mayBeTrueAfter(
-    size_t used_key_size, const Field * left_key, const DataTypes & data_types) const
+BoolMask KeyCondition::checkAfter(
+    size_t used_key_size,
+    const Field * left_key,
+    const DataTypes & data_types,
+    BoolMask initial_mask) const
 {
-    return mayBeTrueInRange(used_key_size, left_key, nullptr, data_types, false);
+    return checkInRange(used_key_size, left_key, nullptr, data_types, false, initial_mask);
+}
+
+bool KeyCondition::mayBeTrueAfter(
+    size_t used_key_size,
+    const Field * left_key,
+    const DataTypes & data_types) const
+{
+    return checkInRange(used_key_size, left_key, nullptr, data_types, false, BoolMask::consider_only_can_be_true).can_be_true;
 }

@@ -235,17 +235,45 @@ public:
     const Names & key_column_names,
     const ExpressionActionsPtr & key_expr);

-    /// Whether the condition is feasible in the key range.
+    /// Whether the condition and its negation are feasible in the direct product of single column ranges specified by `parallelogram`.
+    BoolMask checkInParallelogram(
+        const std::vector<Range> & parallelogram,
+        const DataTypes & data_types) const;
+
+    /// Whether the condition and its negation are (independently) feasible in the key range.
     /// left_key and right_key must contain all fields in the sort_descr in the appropriate order.
     /// data_types - the types of the key columns.
-    bool mayBeTrueInRange(size_t used_key_size, const Field * left_key, const Field * right_key, const DataTypes & data_types) const;
+    /// Argument initial_mask is used for early exiting the implementation when we do not care about
+    /// one of the resulting mask components (see BoolMask::consider_only_can_be_XXX).
+    BoolMask checkInRange(
+        size_t used_key_size,
+        const Field * left_key,
+        const Field * right_key,
+        const DataTypes & data_types,
+        BoolMask initial_mask = BoolMask(false, false)) const;

-    /// Whether the condition is feasible in the direct product of single column ranges specified by `parallelogram`.
-    bool mayBeTrueInParallelogram(const std::vector<Range> & parallelogram, const DataTypes & data_types) const;
-
-    /// Is the condition valid in a semi-infinite (not limited to the right) key range.
+    /// Are the condition and its negation valid in a semi-infinite (not limited to the right) key range.
     /// left_key must contain all the fields in the sort_descr in the appropriate order.
-    bool mayBeTrueAfter(size_t used_key_size, const Field * left_key, const DataTypes & data_types) const;
+    BoolMask checkAfter(
+        size_t used_key_size,
+        const Field * left_key,
+        const DataTypes & data_types,
+        BoolMask initial_mask = BoolMask(false, false)) const;
+
+    /// Same as checkInRange, but calculates only the may_be_true component of the result.
+    /// This is more efficient than checkInRange(...).can_be_true.
+    bool mayBeTrueInRange(
+        size_t used_key_size,
+        const Field * left_key,
+        const Field * right_key,
+        const DataTypes & data_types) const;
+
+    /// Same as checkAfter, but calculates only the may_be_true component of the result.
+    /// This is more efficient than checkAfter(...).can_be_true.
+    bool mayBeTrueAfter(
+        size_t used_key_size,
+        const Field * left_key,
+        const DataTypes & data_types) const;

     /// Checks that the index can not be used.
     bool alwaysUnknownOrTrue() const;
@@ -330,12 +358,13 @@ public:
     static const AtomMap atom_map;

 private:
-    bool mayBeTrueInRange(
+    BoolMask checkInRange(
         size_t used_key_size,
         const Field * left_key,
         const Field * right_key,
         const DataTypes & data_types,
-        bool right_bounded) const;
+        bool right_bounded,
+        BoolMask initial_mask) const;

     void traverseAST(const ASTPtr & node, const Context & context, Block & block_with_constants);
     bool atomFromAST(const ASTPtr & node, const Context & context, Block & block_with_constants, RPNElement & out);
@@ -276,8 +276,8 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
         if (part->isEmpty())
             continue;

-        if (minmax_idx_condition && !minmax_idx_condition->mayBeTrueInParallelogram(
-                part->minmax_idx.parallelogram, data.minmax_idx_column_types))
+        if (minmax_idx_condition && !minmax_idx_condition->checkInParallelogram(
+                part->minmax_idx.parallelogram, data.minmax_idx_column_types).can_be_true)
             continue;

         if (max_block_numbers_to_read)
@@ -378,11 +378,11 @@ bool MergeTreeConditionFullText::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx
             rpn_stack.emplace_back(true, false);
         }
         else
-            throw Exception("Unexpected function type in KeyCondition::RPNElement", ErrorCodes::LOGICAL_ERROR);
+            throw Exception("Unexpected function type in BloomFilterCondition::RPNElement", ErrorCodes::LOGICAL_ERROR);
     }

     if (rpn_stack.size() != 1)
-        throw Exception("Unexpected stack size in KeyCondition::mayBeTrueInRange", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Unexpected stack size in BloomFilterCondition::mayBeTrueOnGranule", ErrorCodes::LOGICAL_ERROR);

     return rpn_stack[0].can_be_true;
 }
@@ -143,7 +143,7 @@ bool MergeTreeIndexConditionMinMax::mayBeTrueOnGranule(MergeTreeIndexGranulePtr
     for (const auto & range : granule->parallelogram)
         if (range.left.isNull() || range.right.isNull())
            return true;
-    return condition.mayBeTrueInParallelogram(granule->parallelogram, index.data_types);
+    return condition.checkInParallelogram(granule->parallelogram, index.data_types).can_be_true;
 }

@@ -263,7 +263,7 @@ void MergeTreeRangeReader::ReadResult::shrink(Columns & old_columns)
             continue;
         auto new_column = old_columns[i]->cloneEmpty();
         new_column->reserve(total_rows_per_granule);
-        for (size_t j = 0, pos = 0; j < rows_per_granule_original.size(); pos += rows_per_granule_original[i], ++j)
+        for (size_t j = 0, pos = 0; j < rows_per_granule_original.size(); pos += rows_per_granule_original[j++])
         {
             if (rows_per_granule[j])
                 new_column->insertRangeFrom(*old_columns[i], pos, rows_per_granule[j]);
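
Editor's note: the fix above corrects a real indexing bug, not just style: the read offset `pos` must advance by the original row count of granule j, while the old code advanced it by rows_per_granule_original[i], reusing the outer column index i. A standalone model of the corrected loop over plain integers (shrink here is illustrative, not the real method):

#include <cstddef>
#include <vector>

std::vector<int> shrink(const std::vector<int> & old_column,
                        const std::vector<std::size_t> & rows_per_granule_original,
                        const std::vector<std::size_t> & rows_per_granule)
{
    std::vector<int> new_column;
    for (std::size_t j = 0, pos = 0; j < rows_per_granule_original.size(); pos += rows_per_granule_original[j++])
    {
        // copy only the retained prefix of granule j, starting at its original offset pos
        if (rows_per_granule[j])
            new_column.insert(new_column.end(),
                              old_column.begin() + pos,
                              old_column.begin() + pos + rows_per_granule[j]);
    }
    return new_column;
}
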
@ -4,7 +4,7 @@
|
|||||||
#include <Interpreters/InterpreterAlterQuery.h>
|
#include <Interpreters/InterpreterAlterQuery.h>
|
||||||
#include <Interpreters/castColumn.h>
|
#include <Interpreters/castColumn.h>
|
||||||
#include <Interpreters/evaluateConstantExpression.h>
|
#include <Interpreters/evaluateConstantExpression.h>
|
||||||
#include <DataStreams/AddingMissedBlockInputStream.h>
|
#include <Processors/Transforms/AddingMissedTransform.h>
|
||||||
#include <DataStreams/ConvertingBlockInputStream.h>
|
#include <DataStreams/ConvertingBlockInputStream.h>
|
||||||
#include <DataStreams/IBlockInputStream.h>
|
#include <DataStreams/IBlockInputStream.h>
|
||||||
#include <Databases/IDatabase.h>
|
#include <Databases/IDatabase.h>
|
||||||
@ -27,6 +27,10 @@
|
|||||||
#include <ext/range.h>
|
#include <ext/range.h>
|
||||||
#include <DataStreams/FilterBlockInputStream.h>
|
#include <DataStreams/FilterBlockInputStream.h>
|
||||||
#include <DataStreams/ExpressionBlockInputStream.h>
|
#include <DataStreams/ExpressionBlockInputStream.h>
|
||||||
|
#include <Processors/Transforms/ConvertingTransform.h>
|
||||||
|
#include <Processors/Transforms/FilterTransform.h>
|
||||||
|
#include <Processors/Transforms/ExpressionTransform.h>
|
||||||
|
#include <Processors/Sources/SourceFromInputStream.h>
|
||||||
|
|
||||||
|
|
||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
@ -94,20 +98,19 @@ StorageBuffer::~StorageBuffer()
|
|||||||
|
|
||||||
|
|
||||||
/// Reads from one buffer (from one block) under its mutex.
|
/// Reads from one buffer (from one block) under its mutex.
|
||||||
class BufferBlockInputStream : public IBlockInputStream
|
class BufferSource : public SourceWithProgress
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
BufferBlockInputStream(const Names & column_names_, StorageBuffer::Buffer & buffer_, const StorageBuffer & storage_)
|
BufferSource(const Names & column_names_, StorageBuffer::Buffer & buffer_, const StorageBuffer & storage)
|
||||||
: column_names(column_names_.begin(), column_names_.end()), buffer(buffer_), storage(storage_) {}
|
: SourceWithProgress(storage.getSampleBlockForColumns(column_names_))
|
||||||
|
, column_names(column_names_.begin(), column_names_.end()), buffer(buffer_) {}
|
||||||
|
|
||||||
String getName() const override { return "Buffer"; }
|
String getName() const override { return "Buffer"; }
|
||||||
|
|
||||||
Block getHeader() const override { return storage.getSampleBlockForColumns(column_names); }
|
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
Block readImpl() override
|
Chunk generate() override
|
||||||
{
|
{
|
||||||
Block res;
|
Chunk res;
|
||||||
|
|
||||||
if (has_been_read)
|
if (has_been_read)
|
||||||
return res;
|
return res;
|
||||||
@ -118,8 +121,14 @@ protected:
|
|||||||
if (!buffer.data.rows())
|
if (!buffer.data.rows())
|
||||||
return res;
|
return res;
|
||||||
|
|
||||||
|
Columns columns;
|
||||||
|
columns.reserve(column_names.size());
|
||||||
|
|
||||||
for (const auto & name : column_names)
|
for (const auto & name : column_names)
|
||||||
res.insert(buffer.data.getByName(name));
|
columns.push_back(buffer.data.getByName(name).column);
|
||||||
|
|
||||||
|
UInt64 size = columns.at(0)->size();
|
||||||
|
res.setColumns(std::move(columns), size);
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
@ -127,7 +136,6 @@ protected:
|
|||||||
private:
|
private:
|
||||||
Names column_names;
|
Names column_names;
|
||||||
StorageBuffer::Buffer & buffer;
|
StorageBuffer::Buffer & buffer;
|
||||||
const StorageBuffer & storage;
|
|
||||||
bool has_been_read = false;
|
bool has_been_read = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -147,7 +155,8 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context
|
|||||||
return QueryProcessingStage::FetchColumns;
|
return QueryProcessingStage::FetchColumns;
|
||||||
}
|
}
|
||||||
|
|
||||||
BlockInputStreams StorageBuffer::read(
|
static Pipes readAsPipes(
|
||||||
|
const StoragePtr & storage,
|
||||||
const Names & column_names,
|
const Names & column_names,
|
||||||
const SelectQueryInfo & query_info,
|
const SelectQueryInfo & query_info,
|
||||||
const Context & context,
|
const Context & context,
|
||||||
@ -155,7 +164,27 @@ BlockInputStreams StorageBuffer::read(
|
|||||||
size_t max_block_size,
|
size_t max_block_size,
|
||||||
unsigned num_streams)
|
unsigned num_streams)
|
||||||
{
|
{
|
||||||
BlockInputStreams streams_from_dst;
|
if (storage->supportProcessorsPipeline())
|
||||||
|
return storage->readWithProcessors(column_names, query_info, context, processed_stage, max_block_size, num_streams);
|
||||||
|
|
||||||
|
auto streams = storage->read(column_names, query_info, context, processed_stage, max_block_size, num_streams);
|
||||||
|
|
||||||
|
Pipes pipes;
|
||||||
|
for (auto & stream : streams)
|
||||||
|
pipes.emplace_back(std::make_shared<SourceFromInputStream>(stream));
|
||||||
|
|
||||||
|
return pipes;
|
||||||
|
};
|
||||||
|
|
||||||
|
Pipes StorageBuffer::readWithProcessors(
|
||||||
|
const Names & column_names,
|
||||||
|
const SelectQueryInfo & query_info,
|
||||||
|
const Context & context,
|
||||||
|
QueryProcessingStage::Enum processed_stage,
|
||||||
|
size_t max_block_size,
|
||||||
|
unsigned num_streams)
|
||||||
|
{
|
||||||
|
Pipes pipes_from_dst;
|
||||||
|
|
||||||
if (!no_destination)
|
if (!no_destination)
|
||||||
{
|
{
|
||||||
@ -178,7 +207,7 @@ BlockInputStreams StorageBuffer::read(
|
|||||||
            query_info.input_sorting_info = query_info.order_by_optimizer->getInputOrder(destination);

            /// The destination table has the same structure of the requested columns and we can simply read blocks from there.
-           streams_from_dst = destination->read(column_names, query_info, context, processed_stage, max_block_size, num_streams);
+           pipes_from_dst = readAsPipes(destination, column_names, query_info, context, processed_stage, max_block_size, num_streams);
        }
        else
        {
@@ -213,49 +242,52 @@ BlockInputStreams StorageBuffer::read(
            }
            else
            {
-               streams_from_dst = destination->read(columns_intersection, query_info, context, processed_stage, max_block_size, num_streams);
+               pipes_from_dst = readAsPipes(destination, columns_intersection, query_info, context, processed_stage, max_block_size, num_streams);
-               for (auto & stream : streams_from_dst)
+               for (auto & pipe : pipes_from_dst)
                {
-                   stream = std::make_shared<AddingMissedBlockInputStream>(
-                       stream, header_after_adding_defaults, getColumns().getDefaults(), context);
-                   stream = std::make_shared<ConvertingBlockInputStream>(
-                       context, stream, header, ConvertingBlockInputStream::MatchColumnsMode::Name);
+                   pipe.addSimpleTransform(std::make_shared<AddingMissedTransform>(
+                       pipe.getHeader(), header_after_adding_defaults, getColumns().getDefaults(), context));
+                   pipe.addSimpleTransform(std::make_shared<ConvertingTransform>(
+                       pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name, context));
                }
            }
        }

-       for (auto & stream : streams_from_dst)
-           stream->addTableLock(destination_lock);
+       for (auto & pipe : pipes_from_dst)
+           pipe.addTableLock(destination_lock);
    }

-   BlockInputStreams streams_from_buffers;
-   streams_from_buffers.reserve(num_shards);
+   Pipes pipes_from_buffers;
+   pipes_from_buffers.reserve(num_shards);
    for (auto & buf : buffers)
-       streams_from_buffers.push_back(std::make_shared<BufferBlockInputStream>(column_names, buf, *this));
+       pipes_from_buffers.emplace_back(std::make_shared<BufferSource>(column_names, buf, *this));

    /** If the sources from the table were processed before some non-initial stage of query execution,
      * then sources from the buffers must also be wrapped in the processing pipeline before the same stage.
      */
    if (processed_stage > QueryProcessingStage::FetchColumns)
-       for (auto & stream : streams_from_buffers)
-           stream = InterpreterSelectQuery(query_info.query, context, stream, SelectQueryOptions(processed_stage)).execute().in;
+       for (auto & pipe : pipes_from_buffers)
+           pipe = InterpreterSelectQuery(query_info.query, context, std::move(pipe), SelectQueryOptions(processed_stage)).executeWithProcessors().getPipe();

    if (query_info.prewhere_info)
    {
-       for (auto & stream : streams_from_buffers)
-           stream = std::make_shared<FilterBlockInputStream>(stream, query_info.prewhere_info->prewhere_actions,
-               query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column);
+       for (auto & pipe : pipes_from_buffers)
+           pipe.addSimpleTransform(std::make_shared<FilterTransform>(pipe.getHeader(), query_info.prewhere_info->prewhere_actions,
+               query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column));

        if (query_info.prewhere_info->alias_actions)
        {
-           for (auto & stream : streams_from_buffers)
-               stream = std::make_shared<ExpressionBlockInputStream>(stream, query_info.prewhere_info->alias_actions);
+           for (auto & pipe : pipes_from_buffers)
+               pipe.addSimpleTransform(std::make_shared<ExpressionTransform>(pipe.getHeader(), query_info.prewhere_info->alias_actions));
        }
    }

-   streams_from_dst.insert(streams_from_dst.end(), streams_from_buffers.begin(), streams_from_buffers.end());
-   return streams_from_dst;
+   for (auto & pipe : pipes_from_buffers)
+       pipes_from_dst.emplace_back(std::move(pipe));
+
+   return pipes_from_dst;
 }

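The recurring move in this hunk is that per-stream wrapper allocation (`stream = std::make_shared<SomeBlockInputStream>(stream, ...)`) becomes an in-place `pipe.addSimpleTransform(...)`. A minimal sketch of that shape follows; `Pipe`, `Block`, and the transform type here are simplified stand-ins, not the real ClickHouse classes, and only the wrapping pattern is illustrated.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Toy header: just a textual description of the block structure.
struct Block { std::string description; };

// A transform rewrites the header of whatever flows through the pipe.
using Transform = std::function<Block(const Block &)>;

class Pipe
{
public:
    explicit Pipe(Block header) : header_(std::move(header)) {}

    // Analogue of pipe.addSimpleTransform(...): the transform is appended
    // and the pipe's header is updated, instead of allocating a wrapper
    // stream around a stream as the old BlockInputStream code did.
    void addSimpleTransform(Transform t)
    {
        header_ = t(header_);
        transforms_.push_back(std::move(t));
    }

    const Block & getHeader() const { return header_; }

private:
    Block header_;
    std::vector<Transform> transforms_;
};

int main()
{
    std::vector<Pipe> pipes;
    pipes.emplace_back(Block{"key UInt64, value UInt64"});

    // Old style: stream = make_shared<ConvertingBlockInputStream>(stream, ...);
    // new style: mutate the pipe in place.
    for (auto & pipe : pipes)
        pipe.addSimpleTransform([](const Block & in)
        {
            return Block{in.description + " (converted to expected header)"};
        });

    std::cout << pipes.front().getHeader().description << '\n';
}
```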
@@ -40,7 +40,7 @@ class Context;
 class StorageBuffer : public ext::shared_ptr_helper<StorageBuffer>, public IStorage
 {
     friend struct ext::shared_ptr_helper<StorageBuffer>;
-    friend class BufferBlockInputStream;
+    friend class BufferSource;
     friend class BufferBlockOutputStream;

 public:
@@ -56,7 +56,7 @@ public:

     QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const override;

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,
@@ -64,6 +64,8 @@ public:
         size_t max_block_size,
         unsigned num_streams) override;

+    bool supportProcessorsPipeline() const override { return true; }
+
     BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override;

     void startup() override;
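The new `supportProcessorsPipeline()` override lets callers decide per storage whether the `Pipes` path is available or the legacy `BlockInputStreams` path must be kept as a fallback. A hedged sketch of that dispatch idea follows; the stub types and `readFrom` helper are illustrative stand-ins, not the real interpreter code, which is considerably more involved.

```cpp
#include <iostream>

// Simplified stand-ins for the real interfaces; only the dispatch is shown.
struct QueryPlanStub { const char * path; };

struct StorageStub
{
    // Mirrors the override added above: storages that have a processors
    // implementation opt in by returning true.
    virtual bool supportProcessorsPipeline() const { return false; }
    virtual ~StorageStub() = default;
};

struct BufferStorageStub : StorageStub
{
    bool supportProcessorsPipeline() const override { return true; }
};

QueryPlanStub readFrom(const StorageStub & storage)
{
    // A caller can choose the Pipes-based path when available and keep
    // the legacy BlockInputStreams path otherwise.
    if (storage.supportProcessorsPipeline())
        return {"readWithProcessors() -> Pipes"};
    return {"read() -> BlockInputStreams"};
}

int main()
{
    std::cout << readFrom(BufferStorageStub{}).path << '\n';
    std::cout << readFrom(StorageStub{}).path << '\n';
}
```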
@@ -1,9 +1,10 @@
 #pragma once
-#include <DataStreams/OneBlockInputStream.h>
 #include <DataTypes/DataTypeString.h>
 #include <Storages/ColumnsDescription.h>
 #include <Storages/IStorage.h>
 #include <ext/shared_ptr_helper.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Processors/Pipe.h>

 namespace DB
 {
@@ -25,7 +26,7 @@ public:
         setColumns(ColumnsDescription(Self::getNamesAndTypes()));
     }

-    BlockInputStreams read(const Names & column_names,
+    Pipes readWithProcessors(const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,
         QueryProcessingStage::Enum /*processed_stage*/,
@@ -38,7 +39,13 @@ public:
         MutableColumns res_columns = sample_block.cloneEmptyColumns();
         fillData(res_columns, context, query_info);

-        return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(sample_block.cloneWithColumns(std::move(res_columns))));
+        UInt64 num_rows = res_columns.at(0)->size();
+        Chunk chunk(std::move(res_columns), num_rows);
+
+        Pipes pipes;
+        pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(sample_block, std::move(chunk)));
+
+        return pipes;
     }
 };

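The conversion above, repeated for most system tables in this commit, turns a set of filled columns into a `Chunk` whose row count is taken from the first column, then wraps it in a single-chunk source. A minimal sketch of that invariant follows, assuming toy column types (`Column` here is just a vector of strings, not a real `IColumn`).

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Toy stand-ins: a "column" is a vector of strings and Chunk records only
// what the pattern needs.
using Column = std::vector<std::string>;
using Columns = std::vector<Column>;

struct Chunk
{
    Columns columns;
    std::uint64_t num_rows = 0;
};

// Mirrors the pattern above: the row count is read off the first column,
// so every column must have been filled to the same length beforehand.
Chunk makeChunk(Columns columns)
{
    if (columns.empty())
        throw std::runtime_error("at least one column is required");

    std::uint64_t num_rows = columns.at(0).size();
    for (const auto & col : columns)
        if (col.size() != num_rows)
            throw std::runtime_error("columns must be equally sized");

    return Chunk{std::move(columns), num_rows};
}

int main()
{
    Columns cols = {{"a", "b", "c"}, {"1", "2", "3"}};
    Chunk chunk = makeChunk(std::move(cols));
    std::cout << "rows in single-chunk source: " << chunk.num_rows << '\n';
}
```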
@@ -10,6 +10,7 @@
 #include <Parsers/queryToString.h>
 #include <Parsers/ASTSelectQuery.h>
 #include <Databases/IDatabase.h>
+#include <Processors/Sources/NullSource.h>


 namespace DB
@@ -51,34 +52,33 @@ namespace
 }


-class ColumnsBlockInputStream : public IBlockInputStream
+class ColumnsSource : public SourceWithProgress
 {
 public:
-    ColumnsBlockInputStream(
-        const std::vector<UInt8> & columns_mask_,
-        const Block & header_,
+    ColumnsSource(
+        std::vector<UInt8> columns_mask_,
+        Block header_,
         UInt64 max_block_size_,
         ColumnPtr databases_,
         ColumnPtr tables_,
         Storages storages_,
         String query_id_)
-        : columns_mask(columns_mask_), header(header_), max_block_size(max_block_size_)
-        , databases(databases_), tables(tables_), storages(std::move(storages_))
+        : SourceWithProgress(header_)
+        , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_)
+        , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_))
         , query_id(std::move(query_id_)), total_tables(tables->size())
     {
     }

     String getName() const override { return "Columns"; }
-    Block getHeader() const override { return header; }

 protected:
-    Block readImpl() override
+    Chunk generate() override
     {
         if (db_table_num >= total_tables)
             return {};

-        Block res = header;
-        MutableColumns res_columns = header.cloneEmptyColumns();
+        MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns();
         size_t rows_count = 0;

         while (rows_count < max_block_size && db_table_num < total_tables)
@@ -210,13 +210,11 @@ protected:
             }
         }

-        res.setColumns(std::move(res_columns));
-        return res;
+        return Chunk(std::move(res_columns), rows_count);
     }

 private:
     std::vector<UInt8> columns_mask;
-    Block header;
     UInt64 max_block_size;
     ColumnPtr databases;
     ColumnPtr tables;
@@ -227,7 +225,7 @@ private:
 };


-BlockInputStreams StorageSystemColumns::read(
+Pipes StorageSystemColumns::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & query_info,
     const Context & context,
@@ -242,7 +240,7 @@ BlockInputStreams StorageSystemColumns::read(
     NameSet names_set(column_names.begin(), column_names.end());

     Block sample_block = getSampleBlock();
-    Block res_block;
+    Block header;

     std::vector<UInt8> columns_mask(sample_block.columns());
     for (size_t i = 0, size = columns_mask.size(); i < size; ++i)
@@ -250,12 +248,13 @@ BlockInputStreams StorageSystemColumns::read(
         if (names_set.count(sample_block.getByPosition(i).name))
         {
             columns_mask[i] = 1;
-            res_block.insert(sample_block.getByPosition(i));
+            header.insert(sample_block.getByPosition(i));
         }
     }

     Block block_to_filter;
     Storages storages;
+    Pipes pipes;

     {
         Databases databases = context.getDatabases();
@@ -278,7 +277,10 @@ BlockInputStreams StorageSystemColumns::read(
         VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);

         if (!block_to_filter.rows())
-            return {std::make_shared<NullBlockInputStream>(res_block)};
+        {
+            pipes.emplace_back(std::make_shared<NullSource>(header));
+            return pipes;
+        }

         ColumnPtr & database_column = block_to_filter.getByName("database").column;
         size_t rows = database_column->size();
@@ -311,15 +313,20 @@ BlockInputStreams StorageSystemColumns::read(
     VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);

     if (!block_to_filter.rows())
-        return {std::make_shared<NullBlockInputStream>(res_block)};
+    {
+        pipes.emplace_back(std::make_shared<NullSource>(header));
+        return pipes;
+    }

     ColumnPtr filtered_database_column = block_to_filter.getByName("database").column;
     ColumnPtr filtered_table_column = block_to_filter.getByName("table").column;

-    return {std::make_shared<ColumnsBlockInputStream>(
-        std::move(columns_mask), std::move(res_block), max_block_size,
-        std::move(filtered_database_column), std::move(filtered_table_column), std::move(storages),
-        context.getCurrentQueryId())};
+    pipes.emplace_back(std::make_shared<ColumnsSource>(
+        std::move(columns_mask), std::move(header), max_block_size,
+        std::move(filtered_database_column), std::move(filtered_table_column), std::move(storages),
+        context.getCurrentQueryId()));
+
+    return pipes;
 }

 }
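The `readImpl()` to `generate()` change above switches the source to a pull contract: the caller keeps asking for chunks until an empty one signals exhaustion, with each call bounded by `max_block_size`. Below is a minimal sketch of that contract under simplified assumptions; `Chunk` and `CountingSource` are toy stand-ins rather than the real `ISource`/`SourceWithProgress` interfaces.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Toy chunk: just a batch of row values; empty() marks end of stream.
struct Chunk
{
    std::vector<std::uint64_t> rows;
    bool empty() const { return rows.empty(); }
};

class CountingSource
{
public:
    CountingSource(std::uint64_t total_, std::uint64_t max_block_size_)
        : total(total_), max_block_size(max_block_size_) {}

    // Like generate(): each call produces at most max_block_size rows,
    // and an empty chunk signals that the source is exhausted.
    Chunk generate()
    {
        Chunk chunk;
        while (produced < total && chunk.rows.size() < max_block_size)
            chunk.rows.push_back(produced++);
        return chunk;
    }

private:
    std::uint64_t total;
    std::uint64_t max_block_size;
    std::uint64_t produced = 0;
};

int main()
{
    CountingSource source(/*total=*/10, /*max_block_size=*/4);
    for (Chunk chunk = source.generate(); !chunk.empty(); chunk = source.generate())
        std::cout << "pulled a chunk of " << chunk.rows.size() << " rows\n";
}
```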
@@ -17,7 +17,7 @@ class StorageSystemColumns : public ext::shared_ptr_helper<StorageSystemColumns>
 public:
     std::string getName() const override { return "SystemColumns"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -7,6 +7,7 @@
 #include <ext/shared_ptr_helper.h>
 #include <Storages/IStorage.h>
 #include <Storages/System/StorageSystemPartsBase.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>

 namespace DB
 {
@@ -42,7 +43,7 @@ protected:
         }});
     }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & /* column_names */,
         const SelectQueryInfo & query_info,
         const Context & context,
@@ -74,8 +75,12 @@ protected:
             }
         }

-        return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(
-            block.cloneWithColumns(std::move(new_columns))));
+        UInt64 num_rows = new_columns.at(0)->size();
+        Chunk chunk(std::move(new_columns), num_rows);
+
+        Pipes pipes;
+        pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(std::move(block), std::move(chunk)));
+        return pipes;
     }
 };

@@ -1,5 +1,6 @@
 #include <DataStreams/OneBlockInputStream.h>
 #include <Storages/System/StorageSystemDisks.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>

 namespace DB
 {
@@ -22,7 +23,7 @@ StorageSystemDisks::StorageSystemDisks(const std::string & name_)
     }));
 }

-BlockInputStreams StorageSystemDisks::read(
+Pipes StorageSystemDisks::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & /*query_info*/,
     const Context & context,
@@ -49,15 +50,20 @@ BlockInputStreams StorageSystemDisks::read(
         col_keep->insert(disk_ptr->getKeepingFreeSpace());
     }

-    Block res = getSampleBlock().cloneEmpty();
-    size_t col_num = 0;
-    res.getByPosition(col_num++).column = std::move(col_name);
-    res.getByPosition(col_num++).column = std::move(col_path);
-    res.getByPosition(col_num++).column = std::move(col_free);
-    res.getByPosition(col_num++).column = std::move(col_total);
-    res.getByPosition(col_num++).column = std::move(col_keep);
+    Columns res_columns;
+    res_columns.emplace_back(std::move(col_name));
+    res_columns.emplace_back(std::move(col_path));
+    res_columns.emplace_back(std::move(col_free));
+    res_columns.emplace_back(std::move(col_total));
+    res_columns.emplace_back(std::move(col_keep));

-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(res));
+    UInt64 num_rows = res_columns.at(0)->size();
+    Chunk chunk(std::move(res_columns), num_rows);
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(getSampleBlock(), std::move(chunk)));
+
+    return pipes;
 }

 }

@@ -20,7 +20,7 @@ class StorageSystemDisks : public ext::shared_ptr_helper<StorageSystemDisks>, pu
 public:
     std::string getName() const override { return "SystemDisks"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -2,8 +2,9 @@

 #include <Columns/ColumnsNumber.h>
 #include <DataTypes/DataTypesNumber.h>
-#include <DataStreams/OneBlockInputStream.h>
 #include <Storages/System/StorageSystemOne.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Processors/Pipe.h>


 namespace DB
@@ -17,7 +18,7 @@ StorageSystemOne::StorageSystemOne(const std::string & name_)
 }


-BlockInputStreams StorageSystemOne::read(
+Pipes StorageSystemOne::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo &,
     const Context & /*context*/,
@@ -27,11 +28,18 @@ BlockInputStreams StorageSystemOne::read(
 {
     check(column_names);

-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(
-        Block{ColumnWithTypeAndName(
-            DataTypeUInt8().createColumnConst(1, 0u)->convertToFullColumnIfConst(),
-            std::make_shared<DataTypeUInt8>(),
-            "dummy")}));
+    Block header{ColumnWithTypeAndName(
+        DataTypeUInt8().createColumn(),
+        std::make_shared<DataTypeUInt8>(),
+        "dummy")};
+
+    auto column = DataTypeUInt8().createColumnConst(1, 0u)->convertToFullColumnIfConst();
+    Chunk chunk({ std::move(column) }, 1);
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(std::move(header), std::move(chunk)));
+
+    return pipes;
 }

@@ -21,7 +21,7 @@ class StorageSystemOne : public ext::shared_ptr_helper<StorageSystemOne>, public
 public:
     std::string getName() const override { return "SystemOne"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -12,6 +12,7 @@
 #include <Databases/IDatabase.h>
 #include <Parsers/queryToString.h>
 #include <Parsers/ASTIdentifier.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>


 namespace DB
@@ -210,7 +211,7 @@ StoragesInfo StoragesInfoStream::next()
     return {};
 }

-BlockInputStreams StorageSystemPartsBase::read(
+Pipes StorageSystemPartsBase::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & query_info,
     const Context & context,
@@ -233,11 +234,17 @@ BlockInputStreams StorageSystemPartsBase::read(
         processNextStorage(res_columns, info, has_state_column);
     }

-    Block block = getSampleBlock();
+    Block header = getSampleBlock();
     if (has_state_column)
-        block.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "_state"));
+        header.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "_state"));

-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(block.cloneWithColumns(std::move(res_columns))));
+    UInt64 num_rows = res_columns.at(0)->size();
+    Chunk chunk(std::move(res_columns), num_rows);
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(std::move(header), std::move(chunk)));
+
+    return pipes;
 }

 NameAndTypePair StorageSystemPartsBase::getColumn(const String & column_name) const

@@ -57,7 +57,7 @@ public:

     bool hasColumn(const String & column_name) const override;

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,
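The StorageSystemOne change above makes the header/data split explicit: the `Block` now only describes the structure (an empty `dummy` column), while the single row travels separately in a `Chunk`. A hedged sketch of that separation follows; every type here is a simplified stand-in for the real `Block`/`Chunk`/`SourceFromSingleChunk`.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// "Header" plays the role of the empty Block that only names the column.
struct Header
{
    std::string column_name;
    std::string column_type;
};

// "Chunk" carries the actual values, as in the rewritten StorageSystemOne.
struct Chunk
{
    std::vector<int> values;
    std::size_t num_rows = 0;
};

// A single-chunk source pairs a structure description with one prepared
// chunk of data, roughly what SourceFromSingleChunk does for system.one.
struct SingleChunkSource
{
    Header header;
    Chunk chunk;
};

int main()
{
    Header header{"dummy", "UInt8"};
    Chunk chunk{{0}, 1};  // one row with value 0, like SELECT * FROM system.one

    SingleChunkSource source{header, chunk};
    std::cout << source.header.column_name << ": " << source.chunk.values.front() << '\n';
}
```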
@@ -8,6 +8,7 @@
 #include <Storages/VirtualColumnUtils.h>
 #include <Common/typeid_cast.h>
 #include <Databases/IDatabase.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>


 namespace DB
@@ -52,7 +53,7 @@ StorageSystemReplicas::StorageSystemReplicas(const std::string & name_)
 }


-BlockInputStreams StorageSystemReplicas::read(
+Pipes StorageSystemReplicas::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & query_info,
     const Context & context,
@@ -122,7 +123,7 @@ BlockInputStreams StorageSystemReplicas::read(
         VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context);

         if (!filtered_block.rows())
-            return BlockInputStreams();
+            return Pipes();

         col_database = filtered_block.getByName("database").column;
         col_table = filtered_block.getByName("table").column;
@@ -169,19 +170,24 @@ BlockInputStreams StorageSystemReplicas::read(
         res_columns[col_num++]->insert(status.active_replicas);
     }

-    Block res = getSampleBlock().cloneEmpty();
-    size_t col_num = 0;
-    res.getByPosition(col_num++).column = col_database;
-    res.getByPosition(col_num++).column = col_table;
-    res.getByPosition(col_num++).column = col_engine;
-    size_t num_columns = res.columns();
-    while (col_num < num_columns)
-    {
-        res.getByPosition(col_num).column = std::move(res_columns[col_num]);
-        ++col_num;
-    }
+    Block header = getSampleBlock();

-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(res));
+    Columns fin_columns;
+    fin_columns.reserve(res_columns.size());
+
+    for (auto & col : res_columns)
+        fin_columns.emplace_back(std::move(col));
+
+    fin_columns[0] = std::move(col_database);
+    fin_columns[1] = std::move(col_table);
+    fin_columns[2] = std::move(col_engine);
+
+    UInt64 num_rows = fin_columns.at(0)->size();
+    Chunk chunk(std::move(fin_columns), num_rows);
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(getSampleBlock(), std::move(chunk)));
+    return pipes;
 }

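The column-reassembly step above moves all loop-filled result columns into a final `Columns` vector and then overwrites the first three positions with key columns that were computed up front. A small sketch of that move-then-overwrite pattern follows, with toy types standing in for the real column classes.

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy column type; in the real code these are ColumnPtr / IColumn objects.
using Column = std::vector<std::string>;

int main()
{
    // Columns accumulated row by row in the main loop; only positions 3+
    // carry real data, the first three are placeholders.
    std::vector<Column> res_columns(5, Column{"filled"});

    // Key columns computed up front, before the loop.
    Column col_database{"db"};
    Column col_table{"tbl"};
    Column col_engine{"ReplicatedMergeTree"};

    std::vector<Column> fin_columns;
    fin_columns.reserve(res_columns.size());
    for (auto & col : res_columns)
        fin_columns.emplace_back(std::move(col));

    // Overwrite the placeholder slots with the precomputed key columns.
    fin_columns[0] = std::move(col_database);
    fin_columns[1] = std::move(col_table);
    fin_columns[2] = std::move(col_engine);

    std::cout << fin_columns[2].front() << '\n';
}
```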
@@ -18,7 +18,7 @@ class StorageSystemReplicas : public ext::shared_ptr_helper<StorageSystemReplica
 public:
     std::string getName() const override { return "SystemReplicas"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -2,6 +2,7 @@
 #include <DataStreams/OneBlockInputStream.h>
 #include <Storages/System/StorageSystemStoragePolicies.h>
 #include <DataTypes/DataTypeArray.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>


 namespace DB
@@ -26,7 +27,7 @@ StorageSystemStoragePolicies::StorageSystemStoragePolicies(const std::string & n
     }));
 }

-BlockInputStreams StorageSystemStoragePolicies::read(
+Pipes StorageSystemStoragePolicies::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & /*query_info*/,
     const Context & context,
@@ -63,18 +64,21 @@ BlockInputStreams StorageSystemStoragePolicies::read(
         }
     }

-    Block res = getSampleBlock().cloneEmpty();
-    size_t col_num = 0;
-    res.getByPosition(col_num++).column = std::move(col_policy_name);
-    res.getByPosition(col_num++).column = std::move(col_volume_name);
-    res.getByPosition(col_num++).column = std::move(col_priority);
-    res.getByPosition(col_num++).column = std::move(col_disks);
-    res.getByPosition(col_num++).column = std::move(col_max_part_size);
-    res.getByPosition(col_num++).column = std::move(col_move_factor);
+    Columns res_columns;
+    res_columns.emplace_back(std::move(col_policy_name));
+    res_columns.emplace_back(std::move(col_volume_name));
+    res_columns.emplace_back(std::move(col_priority));
+    res_columns.emplace_back(std::move(col_disks));
+    res_columns.emplace_back(std::move(col_max_part_size));
+    res_columns.emplace_back(std::move(col_move_factor));

-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(res));
+    UInt64 num_rows = res_columns.at(0)->size();
+    Chunk chunk(std::move(res_columns), num_rows);
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(getSampleBlock(), std::move(chunk)));
+
+    return pipes;
 }

 }

@@ -20,7 +20,7 @@ class StorageSystemStoragePolicies : public ext::shared_ptr_helper<StorageSystem
 public:
     std::string getName() const override { return "SystemStoragePolicies"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -14,6 +14,8 @@
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeArray.h>
 #include <Disks/DiskSpaceMonitor.h>
+#include <Processors/Sources/SourceWithProgress.h>
+#include <Processors/Pipe.h>


 namespace DB
@@ -78,32 +80,30 @@ static bool needLockStructure(const DatabasePtr & database, const Block & header
     return false;
 }

-class TablesBlockInputStream : public IBlockInputStream
+class TablesBlockSource : public SourceWithProgress
 {
 public:
-    TablesBlockInputStream(
+    TablesBlockSource(
         std::vector<UInt8> columns_mask_,
-        Block header_,
+        Block header,
         UInt64 max_block_size_,
         ColumnPtr databases_,
         const Context & context_)
-        : columns_mask(std::move(columns_mask_))
-        , header(std::move(header_))
+        : SourceWithProgress(std::move(header))
+        , columns_mask(std::move(columns_mask_))
         , max_block_size(max_block_size_)
         , databases(std::move(databases_))
         , context(context_) {}

     String getName() const override { return "Tables"; }
-    Block getHeader() const override { return header; }

 protected:
-    Block readImpl() override
+    Chunk generate() override
     {
         if (done)
             return {};

-        Block res = header;
-        MutableColumns res_columns = header.cloneEmptyColumns();
+        MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns();

         size_t rows_count = 0;
         while (rows_count < max_block_size)
@@ -188,15 +188,15 @@ protected:
                 }
             }

-            res.setColumns(std::move(res_columns));
+            UInt64 num_rows = res_columns.at(0)->size();
             done = true;
-            return res;
+            return Chunk(std::move(res_columns), num_rows);
         }

         if (!tables_it || !tables_it->isValid())
             tables_it = database->getTablesWithDictionaryTablesIterator(context);

-        const bool need_lock_structure = needLockStructure(database, header);
+        const bool need_lock_structure = needLockStructure(database, getPort().getHeader());

         for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next())
         {
@@ -368,12 +368,11 @@ protected:
             }
         }

-        res.setColumns(std::move(res_columns));
-        return res;
+        UInt64 num_rows = res_columns.at(0)->size();
+        return Chunk(std::move(res_columns), num_rows);
     }
 private:
     std::vector<UInt8> columns_mask;
-    Block header;
     UInt64 max_block_size;
     ColumnPtr databases;
     size_t database_idx = 0;
@@ -385,7 +384,7 @@ private:
 };


-BlockInputStreams StorageSystemTables::read(
+Pipes StorageSystemTables::readWithProcessors(
     const Names & column_names,
     const SelectQueryInfo & query_info,
     const Context & context,
@@ -413,8 +412,12 @@ BlockInputStreams StorageSystemTables::read(
     }

     ColumnPtr filtered_databases_column = getFilteredDatabases(query_info.query, context);
-    return {std::make_shared<TablesBlockInputStream>(
-        std::move(columns_mask), std::move(res_block), max_block_size, std::move(filtered_databases_column), context)};
+
+    Pipes pipes;
+    pipes.emplace_back(std::make_shared<TablesBlockSource>(
+        std::move(columns_mask), std::move(res_block), max_block_size, std::move(filtered_databases_column), context));
+
+    return pipes;
 }

 }

@@ -18,7 +18,7 @@ class StorageSystemTables : public ext::shared_ptr_helper<StorageSystemTables>,
 public:
     std::string getName() const override { return "SystemTables"; }

-    BlockInputStreams read(
+    Pipes readWithProcessors(
         const Names & column_names,
         const SelectQueryInfo & query_info,
         const Context & context,

@@ -2,7 +2,8 @@
 <yandex>
     <profiles>
         <default>
-            <stream_poll_timeout_ms>30000</stream_poll_timeout_ms>
+            <!--stream_poll_timeout_ms>1</stream_poll_timeout_ms>
+            <stream_flush_interval_ms>100</stream_flush_interval_ms-->
         </default>
     </profiles>
@@ -12,6 +12,7 @@ import json
 import subprocess
 import kafka.errors
 from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer
+from kafka.admin import NewTopic
 from google.protobuf.internal.encoder import _VarintBytes

 """
@@ -70,7 +71,7 @@ def kafka_produce(topic, messages, timestamp=None):
     for message in messages:
         producer.send(topic=topic, value=message, timestamp_ms=timestamp)
     producer.flush()
-    print ("Produced {} messages for topic {}".format(len(messages), topic))
+    # print ("Produced {} messages for topic {}".format(len(messages), topic))


 def kafka_consume(topic):
@@ -132,7 +133,7 @@ def kafka_setup_teardown():
     wait_kafka_is_available()
     print("kafka is available - running test")
     yield  # run test
-    instance.query('DROP TABLE test.kafka')
+    instance.query('DROP TABLE IF EXISTS test.kafka')


 # Tests
@@ -727,6 +728,337 @@ def test_kafka_commit_on_block_write(kafka_cluster):
     assert result == 1, 'Messages from kafka get duplicated!'


+@pytest.mark.timeout(180)
+def test_kafka_virtual_columns2(kafka_cluster):
+
+    admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
+    topic_list = []
+    topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
+    topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
+
+    admin_client.create_topics(new_topics=topic_list, validate_only=False)
+
+    instance.query('''
+        CREATE TABLE test.kafka (value UInt64)
+            ENGINE = Kafka
+            SETTINGS kafka_broker_list = 'kafka1:19092',
+                     kafka_topic_list = 'virt2_0,virt2_1',
+                     kafka_group_name = 'virt2',
+                     kafka_format = 'JSONEachRow';
+
+        CREATE MATERIALIZED VIEW test.view Engine=Log AS
+        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
+        ''')
+
+    producer = KafkaProducer(bootstrap_servers="localhost:9092")
+
+    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801000)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802000)
+    producer.flush()
+    time.sleep(1)
+
+    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803000)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804000)
+    producer.flush()
+    time.sleep(1)
+
+    producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805000)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806000)
+    producer.flush()
+    time.sleep(1)
+
+    producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807000)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808000)
+    producer.flush()
+
+    time.sleep(10)
+
+    result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
+
+    expected = '''\
+1	k1	virt2_0	0	0	1577836801
+2	k2	virt2_0	0	1	1577836802
+3	k3	virt2_0	1	0	1577836803
+4	k4	virt2_0	1	1	1577836804
+5	k5	virt2_1	0	0	1577836805
+6	k6	virt2_1	0	1	1577836806
+7	k7	virt2_1	1	0	1577836807
+8	k8	virt2_1	1	1	1577836808
+'''
+
+    assert TSV(result) == TSV(expected)
+
+
+@pytest.mark.timeout(600)
+def test_kafka_flush_by_time(kafka_cluster):
+    instance.query('''
+        DROP TABLE IF EXISTS test.view;
+        DROP TABLE IF EXISTS test.consumer;
+
+        CREATE TABLE test.kafka (key UInt64, value UInt64)
+            ENGINE = Kafka
+            SETTINGS kafka_broker_list = 'kafka1:19092',
+                     kafka_topic_list = 'flush_by_time',
+                     kafka_group_name = 'flush_by_time',
+                     kafka_format = 'JSONEachRow',
+                     kafka_max_block_size = 100,
+                     kafka_row_delimiter = '\\n';
+
+        CREATE TABLE test.view (key UInt64, value UInt64)
+            ENGINE = MergeTree()
+            ORDER BY key;
+
+        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
+            SELECT * FROM test.kafka;
+    ''')
+
+    cancel = threading.Event()
+
+    def produce():
+        while not cancel.is_set():
+            messages = []
+            messages.append(json.dumps({'key': 0, 'value': 0}))
+            kafka_produce('flush_by_time', messages)
+            time.sleep(1)
+
+    kafka_thread = threading.Thread(target=produce)
+    kafka_thread.start()
+
+    time.sleep(18)
+
+    result = instance.query('SELECT count() FROM test.view')
+
+    print(result)
+    cancel.set()
+    kafka_thread.join()
+
+    # kafka_cluster.open_bash_shell('instance')
+
+    instance.query('''
+        DROP TABLE test.consumer;
+        DROP TABLE test.view;
+    ''')
+
+    # 40 = 2 flushes (7.5 sec), 15 polls each, about 1 mgs per 1.5 sec
+    assert int(result) > 12, 'Messages from kafka should be flushed at least every stream_flush_interval_ms!'
+
+
+@pytest.mark.timeout(600)
+def test_kafka_flush_by_block_size(kafka_cluster):
+    instance.query('''
+        DROP TABLE IF EXISTS test.view;
+        DROP TABLE IF EXISTS test.consumer;
+
+        CREATE TABLE test.kafka (key UInt64, value UInt64)
+            ENGINE = Kafka
+            SETTINGS kafka_broker_list = 'kafka1:19092',
+                     kafka_topic_list = 'flush_by_block_size',
+                     kafka_group_name = 'flush_by_block_size',
+                     kafka_format = 'JSONEachRow',
+                     kafka_max_block_size = 100,
+                     kafka_row_delimiter = '\\n';
+
+        SELECT * FROM test.kafka;
+
+        CREATE TABLE test.view (key UInt64, value UInt64)
+            ENGINE = MergeTree()
+            ORDER BY key;
+
+        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
+            SELECT * FROM test.kafka;
+    ''')
+
+    messages = []
+    for _ in range(101):
+        messages.append(json.dumps({'key': 0, 'value': 0}))
+    kafka_produce('flush_by_block_size', messages)
+
+    time.sleep(1)
+
+    result = instance.query('SELECT count() FROM test.view')
+    print(result)
+
+    # kafka_cluster.open_bash_shell('instance')
+
+    instance.query('''
+        DROP TABLE test.consumer;
+        DROP TABLE test.view;
+    ''')
+
+    # 100 = first poll should return 100 messages (and rows)
+    # not waiting for stream_flush_interval_ms
+    assert int(result) == 100, 'Messages from kafka should be flushed at least every stream_flush_interval_ms!'
+
+
+@pytest.mark.timeout(600)
+def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
+    admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
+
+    topic_list = []
+    topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
+    admin_client.create_topics(new_topics=topic_list, validate_only=False)
+
+    instance.query('''
+        DROP TABLE IF EXISTS test.view;
+        DROP TABLE IF EXISTS test.consumer;
+        CREATE TABLE test.kafka (key UInt64, value UInt64)
+            ENGINE = Kafka
+            SETTINGS kafka_broker_list = 'kafka1:19092',
+                     kafka_topic_list = 'topic_with_multiple_partitions2',
+                     kafka_group_name = 'topic_with_multiple_partitions2',
+                     kafka_format = 'JSONEachRow',
+                     kafka_max_block_size = 211;
+        CREATE TABLE test.view (key UInt64, value UInt64)
+            ENGINE = MergeTree()
+            ORDER BY key;
+        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
+            SELECT * FROM test.kafka;
+    ''')
+
+    messages = []
+    count = 0
+    for dummy_msg in range(1000):
+        rows = []
+        for dummy_row in range(random.randrange(3,10)):
+            count = count + 1
+            rows.append(json.dumps({'key': count, 'value': count}))
+        messages.append("\n".join(rows))
+    kafka_produce('topic_with_multiple_partitions2', messages)
+
+    time.sleep(30)
+
+    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
+    print(result)
+    assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count) )
+
+    instance.query('''
+        DROP TABLE test.consumer;
+        DROP TABLE test.view;
+    ''')
+
+
+@pytest.mark.timeout(1200)
+def test_kafka_rebalance(kafka_cluster):
+
+    NUMBER_OF_CONSURRENT_CONSUMERS=11
+
+    instance.query('''
+        DROP TABLE IF EXISTS test.destination;
+        CREATE TABLE test.destination (
+            key UInt64,
+            value UInt64,
+            _topic String,
+            _key String,
+            _offset UInt64,
+            _partition UInt64,
+            _timestamp Nullable(DateTime),
+            _consumed_by LowCardinality(String)
+        )
+        ENGINE = MergeTree()
+        ORDER BY key;
+    ''')
+
+    # kafka_cluster.open_bash_shell('instance')
+
+    #time.sleep(2)
+
+    admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
+    topic_list = []
+    topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
+    admin_client.create_topics(new_topics=topic_list, validate_only=False)
+
+    cancel = threading.Event()
+
+    msg_index = [0]
+    def produce():
+        while not cancel.is_set():
+            messages = []
+            for _ in range(59):
+                messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
+                msg_index[0] += 1
+            kafka_produce('topic_with_multiple_partitions', messages)
+
+    kafka_thread = threading.Thread(target=produce)
+    kafka_thread.start()
+
+    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
+        table_name = 'kafka_consumer{}'.format(consumer_index)
+        print("Setting up {}".format(table_name))
+
+        instance.query('''
+            DROP TABLE IF EXISTS test.{0};
+            DROP TABLE IF EXISTS test.{0}_mv;
+            CREATE TABLE test.{0} (key UInt64, value UInt64)
+                ENGINE = Kafka
+                SETTINGS kafka_broker_list = 'kafka1:19092',
+                         kafka_topic_list = 'topic_with_multiple_partitions',
+                         kafka_group_name = 'rebalance_test_group',
+                         kafka_format = 'JSONEachRow',
+                         kafka_max_block_size = 33;
+            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
+                SELECT
+                    key,
+                    value,
+                    _topic,
+                    _key,
+                    _offset,
+                    _partition,
+                    _timestamp,
+                    '{0}' as _consumed_by
+                FROM test.{0};
+        '''.format(table_name))
+        # kafka_cluster.open_bash_shell('instance')
+        while int(instance.query("SELECT count() FROM test.destination WHERE _consumed_by='{}'".format(table_name))) == 0:
+            print("Waiting for test.kafka_consumer{} to start consume".format(consumer_index))
+            time.sleep(1)
+
+    cancel.set()
+
+    # I leave last one working by intent (to finish consuming after all rebalances)
+    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS-1):
+        print("Dropping test.kafka_consumer{}".format(consumer_index))
+        instance.query('DROP TABLE IF EXISTS test.kafka_consumer{}'.format(consumer_index))
+        while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka_consumer{}'".format(consumer_index))) == 1:
+            time.sleep(1)
+
+    # print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
+    # kafka_cluster.open_bash_shell('instance')
+
+    while 1:
+        messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
+        if messages_consumed >= msg_index[0]:
+            break
+        time.sleep(1)
+        print("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed,msg_index[0]))
+
+    print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
+
+    # SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
+    # select number + 1 as key from numbers(4141) left join test.destination using (key) where test.destination.key = 0;
+    # SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
+    # select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
+    # select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
+    # SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
+
+    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
+
+    for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
+        print("kafka_consumer{}".format(consumer_index))
+        table_name = 'kafka_consumer{}'.format(consumer_index)
+        instance.query('''
+            DROP TABLE IF EXISTS test.{0};
+            DROP TABLE IF EXISTS test.{0}_mv;
+        '''.format(table_name))
+
+    instance.query('''
+        DROP TABLE IF EXISTS test.destination;
+    ''')
+
+    kafka_thread.join()
+
+    assert result == 1, 'Messages from kafka get duplicated!'
+
+
 if __name__ == '__main__':
     cluster.start()
     raw_input("Cluster created, press any key to destroy...")
@ -19,3 +19,5 @@
|
|||||||
(['1970-01-02'],[1])
|
(['1970-01-02'],[1])
|
||||||
(['01234567-89ab-cdef-0123-456789abcdef'],[1])
|
(['01234567-89ab-cdef-0123-456789abcdef'],[1])
|
||||||
([1.01],[1])
|
([1.01],[1])
|
||||||
|
(['a','b'],[1,2])
|
||||||
|
(['a','ab','abc'],[3,2,1])
|
||||||
|
@ -35,5 +35,5 @@ select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Date') ] as val, [1] as cnt );
|
|||||||
select sumMap(val, cnt) from ( SELECT [ CAST('01234567-89ab-cdef-0123-456789abcdef', 'UUID') ] as val, [1] as cnt );
|
select sumMap(val, cnt) from ( SELECT [ CAST('01234567-89ab-cdef-0123-456789abcdef', 'UUID') ] as val, [1] as cnt );
|
||||||
select sumMap(val, cnt) from ( SELECT [ CAST(1.01, 'Decimal(10,2)') ] as val, [1] as cnt );
|
select sumMap(val, cnt) from ( SELECT [ CAST(1.01, 'Decimal(10,2)') ] as val, [1] as cnt );
|
||||||
|
|
||||||
select sumMap(val, cnt) from ( SELECT [ CAST('a', 'FixedString(1)') ] as val, [1] as cnt ); -- { serverError 43 }
|
select sumMap(val, cnt) from ( SELECT [ CAST('a', 'FixedString(1)'), CAST('b', 'FixedString(1)' ) ] as val, [1, 2] as cnt );
|
||||||
select sumMap(val, cnt) from ( SELECT [ CAST('a', 'String') ] as val, [1] as cnt ); -- { serverError 43 }
|
select sumMap(val, cnt) from ( SELECT [ CAST('abc', 'String'), CAST('ab', 'String'), CAST('a', 'String') ] as val, [1, 2, 3] as cnt );
|
||||||
|
@ -0,0 +1,2 @@
|
|||||||
|
1 name1
|
||||||
|
2 name2
|
@ -0,0 +1,6 @@
|
|||||||
|
DROP TABLE IF EXISTS json_square_brackets;
|
||||||
|
CREATE TABLE json_square_brackets (id UInt32, name String) ENGINE = Memory;
|
||||||
|
INSERT INTO json_square_brackets FORMAT JSONEachRow [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]
|
||||||
|
|
||||||
|
SELECT * FROM json_square_brackets ORDER BY id;
|
||||||
|
DROP TABLE IF EXISTS json_square_brackets;
|
@ -0,0 +1,2 @@
|
|||||||
|
0.6363636363636362
|
||||||
|
0.6363636363636362
|
18
dbms/tests/queries/0_stateless/01072_nullable_jit.sql
Normal file
18
dbms/tests/queries/0_stateless/01072_nullable_jit.sql
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
DROP TABLE IF EXISTS foo;
|
||||||
|
|
||||||
|
CREATE TABLE foo (
|
||||||
|
id UInt32,
|
||||||
|
a Float64,
|
||||||
|
b Float64,
|
||||||
|
c Float64,
|
||||||
|
d Float64
|
||||||
|
) Engine = MergeTree()
|
||||||
|
PARTITION BY id
|
||||||
|
ORDER BY id;
|
||||||
|
|
||||||
|
INSERT INTO foo VALUES (1, 0.5, 0.2, 0.3, 0.8);
|
||||||
|
|
||||||
|
SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo SETTINGS compile_expressions = 1, min_count_to_compile = 0;
|
||||||
|
SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo SETTINGS compile_expressions = 1, min_count_to_compile = 0;
|
||||||
|
|
||||||
|
DROP TABLE foo;
|
@ -1,6 +1,6 @@
|
|||||||
# Contributing to ClickHouse Documentation
|
# Contributing to ClickHouse Documentation
|
||||||
|
|
||||||
## Why You Need to Document ClickHouse
|
## Why Do You Need to Document ClickHouse
|
||||||
|
|
||||||
The main reason is that ClickHouse is an open source project, and if you don't write the docs, nobody does. "Incomplete or Confusing Documentation" is the top complaint about open source software by the results of a [Github Open Source Survey](http://opensourcesurvey.org/2017/) of 2017. Documentation is highly valued but often overlooked. One of the most important contributions someone can make to an open source repository is a documentation update.
|
The main reason is that ClickHouse is an open source project, and if you don't write the docs, nobody does. "Incomplete or Confusing Documentation" is the top complaint about open source software by the results of a [Github Open Source Survey](http://opensourcesurvey.org/2017/) of 2017. Documentation is highly valued but often overlooked. One of the most important contributions someone can make to an open source repository is a documentation update.
|
||||||
|
|
||||||
@ -38,7 +38,7 @@ Writing the docs is extremely useful for project's users and developers, and gro
|
|||||||
|
|
||||||
The documentation contains information about all the aspects of the ClickHouse lifecycle: developing, testing, installing, operating, and using. The base language of the documentation is English. The English version is the most actual. All other languages are supported as much as they can by contributors from different countries.
|
The documentation contains information about all the aspects of the ClickHouse lifecycle: developing, testing, installing, operating, and using. The base language of the documentation is English. The English version is the most actual. All other languages are supported as much as they can by contributors from different countries.
|
||||||
|
|
||||||
At the moment, [documentation](https://clickhouse.yandex/docs) exists in English, Russian, Chinese, Japanese, and Farsi. We store the documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs).
|
At the moment, [documentation](https://clickhouse.tech/docs) exists in English, Russian, Chinese, Japanese, and Farsi. We store the documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs).
|
||||||
|
|
||||||
Each language lays in the corresponding folder. Files that are not translated from English are the symbolic links to the English ones.
|
Each language lays in the corresponding folder. Files that are not translated from English are the symbolic links to the English ones.
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ You can contribute to the documentation in many ways, for example:
|
|||||||
|
|
||||||
- Open a required file in the ClickHouse repository and edit it from the GitHub web interface.
|
- Open a required file in the ClickHouse repository and edit it from the GitHub web interface.
|
||||||
|
|
||||||
You can do it on GitHub, or on the [ClickHouse Documentation](https://clickhouse.yandex/docs/en/) site. Each page of ClickHouse Documentation site contains an "Edit this page" (🖋) element in the upper right corner. Clicking this symbol, you get to the ClickHouse docs file opened for editing.
|
You can do it on GitHub, or on the [ClickHouse Documentation](https://clickhouse.tech/docs/en/) site. Each page of ClickHouse Documentation site contains an "Edit this page" (🖋) element in the upper right corner. Clicking this symbol, you get to the ClickHouse docs file opened for editing.
|
||||||
|
|
||||||
When you are saving a file, GitHub opens a pull-request for your contribution. Add the `documentation` label to this pull request for proper automatic checks applying. If you have no permissions for adding labels, the reviewer of your PR adds it.
|
When you are saving a file, GitHub opens a pull-request for your contribution. Add the `documentation` label to this pull request for proper automatic checks applying. If you have no permissions for adding labels, the reviewer of your PR adds it.
|
||||||
|
|
||||||
@ -161,7 +161,7 @@ When writing documentation, think about people who read it. Each audience has sp
 
 ClickHouse documentation can be divided into the following parts by audience:
 
-- Conceptual topics in [Introduction](https://clickhouse.yandex/docs/en/), tutorials and overviews, changelog.
+- Conceptual topics in [Introduction](https://clickhouse.tech/docs/en/), tutorials and overviews, changelog.
 
 These topics are aimed at the broadest audience. When editing text in them, use the most common terms, comfortable for readers with basic technical skills.
 
@ -66,4 +66,4 @@ Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception
 ```
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/array/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/array/) <!--hide-->
@ -3,4 +3,4 @@
 There is no separate type for boolean values. Use the UInt8 type, restricted to the values 0 or 1.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/boolean/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/boolean/) <!--hide-->
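
For illustration, a boolean flag can be modelled directly with UInt8 (a minimal sketch; the table and column names are made up):

```sql
CREATE TABLE flags (id UInt32, is_active UInt8) ENGINE = Memory;
INSERT INTO flags VALUES (1, 1), (2, 0);
SELECT id FROM flags WHERE is_active = 1;  -- returns 1
```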
@ -6,4 +6,4 @@ The minimum value is output as 0000-00-00.
 The date is stored without the time zone.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/date/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/date/) <!--hide-->
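
A quick sketch of the behaviour described above:

```sql
-- A Date stores the day only; no time-of-day or time zone is kept
SELECT toDate('2020-02-11') AS d, toTypeName(d);  -- 2020-02-11, Date
```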
@ -87,4 +87,4 @@ SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x
 - [Operators for working with dates and times](../query_language/operators.md#operators-datetime)
 - [The `Date` data type](date.md)
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/datetime/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/datetime/) <!--hide-->
@ -95,4 +95,4 @@ SELECT toDecimal32(1, 8) < 100
 DB::Exception: Can't compare.
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/decimal/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/decimal/) <!--hide-->
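
Comparisons between a Decimal and another numeric type can fail, as shown above; converting both operands to the same Decimal type avoids this (a minimal sketch):

```sql
SELECT toDecimal32(1, 8) < toDecimal32(2, 8);  -- returns 1
```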
@ -26,16 +26,16 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from;
 `IPv4` domain supports custom input as IPv4 strings:
 
 ```sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.yandex', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242');
 
 SELECT * FROM hits;
 ```
 
 ```text
 ┌─url────────────────────────────────┬───────────from─┐
-│ https://clickhouse.yandex/docs/en/ │ 116.106.34.242 │
+│ https://clickhouse.tech/docs/en/   │ 116.106.34.242 │
 │ https://wikipedia.org              │ 116.253.40.133 │
-│ https://clickhouse.yandex          │ 183.247.232.58 │
+│ https://clickhouse.tech            │ 183.247.232.58 │
 └────────────────────────────────────┴────────────────┘
 ```
 
@ -76,4 +76,4 @@ SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1;
 └──────────────────────────────────┴────────────┘
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/domains/ipv4) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/domains/ipv4) <!--hide-->
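
Because `IPv4` values are stored as 32-bit integers, range predicates stay cheap. A sketch against the `hits` table above, assuming the `toIPv4` conversion function:

```sql
SELECT url FROM hits
WHERE from BETWEEN toIPv4('116.0.0.0') AND toIPv4('117.0.0.0');
```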
@ -26,15 +26,15 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;
 `IPv6` domain supports custom input as IPv6 strings:
 
 ```sql
-INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.yandex', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1');
+INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1');
 
 SELECT * FROM hits;
 ```
 
 ```text
 ┌─url────────────────────────────────┬─from──────────────────────────┐
-│ https://clickhouse.yandex          │ 2001:44c8:129:2632:33:0:252:2 │
+│ https://clickhouse.tech            │ 2001:44c8:129:2632:33:0:252:2 │
-│ https://clickhouse.yandex/docs/en/ │ 2a02:e980:1e::1               │
+│ https://clickhouse.tech/docs/en/   │ 2a02:e980:1e::1               │
 │ https://wikipedia.org              │ 2a02:aa08:e000:3100::2        │
 └────────────────────────────────────┴───────────────────────────────┘
 ```
@ -76,4 +76,4 @@ SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1;
 └───────────────────────────────────────────┴─────────┘
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/domains/ipv6) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/domains/ipv6) <!--hide-->
@ -23,4 +23,4 @@ You can use domains anywhere corresponding base type can be used:
 * Can't implicitly convert string values into domain values when inserting data from another column or table.
 * Domains add no constraints on stored values.
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/domains/overview) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
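
For the first limitation, an explicit conversion is needed when the source column is a plain string (a sketch; `staging_hits` and its `ip_string` column are hypothetical):

```sql
INSERT INTO hits (url, from)
SELECT url, toIPv4(ip_string) FROM staging_hits;  -- explicit conversion, not implicit
```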
@ -117,4 +117,4 @@ The Enum type can be changed without cost using ALTER, if only the set of values
 Using ALTER, it is possible to change an Enum8 to an Enum16 or vice versa, just like changing an Int8 to Int16.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/enum/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/enum/) <!--hide-->
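
A minimal sketch of such a widening (table and value names are illustrative):

```sql
CREATE TABLE events (level Enum8('info' = 1, 'warn' = 2)) ENGINE = MergeTree() ORDER BY tuple();
-- Widening to Enum16 and extending the value set is a metadata-only change
ALTER TABLE events MODIFY COLUMN level Enum16('info' = 1, 'warn' = 2, 'fatal' = 3);
```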
@ -52,4 +52,4 @@ This behavior differs from MySQL behavior for the `CHAR` type (where strings are
 
 Note that the length of the `FixedString(N)` value is constant. The [length](../query_language/functions/array_functions.md#array_functions-length) function returns `N` even if the `FixedString(N)` value is filled only with null bytes, but the [empty](../query_language/functions/string_functions.md#string_functions-empty) function returns `1` in this case.
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/fixedstring/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/fixedstring/) <!--hide-->
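
To see both behaviours side by side:

```sql
-- length() reports N even for a value of null bytes; empty() still treats it as empty
SELECT length(toFixedString('', 3)) AS len, empty(toFixedString('', 3)) AS is_empty;  -- 3, 1
```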
@ -70,4 +70,4 @@ SELECT 0 / 0
 See the rules for `NaN` sorting in the section [ORDER BY clause](../query_language/select.md).
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/float/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/float/) <!--hide-->
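
Unlike standard SQL, the division above yields `NaN` instead of raising an error, which can be checked explicitly:

```sql
SELECT 0 / 0 AS x, isNaN(x);  -- nan, 1
```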
@ -5,4 +5,4 @@ ClickHouse can store various types of data in table cells.
 This section describes the supported data types and special considerations when using and/or implementing them, if any.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/) <!--hide-->
@ -18,4 +18,4 @@ Fixed-length integers, with or without a sign.
 - UInt64 - [0 : 18446744073709551615]
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/int_uint/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/int_uint/) <!--hide-->
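
The boundary values can be checked directly:

```sql
-- toTypeName shows which fixed-width type each cast produces
SELECT toUInt64(18446744073709551615) AS umax, toInt8(-128) AS imin, toTypeName(umax), toTypeName(imin);
```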
@ -61,4 +61,4 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP
 See [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) engine description.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/nested_data_structures/aggregatefunction/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) <!--hide-->
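
A self-contained variant of the `-State`/`-Merge` pair above, runnable without any table:

```sql
-- The inner query builds partial states; the outer query merges them into one result
SELECT uniqMerge(state) FROM (SELECT uniqState(number) AS state FROM numbers(1000) GROUP BY number % 10);  -- 1000
```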
@ -1,4 +1,4 @@
 # Nested Data Structures
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/nested_data_structures/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) <!--hide-->
@ -97,4 +97,4 @@ For a DESCRIBE query, the columns in a nested data structure are listed separate
 The ALTER query is very limited for elements in a nested data structure.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/nested_data_structures/nested/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/) <!--hide-->
@ -33,4 +33,4 @@ SELECT x + y FROM t_null
 └────────────┘
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/nullable/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/nullable/) <!--hide-->
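
A sketch of a `t_null` table consistent with the example above; `NULL` propagates through arithmetic on the `Nullable` column:

```sql
CREATE TABLE t_null (x Int8, y Nullable(Int8)) ENGINE = Memory;
INSERT INTO t_null VALUES (1, NULL), (2, 3);
SELECT x + y FROM t_null;  -- NULL, then 5
```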
@ -3,4 +3,4 @@
 Used for representing lambda expressions in higher-order functions.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/special_data_types/expression/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/) <!--hide-->
@ -3,4 +3,4 @@
 Special data type values can't be saved to a table or output in results, but are used as the intermediate result of running a query.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/special_data_types/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/) <!--hide-->
@ -17,4 +17,4 @@ SELECT toTypeName(array())
 ```
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/special_data_types/nothing/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) <!--hide-->
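
`Nothing` also appears as the inner type of a bare `NULL`:

```sql
SELECT toTypeName(NULL);  -- Nullable(Nothing)
```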
@ -3,4 +3,4 @@
 Used for the right half of an IN expression.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/special_data_types/set/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) <!--hide-->
@ -12,4 +12,4 @@ Similarly, certain functions for working with strings have separate variations t
 For example, the 'length' function calculates the string length in bytes, while the 'lengthUTF8' function calculates the string length in Unicode code points, assuming that the value is UTF-8 encoded.
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/string/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/string/) <!--hide-->
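
The difference is easy to see with a multi-byte string:

```sql
-- 'привет' is 6 Cyrillic code points, each 2 bytes in UTF-8
SELECT length('привет') AS bytes, lengthUTF8('привет') AS code_points;  -- 12, 6
```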
@ -42,4 +42,4 @@ SELECT tuple(1, NULL) AS x, toTypeName(x)
 ```
 
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/tuple/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide-->
@ -62,4 +62,4 @@ The UUID data type only supports functions which [String](string.md) data type a
 
 The UUID data type is not supported by arithmetic operations (for example, [abs](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) or aggregate functions, such as [sum](../query_language/agg_functions/reference.md#agg_function-sum) and [avg](../query_language/agg_functions/reference.md#agg_function-avg).
 
-[Original article](https://clickhouse.yandex/docs/en/data_types/uuid/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/data_types/uuid/) <!--hide-->
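
String-style operations on a UUID work; arithmetic does not:

```sql
SELECT generateUUIDv4() AS u, toString(u) AS s;  -- OK
-- SELECT generateUUIDv4() + 1;  -- would fail: arithmetic is not defined for UUID
```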
@ -10,4 +10,4 @@ You can also use the following database engines:
 
 - [Lazy](lazy.md)
 
-[Original article](https://clickhouse.yandex/docs/en/database_engines/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/database_engines/) <!--hide-->
@ -10,4 +10,4 @@ It's optimized for storing many small \*Log tables, for which there is a long ti
 CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/database_engines/lazy/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/) <!--hide-->
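
With a concrete value (300 seconds is purely illustrative), tables are unloaded from RAM after being idle for that long:

```sql
CREATE DATABASE testlazy ENGINE = Lazy(300);
```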
@ -119,4 +119,4 @@ SELECT * FROM mysql_db.mysql_table
 └────────┴───────┘
 ```
 
-[Original article](https://clickhouse.yandex/docs/en/database_engines/mysql/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/database_engines/mysql/) <!--hide-->
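
The `mysql_db` database queried above would have been attached along these lines (connection parameters are placeholders):

```sql
CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'my_password');
SELECT * FROM mysql_db.mysql_table;
```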
Some files were not shown because too many files have changed in this diff.