From fcb238715b30d1c353ef5a2e933e363c0a43c3da Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 12 Mar 2021 01:12:52 +0800 Subject: [PATCH 001/133] Support apple m1 --- base/common/wide_integer_impl.h | 4 + cmake/arch.cmake | 2 +- cmake/find/ldap.cmake | 3 +- contrib/NuRaft | 2 +- .../darwin_aarch64/include/lber_types.h | 63 + .../darwin_aarch64/include/ldap_config.h | 74 ++ .../darwin_aarch64/include/ldap_features.h | 61 + .../darwin_aarch64/include/portable.h | 1169 +++++++++++++++++ contrib/poco | 2 +- src/Common/StackTrace.cpp | 4 + 10 files changed, 1380 insertions(+), 4 deletions(-) create mode 100644 contrib/openldap-cmake/darwin_aarch64/include/lber_types.h create mode 100644 contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h create mode 100644 contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h create mode 100644 contrib/openldap-cmake/darwin_aarch64/include/portable.h diff --git a/base/common/wide_integer_impl.h b/base/common/wide_integer_impl.h index a34e757eaa5..170d1c981b0 100644 --- a/base/common/wide_integer_impl.h +++ b/base/common/wide_integer_impl.h @@ -271,9 +271,13 @@ struct integer::_impl /// As to_Integral does a static_cast to int64_t, it may result in UB. /// The necessary check here is that long double has enough significant (mantissa) bits to store the /// int64_t max value precisely. + + //TODO Be compatible with Apple aarch64 +#if not (defined(__APPLE__) && defined(__aarch64__)) static_assert(LDBL_MANT_DIG >= 64, "On your system long double has less than 64 precision bits," "which may result in UB when initializing double from int64_t"); +#endif if ((rhs > 0 && rhs < max_int) || (rhs < 0 && rhs > min_int)) { diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 9604ef62b31..60e0346dbbf 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -1,7 +1,7 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") set (ARCH_AMD64 1) endif () -if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)") set (ARCH_AARCH64 1) endif () if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm") diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake index 369c1e42e8d..7ead9be7606 100644 --- a/cmake/find/ldap.cmake +++ b/cmake/find/ldap.cmake @@ -63,7 +63,8 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY) ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR - ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" ) ) set (_ldap_supported_platform TRUE) endif () diff --git a/contrib/NuRaft b/contrib/NuRaft index ff9049bcc8e..9a0d78de4b9 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit ff9049bcc8ea6a02276ccdc8629d764e9e5de853 +Subproject commit 9a0d78de4b90546368d954b6434f0e9a823e8d80 diff --git a/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. 
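For context on the `LDBL_MANT_DIG` guard added to `wide_integer_impl.h` above: on Apple's AArch64 ABI, `long double` is the same 64-bit IEEE-754 format as `double`, so it carries only 53 mantissa bits and the original `static_assert` cannot hold there. A minimal standalone sketch (not part of the patch) of the precision loss that assertion protects against:

```cpp
#include <cfloat>
#include <cstdint>
#include <cstdio>

int main()
{
    // 64 on x86_64 (80-bit extended long double); 53 on Apple AArch64,
    // where long double is just an alias for double.
    std::printf("LDBL_MANT_DIG = %d\n", LDBL_MANT_DIG);

    // With fewer than 64 mantissa bits, adjacent int64_t values near the
    // top of the range collapse into one long double value, which is the
    // loss (and the UB risk on conversion back) the assertion guards.
    const long double a = static_cast<long double>(INT64_MAX);
    const long double b = static_cast<long double>(INT64_MAX - 1);
    std::printf("INT64_MAX and INT64_MAX - 1 stay distinct: %s\n",
                a != b ? "yes" : "no");
}
```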
Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do no like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\." 
/* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/portable.h b/contrib/openldap-cmake/darwin_aarch64/include/portable.h new file mode 100644 index 00000000000..fdf4e89017e --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . 
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +/* #undef GETHOSTBYADDR_R_NARGS */ + +/* set to the number of arguments gethostbyname_r() expects */ +/* #undef GETHOSTBYNAME_R_NARGS */ + +/* Define to 1 if `TIOCGWINSZ' requires . */ +/* #undef GWINSZ_IN_SYS_IOCTL */ + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_BITS_TYPES_H */ + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +/* #undef HAVE_EPOLL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +/* #undef HAVE_GETHOSTBYADDR_R */ + +/* Define to 1 if you have the `gethostbyname_r' function. */ +/* #undef HAVE_GETHOSTBYNAME_R */ + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +/* #undef HAVE_GETSPNAM */ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +#define HAVE_KQUEUE 1 + +/* Define to 1 if you have the `gen' library (-lgen). 
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MALLOC_H */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +/* #undef HAVE_MEMRCHR */ + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +/* #undef HAVE_PTHREAD_YIELD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_CONTROL */ + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EPOLL_H */ + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILIO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UCRED_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/poco b/contrib/poco index c55b91f394e..fbaaba4a02e 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit c55b91f394efa9c238c33957682501681ef9b716 +Subproject commit fbaaba4a02e29987b8c584747a496c79528f125f diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index c4cf7f11e68..a539b78dab0 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -184,6 +184,10 @@ static void * getCallerAddress(const ucontext_t & context) # else return reinterpret_cast(context.uc_mcontext.gregs[REG_RIP]); # endif + +#elif defined(__APPLE__) && defined(__aarch64__) + return 
reinterpret_cast(context.uc_mcontext->__ss.__pc); + #elif defined(__aarch64__) return reinterpret_cast(context.uc_mcontext.pc); #else From ce4c81c5ff08c689463f7af686de60f55e0dd0b8 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 12 Mar 2021 01:40:08 +0800 Subject: [PATCH 002/133] Update --- contrib/boost-cmake/CMakeLists.txt | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index b9298f59f2b..2c1488ed2bc 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -176,6 +176,15 @@ if (NOT EXTERNAL_BOOST_FOUND) ${LIBRARY_DIR}/libs/context/src/execution_context.cpp ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp ) + elseif(ARCH_ARM AND OS_DARWIN) + set (SRCS_CONTEXT + ${LIBRARY_DIR}/libs/context/src/asm/jump_darwin_arm64_aapcs_elf_gas.S + ${LIBRARY_DIR}/libs/context/src/asm/make_darwin_arm64_aapcs_elf_gas.S + ${LIBRARY_DIR}/libs/context/src/asm/ontop_darwin_arm64_aapcs_elf_gas.S + ${LIBRARY_DIR}/libs/context/src/dummy.cpp + ${LIBRARY_DIR}/libs/context/src/execution_context.cpp + ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + ) elseif (ARCH_ARM) set (SRCS_CONTEXT ${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S From 99ce2de1071f59a767b12fad91af0466d6a12be2 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 12 Mar 2021 13:24:29 +0800 Subject: [PATCH 003/133] Update --- .gitmodules | 2 +- contrib/boost | 2 +- contrib/boost-cmake/CMakeLists.txt | 9 --------- contrib/poco-cmake/Foundation/CMakeLists.txt | 7 +++++++ 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/.gitmodules b/.gitmodules index 7a2c5600e65..6982caf5249 100644 --- a/.gitmodules +++ b/.gitmodules @@ -47,7 +47,7 @@ branch = v3.13.0.1 [submodule "contrib/boost"] path = contrib/boost - url = https://github.com/ClickHouse-Extras/boost.git + url = https://github.com/changvvb/boost.git [submodule "contrib/base64"] path = contrib/base64 url = https://github.com/ClickHouse-Extras/Turbo-Base64.git diff --git a/contrib/boost b/contrib/boost index ee24fa55bc4..670ad0fa167 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit ee24fa55bc46e4d2ce7d0d052cc5a0d9b1be8c36 +Subproject commit 670ad0fa1675ad37957f35bdc0d8e6d06e8bddf4 diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index 2c1488ed2bc..b9298f59f2b 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -176,15 +176,6 @@ if (NOT EXTERNAL_BOOST_FOUND) ${LIBRARY_DIR}/libs/context/src/execution_context.cpp ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp ) - elseif(ARCH_ARM AND OS_DARWIN) - set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/asm/jump_darwin_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_darwin_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_darwin_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp - ) elseif (ARCH_ARM) set (SRCS_CONTEXT ${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S diff --git a/contrib/poco-cmake/Foundation/CMakeLists.txt b/contrib/poco-cmake/Foundation/CMakeLists.txt index f4647461ec0..6476845b4e3 100644 --- a/contrib/poco-cmake/Foundation/CMakeLists.txt +++ b/contrib/poco-cmake/Foundation/CMakeLists.txt @@ -233,3 +233,10 @@ else () message (STATUS "Using Poco::Foundation: ${LIBRARY_POCO_FOUNDATION} 
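The `getCallerAddress` change above comes down to where each platform stores the saved program counter: Darwin's `ucontext_t` holds a pointer member `uc_mcontext` whose AArch64 thread state exposes `__ss.__pc`, while Linux AArch64 has a plain `uc_mcontext.pc` field and x86_64 Linux uses `gregs[REG_RIP]`. Below is a minimal sketch exercising the same dispatch from a signal handler; this is illustration only (calling `printf` from a signal handler is not async-signal-safe), and it assumes the headers shown expose `ucontext_t` as they do on the named platforms:

```cpp
#include <csignal>
#include <cstdio>
#include <sys/ucontext.h>

static void handler(int, siginfo_t *, void * ctx)
{
    const ucontext_t & context = *static_cast<ucontext_t *>(ctx);
#if defined(__APPLE__) && defined(__aarch64__)
    void * pc = reinterpret_cast<void *>(context.uc_mcontext->__ss.__pc); // Darwin: uc_mcontext is a pointer
#elif defined(__linux__) && defined(__x86_64__)
    void * pc = reinterpret_cast<void *>(context.uc_mcontext.gregs[REG_RIP]);
#elif defined(__aarch64__)
    void * pc = reinterpret_cast<void *>(context.uc_mcontext.pc);
#else
    void * pc = nullptr;
#endif
    std::printf("interrupted at pc = %p\n", pc);
}

int main()
{
    struct sigaction sa = {};
    sa.sa_sigaction = handler;        // extended handler receives the ucontext
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGUSR1, &sa, nullptr);
    raise(SIGUSR1);                   // handler prints the pc at the raise() site
}
```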
${INCLUDE_POCO_FOUNDATION}") endif () + +if(OS_DARWIN AND ARCH_AARCH64) + target_compile_definitions (_poco_foundation + PRIVATE + POCO_NO_STAT64 + ) +endif() From 18191377565ae08a4a444d0d5c9ba61b3a4fa80a Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 12 Mar 2021 13:35:39 +0800 Subject: [PATCH 004/133] revert submodule commit --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index 9a0d78de4b9..ff9049bcc8e 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 9a0d78de4b90546368d954b6434f0e9a823e8d80 +Subproject commit ff9049bcc8ea6a02276ccdc8629d764e9e5de853 From 1c973c164b1a32ce4c244dba9b0be3a23d063e30 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 12 Mar 2021 13:51:46 +0800 Subject: [PATCH 005/133] Update submodule url --- .gitmodules | 2 +- contrib/boost | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 6982caf5249..7a2c5600e65 100644 --- a/.gitmodules +++ b/.gitmodules @@ -47,7 +47,7 @@ branch = v3.13.0.1 [submodule "contrib/boost"] path = contrib/boost - url = https://github.com/changvvb/boost.git + url = https://github.com/ClickHouse-Extras/boost.git [submodule "contrib/base64"] path = contrib/base64 url = https://github.com/ClickHouse-Extras/Turbo-Base64.git diff --git a/contrib/boost b/contrib/boost index 670ad0fa167..a8d43d3142c 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit 670ad0fa1675ad37957f35bdc0d8e6d06e8bddf4 +Subproject commit a8d43d3142cc6b26fc55bec33f7f6edb1156ab7a From abe71ebbaa2ac25556a78b5a56cb7d3124ba3eab Mon Sep 17 00:00:00 2001 From: changvvb Date: Mon, 15 Mar 2021 19:03:24 +0800 Subject: [PATCH 006/133] try to fix rocksdb but not success --- .gitmodules | 2 +- contrib/librdkafka-cmake/config.h.in | 2 +- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitmodules b/.gitmodules index 7a2c5600e65..fecc0322085 100644 --- a/.gitmodules +++ b/.gitmodules @@ -195,7 +195,7 @@ url = https://github.com/danlark1/miniselect [submodule "contrib/rocksdb"] path = contrib/rocksdb - url = https://github.com/ClickHouse-Extras/rocksdb.git + url = https://github.com/changvvb/rocksdb.git [submodule "contrib/xz"] path = contrib/xz url = https://github.com/xz-mirror/xz diff --git a/contrib/librdkafka-cmake/config.h.in b/contrib/librdkafka-cmake/config.h.in index 80b6ea61b6e..63509e0679e 100644 --- a/contrib/librdkafka-cmake/config.h.in +++ b/contrib/librdkafka-cmake/config.h.in @@ -66,7 +66,7 @@ #cmakedefine WITH_SASL_OAUTHBEARER 1 #cmakedefine WITH_SASL_CYRUS 1 // crc32chw -#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) +#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__)) #define WITH_CRC32C_HW 1 #endif // regex diff --git a/contrib/rocksdb b/contrib/rocksdb index 54a0decabbc..160932a81bc 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 54a0decabbcf4c0bb5cf7befa9c597f28289bff5 +Subproject commit 160932a81bc94856fb48e98889a21fd753a21cb6 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 77a30776a4a..117015ef5c2 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -142,14 +142,14 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") endif(HAS_ALTIVEC) endif(CMAKE_SYSTEM_PROCESSOR MATCHES 
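The `config.h.in` change above keeps librdkafka's `WITH_CRC32C_HW` off for Apple AArch64, where the build was not passing the `-march=armv8-a+crc+crypto` flags that the rocksdb CMake probe below enables. For illustration, a hedged sketch of the usual `__ARM_FEATURE_CRC32` gating pattern (this is not librdkafka's actual implementation), with a portable bitwise fallback that produces identical results:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#if defined(__ARM_FEATURE_CRC32)
#    include <arm_acle.h> // __crc32cb and friends; enabled by -march=...+crc
#endif

// CRC-32C (Castagnoli, reflected). The hardware path and the bitwise
// fallback share the same ~crc pre/post conditioning, so they agree.
static uint32_t crc32c(uint32_t crc, const uint8_t * data, size_t len)
{
    crc = ~crc;
    for (size_t i = 0; i < len; ++i)
    {
#if defined(__ARM_FEATURE_CRC32)
        crc = __crc32cb(crc, data[i]);
#else
        crc ^= data[i];
        for (int k = 0; k < 8; ++k)
            crc = (crc >> 1) ^ (0x82F63B78U & (0U - (crc & 1U)));
#endif
    }
    return ~crc;
}

int main()
{
    const uint8_t msg[] = {'1', '2', '3', '4', '5', '6', '7', '8', '9'};
    // Well-known CRC-32C check value for "123456789": e3069283.
    std::printf("crc32c = %08x\n", crc32c(0, msg, sizeof(msg)));
}
```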
"^(powerpc|ppc)64") -if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC) if(HAS_ARMV8_CRC) message(STATUS " HAS_ARMV8_CRC yes") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif(HAS_ARMV8_CRC) -endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") include(CheckCXXSourceCompiles) From 66ff11bed9dd02bb66afc0a2318920c4014afee1 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 13:50:11 +0800 Subject: [PATCH 007/133] Add cmake/darwin/toolchain-aarch64.cmake --- cmake/darwin/toolchain-aarch64.cmake | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 cmake/darwin/toolchain-aarch64.cmake diff --git a/cmake/darwin/toolchain-aarch64.cmake b/cmake/darwin/toolchain-aarch64.cmake new file mode 100644 index 00000000000..81398111495 --- /dev/null +++ b/cmake/darwin/toolchain-aarch64.cmake @@ -0,0 +1,14 @@ +set (CMAKE_SYSTEM_NAME "Darwin") +set (CMAKE_SYSTEM_PROCESSOR "aarch64") +set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) From 7d716fe56c33cdb1cf1196b4fdc759b5ce51f633 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 14:04:57 +0800 Subject: [PATCH 008/133] Update --- contrib/NuRaft | 2 +- contrib/boringssl | 2 +- contrib/cassandra | 2 +- contrib/googletest | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index ff9049bcc8e..3d3683e7775 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit ff9049bcc8ea6a02276ccdc8629d764e9e5de853 +Subproject commit 3d3683e77753cfe015a05fae95ddf418e19f59e1 diff --git a/contrib/boringssl b/contrib/boringssl index 8b2bf912ba0..fd9ce1a0406 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit 8b2bf912ba04823cfe9e7e8f5bb60cb7f6252449 +Subproject commit fd9ce1a0406f571507068b9555d0b545b8a18332 diff --git a/contrib/cassandra b/contrib/cassandra index b446d7eb68e..c097fb5c7e6 160000 --- a/contrib/cassandra +++ b/contrib/cassandra @@ -1 +1 @@ -Subproject commit b446d7eb68e6962f431e2b3771313bfe9a2bbd93 +Subproject commit c097fb5c7e63cc430016d9a8b240d8e63fbefa52 diff --git a/contrib/googletest b/contrib/googletest index 356f2d264a4..e7e591764ba 160000 --- a/contrib/googletest +++ b/contrib/googletest @@ -1 +1 @@ -Subproject commit 356f2d264a485db2fcc50ec1c672e0d37b6cb39b +Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16 From 8efd6c5e20c93fb44059b6950a171eb10ae4a055 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 14:05:33 +0800 Subject: [PATCH 009/133] Update --- contrib/NuRaft | 2 +- contrib/boringssl | 2 +- contrib/cassandra | 2 +- contrib/googletest | 2 
+- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index 3d3683e7775..ff9049bcc8e 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 3d3683e77753cfe015a05fae95ddf418e19f59e1 +Subproject commit ff9049bcc8ea6a02276ccdc8629d764e9e5de853 diff --git a/contrib/boringssl b/contrib/boringssl index fd9ce1a0406..8b2bf912ba0 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit fd9ce1a0406f571507068b9555d0b545b8a18332 +Subproject commit 8b2bf912ba04823cfe9e7e8f5bb60cb7f6252449 diff --git a/contrib/cassandra b/contrib/cassandra index c097fb5c7e6..b446d7eb68e 160000 --- a/contrib/cassandra +++ b/contrib/cassandra @@ -1 +1 @@ -Subproject commit c097fb5c7e63cc430016d9a8b240d8e63fbefa52 +Subproject commit b446d7eb68e6962f431e2b3771313bfe9a2bbd93 diff --git a/contrib/googletest b/contrib/googletest index e7e591764ba..356f2d264a4 160000 --- a/contrib/googletest +++ b/contrib/googletest @@ -1 +1 @@ -Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16 +Subproject commit 356f2d264a485db2fcc50ec1c672e0d37b6cb39b From d6de2005c882b16a50c200d1da5b040ed1dc3e6b Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 14:34:52 +0800 Subject: [PATCH 010/133] Update --- contrib/NuRaft | 2 +- contrib/boringssl | 2 +- contrib/cassandra | 2 +- contrib/googletest | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index ff9049bcc8e..3d3683e7775 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit ff9049bcc8ea6a02276ccdc8629d764e9e5de853 +Subproject commit 3d3683e77753cfe015a05fae95ddf418e19f59e1 diff --git a/contrib/boringssl b/contrib/boringssl index 8b2bf912ba0..fd9ce1a0406 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit 8b2bf912ba04823cfe9e7e8f5bb60cb7f6252449 +Subproject commit fd9ce1a0406f571507068b9555d0b545b8a18332 diff --git a/contrib/cassandra b/contrib/cassandra index b446d7eb68e..c097fb5c7e6 160000 --- a/contrib/cassandra +++ b/contrib/cassandra @@ -1 +1 @@ -Subproject commit b446d7eb68e6962f431e2b3771313bfe9a2bbd93 +Subproject commit c097fb5c7e63cc430016d9a8b240d8e63fbefa52 diff --git a/contrib/googletest b/contrib/googletest index 356f2d264a4..e7e591764ba 160000 --- a/contrib/googletest +++ b/contrib/googletest @@ -1 +1 @@ -Subproject commit 356f2d264a485db2fcc50ec1c672e0d37b6cb39b +Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16 From a8a38ae9e980e1ffffca921ddd72db1af32f282a Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 14:37:30 +0800 Subject: [PATCH 011/133] Update --- .gitmodules | 2 +- contrib/mariadb-connector-c | 2 +- contrib/poco | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index fecc0322085..7a2c5600e65 100644 --- a/.gitmodules +++ b/.gitmodules @@ -195,7 +195,7 @@ url = https://github.com/danlark1/miniselect [submodule "contrib/rocksdb"] path = contrib/rocksdb - url = https://github.com/changvvb/rocksdb.git + url = https://github.com/ClickHouse-Extras/rocksdb.git [submodule "contrib/xz"] path = contrib/xz url = https://github.com/xz-mirror/xz diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c index 21f451d4d31..f4476ee7311 160000 --- a/contrib/mariadb-connector-c +++ b/contrib/mariadb-connector-c @@ -1 +1 @@ -Subproject commit 21f451d4d3157ffed31ec60a8b76c407190e66bd +Subproject commit f4476ee7311b35b593750f6ae2cbdb62a4006374 diff --git a/contrib/poco b/contrib/poco index 
fbaaba4a02e..83beecccb09 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit fbaaba4a02e29987b8c584747a496c79528f125f +Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138 From c446593ca7fb9bd3a8fc9a6066f74b9211a79870 Mon Sep 17 00:00:00 2001 From: changvvb Date: Fri, 19 Mar 2021 15:28:30 +0800 Subject: [PATCH 012/133] Update .gitmodules --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 7a2c5600e65..fecc0322085 100644 --- a/.gitmodules +++ b/.gitmodules @@ -195,7 +195,7 @@ url = https://github.com/danlark1/miniselect [submodule "contrib/rocksdb"] path = contrib/rocksdb - url = https://github.com/ClickHouse-Extras/rocksdb.git + url = https://github.com/changvvb/rocksdb.git [submodule "contrib/xz"] path = contrib/xz url = https://github.com/xz-mirror/xz From c1c5db1944c188648dd925805245a271422b645f Mon Sep 17 00:00:00 2001 From: changvvb Date: Tue, 30 Mar 2021 00:07:34 +0800 Subject: [PATCH 013/133] Update jemalloc_internal_defs & fix unit test --- .../jemalloc/internal/jemalloc_internal_defs.h.in | 13 ++++--------- src/Columns/tests/gtest_weak_hash_32.cpp | 1 - 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in index c7c884d0eaa..5c0407db24a 100644 --- a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -42,7 +42,7 @@ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ -#define LG_VADDR 48 +#define LG_VADDR 64 /* Defined if C11 atomics are available. */ #define JEMALLOC_C11_ATOMICS 1 @@ -101,11 +101,6 @@ */ #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 -/* - * Defined if clock_gettime(CLOCK_REALTIME, ...) is available. - */ -#define JEMALLOC_HAVE_CLOCK_REALTIME 1 - /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -181,14 +176,14 @@ /* #undef LG_QUANTUM */ /* One page is 2^LG_PAGE bytes. */ -#define LG_PAGE 16 +#define LG_PAGE 14 /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ -#define LG_HUGEPAGE 29 +#define LG_HUGEPAGE 21 /* * If defined, adjacent virtual memory mappings with identical attributes @@ -356,7 +351,7 @@ /* #undef JEMALLOC_EXPORT */ /* config.malloc_conf options string. */ -#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@" +#define JEMALLOC_CONFIG_MALLOC_CONF "" /* If defined, jemalloc takes the malloc/free/etc. symbol names. 
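On the `jemalloc_internal_defs.h.in` edits just above: these constants have to match the target hardware, and Apple Silicon differs from the template this Darwin config was derived from. `LG_PAGE` drops from 16 to 14 because M1 macOS uses 16 KiB (2^14) pages rather than 64 KiB ones, `LG_HUGEPAGE` drops from 29 to 21 (2 MiB), and `LG_VADDR` widens to the full 64 bits. The page size is easy to verify on the target machine:

```cpp
#include <cstdio>
#include <unistd.h>

int main()
{
    // Prints 16384 bytes (LG_PAGE = 14) on Apple Silicon macOS;
    // typically 4096 (LG_PAGE = 12) on x86_64 systems.
    long page = sysconf(_SC_PAGESIZE);
    int lg = 0;
    for (long p = page; p > 1; p >>= 1)
        ++lg;
    std::printf("page size = %ld bytes, i.e. LG_PAGE = %d\n", page, lg);
}
```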
*/ /* #undef JEMALLOC_IS_MALLOC */ diff --git a/src/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp index a04bd94124c..0dabfc32b33 100644 --- a/src/Columns/tests/gtest_weak_hash_32.cpp +++ b/src/Columns/tests/gtest_weak_hash_32.cpp @@ -88,7 +88,6 @@ void checkColumn( if (num_collisions <= max_collisions_to_print) { collisions_str << "Collision:\n"; - collisions_str << print_for_row(it->second) << '\n'; collisions_str << print_for_row(i) << std::endl; } From 3a9d063f12a99bfcbf1da9828384cd29551dc1ab Mon Sep 17 00:00:00 2001 From: changvvb Date: Tue, 6 Apr 2021 15:54:35 +0800 Subject: [PATCH 014/133] Fix unit test --- src/CMakeLists.txt | 5 +++++ src/IO/tests/gtest_DateTimeToString.cpp | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b34a64b2d19..13bb4eb8ecf 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -493,3 +493,8 @@ if (ENABLE_TESTS AND USE_GTEST) add_check(unit_tests_dbms) endif () + +if (OS_DARWIN AND ARCH_AARCH64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto") +endif() diff --git a/src/IO/tests/gtest_DateTimeToString.cpp b/src/IO/tests/gtest_DateTimeToString.cpp index eb609f0f97e..058e7c12fdd 100644 --- a/src/IO/tests/gtest_DateTimeToString.cpp +++ b/src/IO/tests/gtest_DateTimeToString.cpp @@ -161,7 +161,7 @@ INSTANTIATE_TEST_SUITE_P(DateTimeToString, DateTimeToStringParamTestDayNum, { "Negative DayNum value wraps as if it was UInt16 due to LUT limitations and to maintain compatibility with existing code.", DayNum(-10 * 365), - "2106-02-07" + "2139-06-10" }, }) ); From fdcf077cf5db1142c33cc87f0a9f07e5cac8be90 Mon Sep 17 00:00:00 2001 From: changvvb Date: Tue, 6 Apr 2021 17:06:18 +0800 Subject: [PATCH 015/133] Disabled rocksdb jemalloc --- cmake/find/rocksdb.cmake | 5 +++++ contrib/jemalloc-cmake/CMakeLists.txt | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cmake/find/rocksdb.cmake b/cmake/find/rocksdb.cmake index 968cdb52407..f476e90033f 100644 --- a/cmake/find/rocksdb.cmake +++ b/cmake/find/rocksdb.cmake @@ -1,5 +1,10 @@ option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES}) +if (OS_DARWIN AND ARCH_AARCH64) + set (USE_INTERNAL_ROCKSDB_LIBRARY OFF) + set (ENABLE_ROCKSDB OFF) +endif() + if (NOT ENABLE_ROCKSDB) if (USE_INTERNAL_ROCKSDB_LIBRARY) message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal rocksdb library with ENABLE_ROCKSDB=OFF") diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 73afa99f1d8..43ac0ad568f 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN)) +if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN) OR (OS_DARWIN AND ARCH_AARCH64)) if (ENABLE_JEMALLOC) message (${RECONFIGURE_MESSAGE_LEVEL} "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64 or ppc64le on linux or freebsd.") From 55a2ea4a79886a31a1b30464c66ad1e36319f70d Mon Sep 17 00:00:00 2001 From: changvvb Date: Tue, 6 Apr 2021 21:26:48 +0800 Subject: [PATCH 016/133] GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DateLUTWithTimeZoneAndTimeRange); --- src/Common/tests/gtest_DateLUTImpl.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp 
b/src/Common/tests/gtest_DateLUTImpl.cpp index 74fd7cb6149..84662bd8191 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -20,6 +20,8 @@ extern const char * auto_time_zones[]; namespace { +GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DateLUTWithTimeZoneAndTimeRange); + cctz::civil_day YYYYMMDDToDay(unsigned value) { return cctz::civil_day( From 46c52b4f833173d9e9ac078635dbe9f6dd146582 Mon Sep 17 00:00:00 2001 From: changvvb Date: Tue, 6 Apr 2021 21:34:45 +0800 Subject: [PATCH 017/133] revert rocksdb submodule --- .gitmodules | 2 +- contrib/rocksdb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 5dc56dfc079..de7250166b8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -195,7 +195,7 @@ url = https://github.com/danlark1/miniselect [submodule "contrib/rocksdb"] path = contrib/rocksdb - url = https://github.com/changvvb/rocksdb.git + url = https://github.com/ClickHouse-Extras/rocksdb.git [submodule "contrib/xz"] path = contrib/xz url = https://github.com/xz-mirror/xz diff --git a/contrib/rocksdb b/contrib/rocksdb index 160932a81bc..54a0decabbc 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 160932a81bc94856fb48e98889a21fd753a21cb6 +Subproject commit 54a0decabbcf4c0bb5cf7befa9c597f28289bff5 From 15ae912b5604d8d27edc40401446a99b44c1b618 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 7 Apr 2021 12:55:20 +0000 Subject: [PATCH 018/133] Fix postgresql protocol with row policy --- docker/test/stateless/Dockerfile | 4 +- src/Core/PostgreSQLProtocol.h | 10 +++-- ...tgresql_protocol_with_row_policy.reference | 24 +++++++++++ ...est_postgresql_protocol_with_row_policy.sh | 41 +++++++++++++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.reference create mode 100755 tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 61d1b2f4849..c759037e929 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -28,7 +28,9 @@ RUN apt-get update -y \ tree \ unixodbc \ wget \ - mysql-client=5.7* + mysql-client=5.7* \ + postgresql \ + postgresql-contrib RUN pip3 install numpy scipy pandas diff --git a/src/Core/PostgreSQLProtocol.h b/src/Core/PostgreSQLProtocol.h index 471811e969b..c4da28807d8 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -832,10 +832,13 @@ class NoPasswordAuth : public AuthenticationMethod { public: void authenticate( - const String & /* user_name */, - Context & /* context */, + const String & user_name, + Context & context, Messaging::MessageTransport & /* mt */, - const Poco::Net::SocketAddress & /* address */) override {} + const Poco::Net::SocketAddress & address) override + { + context.setUser(user_name, "", address); + } Authentication::Type getType() const override { @@ -859,6 +862,7 @@ public: { std::unique_ptr password = mt.receive(); setPassword(user_name, password->password, context, mt, address); + context.setUser(user_name, password->password, address); } else throw Exception( diff --git a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.reference b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.reference new file mode 100644 index 00000000000..729d93bf322 --- /dev/null +++ 
b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.reference
@@ -0,0 +1,24 @@
+before row policy
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+
+after row policy with no password
+ val
+-----
+   2
+(1 row)
+
+after row policy with plaintext_password
+ val
+-----
+   2
+(1 row)
+
diff --git a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh
new file mode 100755
index 00000000000..b2b198b7f7d
--- /dev/null
+++ b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+echo "
+DROP USER IF EXISTS postgresql_protocol_user;
+DROP TABLE IF EXISTS postgresql_protocol_with_row_policy;
+DROP ROW POLICY IF EXISTS test_policy ON postgresql_protocol_with_row_policy;
+
+CREATE TABLE postgresql_protocol_with_row_policy (val UInt32) ENGINE=MergeTree ORDER BY val;
+INSERT INTO postgresql_protocol_with_row_policy SELECT number FROM numbers(10);
+
+SELECT 'before row policy';
+SELECT * FROM postgresql_protocol_with_row_policy;
+SELECT '';
+" | $CLICKHOUSE_CLIENT -n
+
+echo "
+CREATE USER postgresql_protocol_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password;
+GRANT SELECT(val) ON postgresql_protocol_with_row_policy TO postgresql_protocol_user;
+CREATE ROW POLICY IF NOT EXISTS test_policy ON postgresql_protocol_with_row_policy FOR SELECT USING val = 2 TO postgresql_protocol_user;
+
+SELECT 'after row policy with no password';
+" | $CLICKHOUSE_CLIENT -n
+
+psql --host localhost --port 5433 default --user postgresql_protocol_user -c "SELECT * FROM postgresql_protocol_with_row_policy;"
+
+echo "
+DROP USER IF EXISTS postgresql_protocol_user;
+DROP ROW POLICY IF EXISTS test_policy ON postgresql_protocol_with_row_policy;
+CREATE USER postgresql_protocol_user HOST IP '127.0.0.1' IDENTIFIED WITH plaintext_password BY 'qwerty';
+GRANT SELECT(val) ON postgresql_protocol_with_row_policy TO postgresql_protocol_user;
+CREATE ROW POLICY IF NOT EXISTS test_policy ON postgresql_protocol_with_row_policy FOR SELECT USING val = 2 TO postgresql_protocol_user;
+
+SELECT 'after row policy with plaintext_password';
+" | $CLICKHOUSE_CLIENT -n
+
+psql "postgresql://postgresql_protocol_user:qwerty@localhost:5433/default" -c "SELECT * FROM postgresql_protocol_with_row_policy;"
+

From 3420cf9142b4aa868267dd7f4f7e1e09321ba621 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Wed, 7 Apr 2021 14:02:54 +0000
Subject: [PATCH 019/133] Add setting to config

---
 programs/server/config.xml                             | 2 +-
 .../01802_test_postgresql_protocol_with_row_policy.sh  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/programs/server/config.xml b/programs/server/config.xml
index 9c01b328290..e3ceee41631 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -76,7 +76,7 @@
-    <!-- <postgresql_port>9005</postgresql_port> -->
+    <postgresql_port>9005</postgresql_port>
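With `postgresql_port` uncommented above, ClickHouse speaks the PostgreSQL wire protocol on port 9005, which is what lets the test drive the server with a stock `psql` client. As a quick illustration of what this enables (the connection string and queries below are illustrative, not taken from the patches), a client such as `psql "postgresql://default@localhost:9005/default"` can then run ordinary ClickHouse SQL:

``` sql
SELECT version();                 -- answered by ClickHouse over the PostgreSQL protocol
SELECT count() FROM system.tables;
```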
From b5f6de3cf9432ed3b8e248acaa43b0a9ffa4572d Mon Sep 17 00:00:00 2001
From: Dmitry Krylov
Date: Sun, 11 Apr 2021 12:06:29 +1000
Subject: [PATCH 056/133] Renaming tests

---
 .../{90001_array_fold.reference => 01810_array_fold.reference}    | 0
 .../0_stateless/{90001_array_fold.sql => 01811_array_fold.sql}    | 0
 ...bles.reference => 01812_array_fold_data_from_tables.reference} | 0
 ...data_from_tables.sql => 01813_array_fold_data_from_tables.sql} | 0
 ...ay_fold_errors.reference => 01814_array_fold_errors.reference} | 0
 .../{90003_array_fold_errors.sql => 01815_array_fold_errors.sql}  | 0
 6 files changed, 0 insertions(+), 0 deletions(-)
 rename tests/queries/0_stateless/{90001_array_fold.reference => 01810_array_fold.reference} (100%)
 rename tests/queries/0_stateless/{90001_array_fold.sql => 01811_array_fold.sql} (100%)
 rename tests/queries/0_stateless/{90002_array_fold_data_from_tables.reference => 01812_array_fold_data_from_tables.reference} (100%)
 rename tests/queries/0_stateless/{90002_array_fold_data_from_tables.sql => 01813_array_fold_data_from_tables.sql} (100%)
 rename tests/queries/0_stateless/{90003_array_fold_errors.reference => 01814_array_fold_errors.reference} (100%)
 rename tests/queries/0_stateless/{90003_array_fold_errors.sql => 01815_array_fold_errors.sql} (100%)

diff --git a/tests/queries/0_stateless/90001_array_fold.reference b/tests/queries/0_stateless/01810_array_fold.reference
similarity index 100%
rename from tests/queries/0_stateless/90001_array_fold.reference
rename to tests/queries/0_stateless/01810_array_fold.reference
diff --git a/tests/queries/0_stateless/90001_array_fold.sql b/tests/queries/0_stateless/01811_array_fold.sql
similarity index 100%
rename from tests/queries/0_stateless/90001_array_fold.sql
rename to tests/queries/0_stateless/01811_array_fold.sql
diff --git a/tests/queries/0_stateless/90002_array_fold_data_from_tables.reference b/tests/queries/0_stateless/01812_array_fold_data_from_tables.reference
similarity index 100%
rename from tests/queries/0_stateless/90002_array_fold_data_from_tables.reference
rename to tests/queries/0_stateless/01812_array_fold_data_from_tables.reference
diff --git a/tests/queries/0_stateless/90002_array_fold_data_from_tables.sql b/tests/queries/0_stateless/01813_array_fold_data_from_tables.sql
similarity index 100%
rename from tests/queries/0_stateless/90002_array_fold_data_from_tables.sql
rename to tests/queries/0_stateless/01813_array_fold_data_from_tables.sql
diff --git a/tests/queries/0_stateless/90003_array_fold_errors.reference b/tests/queries/0_stateless/01814_array_fold_errors.reference
similarity index 100%
rename from tests/queries/0_stateless/90003_array_fold_errors.reference
rename to tests/queries/0_stateless/01814_array_fold_errors.reference
diff --git a/tests/queries/0_stateless/90003_array_fold_errors.sql b/tests/queries/0_stateless/01815_array_fold_errors.sql
similarity index 100%
rename from tests/queries/0_stateless/90003_array_fold_errors.sql
rename to tests/queries/0_stateless/01815_array_fold_errors.sql

From 5c24225d86e7126f9dc67a0726ff98d3e569a1e9 Mon Sep 17 00:00:00 2001
From: Dmitry Krylov
Date: Sun, 11 Apr 2021 12:09:22 +1000
Subject: [PATCH 057/133] Fix performance tests

---
 tests/performance/array_fold_small.xml   |  4 +++
 tests/performance/array_reduce_small.xml | 37 ------
 2 files changed, 4 insertions(+), 37 deletions(-)
 create mode 100644 tests/performance/array_fold_small.xml
 delete mode 100644 tests/performance/array_reduce_small.xml

diff --git a/tests/performance/array_fold_small.xml b/tests/performance/array_fold_small.xml
new file mode 100644
index 00000000000..96b30ae8ace
--- /dev/null
+++ b/tests/performance/array_fold_small.xml
@@ -0,0 +1,4 @@
+<test>
+    <query>SELECT arrayFold(x, acc -> acc + 1, range(100000), toUInt64(0))</query>
+    <query>SELECT arrayFold(x, acc -> acc + x, range(100000), toUInt64(0))</query>
+</test>
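For readers who have not met the function these queries benchmark: `arrayFold` performs a left fold over one or more arrays, passing each element together with the running accumulator to the lambda and returning the final accumulator. A small worked illustration (results computed by hand, not part of the patch):

``` sql
SELECT arrayFold(x, acc -> acc + x, [1, 2, 3, 4], toUInt64(0));                    -- returns 10
SELECT arrayFold(x, acc -> arrayPushFront(acc, x), [1, 2, 3], emptyArrayUInt8());  -- returns [3,2,1]
```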
diff --git a/tests/performance/array_reduce_small.xml b/tests/performance/array_reduce_small.xml
deleted file mode 100644
index e449559cf81..00000000000
--- a/tests/performance/array_reduce_small.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<test>
-    <query>SELECT arrayReduce('count', range(1000000))</query>
-    <query>SELECT arrayReduce('sum', range(1000000))</query>
-    <query>SELECT arrayReduceInRanges('count', [(1, 1000000)], range(1000000))</query>
-    <query>SELECT arrayReduceInRanges('sum', [(1, 1000000)], range(1000000))</query>
-    <query>SELECT arrayReduceInRanges('count', arrayZip(range(1000000), range(1000000)), range(1000000))[123456]</query>
-    <query>SELECT arrayReduceInRanges('sum', arrayZip(range(1000000), range(1000000)), range(1000000))[123456]</query>
-
-    <query>SELECT arrayFold(x, acc -> acc + 1, range(1000000), toUInt64(0))</query>
-    <query>SELECT arrayFold(x, acc -> acc + x, range(1000000), toUInt64(0))</query>
-</test>

From 35472bcc2ff09dea516cbea7fd348ba95833d2bf Mon Sep 17 00:00:00 2001
From: Dmitry Krylov
Date: Mon, 12 Apr 2021 11:37:44 +1000
Subject: [PATCH 058/133] Update to new IFunction::create()

---
 src/Functions/array/arrayFold.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp
index b82d84a77ba..78dabfff025 100644
--- a/src/Functions/array/arrayFold.cpp
+++ b/src/Functions/array/arrayFold.cpp
@@ -11,7 +11,7 @@ class FunctionArrayFold : public IFunction
 {
 public:
     static constexpr auto name = "arrayFold";
-    static FunctionPtr create(const Context &) { return std::make_shared<FunctionArrayFold>(); }
+    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayFold>(); }

     String getName() const override { return name; }
     bool isVariadic() const override { return true; }

From 68bd27f3c5f5edffd3f2fc57fe02c953a2bfa420 Mon Sep 17 00:00:00 2001
From: Dmitry Krylov
Date: Mon, 12 Apr 2021 12:12:40 +1000
Subject: [PATCH 059/133] Fix code style

---
 src/Functions/array/arrayFold.cpp | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp
index 78dabfff025..546c5a627bb 100644
--- a/src/Functions/array/arrayFold.cpp
+++ b/src/Functions/array/arrayFold.cpp
@@ -5,6 +5,15 @@
 namespace DB
 {

+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int SIZES_OF_ARRAYS_DOESNT_MATCH;
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+}
+
+
 /** arrayFold(x1,...,xn,accum -> expression, array1,...,arrayn, init_accum) - apply the expression to each element of the array (or set of parallel arrays).
*/ class FunctionArrayFold : public IFunction From 18f58b0c49d8ac913f00fc37b9c33199952b6caa Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Mon, 12 Apr 2021 12:16:20 +1000 Subject: [PATCH 060/133] Fix test filenames --- tests/queries/0_stateless/01810_array_fold.reference | 8 -------- .../0_stateless/01813_array_fold_data_from_tables.sql | 8 -------- .../0_stateless/01814_array_fold_errors.reference | 0 tests/queries/0_stateless/01815_array_fold_errors.sql | 11 ----------- 4 files changed, 27 deletions(-) delete mode 100644 tests/queries/0_stateless/01810_array_fold.reference delete mode 100644 tests/queries/0_stateless/01813_array_fold_data_from_tables.sql delete mode 100644 tests/queries/0_stateless/01814_array_fold_errors.reference delete mode 100644 tests/queries/0_stateless/01815_array_fold_errors.sql diff --git a/tests/queries/0_stateless/01810_array_fold.reference b/tests/queries/0_stateless/01810_array_fold.reference deleted file mode 100644 index d0c64c8a31f..00000000000 --- a/tests/queries/0_stateless/01810_array_fold.reference +++ /dev/null @@ -1,8 +0,0 @@ -23 -3 -101 -269 -[1,2,3,4] -[4,3,2,1] -([4,3,2,1],[1,2,3,4]) -([1,3,5],[2,4,6]) diff --git a/tests/queries/0_stateless/01813_array_fold_data_from_tables.sql b/tests/queries/0_stateless/01813_array_fold_data_from_tables.sql deleted file mode 100644 index 23e85ead56f..00000000000 --- a/tests/queries/0_stateless/01813_array_fold_data_from_tables.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT arrayFold(x,acc -> acc+x, range(number), toInt64(0)) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> acc+x, range(number), number) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> arrayPushFront(acc, x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> x % 2 ? 
arrayPushFront(acc, x) : arrayPushBack(acc, x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> (acc.1+x, acc.2-x), range(number), (toInt64(0), toInt64(0))) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> (acc.1+x.1, acc.2-x.2), arrayZip(range(number), range(number)), (toInt64(0), toInt64(0))) FROM system.numbers LIMIT 10; -SELECT arrayFold(x,acc -> arrayPushFront(acc, (x, x+1)), range(number), [(toUInt64(0),toUInt64(0))]) FROM system.numbers LIMIT 10; -SELECT arrayFold(x, acc -> concat(acc, arrayMap(z -> toString(x), [number])) , range(number), CAST([] as Array(String))) FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/01814_array_fold_errors.reference b/tests/queries/0_stateless/01814_array_fold_errors.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/01815_array_fold_errors.sql b/tests/queries/0_stateless/01815_array_fold_errors.sql deleted file mode 100644 index 49fd085dfe2..00000000000 --- a/tests/queries/0_stateless/01815_array_fold_errors.sql +++ /dev/null @@ -1,11 +0,0 @@ -SELECT arrayFold([]); -- { serverError 42 } -SELECT arrayFold([1,2,3]); -- { serverError 42 } -SELECT arrayFold([1,2,3], [4,5,6]); -- { serverError 43 } -SELECT arrayFold(1234); -- { serverError 42 } -SELECT arrayFold(x, acc -> acc + x, 10, 20); -- { serverError 43 } -SELECT arrayFold(x, acc -> acc + x, 10, [20, 30, 40]); -- { serverError 43 } -SELECT arrayFold(x -> x * 2, [1,2,3,4], toInt64(3)); -- { serverError 43 } -SELECT arrayFold(x,acc -> acc+x, number, toInt64(0)) FROM system.numbers LIMIT 10; -- { serverError 43 } -SELECT arrayFold(x,y,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7], toInt64(3)); -- { serverError 190 } -SELECT arrayFold(x,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 47 } -SELECT arrayFold(x,acc -> acc + x * 2, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 43 } From a5a4fbdb0007ecd0af1a111eb6309fe33a038e0a Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Mon, 12 Apr 2021 13:09:36 +1000 Subject: [PATCH 061/133] Fix test filenames --- tests/queries/0_stateless/01811_array_fold.reference | 8 ++++++++ .../0_stateless/01812_array_fold_data_from_tables.sql | 8 ++++++++ .../0_stateless/01813_array_fold_errors.reference | 0 tests/queries/0_stateless/01813_array_fold_errors.sql | 11 +++++++++++ 4 files changed, 27 insertions(+) create mode 100644 tests/queries/0_stateless/01811_array_fold.reference create mode 100644 tests/queries/0_stateless/01812_array_fold_data_from_tables.sql create mode 100644 tests/queries/0_stateless/01813_array_fold_errors.reference create mode 100644 tests/queries/0_stateless/01813_array_fold_errors.sql diff --git a/tests/queries/0_stateless/01811_array_fold.reference b/tests/queries/0_stateless/01811_array_fold.reference new file mode 100644 index 00000000000..d0c64c8a31f --- /dev/null +++ b/tests/queries/0_stateless/01811_array_fold.reference @@ -0,0 +1,8 @@ +23 +3 +101 +269 +[1,2,3,4] +[4,3,2,1] +([4,3,2,1],[1,2,3,4]) +([1,3,5],[2,4,6]) diff --git a/tests/queries/0_stateless/01812_array_fold_data_from_tables.sql b/tests/queries/0_stateless/01812_array_fold_data_from_tables.sql new file mode 100644 index 00000000000..23e85ead56f --- /dev/null +++ b/tests/queries/0_stateless/01812_array_fold_data_from_tables.sql @@ -0,0 +1,8 @@ +SELECT arrayFold(x,acc -> acc+x, range(number), toInt64(0)) FROM system.numbers LIMIT 10; +SELECT arrayFold(x,acc -> acc+x, range(number), number) FROM system.numbers LIMIT 10; 
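+-- Each query folds over range(number), so rows 0-9 of system.numbers exercise array lengths 0 through 9, including the empty array.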
+SELECT arrayFold(x,acc -> arrayPushFront(acc, x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 10; +SELECT arrayFold(x,acc -> x % 2 ? arrayPushFront(acc, x) : arrayPushBack(acc, x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 10; +SELECT arrayFold(x,acc -> (acc.1+x, acc.2-x), range(number), (toInt64(0), toInt64(0))) FROM system.numbers LIMIT 10; +SELECT arrayFold(x,acc -> (acc.1+x.1, acc.2-x.2), arrayZip(range(number), range(number)), (toInt64(0), toInt64(0))) FROM system.numbers LIMIT 10; +SELECT arrayFold(x,acc -> arrayPushFront(acc, (x, x+1)), range(number), [(toUInt64(0),toUInt64(0))]) FROM system.numbers LIMIT 10; +SELECT arrayFold(x, acc -> concat(acc, arrayMap(z -> toString(x), [number])) , range(number), CAST([] as Array(String))) FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/01813_array_fold_errors.reference b/tests/queries/0_stateless/01813_array_fold_errors.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01813_array_fold_errors.sql b/tests/queries/0_stateless/01813_array_fold_errors.sql new file mode 100644 index 00000000000..49fd085dfe2 --- /dev/null +++ b/tests/queries/0_stateless/01813_array_fold_errors.sql @@ -0,0 +1,11 @@ +SELECT arrayFold([]); -- { serverError 42 } +SELECT arrayFold([1,2,3]); -- { serverError 42 } +SELECT arrayFold([1,2,3], [4,5,6]); -- { serverError 43 } +SELECT arrayFold(1234); -- { serverError 42 } +SELECT arrayFold(x, acc -> acc + x, 10, 20); -- { serverError 43 } +SELECT arrayFold(x, acc -> acc + x, 10, [20, 30, 40]); -- { serverError 43 } +SELECT arrayFold(x -> x * 2, [1,2,3,4], toInt64(3)); -- { serverError 43 } +SELECT arrayFold(x,acc -> acc+x, number, toInt64(0)) FROM system.numbers LIMIT 10; -- { serverError 43 } +SELECT arrayFold(x,y,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7], toInt64(3)); -- { serverError 190 } +SELECT arrayFold(x,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 47 } +SELECT arrayFold(x,acc -> acc + x * 2, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 43 } From a3d57bd5aff3f6f57cc0e3ec7fe5e9eefbebe7b2 Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Mon, 12 Apr 2021 20:29:35 +1000 Subject: [PATCH 062/133] Check number of arguments --- src/Functions/array/arrayFold.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index 546c5a627bb..bd38a25d77a 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -30,13 +30,8 @@ public: /// For argument-lambda expressions, it defines the types of arguments of these expressions. 
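     /// For arrayFold the lambda is typed as (x1, ..., xn, acc): the array elements come first, the accumulator last, and the result of the whole function has the accumulator's type.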
void getLambdaArgumentTypes(DataTypes & arguments) const override { - if (arguments.empty()) - throw Exception("Function " + getName() + " needs at least one argument; passed " - + toString(arguments.size()) + ".", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - - if (arguments.size() == 1) - throw Exception("Function " + getName() + " needs at least one array argument.", + if (arguments.size() < 3) + throw Exception("Function " + getName() + " needs lambda function, at least one array argument and one accumulator argument.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); DataTypes nested_types(arguments.size() - 1); From df6072484718589413d0fab7c5a6d7ddd41897d8 Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Mon, 12 Apr 2021 20:29:58 +1000 Subject: [PATCH 063/133] Fix constness --- src/Functions/array/arrayFold.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index bd38a25d77a..5c80b01c5c9 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -65,9 +65,7 @@ public: throw Exception("First argument for function " + getName() + " must be a function.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); /// The types of the remaining arguments are already checked in getLambdaArgumentTypes. - DataTypePtr return_type = removeLowCardinality(data_type_function->getReturnType()); - const auto accum_type = arguments.back().type; - return accum_type; + return DataTypePtr(arguments.back().type); } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override From 9089f44a5079bec888a9b1cc96bcf2a72f32488b Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 12 Apr 2021 18:40:34 +0000 Subject: [PATCH 064/133] Fix tests --- docker/test/stateful/run.sh | 4 ++-- docker/test/stateless/run.sh | 6 +++--- .../01802_test_postgresql_protocol_with_row_policy.sh | 4 ++-- tests/queries/shell_config.sh | 2 ++ 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 9e210dc92a2..8d865431570 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -21,14 +21,14 @@ function start() -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \ --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \ --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \ - --mysql_port 19004 \ + --mysql_port 19004 --postgresql_port 19005 \ --keeper_server.tcp_port 19181 --keeper_server.server_id 2 sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \ -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \ --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \ --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \ - --mysql_port 29004 \ + --mysql_port 29004 --postgresql_port 29005 \ --keeper_server.tcp_port 29181 --keeper_server.server_id 3 fi diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 20132eafb75..b984ca29d17 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -41,10 +41,10 
@@ fi
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
     -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
     --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
-    --mysql_port 19004 \
+    --mysql_port 19004 --postgresql_port 19005 \
     --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
     --macros.replica r2   # It doesn't work :(
@@ -52,7 +52,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
     --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
-    --mysql_port 29004 \
+    --mysql_port 29004 --postgresql_port 29005 \
     --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
     --macros.shard s2   # It doesn't work :(
diff --git a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh
index 386ca00c3ee..edd73131020 100755
--- a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh
+++ b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh
@@ -27,7 +27,7 @@ SELECT '';
 SELECT 'after row policy with no password';
 " | $CLICKHOUSE_CLIENT -n

-psql --host localhost --port 9005 db01802 --user postgresql_user -c "SELECT * FROM postgresql;"
+psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} db01802 --user postgresql_user -c "SELECT * FROM postgresql;"

 echo "
 DROP USER IF EXISTS postgresql_user;
@@ -39,5 +39,5 @@ CREATE ROW POLICY IF NOT EXISTS test_policy ON db01802.postgresql FOR SELECT USI
 SELECT 'after row policy with plaintext_password';
 " | $CLICKHOUSE_CLIENT -n

-psql "postgresql://postgresql_user:qwerty@localhost:9005/db01802" -c "SELECT * FROM postgresql;"
+psql "postgresql://postgresql_user:qwerty@localhost:${CLICKHOUSE_PORT_POSTGRESQL}/db01802" -c "SELECT * FROM postgresql;"

diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh
index 5b942a95d02..98b67c3aa88 100644
--- a/tests/queries/shell_config.sh
+++ b/tests/queries/shell_config.sh
@@ -63,6 +63,8 @@ export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:="8443"}
 export CLICKHOUSE_PORT_HTTP_PROTO=${CLICKHOUSE_PORT_HTTP_PROTO:="http"}
 export CLICKHOUSE_PORT_MYSQL=${CLICKHOUSE_PORT_MYSQL:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=mysql_port 2>/dev/null)} 2>/dev/null
 export CLICKHOUSE_PORT_MYSQL=${CLICKHOUSE_PORT_MYSQL:="9004"}
+export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=postgresql_port 2>/dev/null)} 2>/dev/null
+export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:="9005"}

 # Add database and log comment to url params
 if [ -v CLICKHOUSE_URL_PARAMS ]

From d5580a8e71d736da4267bf05a4a2310070c31f24 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 12 Apr 2021 23:02:42 +0300
Subject: [PATCH 065/133] Dynamic dispatch for intDiv --- src/Functions/intDiv.cpp | 126 ++++++++++++++++++++++++++++++++------- 1 file changed, 105 insertions(+), 21 deletions(-) diff --git a/src/Functions/intDiv.cpp b/src/Functions/intDiv.cpp index 804696f2776..42b0299ce01 100644 --- a/src/Functions/intDiv.cpp +++ b/src/Functions/intDiv.cpp @@ -1,12 +1,29 @@ #include #include +#include -#if defined(__SSE2__) -# define LIBDIVIDE_SSE2 1 +#if defined(__x86_64__) + #define LIBDIVIDE_SSE2 1 + #define LIBDIVIDE_AVX2 1 + + #if defined(__clang__) + #pragma clang attribute push(__attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2"))), apply_to=function) + #else + #pragma GCC push_options + #pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native") + #endif #endif #include +#if defined(__x86_64__) + #if defined(__clang__) + #pragma clang attribute pop + #else + #pragma GCC pop_options + #endif +#endif + namespace DB { @@ -20,6 +37,83 @@ namespace /// Optimizations for integer division by a constant. +#if defined(__x86_64__) + +DECLARE_DEFAULT_CODE ( + template + void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size) + { + libdivide::divider divider(b); + const A * a_end = a_pos + size; + + static constexpr size_t values_per_simd_register = 16 / sizeof(A); + const A * a_end_simd = a_pos + size / values_per_simd_register * values_per_simd_register; + + while (a_pos < a_end_simd) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos), + _mm_loadu_si128(reinterpret_cast(a_pos)) / divider); + + a_pos += values_per_simd_register; + c_pos += values_per_simd_register; + } + + while (a_pos < a_end) + { + *c_pos = *a_pos / divider; + ++a_pos; + ++c_pos; + } + } +) + +DECLARE_AVX2_SPECIFIC_CODE ( + template + void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size) + { + libdivide::divider divider(b); + const A * a_end = a_pos + size; + + static constexpr size_t values_per_simd_register = 32 / sizeof(A); + const A * a_end_simd = a_pos + size / values_per_simd_register * values_per_simd_register; + + while (a_pos < a_end_simd) + { + _mm256_storeu_si256(reinterpret_cast<__m256i *>(c_pos), + _mm256_loadu_si256(reinterpret_cast(a_pos)) / divider); + + a_pos += values_per_simd_register; + c_pos += values_per_simd_register; + } + + while (a_pos < a_end) + { + *c_pos = *a_pos / divider; + ++a_pos; + ++c_pos; + } + } +) + +#else + +template +void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size) +{ + libdivide::divider divider(b); + const A * a_end = a_pos + size; + + while (a_pos < a_end) + { + *c_pos = *a_pos / divider; + ++a_pos; + ++c_pos; + } +} + +#endif + + template struct DivideIntegralByConstantImpl : BinaryOperation> @@ -70,29 +164,19 @@ struct DivideIntegralByConstantImpl if (unlikely(static_cast(b) == 0)) throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION); - libdivide::divider divider(b); - - const A * a_end = a_pos + size; - -#if defined(__SSE2__) - static constexpr size_t values_per_sse_register = 16 / sizeof(A); - const A * a_end_sse = a_pos + size / values_per_sse_register * values_per_sse_register; - - while (a_pos < a_end_sse) +#if USE_MULTITARGET_CODE + if (isArchSupported(TargetArch::AVX2)) { - _mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos), - _mm_loadu_si128(reinterpret_cast(a_pos)) / divider); - - a_pos += values_per_sse_register; - c_pos += values_per_sse_register; + TargetSpecific::AVX2::divideImpl(a_pos, b, c_pos, size); } + else 
 #endif
-
-        while (a_pos < a_end)
         {
-            *c_pos = *a_pos / divider;
-            ++a_pos;
-            ++c_pos;
+        {
+#if __x86_64__
+            TargetSpecific::Default::divideImpl(a_pos, b, c_pos, size);
+#else
+            divideImpl(a_pos, b, c_pos, size);
+#endif
+        }
     }
 };

From 3effb74d317ac88809de1e9dd6fb3574d5984c1e Mon Sep 17 00:00:00 2001
From: songenjie
Date: Thu, 8 Apr 2021 11:29:36 +0800
Subject: [PATCH 066/133] [clickhouse][server][ddl] add fetch part docs

---
 .../sql-reference/statements/alter/partition.md  | 16 +++++++++++-----
 docs/en/sql-reference/statements/grant.md        |  2 +-
 .../en/2016/how-to-update-data-in-clickhouse.md  |  2 +-
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md
index f7183ba525c..23f10684ae7 100644
--- a/docs/en/sql-reference/statements/alter/partition.md
+++ b/docs/en/sql-reference/statements/alter/partition.md
@@ -16,7 +16,7 @@ The following operations with [partitions](../../../engines/table-engines/merget
 - [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition.
 - [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition.
 - [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition.
-- [FETCH PARTITION](#alter_fetch-partition) — Downloads a partition from another server.
+- [FETCH PART\|PARTITION](#alter_fetch-partition) — Downloads a part or partition from another server.
 - [MOVE PARTITION\|PART](#alter_move-partition) — Move partition/data part to another disk or volume.

@@ -198,29 +198,35 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
 The query works similar to `CLEAR COLUMN`, but it resets an index instead of a column data.

-## FETCH PARTITION {#alter_fetch-partition}
+## FETCH PART|PARTITION {#alter_fetch-partition}

 ``` sql
-ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
+ALTER TABLE table_name FETCH PART|PARTITION partition_expr FROM 'path-in-zookeeper'
 ```

 Downloads a partition from another server. This query only works for the replicated tables.

 The query does the following:

-1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
+1. Downloads the part or partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
 2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.

 For example:

+1. FETCH PARTITION
 ``` sql
 ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
 ALTER TABLE users ATTACH PARTITION 201902;
 ```
+2. FETCH PART
+``` sql
+ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits';
+ALTER TABLE users ATTACH PART 201901_2_2_0;
+```

 Note that:

-- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition to the `detached` directory only on the local server.
+- The `ALTER ... FETCH PART|PARTITION` query isn’t replicated. It places the part or partition into the `detached` directory only on the local server.
 - The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas.
Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas. diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 0afc9b5b95f..daa020f9469 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -279,7 +279,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL` - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING` - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART` - - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION` + - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`, `FETCH PART` - `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION` - `ALTER VIEW` Level: `GROUP` - `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW` diff --git a/website/blog/en/2016/how-to-update-data-in-clickhouse.md b/website/blog/en/2016/how-to-update-data-in-clickhouse.md index 22c2fa3ccc1..7e9b938203f 100644 --- a/website/blog/en/2016/how-to-update-data-in-clickhouse.md +++ b/website/blog/en/2016/how-to-update-data-in-clickhouse.md @@ -67,7 +67,7 @@ There is a nice set of operations to work with partitions: - `DROP PARTITION` - Delete a partition. - `ATTACH PART|PARTITION` -- Add a new part or partition from the 'detached' directory to the table. - `FREEZE PARTITION` - Create a backup of a partition. -- `FETCH PARTITION` - Download a partition from another server. +- `FETCH PART|PARTITION` - Download a part or partition from another server. We can do any data management operations on partitions level: move, copy and delete. Also, special DETACH and ATTACH operations are created to simplify data manipulation. DETACH detaches partition from table, moving all data to detached directory. Data is still there and you can copy it anywhere but detached data is not visible on request level. ATTACH is the opposite: attaches data from detached directory so it become visible. 
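Taken together, the documentation above amounts to a two-step, single-server workflow. A sketch against a hypothetical replicated table (`users`, the part name, and the ZooKeeper path mirror the docs' examples and are illustrative only):

``` sql
ALTER TABLE users FETCH PART '201901_2_2_0' FROM '/clickhouse/tables/01-01/visits'; -- local only: lands in detached/
ALTER TABLE users ATTACH PART '201901_2_2_0';                                       -- replicated: makes the part visible
SELECT count() FROM users;                                                          -- fetched rows are now queryable
```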
From 564136ec46ed7847ff296869c8b6156ee202f34b Mon Sep 17 00:00:00 2001
From: songenjie
Date: Tue, 13 Apr 2021 12:40:33 +0800
Subject: [PATCH 067/133] [clickhouse][server][ddl][alter] support fetch part

---
 src/Access/AccessType.h                       |   2 +-
 src/Common/ErrorCodes.cpp                     |   2 +
 src/Parsers/ASTAlterQuery.cpp                 |   2 +-
 src/Parsers/ParserAlterQuery.cpp              |  16 +++
 src/Storages/MergeTree/MergeTreeData.cpp      |   9 +-
 src/Storages/MergeTree/MergeTreeData.h        |   7 +-
 src/Storages/PartitionCommands.cpp            |   6 +-
 src/Storages/StorageReplicatedMergeTree.cpp   | 102 ++++++++++++++----
 src/Storages/StorageReplicatedMergeTree.h     |  10 +-
 tests/fuzz/ast.dict                           |   1 +
 .../__init__.py                               |   0
 .../configs/zookeeper_config.xml              |  28 +++++
 .../test.py                                   |  40 +++++++
 .../test.py                                   |   2 +-
 .../rbac/tests/privileges/grant_option.py     |   2 +-
 15 files changed, 198 insertions(+), 31 deletions(-)
 create mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py
 create mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml
 create mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py

diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h
index 40740b3164e..c7311997ba2 100644
--- a/src/Access/AccessType.h
+++ b/src/Access/AccessType.h
@@ -62,7 +62,7 @@ enum class AccessType
        enabled implicitly by the grant ALTER_TABLE */\
     M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\
     M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \
-    M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \
+    M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION, FETCH PART", TABLE, ALTER_TABLE) \
     M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
 \
     M(ALTER_TABLE, "", GROUP, ALTER) \
diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp
index ad0463db889..7658448976b 100644
--- a/src/Common/ErrorCodes.cpp
+++ b/src/Common/ErrorCodes.cpp
@@ -549,6 +549,8 @@
     M(579, INCORRECT_PART_TYPE) \
     M(580, CANNOT_SET_ROUNDING_MODE) \
     M(581, TOO_LARGE_DISTRIBUTED_DEPTH) \
+    M(582, PART_DOESNT_EXIST) \
+    M(583, PART_ALREADY_EXISTS) \
 \
     M(998, POSTGRESQL_CONNECTION_FAILURE) \
     M(999, KEEPER_EXCEPTION) \
diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp
index df4a9a5f99a..5b052bae856 100644
--- a/src/Parsers/ASTAlterQuery.cpp
+++ b/src/Parsers/ASTAlterQuery.cpp
@@ -245,7 +245,7 @@ void ASTAlterCommand::formatImpl(
     else if (type == ASTAlterCommand::FETCH_PARTITION)
     {
         settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "FETCH "
-                      << "PARTITION " << (settings.hilite ? hilite_none : "");
+                      << (part ? "PART " : "PARTITION ") << (settings.hilite ? hilite_none : "");
         partition->formatImpl(settings, state, frame);
         settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ?
hilite_none : "") << DB::quote << from; diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index e5cc4b1b95e..de524342fb4 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -61,6 +61,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_drop_detached_partition("DROP DETACHED PARTITION"); ParserKeyword s_drop_detached_part("DROP DETACHED PART"); ParserKeyword s_fetch_partition("FETCH PARTITION"); + ParserKeyword s_fetch_part("FETCH PART"); ParserKeyword s_replace_partition("REPLACE PARTITION"); ParserKeyword s_freeze("FREEZE"); ParserKeyword s_unfreeze("UNFREEZE"); @@ -428,6 +429,21 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected command->from = ast_from->as().value.get(); command->type = ASTAlterCommand::FETCH_PARTITION; } + else if (s_fetch_part.ignore(pos, expected)) + { + if (!parser_string_literal.parse(pos, command->partition, expected)) + return false; + + if (!s_from.ignore(pos, expected)) + return false; + + ASTPtr ast_from; + if (!parser_string_literal.parse(pos, ast_from, expected)) + return false; + command->from = ast_from->as().value.get(); + command->part = true; + command->type = ASTAlterCommand::FETCH_PARTITION; + } else if (s_freeze.ignore(pos, expected)) { if (s_partition.ignore(pos, expected)) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ee8e15008cb..b5da72c517f 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2909,7 +2909,12 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String throw Exception("Cannot move parts because moves are manually disabled", ErrorCodes::ABORTED); } -void MergeTreeData::fetchPartition(const ASTPtr & /*partition*/, const StorageMetadataPtr & /*metadata_snapshot*/, const String & /*from*/, ContextPtr /*query_context*/) +void MergeTreeData::fetchPartition( + const ASTPtr & /*partition*/, + const StorageMetadataPtr & /*metadata_snapshot*/, + const String & /*from*/, + bool /*fetch_part*/, + ContextPtr /*query_context*/) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "FETCH PARTITION is not supported by storage {}", getName()); } @@ -2972,7 +2977,7 @@ Pipe MergeTreeData::alterPartition( break; case PartitionCommand::FETCH_PARTITION: - fetchPartition(command.partition, metadata_snapshot, command.from_zookeeper_path, query_context); + fetchPartition(command.partition, metadata_snapshot, command.from_zookeeper_path, command.part, query_context); break; case PartitionCommand::FREEZE_PARTITION: diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 6c0bda07bb1..46c0014d9f7 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -970,7 +970,12 @@ protected: virtual void movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, ContextPtr context) = 0; /// Makes sense only for replicated tables - virtual void fetchPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from, ContextPtr query_context); + virtual void fetchPartition( + const ASTPtr & partition, + const StorageMetadataPtr & metadata_snapshot, + const String & from, + bool fetch_part, + ContextPtr query_context); void writePartLog( PartLogElement::Type type, diff --git a/src/Storages/PartitionCommands.cpp b/src/Storages/PartitionCommands.cpp index 
e51a64d5d81..f09f60887e8 100644 --- a/src/Storages/PartitionCommands.cpp +++ b/src/Storages/PartitionCommands.cpp @@ -82,6 +82,7 @@ std::optional PartitionCommand::parse(const ASTAlterCommand * res.type = FETCH_PARTITION; res.partition = command_ast->partition; res.from_zookeeper_path = command_ast->from; + res.part = command_ast->part; return res; } else if (command_ast->type == ASTAlterCommand::FREEZE_PARTITION) @@ -140,7 +141,10 @@ std::string PartitionCommand::typeToString() const else return "DROP DETACHED PARTITION"; case PartitionCommand::Type::FETCH_PARTITION: - return "FETCH PARTITION"; + if (part) + return "FETCH PART"; + else + return "FETCH PARTITION"; case PartitionCommand::Type::FREEZE_ALL_PARTITIONS: return "FREEZE ALL"; case PartitionCommand::Type::FREEZE_PARTITION: diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a2b1f1737a2..2d836e00a08 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -112,9 +112,11 @@ namespace ErrorCodes extern const int NOT_A_LEADER; extern const int TABLE_WAS_NOT_DROPPED; extern const int PARTITION_ALREADY_EXISTS; + extern const int PART_ALREADY_EXISTS; extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS; extern const int RECEIVED_ERROR_FROM_REMOTE_IO_SERVER; extern const int PARTITION_DOESNT_EXIST; + extern const int PART_DOESNT_EXIST; extern const int UNFINISHED; extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; extern const int TOO_MANY_FETCHES; @@ -5356,11 +5358,11 @@ void StorageReplicatedMergeTree::getReplicaDelays(time_t & out_absolute_delay, t } } - void StorageReplicatedMergeTree::fetchPartition( const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from_, + bool fetch_part, ContextPtr query_context) { Macros::MacroExpansionInfo info; @@ -5373,40 +5375,54 @@ void StorageReplicatedMergeTree::fetchPartition( if (from.empty()) throw Exception("ZooKeeper path should not be empty", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - String partition_id = getPartitionIDFromQuery(partition, query_context); zkutil::ZooKeeperPtr zookeeper; if (auxiliary_zookeeper_name != default_zookeeper_name) - { zookeeper = getContext()->getAuxiliaryZooKeeper(auxiliary_zookeeper_name); - - LOG_INFO(log, "Will fetch partition {} from shard {} (auxiliary zookeeper '{}')", partition_id, from_, auxiliary_zookeeper_name); - } else - { zookeeper = getZooKeeper(); - LOG_INFO(log, "Will fetch partition {} from shard {}", partition_id, from_); - } - if (from.back() == '/') from.resize(from.size() - 1); + if (fetch_part) + { + String part_name = partition->as().value.safeGet(); + auto replica_path_ = findReplicaHavingPart(part_name, from, zookeeper); + + if (replica_path_.empty()) + throw Exception("fetch part " + part_name + " not exists !", ErrorCodes::PART_DOESNT_EXIST); + /** Let's check that there is no such part in the `detached` directory (where we will write the downloaded parts). + * Unreliable (there is a race condition) - such a part may appear a little later. 
+ */ + if (checkDetachPartIfExists(part_name)) + throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::PART_ALREADY_EXISTS); + LOG_INFO(log, "Will fetch part {} from shard {} (zookeeper '{}')", part_name, from_, auxiliary_zookeeper_name); + + try + { + /// part name , metadata, replica path , true, 0, zookeeper + if (!fetchPart(part_name, metadata_snapshot, replica_path_, true, 0, zookeeper)) + throw Exception("fetch part " + part_name + " failed! ", ErrorCodes::UNFINISHED); + } + catch (const DB::Exception & e) + { + if (e.code() != ErrorCodes::RECEIVED_ERROR_FROM_REMOTE_IO_SERVER && e.code() != ErrorCodes::RECEIVED_ERROR_TOO_MANY_REQUESTS + && e.code() != ErrorCodes::CANNOT_READ_ALL_DATA) + throw; + + LOG_INFO(log, e.displayText()); + } + return; + } + + String partition_id = getPartitionIDFromQuery(partition, query_context); + LOG_INFO(log, "Will fetch partition {} from shard {} (zookeeper '{}')", partition_id, from_, auxiliary_zookeeper_name); /** Let's check that there is no such partition in the `detached` directory (where we will write the downloaded parts). * Unreliable (there is a race condition) - such a partition may appear a little later. */ - Poco::DirectoryIterator dir_end; - for (const std::string & path : getDataPaths()) - { - for (Poco::DirectoryIterator dir_it{path + "detached/"}; dir_it != dir_end; ++dir_it) - { - MergeTreePartInfo part_info; - if (MergeTreePartInfo::tryParsePartName(dir_it.name(), &part_info, format_version) - && part_info.partition_id == partition_id) - throw Exception("Detached partition " + partition_id + " already exists.", ErrorCodes::PARTITION_ALREADY_EXISTS); - } - - } + if (checkDetachPartitionIfExists(partition_id)) + throw Exception("Detached partition " + partition_id + " already exists.", ErrorCodes::PARTITION_ALREADY_EXISTS); zkutil::Strings replicas; zkutil::Strings active_replicas; @@ -6913,4 +6929,46 @@ String StorageReplicatedMergeTree::getSharedDataReplica( return best_replica; } +String StorageReplicatedMergeTree::findReplicaHavingPart( + const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_) +{ + Strings replicas = zookeeper_->getChildren(zookeeper_path_ + "/replicas"); + + /// Select replicas in uniformly random order. 
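+        /// (Shuffling avoids always fetching from the first replica listed in ZooKeeper and spreads the load across replicas.)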
+ std::shuffle(replicas.begin(), replicas.end(), thread_local_rng); + + for (const String & replica : replicas) + { + if (zookeeper_->exists(zookeeper_path_ + "/replicas/" + replica + "/parts/" + part_name) + && zookeeper_->exists(zookeeper_path_ + "/replicas/" + replica + "/is_active")) + return zookeeper_path_ + "/replicas/" + replica; + } + + return {}; +} + +bool StorageReplicatedMergeTree::checkDetachPartIfExists(const String & part_name) +{ + Poco::DirectoryIterator dir_end; + for (const std::string & path : getDataPaths()) + for (Poco::DirectoryIterator dir_it{path + "detached/"}; dir_it != dir_end; ++dir_it) + if (dir_it.name() == part_name) + return true; + return false; +} + +bool StorageReplicatedMergeTree::checkDetachPartitionIfExists(const String & partition_name) +{ + Poco::DirectoryIterator dir_end; + for (const std::string & path : getDataPaths()) + { + for (Poco::DirectoryIterator dir_it{path + "detached/"}; dir_it != dir_end; ++dir_it) + { + MergeTreePartInfo part_info; + if (MergeTreePartInfo::tryParsePartName(dir_it.name(), &part_info, format_version) && part_info.partition_id == partition_name) + return true; + } + } + return false; +} } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 0180fa8bc1a..673c713fabc 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -522,8 +522,11 @@ private: /** Returns an empty string if no one has a part. */ String findReplicaHavingPart(const String & part_name, bool active); + String findReplicaHavingPart(const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_); bool checkReplicaHavePart(const String & replica, const String & part_name); + bool checkDetachPartIfExists(const String & part_name); + bool checkDetachPartitionIfExists(const String & partition_name); /** Find replica having specified part or any part that covers it. * If active = true, consider only active replicas. 
@@ -626,7 +629,12 @@ private:
     PartitionCommandsResultInfo attachPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, bool part, ContextPtr query_context) override;
     void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, ContextPtr query_context) override;
     void movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, ContextPtr query_context) override;
-    void fetchPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from, ContextPtr query_context) override;
+    void fetchPartition(
+        const ASTPtr & partition,
+        const StorageMetadataPtr & metadata_snapshot,
+        const String & from,
+        bool fetch_part,
+        ContextPtr query_context) override;

     /// Check granularity of already existing replicated table in zookeeper if it exists
     /// return true if it's fixed
diff --git a/tests/fuzz/ast.dict b/tests/fuzz/ast.dict
index 8327f276b31..7befb36c840 100644
--- a/tests/fuzz/ast.dict
+++ b/tests/fuzz/ast.dict
@@ -156,6 +156,7 @@
 "extractURLParameterNames"
 "extractURLParameters"
 "FETCH PARTITION"
+"FETCH PART"
 "FINAL"
 "FIRST"
 "firstSignificantSubdomain"
diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml
new file mode 100644
index 00000000000..b2b0667ebbf
--- /dev/null
+++ b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml
@@ -0,0 +1,28 @@
+<yandex>
+    <zookeeper>
+        <node index="1">
+            <host>zoo1</host>
+            <port>2181</port>
+        </node>
+        <node index="2">
+            <host>zoo2</host>
+            <port>2181</port>
+        </node>
+        <node index="3">
+            <host>zoo3</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+    <auxiliary_zookeepers>
+        <zookeeper2>
+            <node index="1">
+                <host>zoo1</host>
+                <port>2181</port>
+            </node>
+            <node index="2">
+                <host>zoo2</host>
+                <port>2181</port>
+            </node>
+        </zookeeper2>
+    </auxiliary_zookeepers>
+</yandex>
diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py
new file mode 100644
index 00000000000..3d13da49a63
--- /dev/null
+++ b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py
@@ -0,0 +1,40 @@
+
+
+import pytest
+from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True)
+
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_fetch_part_from_allowed_zookeeper(start_cluster):
+    node.query(
+        "CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;"
+    )
+    node.query("INSERT INTO simple VALUES ('2020-08-27', 1)")
+
+    node.query(
+        "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;"
+    )
+    node.query(
+        "ALTER TABLE simple2 FETCH PART '20200827_1_1_0' FROM 'zookeeper2:/clickhouse/tables/0/simple';"
+    )
+    node.query("ALTER TABLE simple2 ATTACH PART '20200827_1_1_0';")
+
+    with pytest.raises(QueryRuntimeException):
+        node.query(
+            "ALTER TABLE simple2 FETCH PART '20200827_1_1_0' FROM 'zookeeper:/clickhouse/tables/0/simple';"
+        )
+
+    assert node.query("SELECT id FROM simple2").strip() == "1"
diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py
diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py
index f9c10d68fe3..0c94dfd3c48 100644
--- a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py
+++ b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py
@@ -18,7 +18,7 @@ def start_cluster():
     cluster.shutdown()
 
 
-def test_fetch_part_from_allowed_zookeeper(start_cluster):
+def test_fetch_partition_from_allowed_zookeeper(start_cluster):
     node.query(
         "CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;"
     )
diff --git a/tests/testflows/rbac/tests/privileges/grant_option.py b/tests/testflows/rbac/tests/privileges/grant_option.py
index f337aec2619..a28350a78b9 100644
--- a/tests/testflows/rbac/tests/privileges/grant_option.py
+++ b/tests/testflows/rbac/tests/privileges/grant_option.py
@@ -89,7 +89,7 @@ def grant_option_check(grant_option_target, grant_target, user_name, table_type,
 @Examples("privilege", [
     ("ALTER MOVE PARTITION",), ("ALTER MOVE PART",), ("MOVE PARTITION",), ("MOVE PART",),
     ("ALTER DELETE",), ("DELETE",),
-    ("ALTER FETCH PARTITION",), ("FETCH PARTITION",),
+    ("ALTER FETCH PARTITION",), ("ALTER FETCH PART",), ("FETCH PARTITION",), ("FETCH PART",),
     ("ALTER FREEZE PARTITION",), ("FREEZE PARTITION",),
     ("ALTER UPDATE",), ("UPDATE",),
     ("ALTER ADD COLUMN",), ("ADD COLUMN",),

From fbd26789afe5b2aac3c9a808a24049dcd465755f Mon Sep 17 00:00:00 2001
From: songenjie
Date: Tue, 13 Apr 2021 14:08:26 +0800
Subject: [PATCH 068/133] [test] show privileges

---
 tests/queries/0_stateless/01271_show_privileges.reference | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference
index 7928f531a7d..e2784f1dfd5 100644
--- a/tests/queries/0_stateless/01271_show_privileges.reference
+++ b/tests/queries/0_stateless/01271_show_privileges.reference
@@ -28,7 +28,7 @@ ALTER TTL ['ALTER MODIFY TTL','MODIFY TTL'] TABLE ALTER TABLE
 ALTER MATERIALIZE TTL ['MATERIALIZE TTL'] TABLE ALTER TABLE
 ALTER SETTINGS ['ALTER SETTING','ALTER MODIFY SETTING','MODIFY SETTING'] TABLE ALTER TABLE
 ALTER MOVE PARTITION ['ALTER MOVE PART','MOVE PARTITION','MOVE PART'] TABLE ALTER TABLE
-ALTER FETCH PARTITION ['FETCH PARTITION'] TABLE ALTER TABLE
+ALTER FETCH PARTITION ['ALTER FETCH PART','FETCH PARTITION','FETCH PART'] TABLE ALTER TABLE
 ALTER FREEZE PARTITION ['FREEZE PARTITION','UNFREEZE'] TABLE ALTER TABLE
 ALTER TABLE [] \N ALTER
 ALTER VIEW REFRESH ['ALTER LIVE VIEW REFRESH','REFRESH VIEW'] VIEW ALTER VIEW

From 520f4f39eccfcc16491fbc58d9cb44856deb5f53 Mon Sep 17 00:00:00 2001
From: songenjie
Date: Tue, 13 Apr 2021 15:17:03 +0800
Subject: [PATCH 069/133] [test][integration] fetch part

---
 .../test_fetch_part_from_auxiliary_zookeeper/test.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py
index 3d13da49a63..17617f1c45c 100644
--- a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py
+++ b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py
@@ -28,13 +28,13 @@ def test_fetch_part_from_allowed_zookeeper(start_cluster):
         "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;"
     )
     node.query(
-        "ALTER TABLE simple2 FETCH PART 
'20200827_1_1_0' FROM 'zookeeper2:/clickhouse/tables/0/simple';" + "ALTER TABLE simple2 FETCH PART '20200827_0_0_0' FROM 'zookeeper2:/clickhouse/tables/0/simple';" ) - node.query("ALTER TABLE simple2 ATTACH PART '20200827_1_1_0';") + node.query("ALTER TABLE simple2 ATTACH PART '20200827_0_0_0';") with pytest.raises(QueryRuntimeException): node.query( - "ALTER TABLE simple2 FETCH PART '20200827_1_1_0' FROM 'zookeeper:/clickhouse/tables/0/simple';" + "ALTER TABLE simple2 FETCH PART '20200827_0_0_0' FROM 'zookeeper:/clickhouse/tables/0/simple';" ) assert node.query("SELECT id FROM simple2").strip() == "1" From 7c32cc1e18e364553ed31a76ca24c280df45e025 Mon Sep 17 00:00:00 2001 From: songenjie Date: Tue, 13 Apr 2021 17:34:04 +0800 Subject: [PATCH 070/133] fix case style for local variable --- src/Storages/StorageReplicatedMergeTree.cpp | 8 ++++---- src/Storages/StorageReplicatedMergeTree.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 2d836e00a08..98f1ee5560e 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5387,9 +5387,9 @@ void StorageReplicatedMergeTree::fetchPartition( if (fetch_part) { String part_name = partition->as().value.safeGet(); - auto replica_path_ = findReplicaHavingPart(part_name, from, zookeeper); + auto part_path = findReplicaHavingPart(part_name, from, zookeeper); - if (replica_path_.empty()) + if (part_path.empty()) throw Exception("fetch part " + part_name + " not exists !", ErrorCodes::PART_DOESNT_EXIST); /** Let's check that there is no such part in the `detached` directory (where we will write the downloaded parts). * Unreliable (there is a race condition) - such a part may appear a little later. @@ -5400,8 +5400,8 @@ void StorageReplicatedMergeTree::fetchPartition( try { - /// part name , metadata, replica path , true, 0, zookeeper - if (!fetchPart(part_name, metadata_snapshot, replica_path_, true, 0, zookeeper)) + /// part name , metadata, part_path , true, 0, zookeeper + if (!fetchPart(part_name, metadata_snapshot, part_path, true, 0, zookeeper)) throw Exception("fetch part " + part_name + " failed! ", ErrorCodes::UNFINISHED); } catch (const DB::Exception & e) diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 673c713fabc..bc59a1bdf5f 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -522,7 +522,7 @@ private: /** Returns an empty string if no one has a part. 
*/ String findReplicaHavingPart(const String & part_name, bool active); - String findReplicaHavingPart(const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_); + static String findReplicaHavingPart(const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_); bool checkReplicaHavePart(const String & replica, const String & part_name); bool checkDetachPartIfExists(const String & part_name); From 8abaf01a5d3ec5f41a29e3cf4915a34efc219b9a Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 13 Apr 2021 15:57:11 +0300 Subject: [PATCH 071/133] Updated dictionaries tests --- tests/performance/direct_dictionary.xml | 24 ++++++++++++++---------- tests/performance/flat_dictionary.xml | 15 +++++++-------- tests/performance/hashed_dictionary.xml | 20 ++++++++++++++------ 3 files changed, 35 insertions(+), 24 deletions(-) diff --git a/tests/performance/direct_dictionary.xml b/tests/performance/direct_dictionary.xml index e827ea0a76f..3f01449ed99 100644 --- a/tests/performance/direct_dictionary.xml +++ b/tests/performance/direct_dictionary.xml @@ -55,14 +55,14 @@ INSERT INTO simple_key_direct_dictionary_source_table SELECT number, number, toString(number), toDecimal64(number, 8), toString(number) FROM system.numbers - LIMIT 100000; + LIMIT 50000; INSERT INTO complex_key_direct_dictionary_source_table SELECT number, toString(number), number, toString(number), toDecimal64(number, 8), toString(number) FROM system.numbers - LIMIT 100000; + LIMIT 50000; @@ -79,47 +79,51 @@ elements_count - 25000 50000 75000 - 100000 - SELECT dictGet('default.simple_key_direct_dictionary', {column_name}, number) + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictGet('default.simple_key_direct_dictionary', {column_name}, key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictGet('default.simple_key_direct_dictionary', ('value_int', 'value_string', 'value_decimal', 'value_string_nullable'), number) + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictGet('default.simple_key_direct_dictionary', ('value_int', 'value_string', 'value_decimal', 'value_string_nullable'), key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictHas('default.simple_key_direct_dictionary', number) + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictHas('default.simple_key_direct_dictionary', key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictGet('default.complex_key_direct_dictionary', {column_name}, (number, toString(number))) + WITH (number, toString(number)) as key + SELECT dictGet('default.complex_key_direct_dictionary', {column_name}, key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictGet('default.complex_key_direct_dictionary', ('value_int', 'value_string', 'value_decimal', 'value_string_nullable'), (number, toString(number))) + WITH (number, toString(number)) as key + SELECT dictGet('default.complex_key_direct_dictionary', ('value_int', 'value_string', 'value_decimal', 'value_string_nullable'), key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictHas('default.complex_key_direct_dictionary', (number, toString(number))) + WITH (number, toString(number)) as key + SELECT dictHas('default.complex_key_direct_dictionary', key) FROM system.numbers LIMIT {elements_count} FORMAT Null; diff --git a/tests/performance/flat_dictionary.xml b/tests/performance/flat_dictionary.xml index 8111084586a..92ed975a671 100644 --- 
a/tests/performance/flat_dictionary.xml +++ b/tests/performance/flat_dictionary.xml @@ -1,8 +1,4 @@ - - please_fix_me - - CREATE TABLE simple_key_flat_dictionary_source_table ( @@ -52,22 +48,25 @@ 5000000 7500000 - 10000000 - SELECT dictGet('default.simple_key_flat_dictionary', {column_name}, rand64() % toUInt64(10000000)) + SELECT dictGet('default.simple_key_flat_dictionary', {column_name}, rand64() % toUInt64({elements_count})) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictHas('default.simple_key_flat_dictionary', rand64() % toUInt64(10000000)) + SELECT * FROM simple_key_flat_dictionary FORMAT Null; + + + + SELECT dictHas('default.simple_key_flat_dictionary', rand64() % toUInt64(75000000)) FROM system.numbers - LIMIT {elements_count} + LIMIT 75000000 FORMAT Null; diff --git a/tests/performance/hashed_dictionary.xml b/tests/performance/hashed_dictionary.xml index a38d2f30c23..b83018c67df 100644 --- a/tests/performance/hashed_dictionary.xml +++ b/tests/performance/hashed_dictionary.xml @@ -81,35 +81,43 @@ elements_count - 2500000 5000000 7500000 - 10000000 - SELECT dictGet('default.simple_key_hashed_dictionary', {column_name}, number) + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictGet('default.simple_key_hashed_dictionary', {column_name}, key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictHas('default.simple_key_hashed_dictionary', number) + SELECT * FROM default.simple_key_hashed_dictionary; + + + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictHas('default.simple_key_hashed_dictionary', key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictGet('default.complex_key_hashed_dictionary', {column_name}, (number, toString(number))) + WITH (rand64() % toUInt64({elements_count}), toString(rand64() % toUInt64({elements_count}))) as key + SELECT dictGet('default.complex_key_hashed_dictionary', {column_name}, key) FROM system.numbers LIMIT {elements_count} FORMAT Null; - SELECT dictHas('default.complex_key_hashed_dictionary', (number, toString(number))) + SELECT * FROM default.complex_key_hashed_dictionary; + + + WITH (rand64() % toUInt64({elements_count}), toString(rand64() % toUInt64({elements_count}))) as key + SELECT dictHas('default.complex_key_hashed_dictionary', key) FROM system.numbers LIMIT {elements_count} FORMAT Null; From fdbaf246fd619fcc50917ae9245547fd6f675dbd Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Tue, 13 Apr 2021 16:10:02 +0300 Subject: [PATCH 072/133] Fix typo --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index b984ca29d17..0c8fadaf503 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -41,7 +41,7 @@ fi if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \ - -- --path /var/lib/clickhouse1/ --logger.sderr /var/log/clickhouse-server/stderr1.log \ + -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \ --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \ --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \ --mysql_port 19004 --postgresql_port 19005 \ From 
da3d3e906a202b7cb4e718f0380805f07bb3cd06 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 13 Apr 2021 21:13:04 +0300 Subject: [PATCH 073/133] Updated tests --- tests/performance/flat_dictionary.xml | 3 ++- tests/performance/hashed_dictionary.xml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/performance/flat_dictionary.xml b/tests/performance/flat_dictionary.xml index 92ed975a671..56a94358eb9 100644 --- a/tests/performance/flat_dictionary.xml +++ b/tests/performance/flat_dictionary.xml @@ -60,7 +60,8 @@ - SELECT * FROM simple_key_flat_dictionary FORMAT Null; + SELECT * FROM simple_key_flat_dictionary + FORMAT Null; diff --git a/tests/performance/hashed_dictionary.xml b/tests/performance/hashed_dictionary.xml index b83018c67df..cd19ba035e5 100644 --- a/tests/performance/hashed_dictionary.xml +++ b/tests/performance/hashed_dictionary.xml @@ -113,7 +113,8 @@ FORMAT Null; - SELECT * FROM default.complex_key_hashed_dictionary; + SELECT * FROM default.complex_key_hashed_dictionary + FORMAT Null; WITH (rand64() % toUInt64({elements_count}), toString(rand64() % toUInt64({elements_count}))) as key From b00c66cb36ea611e750197f669c0cc702ba2d615 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 13 Apr 2021 21:53:55 +0300 Subject: [PATCH 074/133] More safe CPU dispatching --- src/Functions/CMakeLists.txt | 4 +- src/Functions/intDiv.cpp | 115 +---------------------------------- 2 files changed, 5 insertions(+), 114 deletions(-) diff --git a/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt index 1c3beb2e47d..7cbca175c0d 100644 --- a/src/Functions/CMakeLists.txt +++ b/src/Functions/CMakeLists.txt @@ -1,5 +1,7 @@ configure_file(config_functions.h.in ${ConfigIncludePath}/config_functions.h) +add_subdirectory(divide) + include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) add_headers_and_sources(clickhouse_functions .) @@ -25,7 +27,7 @@ target_link_libraries(clickhouse_functions PRIVATE ${ZLIB_LIBRARIES} boost::filesystem - libdivide + divide_impl ) if (OPENSSL_CRYPTO_LIBRARY) diff --git a/src/Functions/intDiv.cpp b/src/Functions/intDiv.cpp index 42b0299ce01..98ce4fe30de 100644 --- a/src/Functions/intDiv.cpp +++ b/src/Functions/intDiv.cpp @@ -1,28 +1,7 @@ #include #include -#include -#if defined(__x86_64__) - #define LIBDIVIDE_SSE2 1 - #define LIBDIVIDE_AVX2 1 - - #if defined(__clang__) - #pragma clang attribute push(__attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2"))), apply_to=function) - #else - #pragma GCC push_options - #pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native") - #endif -#endif - -#include - -#if defined(__x86_64__) - #if defined(__clang__) - #pragma clang attribute pop - #else - #pragma GCC pop_options - #endif -#endif +#include "divide/divide.h" namespace DB @@ -37,83 +16,6 @@ namespace /// Optimizations for integer division by a constant. 
-#if defined(__x86_64__)
-
-DECLARE_DEFAULT_CODE (
-    template <typename A, typename B, typename ResultType>
-    void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
-    {
-        libdivide::divider<A> divider(b);
-        const A * a_end = a_pos + size;
-
-        static constexpr size_t values_per_simd_register = 16 / sizeof(A);
-        const A * a_end_simd = a_pos + size / values_per_simd_register * values_per_simd_register;
-
-        while (a_pos < a_end_simd)
-        {
-            _mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos),
-                _mm_loadu_si128(reinterpret_cast<const __m128i *>(a_pos)) / divider);
-
-            a_pos += values_per_simd_register;
-            c_pos += values_per_simd_register;
-        }
-
-        while (a_pos < a_end)
-        {
-            *c_pos = *a_pos / divider;
-            ++a_pos;
-            ++c_pos;
-        }
-    }
-)
-
-DECLARE_AVX2_SPECIFIC_CODE (
-    template <typename A, typename B, typename ResultType>
-    void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
-    {
-        libdivide::divider<A> divider(b);
-        const A * a_end = a_pos + size;
-
-        static constexpr size_t values_per_simd_register = 32 / sizeof(A);
-        const A * a_end_simd = a_pos + size / values_per_simd_register * values_per_simd_register;
-
-        while (a_pos < a_end_simd)
-        {
-            _mm256_storeu_si256(reinterpret_cast<__m256i *>(c_pos),
-                _mm256_loadu_si256(reinterpret_cast<const __m256i *>(a_pos)) / divider);
-
-            a_pos += values_per_simd_register;
-            c_pos += values_per_simd_register;
-        }
-
-        while (a_pos < a_end)
-        {
-            *c_pos = *a_pos / divider;
-            ++a_pos;
-            ++c_pos;
-        }
-    }
-)
-
-#else
-
-template <typename A, typename B, typename ResultType>
-void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
-{
-    libdivide::divider<A> divider(b);
-    const A * a_end = a_pos + size;
-
-    while (a_pos < a_end)
-    {
-        *c_pos = *a_pos / divider;
-        ++a_pos;
-        ++c_pos;
-    }
-}
-
-#endif
-
-
 template <typename A, typename B>
 struct DivideIntegralByConstantImpl
     : BinaryOperation<A, B, DivideIntegralImpl<A, B>>
@@ -164,20 +66,7 @@ struct DivideIntegralByConstantImpl
         if (unlikely(static_cast<A>(b) == 0))
             throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION);
 
-#if USE_MULTITARGET_CODE
-        if (isArchSupported(TargetArch::AVX2))
-        {
-            TargetSpecific::AVX2::divideImpl(a_pos, b, c_pos, size);
-        }
-        else
-#endif
-        {
-#if __x86_64__
-            TargetSpecific::Default::divideImpl(a_pos, b, c_pos, size);
-#else
-            divideImpl(a_pos, b, c_pos, size);
-#endif
-        }
+        divideImpl(a_pos, b, c_pos, size);
     }
 };
 

From fb98915435e5ce9981958b5ab874320f0f15c989 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 13 Apr 2021 21:54:46 +0300
Subject: [PATCH 075/133] More safe CPU dispatching

---
 src/Functions/divide/CMakeLists.txt |  10 +++
 src/Functions/divide/divide.cpp     |  66 ++++++++++++++++++
 src/Functions/divide/divide.h       |   4 ++
 src/Functions/divide/divideImpl.cpp |  95 +++++++++++++++++++++++++++
 4 files changed, 175 insertions(+)
 create mode 100644 src/Functions/divide/CMakeLists.txt
 create mode 100644 src/Functions/divide/divide.cpp
 create mode 100644 src/Functions/divide/divide.h
 create mode 100644 src/Functions/divide/divideImpl.cpp

diff --git a/src/Functions/divide/CMakeLists.txt b/src/Functions/divide/CMakeLists.txt
new file mode 100644
index 00000000000..2bdd7e4c5ef
--- /dev/null
+++ b/src/Functions/divide/CMakeLists.txt
@@ -0,0 +1,10 @@
+add_library(divide_impl_sse2 divideImpl.cpp)
+target_compile_options(divide_impl_sse2 PRIVATE -msse2 -DNAMESPACE=SSE2)
+target_link_libraries(divide_impl_sse2 libdivide)
+
+add_library(divide_impl_avx2 divideImpl.cpp)
+target_compile_options(divide_impl_avx2 PRIVATE -mavx2 -DNAMESPACE=AVX2)
+target_link_libraries(divide_impl_avx2 libdivide)
+
+add_library(divide_impl divide.cpp)
+target_link_libraries(divide_impl divide_impl_sse2 divide_impl_avx2 clickhouse_common_io)
diff --git a/src/Functions/divide/divide.cpp b/src/Functions/divide/divide.cpp
new file mode 100644
index 00000000000..0c275dff6f6
--- /dev/null
+++ b/src/Functions/divide/divide.cpp
@@ -0,0 +1,66 @@
+#include "divide.h"
+#include <Common/CpuId.h>
+
+
+namespace SSE2
+{
+    template <typename A, typename B, typename ResultType>
+    void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size);
+}
+
+namespace AVX2
+{
+    template <typename A, typename B, typename ResultType>
+    void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size);
+}
+
+
+template <typename A, typename B, typename ResultType>
+void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
+{
+    if (DB::Cpu::CpuFlagsCache::have_AVX2)
+        AVX2::divideImpl(a_pos, b, c_pos, size);
+    else if (DB::Cpu::CpuFlagsCache::have_SSE2)
+        SSE2::divideImpl(a_pos, b, c_pos, size);
+}
+
+
+template void divideImpl<uint64_t, uint64_t, uint64_t>(const uint64_t * __restrict, uint64_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, uint32_t, uint64_t>(const uint64_t * __restrict, uint32_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, uint16_t, uint64_t>(const uint64_t * __restrict, uint16_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, char8_t, uint64_t>(const uint64_t * __restrict, char8_t, uint64_t * __restrict, size_t);
+
+template void divideImpl<uint32_t, uint64_t, uint32_t>(const uint32_t * __restrict, uint64_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, uint32_t, uint32_t>(const uint32_t * __restrict, uint32_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, uint16_t, uint32_t>(const uint32_t * __restrict, uint16_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, char8_t, uint32_t>(const uint32_t * __restrict, char8_t, uint32_t * __restrict, size_t);
+
+template void divideImpl<int64_t, uint64_t, int64_t>(const int64_t * __restrict, uint64_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, uint32_t, int64_t>(const int64_t * __restrict, uint32_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, uint16_t, int64_t>(const int64_t * __restrict, uint16_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, char8_t, int64_t>(const int64_t * __restrict, char8_t, int64_t * __restrict, size_t);
+
+template void divideImpl<int32_t, uint64_t, int32_t>(const int32_t * __restrict, uint64_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, uint32_t, int32_t>(const int32_t * __restrict, uint32_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, uint16_t, int32_t>(const int32_t * __restrict, uint16_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, char8_t, int32_t>(const int32_t * __restrict, char8_t, int32_t * __restrict, size_t);
+
+template void divideImpl<uint64_t, int64_t, uint64_t>(const uint64_t * __restrict, int64_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int32_t, uint64_t>(const uint64_t * __restrict, int32_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int16_t, uint64_t>(const uint64_t * __restrict, int16_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int8_t, uint64_t>(const uint64_t * __restrict, int8_t, uint64_t * __restrict, size_t);
+
+template void divideImpl<uint32_t, int64_t, uint32_t>(const uint32_t * __restrict, int64_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int32_t, uint32_t>(const uint32_t * __restrict, int32_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int16_t, uint32_t>(const uint32_t * __restrict, int16_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int8_t, uint32_t>(const uint32_t * __restrict, int8_t, uint32_t * __restrict, size_t);
+
+template void divideImpl<int64_t, int64_t, int64_t>(const int64_t * __restrict, int64_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int32_t, int64_t>(const int64_t * __restrict, int32_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int16_t, int64_t>(const int64_t * __restrict, int16_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int8_t, int64_t>(const int64_t * __restrict, int8_t, int64_t * __restrict, size_t);
+
+template void divideImpl<int32_t, int64_t, int32_t>(const int32_t * __restrict, int64_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int32_t, int32_t>(const int32_t * __restrict, int32_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int16_t, int32_t>(const int32_t * __restrict, int16_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int8_t, int32_t>(const int32_t * __restrict, int8_t, int32_t * __restrict, size_t);
diff --git a/src/Functions/divide/divide.h b/src/Functions/divide/divide.h
new file mode 100644
index 00000000000..11a5371bc31
--- /dev/null
+++ b/src/Functions/divide/divide.h
@@ -0,0 +1,4 @@
+#include <cstddef>
+
+template <typename A, typename B, typename ResultType>
+void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size);
diff --git a/src/Functions/divide/divideImpl.cpp b/src/Functions/divide/divideImpl.cpp
new file mode 100644
index 00000000000..a5c1755ab1f
--- /dev/null
+++ b/src/Functions/divide/divideImpl.cpp
@@ -0,0 +1,95 @@
+/// This translation unit should be compiled multiple times
+/// with different values of NAMESPACE and machine flags (sse2, avx2).
+
+#if !defined(NAMESPACE)
+#error "NAMESPACE macro must be defined"
+#endif
+
+#if defined(__AVX2__)
+    #define REG_SIZE 32
+    #define LIBDIVIDE_AVX2
+#elif defined(__SSE2__)
+    #define REG_SIZE 16
+    #define LIBDIVIDE_SSE2
+#endif
+
+#include <libdivide.h>
+
+
+namespace NAMESPACE
+{
+
+template <typename A, typename B, typename ResultType>
+void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
+{
+    libdivide::divider<A> divider(b);
+    const A * a_end = a_pos + size;
+
+#if defined(__SSE2__)
+    static constexpr size_t values_per_simd_register = REG_SIZE / sizeof(A);
+    const A * a_end_simd = a_pos + size / values_per_simd_register * values_per_simd_register;
+
+    while (a_pos < a_end_simd)
+    {
+#if defined(__AVX2__)
+        _mm256_storeu_si256(reinterpret_cast<__m256i *>(c_pos),
+            _mm256_loadu_si256(reinterpret_cast<const __m256i *>(a_pos)) / divider);
+#else
+        _mm_storeu_si128(reinterpret_cast<__m128i *>(c_pos),
+            _mm_loadu_si128(reinterpret_cast<const __m128i *>(a_pos)) / divider);
+#endif
+
+        a_pos += values_per_simd_register;
+        c_pos += values_per_simd_register;
+    }
+#endif
+
+    while (a_pos < a_end)
+    {
+        *c_pos = *a_pos / divider;
+        ++a_pos;
+        ++c_pos;
+    }
+}
+
+template void divideImpl<uint64_t, uint64_t, uint64_t>(const uint64_t * __restrict, uint64_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, uint32_t, uint64_t>(const uint64_t * __restrict, uint32_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, uint16_t, uint64_t>(const uint64_t * __restrict, uint16_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, char8_t, uint64_t>(const uint64_t * __restrict, char8_t, uint64_t * __restrict, size_t);
+
+template void divideImpl<uint32_t, uint64_t, uint32_t>(const uint32_t * __restrict, uint64_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, uint32_t, uint32_t>(const uint32_t * __restrict, uint32_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, uint16_t, uint32_t>(const uint32_t * __restrict, uint16_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, char8_t, uint32_t>(const uint32_t * __restrict, char8_t, uint32_t * __restrict, size_t);
+
+template void divideImpl<int64_t, uint64_t, int64_t>(const int64_t * __restrict, uint64_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, uint32_t, int64_t>(const int64_t * __restrict, uint32_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, uint16_t, int64_t>(const int64_t * __restrict, uint16_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, char8_t, int64_t>(const int64_t * __restrict, char8_t, int64_t * __restrict, size_t);
+
+template void divideImpl<int32_t, uint64_t, int32_t>(const int32_t * __restrict, uint64_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, uint32_t, int32_t>(const int32_t * __restrict, uint32_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, uint16_t, int32_t>(const int32_t * __restrict, uint16_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, char8_t, int32_t>(const int32_t * __restrict, char8_t, int32_t * __restrict, size_t);
+
+template void divideImpl<uint64_t, int64_t, uint64_t>(const uint64_t * __restrict, int64_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int32_t, uint64_t>(const uint64_t * __restrict, int32_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int16_t, uint64_t>(const uint64_t * __restrict, int16_t, uint64_t * __restrict, size_t);
+template void divideImpl<uint64_t, int8_t, uint64_t>(const uint64_t * __restrict, int8_t, uint64_t * __restrict, size_t);
+
+template void divideImpl<uint32_t, int64_t, uint32_t>(const uint32_t * __restrict, int64_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int32_t, uint32_t>(const uint32_t * __restrict, int32_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int16_t, uint32_t>(const uint32_t * __restrict, int16_t, uint32_t * __restrict, size_t);
+template void divideImpl<uint32_t, int8_t, uint32_t>(const uint32_t * __restrict, int8_t, uint32_t * __restrict, size_t);
+
+template void divideImpl<int64_t, int64_t, int64_t>(const int64_t * __restrict, int64_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int32_t, int64_t>(const int64_t * __restrict, int32_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int16_t, int64_t>(const int64_t * __restrict, int16_t, int64_t * __restrict, size_t);
+template void divideImpl<int64_t, int8_t, int64_t>(const int64_t * __restrict, int8_t, int64_t * __restrict, size_t);
+
+template void divideImpl<int32_t, int64_t, int32_t>(const int32_t * __restrict, int64_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int32_t, int32_t>(const int32_t * __restrict, int32_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int16_t, int32_t>(const int32_t * __restrict, int16_t, int32_t * __restrict, size_t);
+template void divideImpl<int32_t, int8_t, int32_t>(const int32_t * __restrict, int8_t, int32_t * __restrict, size_t);
+
+}
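The commit above replaces the compiler-macro multitarget machinery with plain runtime dispatch over separately compiled translation units. As a minimal, self-contained sketch of the same idea, assuming GCC or Clang on x86-64 (function names are illustrative, not ClickHouse APIs; here per-ISA copies are produced via target attributes instead of separate translation units):

#include <cstddef>
#include <cstdint>

// Same function body compiled twice for different target ISAs; the compiler
// may auto-vectorize each copy for its instruction set.
__attribute__((target("avx2")))
static void incImplAVX2(const uint32_t * a, uint32_t * c, size_t n)
{
    for (size_t i = 0; i < n; ++i)
        c[i] = a[i] + 1;
}

__attribute__((target("sse2")))
static void incImplSSE2(const uint32_t * a, uint32_t * c, size_t n)
{
    for (size_t i = 0; i < n; ++i)
        c[i] = a[i] + 1;
}

// Runtime dispatch on CPUID, analogous to the CpuFlagsCache checks above.
void inc(const uint32_t * a, uint32_t * c, size_t n)
{
    if (__builtin_cpu_supports("avx2"))
        incImplAVX2(a, c, n);
    else
        incImplSSE2(a, c, n);
}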
From 42412f9a085834f732772a9cc706bd7265c2b46a Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 13 Apr 2021 21:59:55 +0300
Subject: [PATCH 076/133] extern template

---
 src/Functions/divide/divide.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Functions/divide/divide.h b/src/Functions/divide/divide.h
index 11a5371bc31..daf406038f2 100644
--- a/src/Functions/divide/divide.h
+++ b/src/Functions/divide/divide.h
@@ -1,4 +1,4 @@
 #include <cstddef>
 
 template <typename A, typename B, typename ResultType>
-void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size);
+extern void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size);

From 8bd5578c92d3bd1f293405a7a16844d3b8223603 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 13 Apr 2021 22:02:07 +0300
Subject: [PATCH 077/133] Less amount of template instantiations

---
 src/Functions/divide/divide.cpp     | 20 --------------------
 src/Functions/divide/divideImpl.cpp | 20 --------------------
 2 files changed, 40 deletions(-)

diff --git a/src/Functions/divide/divide.cpp b/src/Functions/divide/divide.cpp
index 0c275dff6f6..1c3c11af312 100644
--- a/src/Functions/divide/divide.cpp
+++ b/src/Functions/divide/divide.cpp
@@ -35,26 +35,6 @@ template void divideImpl<uint32_t, uint64_t, uint32_t>(const uint32_t * __restri
 template void divideImpl<uint32_t, uint16_t, uint32_t>(const uint32_t * __restrict, uint16_t, uint32_t * __restrict, size_t);
 template void divideImpl<uint32_t, char8_t, uint32_t>(const uint32_t * __restrict, char8_t, uint32_t * __restrict, size_t);
 
-template void divideImpl<int64_t, uint64_t, int64_t>(const int64_t * __restrict, uint64_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, uint32_t, int64_t>(const int64_t * __restrict, uint32_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, uint16_t, int64_t>(const int64_t * __restrict, uint16_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, char8_t, int64_t>(const int64_t * __restrict, char8_t, int64_t * __restrict, size_t);
-
-template void divideImpl<int32_t, uint64_t, int32_t>(const int32_t * __restrict, uint64_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, uint32_t, int32_t>(const int32_t * __restrict, uint32_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, uint16_t, int32_t>(const int32_t * __restrict, uint16_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, char8_t, int32_t>(const int32_t * __restrict, char8_t, int32_t * __restrict, size_t);
-
-template void divideImpl<uint64_t, int64_t, uint64_t>(const uint64_t * __restrict, int64_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int32_t, uint64_t>(const uint64_t * __restrict, int32_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int16_t, uint64_t>(const uint64_t * __restrict, int16_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int8_t, uint64_t>(const uint64_t * __restrict, int8_t, uint64_t * __restrict, size_t);
-
-template void divideImpl<uint32_t, int64_t, uint32_t>(const uint32_t * __restrict, int64_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int32_t, uint32_t>(const uint32_t * __restrict, int32_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int16_t, uint32_t>(const uint32_t * __restrict, int16_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int8_t, uint32_t>(const uint32_t * __restrict, int8_t, uint32_t * __restrict, size_t);
-
 template void divideImpl<int64_t, int64_t, int64_t>(const int64_t * __restrict, int64_t, int64_t * __restrict, size_t);
 template void divideImpl<int64_t, int32_t, int64_t>(const int64_t * __restrict, int32_t, int64_t * __restrict, size_t);
 template void divideImpl<int64_t, int16_t, int64_t>(const int64_t * __restrict, int16_t, int64_t * __restrict, size_t);
diff --git a/src/Functions/divide/divideImpl.cpp b/src/Functions/divide/divideImpl.cpp
index a5c1755ab1f..a62ce8126e2 100644
--- a/src/Functions/divide/divideImpl.cpp
+++ b/src/Functions/divide/divideImpl.cpp
@@ -62,26 +62,6 @@ template void divideImpl<uint32_t, uint64_t, uint32_t>(const uint32_t * __restri
 template void divideImpl<uint32_t, uint16_t, uint32_t>(const uint32_t * __restrict, uint16_t, uint32_t * __restrict, size_t);
 template void divideImpl<uint32_t, char8_t, uint32_t>(const uint32_t * __restrict, char8_t, uint32_t * __restrict, size_t);
 
-template void divideImpl<int64_t, uint64_t, int64_t>(const int64_t * __restrict, uint64_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, uint32_t, int64_t>(const int64_t * __restrict, uint32_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, uint16_t, int64_t>(const int64_t * __restrict, uint16_t, int64_t * __restrict, size_t);
-template void divideImpl<int64_t, char8_t, int64_t>(const int64_t * __restrict, char8_t, int64_t * __restrict, size_t);
-
-template void divideImpl<int32_t, uint64_t, int32_t>(const int32_t * __restrict, uint64_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, uint32_t, int32_t>(const int32_t * __restrict, uint32_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, uint16_t, int32_t>(const int32_t * __restrict, uint16_t, int32_t * __restrict, size_t);
-template void divideImpl<int32_t, char8_t, int32_t>(const int32_t * __restrict, char8_t, int32_t * __restrict, size_t);
-
-template void divideImpl<uint64_t, int64_t, uint64_t>(const uint64_t * __restrict, int64_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int32_t, uint64_t>(const uint64_t * __restrict, int32_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int16_t, uint64_t>(const uint64_t * __restrict, int16_t, uint64_t * __restrict, size_t);
-template void divideImpl<uint64_t, int8_t, uint64_t>(const uint64_t * __restrict, int8_t, uint64_t * __restrict, size_t);
-
-template void divideImpl<uint32_t, int64_t, uint32_t>(const uint32_t * __restrict, int64_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int32_t, uint32_t>(const uint32_t * __restrict, int32_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int16_t, uint32_t>(const uint32_t * __restrict, int16_t, uint32_t * __restrict, size_t);
-template void divideImpl<uint32_t, int8_t, uint32_t>(const uint32_t * __restrict, int8_t, uint32_t * __restrict, size_t);
-
 template void divideImpl<int64_t, int64_t, int64_t>(const int64_t * __restrict, int64_t, int64_t * __restrict, size_t);
 template void divideImpl<int64_t, int32_t, int64_t>(const int64_t * __restrict, int32_t, int64_t * __restrict, size_t);
 template void divideImpl<int64_t, int16_t, int64_t>(const int64_t * __restrict, int16_t, int64_t * __restrict, size_t);

From 03662165f35fb6f45980d410e95239433189ffbe Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 13 Apr 2021 22:03:52 +0300
Subject: [PATCH 078/133] Comment

---
 src/Functions/intDiv.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Functions/intDiv.cpp b/src/Functions/intDiv.cpp
index 98ce4fe30de..79e35a19283 100644
--- a/src/Functions/intDiv.cpp
+++ b/src/Functions/intDiv.cpp
@@ -70,7 +70,7 @@ struct DivideIntegralByConstantImpl
     }
 };
 
-/** Specializations are specified for dividing numbers of the type UInt64 and UInt32 by the numbers of the same sign.
+/** Specializations are specified for dividing numbers of the type UInt64, UInt32, Int64, Int32 by the numbers of the same sign.
  * Can be expanded to all possible combinations, but more code is needed.
  */

From 7c32cc1e18e364553ed31a76ca24c280df45e025 Mon Sep 17 00:00:00 2001
From: songenjie
Date: Wed, 14 Apr 2021 10:05:41 +0800
Subject: [PATCH 079/133] fix suggests

---
 .../sql-reference/statements/alter/partition.md    |  8 ++++----
 src/Access/AccessType.h                            |  2 +-
 src/Common/ErrorCodes.cpp                          |  2 --
 src/Storages/StorageReplicatedMergeTree.cpp        | 15 +++++++--------
 src/Storages/StorageReplicatedMergeTree.h          |  4 ++--
 .../0_stateless/01271_show_privileges.reference    |  2 +-
 6 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md
index 23f10684ae7..948711e6d9e 100644
--- a/docs/en/sql-reference/statements/alter/partition.md
+++ b/docs/en/sql-reference/statements/alter/partition.md
@@ -16,7 +16,7 @@ The following operations with [partitions](../../../engines/table-engines/merget
 - [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition.
 - [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition.
 - [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition.
-- [FETCH PART\|PARTITION](#alter_fetch-partition) — Downloads a part or partition from another server.
+- [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server.
 - [MOVE PARTITION\|PART](#alter_move-partition) — Move partition/data part to another disk or volume.
 
@@ -198,10 +198,10 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
 
 The query works similar to `CLEAR COLUMN`, but it resets an index instead of a column data.
 
-## FETCH PART|PARTITION {#alter_fetch-partition}
+## FETCH PARTITION|PART {#alter_fetch-partition}
 
 ``` sql
-ALTER TABLE table_name FETCH PART|PARTITION partition_expr FROM 'path-in-zookeeper'
+ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper'
 ```
 
 Downloads a partition from another server. This query only works for the replicated tables.
 
@@ -226,7 +226,7 @@ ALTER TABLE users ATTACH PART 201901_2_2_0;
 
 Note that:
 
-- The `ALTER ... FETCH PART|PARTITION` query isn’t replicated. It places the part or partition to the `detached` directory only on the local server.
+- The `ALTER ... FETCH PARTITION|PART` query isn’t replicated. It places the part or partition to the `detached` directory only on the local server.
 - The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas.
Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas. diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index c7311997ba2..952cddba5f5 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -62,7 +62,7 @@ enum class AccessType enabled implicitly by the grant ALTER_TABLE */\ M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ - M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION, FETCH PART", TABLE, ALTER_TABLE) \ + M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \ M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \ \ M(ALTER_TABLE, "", GROUP, ALTER) \ diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 7658448976b..ad0463db889 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -549,8 +549,6 @@ M(579, INCORRECT_PART_TYPE) \ M(580, CANNOT_SET_ROUNDING_MODE) \ M(581, TOO_LARGE_DISTRIBUTED_DEPTH) \ - M(582, PART_DOESNT_EXIST) \ - M(583, PART_ALREADY_EXISTS) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 98f1ee5560e..977ade9f1b8 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -112,11 +112,9 @@ namespace ErrorCodes extern const int NOT_A_LEADER; extern const int TABLE_WAS_NOT_DROPPED; extern const int PARTITION_ALREADY_EXISTS; - extern const int PART_ALREADY_EXISTS; extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS; extern const int RECEIVED_ERROR_FROM_REMOTE_IO_SERVER; extern const int PARTITION_DOESNT_EXIST; - extern const int PART_DOESNT_EXIST; extern const int UNFINISHED; extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; extern const int TOO_MANY_FETCHES; @@ -132,6 +130,7 @@ namespace ErrorCodes extern const int UNKNOWN_POLICY; extern const int NO_SUCH_DATA_PART; extern const int INTERSERVER_SCHEME_DOESNT_MATCH; + extern const int DUPLICATED_PART_UUIDS; } namespace ActionLocks @@ -5390,12 +5389,12 @@ void StorageReplicatedMergeTree::fetchPartition( auto part_path = findReplicaHavingPart(part_name, from, zookeeper); if (part_path.empty()) - throw Exception("fetch part " + part_name + " not exists !", ErrorCodes::PART_DOESNT_EXIST); + throw Exception("fetch part " + part_name + " not exists !", ErrorCodes::NO_REPLICA_HAS_PART); /** Let's check that there is no such part in the `detached` directory (where we will write the downloaded parts). * Unreliable (there is a race condition) - such a part may appear a little later. */ - if (checkDetachPartIfExists(part_name)) - throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::PART_ALREADY_EXISTS); + if (checkIfDetachedPartExists(part_name)) + throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::DUPLICATED_PART_UUIDS); LOG_INFO(log, "Will fetch part {} from shard {} (zookeeper '{}')", part_name, from_, auxiliary_zookeeper_name); try @@ -5421,7 +5420,7 @@ void StorageReplicatedMergeTree::fetchPartition( /** Let's check that there is no such partition in the `detached` directory (where we will write the downloaded parts). 
* Unreliable (there is a race condition) - such a partition may appear a little later. */ - if (checkDetachPartitionIfExists(partition_id)) + if (checkIfDetachedPartitionExists(partition_id)) throw Exception("Detached partition " + partition_id + " already exists.", ErrorCodes::PARTITION_ALREADY_EXISTS); zkutil::Strings replicas; @@ -6947,7 +6946,7 @@ String StorageReplicatedMergeTree::findReplicaHavingPart( return {}; } -bool StorageReplicatedMergeTree::checkDetachPartIfExists(const String & part_name) +bool StorageReplicatedMergeTree::checkIfDetachedPartExists(const String & part_name) { Poco::DirectoryIterator dir_end; for (const std::string & path : getDataPaths()) @@ -6957,7 +6956,7 @@ bool StorageReplicatedMergeTree::checkDetachPartIfExists(const String & part_nam return false; } -bool StorageReplicatedMergeTree::checkDetachPartitionIfExists(const String & partition_name) +bool StorageReplicatedMergeTree::checkIfDetachedPartitionExists(const String & partition_name) { Poco::DirectoryIterator dir_end; for (const std::string & path : getDataPaths()) diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index bc59a1bdf5f..9122bdafbf0 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -525,8 +525,8 @@ private: static String findReplicaHavingPart(const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_); bool checkReplicaHavePart(const String & replica, const String & part_name); - bool checkDetachPartIfExists(const String & part_name); - bool checkDetachPartitionIfExists(const String & partition_name); + bool checkIfDetachedPartExists(const String & part_name); + bool checkIfDetachedPartitionExists(const String & partition_name); /** Find replica having specified part or any part that covers it. * If active = true, consider only active replicas. 
diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index e2784f1dfd5..c8b8662dc3e 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -28,7 +28,7 @@ ALTER TTL ['ALTER MODIFY TTL','MODIFY TTL'] TABLE ALTER TABLE ALTER MATERIALIZE TTL ['MATERIALIZE TTL'] TABLE ALTER TABLE ALTER SETTINGS ['ALTER SETTING','ALTER MODIFY SETTING','MODIFY SETTING'] TABLE ALTER TABLE ALTER MOVE PARTITION ['ALTER MOVE PART','MOVE PARTITION','MOVE PART'] TABLE ALTER TABLE -ALTER FETCH PARTITION ['ALTER FETCH PART','FETCH PARTITION','FETCH PART'] TABLE ALTER TABLE +ALTER FETCH PARTITION ['ALTER FETCH PART','FETCH PARTITION'] TABLE ALTER TABLE ALTER FREEZE PARTITION ['FREEZE PARTITION','UNFREEZE'] TABLE ALTER TABLE ALTER TABLE [] \N ALTER ALTER VIEW REFRESH ['ALTER LIVE VIEW REFRESH','REFRESH VIEW'] VIEW ALTER VIEW From 96a95b05dbc80ab558181725b7a14c198670d5b9 Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 10:14:13 +0800 Subject: [PATCH 080/133] fix suggests --- tests/testflows/rbac/tests/privileges/grant_option.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testflows/rbac/tests/privileges/grant_option.py b/tests/testflows/rbac/tests/privileges/grant_option.py index a28350a78b9..bc8b73eb32f 100644 --- a/tests/testflows/rbac/tests/privileges/grant_option.py +++ b/tests/testflows/rbac/tests/privileges/grant_option.py @@ -89,7 +89,7 @@ def grant_option_check(grant_option_target, grant_target, user_name, table_type, @Examples("privilege", [ ("ALTER MOVE PARTITION",), ("ALTER MOVE PART",), ("MOVE PARTITION",), ("MOVE PART",), ("ALTER DELETE",), ("DELETE",), - ("ALTER FETCH PARTITION",), ("ALTER FETCH PART",), ("FETCH PARTITION",), ("FETCH PART",), + ("ALTER FETCH PARTITION",), ("ALTER FETCH PART",), ("FETCH PARTITION",), ("ALTER FREEZE PARTITION",), ("FREEZE PARTITION",), ("ALTER UPDATE",), ("UPDATE",), ("ALTER ADD COLUMN",), ("ADD COLUMN",), From ff8958965a095c5d7d1c67a523e1702c5df57ba6 Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 10:19:08 +0800 Subject: [PATCH 081/133] fix some docs --- docs/en/sql-reference/statements/grant.md | 2 +- website/blog/en/2016/how-to-update-data-in-clickhouse.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index daa020f9469..0a5c737b550 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -279,7 +279,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL` - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING` - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART` - - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`, `FETCH PART` + - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART` - `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION` - `ALTER VIEW` Level: `GROUP` - `ALTER VIEW REFRESH`. Level: `VIEW`. 
Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
diff --git a/website/blog/en/2016/how-to-update-data-in-clickhouse.md b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
index 7e9b938203f..ed713db2aee 100644
--- a/website/blog/en/2016/how-to-update-data-in-clickhouse.md
+++ b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
@@ -67,7 +67,7 @@ There is a nice set of operations to work with partitions:
 - `DROP PARTITION` - Delete a partition.
 - `ATTACH PART|PARTITION` -- Add a new part or partition from the 'detached' directory to the table.
 - `FREEZE PARTITION` - Create a backup of a partition.
-- `FETCH PART|PARTITION` - Download a part or partition from another server.
+- `FETCH PARTITION|PART` - Download a part or partition from another server.
 
 We can do any data management operations on partitions level: move, copy and delete. Also, special DETACH and ATTACH operations are created to simplify data manipulation. DETACH detaches partition from table, moving all data to detached directory. Data is still there and you can copy it anywhere but detached data is not visible on request level. ATTACH is the opposite: attaches data from detached directory so it become visible.

From 8566df9b7d4eb6592a496608852145f78bcb24da Mon Sep 17 00:00:00 2001
From: songenjie
Date: Wed, 14 Apr 2021 17:11:59 +0800
Subject: [PATCH 082/133] fix some suggestions

---
 src/Storages/StorageReplicatedMergeTree.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 977ade9f1b8..d5c12628848 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -130,7 +130,7 @@ namespace ErrorCodes
     extern const int UNKNOWN_POLICY;
     extern const int NO_SUCH_DATA_PART;
     extern const int INTERSERVER_SCHEME_DOESNT_MATCH;
-    extern const int DUPLICATED_PART_UUIDS;
+    extern const int DUPLICATE_DATA_PART;
 }
 
 namespace ActionLocks
@@ -5394,7 +5394,7 @@ void StorageReplicatedMergeTree::fetchPartition(
      * Unreliable (there is a race condition) - such a part may appear a little later.
      */
     if (checkIfDetachedPartExists(part_name))
-        throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::DUPLICATED_PART_UUIDS);
+        throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::DUPLICATE_DATA_PART);
 
     LOG_INFO(log, "Will fetch part {} from shard {} (zookeeper '{}')", part_name, from_, auxiliary_zookeeper_name);
     try

From ff8958965a095c5d7d1c67a523e1702c5df57ba6 Mon Sep 17 00:00:00 2001
From: songenjie
Date: Wed, 14 Apr 2021 17:49:06 +0800
Subject: [PATCH 083/133] fix some suggestions

---
 website/blog/en/2016/how-to-update-data-in-clickhouse.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/blog/en/2016/how-to-update-data-in-clickhouse.md b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
index ed713db2aee..22c2fa3ccc1 100644
--- a/website/blog/en/2016/how-to-update-data-in-clickhouse.md
+++ b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
@@ -67,7 +67,7 @@ There is a nice set of operations to work with partitions:
 - `DROP PARTITION` - Delete a partition.
 - `ATTACH PART|PARTITION` -- Add a new part or partition from the 'detached' directory to the table.
 - `FREEZE PARTITION` - Create a backup of a partition.
-- `FETCH PARTITION|PART` - Download a part or partition from another server.
+- `FETCH PARTITION` - Download a partition from another server.
 
 We can do any data management operations on partitions level: move, copy and delete. Also, special DETACH and ATTACH operations are created to simplify data manipulation. DETACH detaches partition from table, moving all data to detached directory. Data is still there and you can copy it anywhere but detached data is not visible on request level. ATTACH is the opposite: attaches data from detached directory so it become visible.
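The next commit switches the remaining error messages to fmt-style formatting. A tiny sketch of that pattern, using std::runtime_error as an assumed stand-in for the ClickHouse-internal DB::Exception (the function name is illustrative):

#include <fmt/core.h>
#include <stdexcept>
#include <string>

// The message is built from a format string and arguments instead of '+' concatenation.
[[noreturn]] void throwNoReplicaHasPart(const std::string & part_name)
{
    throw std::runtime_error(fmt::format("Part {} does not exist on any replica", part_name));
}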
From 1fc040ac58c2cd00d5c6fa7bf64e0875e93e26ea Mon Sep 17 00:00:00 2001
From: songenjie
Date: Wed, 14 Apr 2021 17:54:56 +0800
Subject: [PATCH 084/133] fix some suggestions

---
 src/Storages/StorageReplicatedMergeTree.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index d5c12628848..f729f0a5be5 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -5389,19 +5389,19 @@ void StorageReplicatedMergeTree::fetchPartition(
         auto part_path = findReplicaHavingPart(part_name, from, zookeeper);
 
         if (part_path.empty())
-            throw Exception("fetch part " + part_name + " not exists !", ErrorCodes::NO_REPLICA_HAS_PART);
+            throw Exception(ErrorCodes::PART_DOESNT_EXIST, "Part {} does not exist on any replica", part_name);
 
         /** Let's check that there is no such part in the `detached` directory (where we will write the downloaded parts).
          * Unreliable (there is a race condition) - such a part may appear a little later.
          */
         if (checkIfDetachedPartExists(part_name))
-            throw Exception("Detached part " + part_name + " already exists.", ErrorCodes::DUPLICATE_DATA_PART);
+            throw Exception(ErrorCodes::DUPLICATE_DATA_PART, "Detached part " + part_name + " already exists.");
 
         LOG_INFO(log, "Will fetch part {} from shard {} (zookeeper '{}')", part_name, from_, auxiliary_zookeeper_name);
         try
         {
             /// part name , metadata, part_path , true, 0, zookeeper
             if (!fetchPart(part_name, metadata_snapshot, part_path, true, 0, zookeeper))
-                throw Exception("fetch part " + part_name + " failed! 
", ErrorCodes::UNFINISHED); + throw Exception(ErrorCodes::UNFINISHED, "Failed to fetch part {} from {}", part_name, from_); } catch (const DB::Exception & e) { From 2534ed84d58c914dee424a843d9f32225f47ac25 Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 18:40:15 +0800 Subject: [PATCH 085/133] test_fetch_partition_from_auxiliary_zookeeper --- .../__init__.py | 0 .../configs/zookeeper_config.xml | 28 ------------- .../test.py | 40 ------------------- .../test.py | 22 +++++++--- 4 files changed, 16 insertions(+), 74 deletions(-) delete mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py delete mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml delete mode 100644 tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml deleted file mode 100644 index b2b0667ebbf..00000000000 --- a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/configs/zookeeper_config.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - zoo1 - 2181 - - - zoo2 - 2181 - - - zoo3 - 2181 - - - - - - zoo1 - 2181 - - - zoo2 - 2181 - - - - diff --git a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py deleted file mode 100644 index 17617f1c45c..00000000000 --- a/tests/integration/test_fetch_part_from_auxiliary_zookeeper/test.py +++ /dev/null @@ -1,40 +0,0 @@ - - -import pytest -from helpers.client import QueryRuntimeException -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - - yield cluster - finally: - cluster.shutdown() - - -def test_fetch_part_from_allowed_zookeeper(start_cluster): - node.query( - "CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;" - ) - node.query("INSERT INTO simple VALUES ('2020-08-27', 1)") - - node.query( - "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;" - ) - node.query( - "ALTER TABLE simple2 FETCH PART '20200827_0_0_0' FROM 'zookeeper2:/clickhouse/tables/0/simple';" - ) - node.query("ALTER TABLE simple2 ATTACH PART '20200827_0_0_0';") - - with pytest.raises(QueryRuntimeException): - node.query( - "ALTER TABLE simple2 FETCH PART '20200827_0_0_0' FROM 'zookeeper:/clickhouse/tables/0/simple';" - ) - - assert node.query("SELECT id FROM simple2").strip() == "1" diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py index 0c94dfd3c48..d8d240349fc 100644 --- a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py +++ b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py @@ -18,7 +18,14 @@ def start_cluster(): cluster.shutdown() -def 
test_fetch_partition_from_allowed_zookeeper(start_cluster): +@pytest.mark.parametrize( + ('part', 'part_name'), + [ + ('PARTITION', '2020-08-27'), + ('PART', '20200827_0_0_0'), + ] +) +def test_fetch_part_from_allowed_zookeeper(start_cluster, part, part_name): node.query( "CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;" ) @@ -27,14 +34,17 @@ def test_fetch_partition_from_allowed_zookeeper(start_cluster): node.query( "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;" ) + node.query( - "ALTER TABLE simple2 FETCH PARTITION '2020-08-27' FROM 'zookeeper2:/clickhouse/tables/0/simple';" - ) - node.query("ALTER TABLE simple2 ATTACH PARTITION '2020-08-27';") + """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper2:/clickhouse/tables/0/simple';""".format( + part=part, part_name=part_name)) + + node.query("""ALTER TABLE simple2 ATTACH {part} '{part_name}';""".format( + part=part, part_name=part_name)) with pytest.raises(QueryRuntimeException): node.query( - "ALTER TABLE simple2 FETCH PARTITION '2020-08-27' FROM 'zookeeper:/clickhouse/tables/0/simple';" - ) + """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper:/clickhouse/tables/0/simple';""".format( + part=part, part_name=part_name)) assert node.query("SELECT id FROM simple2").strip() == "1" From c06c624fc7ae0acbde4b7daf4f044308e91df359 Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 18:55:42 +0800 Subject: [PATCH 086/133] fix build --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index f729f0a5be5..10061af22e7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5389,7 +5389,7 @@ void StorageReplicatedMergeTree::fetchPartition( auto part_path = findReplicaHavingPart(part_name, from, zookeeper); if (part_path.empty()) - throw Exception(ErrorCodes::PART_DOESNT_EXIST, "Part {} does not exist on any replica", part_name); + throw Exception(ErrorCodes::NO_REPLICA_HAS_PART, "Part {} does not exist on any replica", part_name); /** Let's check that there is no such part in the `detached` directory (where we will write the downloaded parts). * Unreliable (there is a race condition) - such a part may appear a little later. 
*/ From 382f702f592345789c071ba0ab28e26f4a247443 Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 20:04:59 +0800 Subject: [PATCH 087/133] test add param date --- .../test.py | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py index d8d240349fc..9553b0b64d3 100644 --- a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py +++ b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py @@ -1,5 +1,3 @@ - - import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster @@ -19,32 +17,32 @@ def start_cluster(): @pytest.mark.parametrize( - ('part', 'part_name'), + ('part', 'date', 'part_name'), [ - ('PARTITION', '2020-08-27'), - ('PART', '20200827_0_0_0'), + ('PARTITION', '2020-08-27', '2020-08-27'), + ('PART', '2020-08-28' '20200828_0_0_0'), ] ) -def test_fetch_part_from_allowed_zookeeper(start_cluster, part, part_name): +def test_fetch_part_from_allowed_zookeeper(start_cluster, part, date, part_name): node.query( - "CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;" + "CREATE TABLE IF NOT EXISTS simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;" ) - node.query("INSERT INTO simple VALUES ('2020-08-27', 1)") + + node.query("""INSERT INTO simple VALUES ('{date}', 1)""".format(date=date)) node.query( - "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;" + "CREATE TABLE IF NOT EXISTS simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;" ) node.query( """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper2:/clickhouse/tables/0/simple';""".format( part=part, part_name=part_name)) - node.query("""ALTER TABLE simple2 ATTACH {part} '{part_name}';""".format( - part=part, part_name=part_name)) + node.query("""ALTER TABLE simple2 ATTACH {part} '{part_name}';""".format(part=part, part_name=part_name)) with pytest.raises(QueryRuntimeException): node.query( """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper:/clickhouse/tables/0/simple';""".format( part=part, part_name=part_name)) - assert node.query("SELECT id FROM simple2").strip() == "1" + assert node.query("""SELECT id FROM simple2 where date = '{date}'""".format(date=date)).strip() == "1" From ffd3b3d445036afe43bc941ac1b88a9b0f5cad2b Mon Sep 17 00:00:00 2001 From: songenjie Date: Wed, 14 Apr 2021 21:15:53 +0800 Subject: [PATCH 088/133] fix some docs --- .../test_fetch_partition_from_auxiliary_zookeeper/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py index 9553b0b64d3..7bce2d50011 100644 --- a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py +++ b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py @@ -20,7 +20,7 @@ def start_cluster(): ('part', 'date', 'part_name'), [ ('PARTITION', '2020-08-27', '2020-08-27'), - ('PART', '2020-08-28' '20200828_0_0_0'), + ('PART', '2020-08-28', '20200828_0_0_0'), 
] ) def test_fetch_part_from_allowed_zookeeper(start_cluster, part, date, part_name): From 2c3abcaad12175b2545990e2f37515ba4c270523 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 14 Apr 2021 16:49:38 +0300 Subject: [PATCH 089/133] Updated test --- src/Dictionaries/DirectDictionary.cpp | 8 ++++++++ tests/performance/flat_dictionary.xml | 6 ++++-- tests/performance/hashed_dictionary.xml | 3 ++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/Dictionaries/DirectDictionary.cpp b/src/Dictionaries/DirectDictionary.cpp index bacb1a87dc9..ed5da3eead0 100644 --- a/src/Dictionaries/DirectDictionary.cpp +++ b/src/Dictionaries/DirectDictionary.cpp @@ -51,6 +51,14 @@ Columns DirectDictionary::getColumns( key_to_fetched_index.reserve(requested_keys.size()); auto fetched_columns_from_storage = request.makeAttributesResultColumns(); + for (size_t attribute_index = 0; attribute_index < request.attributesSize(); ++attribute_index) + { + if (!request.shouldFillResultColumnWithIndex(attribute_index)) + continue; + + auto & fetched_column_from_storage = fetched_columns_from_storage[attribute_index]; + fetched_column_from_storage->reserve(requested_keys.size()); + } size_t fetched_key_index = 0; diff --git a/tests/performance/flat_dictionary.xml b/tests/performance/flat_dictionary.xml index 56a94358eb9..a80631db541 100644 --- a/tests/performance/flat_dictionary.xml +++ b/tests/performance/flat_dictionary.xml @@ -53,7 +53,8 @@ - SELECT dictGet('default.simple_key_flat_dictionary', {column_name}, rand64() % toUInt64({elements_count})) + WITH rand64() % toUInt64({elements_count}) as key + SELECT dictGet('default.simple_key_flat_dictionary', {column_name}, key) FROM system.numbers LIMIT {elements_count} FORMAT Null; @@ -65,7 +66,8 @@ - SELECT dictHas('default.simple_key_flat_dictionary', rand64() % toUInt64(75000000)) + WITH rand64() % toUInt64(75000000) as key + SELECT dictHas('default.simple_key_flat_dictionary', key) FROM system.numbers LIMIT 75000000 FORMAT Null; diff --git a/tests/performance/hashed_dictionary.xml b/tests/performance/hashed_dictionary.xml index cd19ba035e5..5cbe1caeb23 100644 --- a/tests/performance/hashed_dictionary.xml +++ b/tests/performance/hashed_dictionary.xml @@ -95,7 +95,8 @@ FORMAT Null; - SELECT * FROM default.simple_key_hashed_dictionary; + SELECT * FROM default.simple_key_hashed_dictionary + FORMAT Null; WITH rand64() % toUInt64({elements_count}) as key From da6dc64e0468d54613e915bafe95083f687fc8d6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 15 Apr 2021 00:34:46 +0300 Subject: [PATCH 090/133] jemalloc: set dirty_decay_ms/muzzy_decay_ms to 1 second --- contrib/jemalloc-cmake/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index b174d4d361e..a82345975d1 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -34,9 +34,9 @@ if (OS_LINUX) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. 
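    # For context (a paraphrase of jemalloc's opt.dirty_decay_ms / opt.muzzy_decay_ms
    # documentation): the decay settings bound how long unused dirty/muzzy pages may
    # linger before being purged back to the OS, so lowering them from 10s to 1s
    # shrinks RSS sooner at the cost of somewhat more frequent purging.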
- set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:1000,dirty_decay_ms:1000") else() - set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:1000,dirty_decay_ms:1000") endif() # CACHE variable is empty, to allow changing defaults without necessity # to purge cache From 7f7e04117d1094240def5f34bb7e44d5f1ba8762 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Apr 2021 01:27:53 +0300 Subject: [PATCH 091/133] Arcadia, ARM, PowerPC --- src/Functions/divide/CMakeLists.txt | 26 +++++++++++++++++++------- src/Functions/divide/divide.cpp | 4 ++++ src/Functions/divide/divideImpl.cpp | 6 +++++- src/Functions/ya.make | 2 ++ 4 files changed, 30 insertions(+), 8 deletions(-) diff --git a/src/Functions/divide/CMakeLists.txt b/src/Functions/divide/CMakeLists.txt index 2bdd7e4c5ef..e5a10f0817c 100644 --- a/src/Functions/divide/CMakeLists.txt +++ b/src/Functions/divide/CMakeLists.txt @@ -1,10 +1,22 @@ -add_library(divide_impl_sse2 divideImpl.cpp) -target_compile_options(divide_impl_sse2 PRIVATE -msse2 -DNAMESPACE=SSE2) -target_link_libraries(divide_impl_sse2 libdivide) +# A library for integer division by constant with CPU dispatching. -add_library(divide_impl_avx2 divideImpl.cpp) -target_compile_options(divide_impl_avx2 PRIVATE -mavx2 -DNAMESPACE=AVX2) -target_link_libraries(divide_impl_avx2 libdivide) +if (ARCH_AMD64) + add_library(divide_impl_sse2 divideImpl.cpp) + target_compile_options(divide_impl_sse2 PRIVATE -msse2 -DNAMESPACE=SSE2) + target_link_libraries(divide_impl_sse2 libdivide) + + add_library(divide_impl_avx2 divideImpl.cpp) + target_compile_options(divide_impl_avx2 PRIVATE -mavx2 -DNAMESPACE=AVX2) + target_link_libraries(divide_impl_avx2 libdivide) + + set(IMPLEMENTATIONS divide_impl_sse2 divide_impl_avx2) +else () + add_library(divide_impl_generic divideImpl.cpp) + target_compile_options(divide_impl_generic PRIVATE -DNAMESPACE=Generic) + target_link_libraries(divide_impl_generic libdivide) + + set(IMPLEMENTATIONS divide_impl_generic) +endif () add_library(divide_impl divide.cpp) -target_link_libraries(divide_impl divide_impl_sse2 divide_impl_avx2 clickhouse_common_io) +target_link_libraries(divide_impl ${IMPLEMENTATIONS} clickhouse_common_io) diff --git a/src/Functions/divide/divide.cpp b/src/Functions/divide/divide.cpp index 1c3c11af312..7676c2cb02b 100644 --- a/src/Functions/divide/divide.cpp +++ b/src/Functions/divide/divide.cpp @@ -18,10 +18,14 @@ namespace AVX2 template void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size) { +#if defined(__x86_64__) && !defined(ARCADIA_BUILD) if (DB::Cpu::CpuFlagsCache::have_AVX2) AVX2::divideImpl(a_pos, b, c_pos, size); else if (DB::Cpu::CpuFlagsCache::have_SSE2) SSE2::divideImpl(a_pos, b, c_pos, size); +#else + Generic::divideImpl(a_pos, b, c_pos, size); +#endif } diff --git a/src/Functions/divide/divideImpl.cpp b/src/Functions/divide/divideImpl.cpp index a62ce8126e2..f4c1a97d3ad 100644 --- a/src/Functions/divide/divideImpl.cpp +++ b/src/Functions/divide/divideImpl.cpp @@ -2,7 +2,11 @@ /// with different values of NAMESPACE and machine flags (sse2, avx2). 
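/// In other words, this single source file is compiled several times (see the CMake
/// changes above), once per target, each copy wrapped in its own namespace (SSE2,
/// AVX2, and now Generic), and divide.cpp selects among them at runtime through
/// CpuFlagsCache - a standard manual CPU-dispatch pattern.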
#if !defined(NAMESPACE) -#error "NAMESPACE macro must be defined" + #if defined(ARCADIA_BUILD) + #define NAMESPACE Generic + #else + #error "NAMESPACE macro must be defined" + #endif #endif #if defined(__AVX2__) diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 52ed54ec64f..660f7b115bf 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -229,6 +229,8 @@ SRCS( defaultValueOfTypeName.cpp demange.cpp divide.cpp + divide/divide.cpp + divide/divideImpl.cpp dumpColumnStructure.cpp e.cpp empty.cpp From 2ae8839e3dc63ace3f8744817a81b192274a3c27 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Apr 2021 01:28:40 +0300 Subject: [PATCH 092/133] Style --- src/Functions/divide/divide.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Functions/divide/divide.h b/src/Functions/divide/divide.h index daf406038f2..1c17a461159 100644 --- a/src/Functions/divide/divide.h +++ b/src/Functions/divide/divide.h @@ -1,3 +1,5 @@ +#pragma once + #include template From 076c746e6d4835dc0a3239ef21065268f70af812 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Apr 2021 01:35:21 +0300 Subject: [PATCH 093/133] Add perf test --- tests/performance/intDiv.xml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 tests/performance/intDiv.xml diff --git a/tests/performance/intDiv.xml b/tests/performance/intDiv.xml new file mode 100644 index 00000000000..c6fa0238986 --- /dev/null +++ b/tests/performance/intDiv.xml @@ -0,0 +1,5 @@ + + SELECT count() FROM numbers(200000000) WHERE NOT ignore(intDiv(number, 1000000000)) + SELECT count() FROM numbers(200000000) WHERE NOT ignore(divide(number, 1000000000)) + SELECT count() FROM numbers(200000000) WHERE NOT ignore(toUInt32(divide(number, 1000000000))) + From 15153e504ade2e7a0c841d0a36b42272d5b9d6b9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Apr 2021 02:08:43 +0300 Subject: [PATCH 094/133] Fix unpleasant behaviour of Markdown format --- src/Processors/Formats/Impl/MarkdownRowOutputFormat.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/Processors/Formats/Impl/MarkdownRowOutputFormat.cpp b/src/Processors/Formats/Impl/MarkdownRowOutputFormat.cpp index 5108650ff0d..ee5d4193a45 100644 --- a/src/Processors/Formats/Impl/MarkdownRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MarkdownRowOutputFormat.cpp @@ -21,16 +21,13 @@ void MarkdownRowOutputFormat::writePrefix() } writeCString("\n|", out); String left_alignment = ":-|"; - String central_alignment = ":-:|"; String right_alignment = "-:|"; for (size_t i = 0; i < columns; ++i) { - if (isInteger(types[i])) + if (types[i]->shouldAlignRightInPrettyFormats()) writeString(right_alignment, out); - else if (isString(types[i])) - writeString(left_alignment, out); else - writeString(central_alignment, out); + writeString(left_alignment, out); } writeChar('\n', out); } From 8bd77e1c0c0a12c3764d89545fc775b5e2acf3e5 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Thu, 15 Apr 2021 11:58:14 +0300 Subject: [PATCH 095/133] Change markdown format test --- .../0_stateless/01231_markdown_format.reference | 10 +++++----- tests/queries/0_stateless/01231_markdown_format.sql | 6 ++++-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01231_markdown_format.reference b/tests/queries/0_stateless/01231_markdown_format.reference index e2ec03b401a..65838bfede7 100644 --- a/tests/queries/0_stateless/01231_markdown_format.reference +++ b/tests/queries/0_stateless/01231_markdown_format.reference @@ -1,5 +1,5 @@ 
-| id | name | array | -|-:|:-|:-:| -| 1 | name1 | [1,2,3] | -| 2 | name2 | [4,5,6] | -| 3 | name3 | [7,8,9] | +| id | name | array | nullable | low_cardinality | decimal | +|-:|:-|:-|:-|:-|-:| +| 1 | name1 | [1,2,3] | Some long string | name1 | 1.110000 | +| 2 | name2 | [4,5,60000] | \N | Another long string | 222.222222 | +| 30000 | One more long string | [7,8,9] | name3 | name3 | 3.330000 | diff --git a/tests/queries/0_stateless/01231_markdown_format.sql b/tests/queries/0_stateless/01231_markdown_format.sql index 693664be1ab..287e9a0e91e 100644 --- a/tests/queries/0_stateless/01231_markdown_format.sql +++ b/tests/queries/0_stateless/01231_markdown_format.sql @@ -1,6 +1,8 @@ DROP TABLE IF EXISTS makrdown; -CREATE TABLE markdown (id UInt32, name String, array Array(Int8)) ENGINE = Memory; -INSERT INTO markdown VALUES (1, 'name1', [1,2,3]), (2, 'name2', [4,5,6]), (3, 'name3', [7,8,9]); +CREATE TABLE markdown (id UInt32, name String, array Array(Int32), nullable Nullable(String), low_cardinality LowCardinality(String), decimal Decimal32(6)) ENGINE = Memory; +INSERT INTO markdown VALUES (1, 'name1', [1,2,3], 'Some long string', 'name1', 1.11), (2, 'name2', [4,5,60000], Null, 'Another long string', 222.222222), (30000, 'One more long string', [7,8,9], 'name3', 'name3', 3.33); SELECT * FROM markdown FORMAT Markdown; DROP TABLE IF EXISTS markdown + + From b909899cc9a0665396c6486264889c15858f6800 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Thu, 15 Apr 2021 11:59:56 +0300 Subject: [PATCH 096/133] Remove extra lines --- tests/queries/0_stateless/01231_markdown_format.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/01231_markdown_format.sql b/tests/queries/0_stateless/01231_markdown_format.sql index 287e9a0e91e..65c65389e12 100644 --- a/tests/queries/0_stateless/01231_markdown_format.sql +++ b/tests/queries/0_stateless/01231_markdown_format.sql @@ -4,5 +4,3 @@ INSERT INTO markdown VALUES (1, 'name1', [1,2,3], 'Some long string', 'name1', 1 SELECT * FROM markdown FORMAT Markdown; DROP TABLE IF EXISTS markdown - - From 75036debf4e0d91087948a22c4d2cd2242f9c1f1 Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Thu, 15 Apr 2021 19:25:30 +1000 Subject: [PATCH 097/133] Check type match of lambda and accumulator --- src/Functions/array/arrayFold.cpp | 17 ++++++++++++----- .../0_stateless/01813_array_fold_errors.sql | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index 5c80b01c5c9..bf0f019f0d8 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -9,8 +9,9 @@ namespace ErrorCodes { extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int SIZES_OF_ARRAYS_DOESNT_MATCH; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int SIZES_OF_ARRAYS_DOESNT_MATCH; + extern const int TYPE_MISMATCH; } @@ -26,8 +27,6 @@ public: bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - /// Called if at least one function argument is a lambda expression. - /// For argument-lambda expressions, it defines the types of arguments of these expressions. 
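/// (Rationale for the stricter check introduced below in getReturnTypeImpl: arrayFold
/// feeds the lambda's result back in as the accumulator on every element, so the fold
/// is only well-typed when the lambda's return type equals the accumulator type
/// exactly - hence the new TYPE_MISMATCH error.)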
void getLambdaArgumentTypes(DataTypes & arguments) const override { if (arguments.size() < 3) @@ -64,8 +63,16 @@ public: if (!data_type_function) throw Exception("First argument for function " + getName() + " must be a function.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - /// The types of the remaining arguments are already checked in getLambdaArgumentTypes. - return DataTypePtr(arguments.back().type); + + auto const accumulator_type = arguments.back().type; + auto const lambda_type = data_type_function->getReturnType(); + if (! accumulator_type->equals(*lambda_type)) + throw Exception("Return type of lambda function must be the same as the accumulator type. " + "Inferred type of lambda " + lambda_type->getName() + ", " + + "inferred type of accumulator " + accumulator_type->getName() + ".", + ErrorCodes::TYPE_MISMATCH); + + return accumulator_type; } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override diff --git a/tests/queries/0_stateless/01813_array_fold_errors.sql b/tests/queries/0_stateless/01813_array_fold_errors.sql index 49fd085dfe2..9b3776d7cea 100644 --- a/tests/queries/0_stateless/01813_array_fold_errors.sql +++ b/tests/queries/0_stateless/01813_array_fold_errors.sql @@ -9,3 +9,4 @@ SELECT arrayFold(x,acc -> acc+x, number, toInt64(0)) FROM system.numbers LIMIT 1 SELECT arrayFold(x,y,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7], toInt64(3)); -- { serverError 190 } SELECT arrayFold(x,acc -> acc + x * 2 + y * 3, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 47 } SELECT arrayFold(x,acc -> acc + x * 2, [1,2,3,4], [5,6,7,8], toInt64(3)); -- { serverError 43 } +SELECT arrayFold(x,acc -> concat(acc,', ', x), [1, 2, 3, 4], '0') -- { serverError 44 } From eceed68d62eaba43377180b8c3b0ea3a6a7855d7 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 12:31:09 +0300 Subject: [PATCH 098/133] Fix arcadia build S3 --- src/Dictionaries/ya.make | 1 - src/IO/S3/PocoHTTPClient.h | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Dictionaries/ya.make b/src/Dictionaries/ya.make index dc58d3f0a14..36152fe439a 100644 --- a/src/Dictionaries/ya.make +++ b/src/Dictionaries/ya.make @@ -51,7 +51,6 @@ SRCS( HierarchyDictionariesUtils.cpp IPAddressDictionary.cpp LibraryDictionarySource.cpp - LibraryDictionarySourceExternal.cpp MongoDBDictionarySource.cpp MySQLDictionarySource.cpp PolygonDictionary.cpp diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index da6c4dd5985..44af47237ba 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -1,5 +1,7 @@ #pragma once +#if USE_AWS_S3 + #include #include #include @@ -94,3 +96,5 @@ private: }; } + +#endif From b8a1ead3e9899ff4cbda7f8866bc7e6ff4323496 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 13:51:40 +0300 Subject: [PATCH 099/133] Updated hashed_dictionary test --- tests/performance/hashed_dictionary.xml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/performance/hashed_dictionary.xml b/tests/performance/hashed_dictionary.xml index 5cbe1caeb23..26164b4f888 100644 --- a/tests/performance/hashed_dictionary.xml +++ b/tests/performance/hashed_dictionary.xml @@ -94,10 +94,6 @@ LIMIT {elements_count} FORMAT Null; - - SELECT * FROM default.simple_key_hashed_dictionary - FORMAT Null; - WITH rand64() % toUInt64({elements_count}) as key SELECT dictHas('default.simple_key_hashed_dictionary', key) @@ -113,10 +109,6 @@ LIMIT {elements_count} FORMAT Null; - - SELECT * FROM 
default.complex_key_hashed_dictionary - FORMAT Null; - WITH (rand64() % toUInt64({elements_count}), toString(rand64() % toUInt64({elements_count}))) as key SELECT dictHas('default.complex_key_hashed_dictionary', key) From d19b2cb9489b4185261dfe347b5c851e829b58f3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Apr 2021 14:41:55 +0300 Subject: [PATCH 100/133] Fix build --- src/Functions/divide/divide.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Functions/divide/divide.cpp b/src/Functions/divide/divide.cpp index 7676c2cb02b..5ab11df2a65 100644 --- a/src/Functions/divide/divide.cpp +++ b/src/Functions/divide/divide.cpp @@ -1,7 +1,7 @@ #include "divide.h" #include - +#if defined(__x86_64__) && !defined(ARCADIA_BUILD) namespace SSE2 { template @@ -13,6 +13,13 @@ namespace AVX2 template void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size); } +#else +namespace Generic +{ + template + void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size); +} +#endif template From bf51f94f37400f9701bd1390ea5ca6a1c16341d5 Mon Sep 17 00:00:00 2001 From: Dmitry Krylov Date: Thu, 15 Apr 2021 21:46:15 +1000 Subject: [PATCH 101/133] Fix constness --- src/Functions/array/arrayFold.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index bf0f019f0d8..5fc7a304b03 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -72,7 +72,7 @@ public: + "inferred type of accumulator " + accumulator_type->getName() + ".", ErrorCodes::TYPE_MISMATCH); - return accumulator_type; + return DataTypePtr(accumulator_type); } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override From aead70e72ac72917d8badb8a3bc78995aa966cab Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 17:47:31 +0300 Subject: [PATCH 102/133] Updated zlib version --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index f7e0ac999a8..bf128f84df0 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit f7e0ac999a89e021c64f7bcfeadc4586daadb7aa +Subproject commit bf128f84df0806ec51c3513804222ae02007c4f3 From 77bc9e04c6df527ad245b21a307a6ae9ffa0af1c Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 17:50:28 +0300 Subject: [PATCH 103/133] Updated zlib with apple linker fix --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index b82d3497a5a..bf128f84df0 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit b82d3497a5afc46dec3c5d07e4b163b169f251d7 +Subproject commit bf128f84df0806ec51c3513804222ae02007c4f3 From d2cf03ea41babefd715436f1f9e23e48fb3e6f8d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 15 Apr 2021 21:00:16 +0300 Subject: [PATCH 104/133] Change logging from trace to debug for messages with rows/bytes --- src/Interpreters/Aggregator.cpp | 8 ++++---- src/Processors/Formats/IRowInputFormat.cpp | 2 +- src/Processors/Transforms/AggregatingInOrderTransform.cpp | 4 ++-- src/Processors/Transforms/AggregatingTransform.cpp | 4 ++-- src/Processors/Transforms/MergingAggregatedTransform.cpp | 2 +- src/Storages/Distributed/DirectoryMonitor.cpp | 6 +++--- src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 4 ++-- 
src/Storages/MergeTree/MergeTreePartsMover.cpp | 2 +- .../MergeTree/MergeTreeReverseSelectProcessor.cpp | 2 +- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 2 +- src/Storages/MergeTree/MergeTreeSequentialSource.cpp | 4 ++-- src/Storages/StorageBuffer.cpp | 6 +++--- 12 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 0f8b647096d..ad9cc0fc2d2 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -834,7 +834,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co ProfileEvents::increment(ProfileEvents::ExternalAggregationCompressedBytes, compressed_bytes); ProfileEvents::increment(ProfileEvents::ExternalAggregationUncompressedBytes, uncompressed_bytes); - LOG_TRACE(log, + LOG_DEBUG(log, "Written part in {} sec., {} rows, {} uncompressed, {} compressed," " {} uncompressed bytes per row, {} compressed bytes per row, compression rate: {}" " ({} rows/sec., {}/sec. uncompressed, {}/sec. compressed)", @@ -947,7 +947,7 @@ void Aggregator::writeToTemporaryFileImpl( /// `data_variants` will not destroy them in the destructor, they are now owned by ColumnAggregateFunction objects. data_variants.aggregator = nullptr; - LOG_TRACE(log, "Max size of temporary block: {} rows, {}.", max_temporary_block_size_rows, ReadableSize(max_temporary_block_size_bytes)); + LOG_DEBUG(log, "Max size of temporary block: {} rows, {}.", max_temporary_block_size_rows, ReadableSize(max_temporary_block_size_bytes)); } @@ -1481,7 +1481,7 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b } double elapsed_seconds = watch.elapsedSeconds(); - LOG_TRACE(log, + LOG_DEBUG(log, "Converted aggregated data to blocks. {} rows, {} in {} sec. ({} rows/sec., {}/sec.)", rows, ReadableSize(bytes), elapsed_seconds, rows / elapsed_seconds, @@ -2109,7 +2109,7 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final) size_t rows = block.rows(); size_t bytes = block.bytes(); double elapsed_seconds = watch.elapsedSeconds(); - LOG_TRACE(log, "Merged partially aggregated blocks. {} rows, {}. in {} sec. ({} rows/sec., {}/sec.)", + LOG_DEBUG(log, "Merged partially aggregated blocks. {} rows, {}. in {} sec. ({} rows/sec., {}/sec.)", rows, ReadableSize(bytes), elapsed_seconds, rows / elapsed_seconds, ReadableSize(bytes / elapsed_seconds)); diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 75a9abf6845..52e64a9d90d 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -190,7 +190,7 @@ Chunk IRowInputFormat::generate() if (num_errors && (params.allow_errors_num > 0 || params.allow_errors_ratio > 0)) { Poco::Logger * log = &Poco::Logger::get("IRowInputFormat"); - LOG_TRACE(log, "Skipped {} rows with errors while reading the input stream", num_errors); + LOG_DEBUG(log, "Skipped {} rows with errors while reading the input stream", num_errors); } readSuffix(); diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.cpp b/src/Processors/Transforms/AggregatingInOrderTransform.cpp index 392e27166ef..d8b7742cdf4 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.cpp +++ b/src/Processors/Transforms/AggregatingInOrderTransform.cpp @@ -214,8 +214,8 @@ IProcessor::Status AggregatingInOrderTransform::prepare() { output.push(std::move(to_push_chunk)); output.finish(); - LOG_TRACE(log, "Aggregated. 
{} to {} rows (from {})", src_rows, res_rows, - formatReadableSizeWithBinarySuffix(src_bytes)); + LOG_DEBUG(log, "Aggregated. {} to {} rows (from {})", + src_rows, res_rows, formatReadableSizeWithBinarySuffix(src_bytes)); return Status::Finished; } if (input.isFinished()) diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index c6907202d31..3400d06dae3 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -541,7 +541,7 @@ void AggregatingTransform::initGenerate() double elapsed_seconds = watch.elapsedSeconds(); size_t rows = variants.sizeWithoutOverflowRow(); - LOG_TRACE(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)", + LOG_DEBUG(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)", src_rows, rows, ReadableSize(src_bytes), elapsed_seconds, src_rows / elapsed_seconds, ReadableSize(src_bytes / elapsed_seconds)); @@ -599,7 +599,7 @@ void AggregatingTransform::initGenerate() pipe = Pipe::unitePipes(std::move(pipes)); } - LOG_TRACE(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), ReadableSize(files.sum_size_compressed), ReadableSize(files.sum_size_uncompressed)); + LOG_DEBUG(log, "Will merge {} temporary files of size {} compressed, {} uncompressed.", files.files.size(), ReadableSize(files.sum_size_compressed), ReadableSize(files.sum_size_uncompressed)); addMergingAggregatedMemoryEfficientTransform(pipe, params, temporary_data_merge_threads); diff --git a/src/Processors/Transforms/MergingAggregatedTransform.cpp b/src/Processors/Transforms/MergingAggregatedTransform.cpp index 1a04f85fd9c..ddc58d830da 100644 --- a/src/Processors/Transforms/MergingAggregatedTransform.cpp +++ b/src/Processors/Transforms/MergingAggregatedTransform.cpp @@ -52,7 +52,7 @@ Chunk MergingAggregatedTransform::generate() if (!generate_started) { generate_started = true; - LOG_TRACE(log, "Read {} blocks of partially aggregated data, total {} rows.", total_input_blocks, total_input_rows); + LOG_DEBUG(log, "Read {} blocks of partially aggregated data, total {} rows.", total_input_blocks, total_input_rows); /// Exception safety. Make iterator valid in case any method below throws. 
next_block = blocks.begin(); diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index 2afa9747c60..29b69209253 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -535,7 +535,7 @@ void StorageDistributedDirectoryMonitor::processFile(const std::string & file_pa ReadBufferFromFile in(file_path); const auto & distributed_header = readDistributedHeader(in, log); - LOG_TRACE(log, "Started processing `{}` ({} rows, {} bytes)", file_path, + LOG_DEBUG(log, "Started processing `{}` ({} rows, {} bytes)", file_path, formatReadableQuantity(distributed_header.rows), formatReadableSizeWithBinarySuffix(distributed_header.bytes)); @@ -631,7 +631,7 @@ struct StorageDistributedDirectoryMonitor::Batch Stopwatch watch; - LOG_TRACE(parent.log, "Sending a batch of {} files ({} rows, {} bytes).", file_indices.size(), + LOG_DEBUG(parent.log, "Sending a batch of {} files ({} rows, {} bytes).", file_indices.size(), formatReadableQuantity(total_rows), formatReadableSizeWithBinarySuffix(total_bytes)); @@ -876,7 +876,7 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map if (!total_rows || !header) { - LOG_TRACE(log, "Processing batch {} with old format (no header/rows)", in.getFileName()); + LOG_DEBUG(log, "Processing batch {} with old format (no header/rows)", in.getFileName()); CompressedReadBuffer decompressing_in(in); NativeBlockInputStream block_in(decompressing_in, DBMS_TCP_PROTOCOL_VERSION); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index ddb140989f6..7f7370e6f1f 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1054,7 +1054,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( false); /// Let's estimate total number of rows for progress bar. - LOG_TRACE(log, "Reading approx. {} rows with {} streams", total_rows, num_streams); + LOG_DEBUG(log, "Reading approx. {} rows with {} streams", total_rows, num_streams); for (size_t i = 0; i < num_streams; ++i) { @@ -1576,7 +1576,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal( settings.preferred_block_size_bytes, false); - LOG_TRACE(log, "Reading approx. {} rows with {} streams", total_rows_in_lonely_parts, num_streams_for_lonely_parts); + LOG_DEBUG(log, "Reading approx. 
{} rows with {} streams", total_rows_in_lonely_parts, num_streams_for_lonely_parts); for (size_t i = 0; i < num_streams_for_lonely_parts; ++i) { diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index cb21f50f9a0..f9e3883d5e2 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -182,7 +182,7 @@ bool MergeTreePartsMover::selectPartsForMove( if (!parts_to_move.empty()) { - LOG_TRACE(log, "Selected {} parts to move according to storage policy rules and {} parts according to TTL rules, {} total", parts_to_move_by_policy_rules, parts_to_move_by_ttl_rules, ReadableSize(parts_to_move_total_size_bytes)); + LOG_DEBUG(log, "Selected {} parts to move according to storage policy rules and {} parts according to TTL rules, {} total", parts_to_move_by_policy_rules, parts_to_move_by_ttl_rules, ReadableSize(parts_to_move_total_size_bytes)); return true; } else diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp index 1d3bb55eace..e9527efaa4a 100644 --- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp @@ -47,7 +47,7 @@ MergeTreeReverseSelectProcessor::MergeTreeReverseSelectProcessor( size_t total_rows = data_part->index_granularity.getRowsCountInRanges(all_mark_ranges); if (!quiet) - LOG_TRACE(log, "Reading {} ranges in reverse order from part {}, approx. {} rows starting from {}", + LOG_DEBUG(log, "Reading {} ranges in reverse order from part {}, approx. {} rows starting from {}", all_mark_ranges.size(), data_part->name, total_rows, data_part->index_granularity.getMarkStartingRow(all_mark_ranges.front().begin)); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 47429745b0d..980afa170e9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -47,7 +47,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( size_t total_rows = data_part->index_granularity.getRowsCountInRanges(all_mark_ranges); if (!quiet) - LOG_TRACE(log, "Reading {} ranges from part {}, approx. {} rows starting from {}", + LOG_DEBUG(log, "Reading {} ranges from part {}, approx. {} rows starting from {}", all_mark_ranges.size(), data_part->name, total_rows, data_part->index_granularity.getMarkStartingRow(all_mark_ranges.front().begin)); diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 745a2860c56..e82b1966461 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -29,10 +29,10 @@ MergeTreeSequentialSource::MergeTreeSequentialSource( { /// Print column name but don't pollute logs in case of many columns. 
if (columns_to_read.size() == 1) - LOG_TRACE(log, "Reading {} marks from part {}, total {} rows starting from the beginning of the part, column {}", + LOG_DEBUG(log, "Reading {} marks from part {}, total {} rows starting from the beginning of the part, column {}", data_part->getMarksCount(), data_part->name, data_part->rows_count, columns_to_read.front()); else - LOG_TRACE(log, "Reading {} marks from part {}, total {} rows starting from the beginning of the part", + LOG_DEBUG(log, "Reading {} marks from part {}, total {} rows starting from the beginning of the part", data_part->getMarksCount(), data_part->name, data_part->rows_count); } diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 5c4b4e7d1d8..c9bfb9e1ee7 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -542,7 +542,7 @@ public: { if (storage.destination_id) { - LOG_TRACE(storage.log, "Writing block with {} rows, {} bytes directly.", rows, bytes); + LOG_DEBUG(storage.log, "Writing block with {} rows, {} bytes directly.", rows, bytes); storage.writeBlockToDestination(block, destination); } return; @@ -804,7 +804,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc if (!destination_id) { - LOG_TRACE(log, "Flushing buffer with {} rows (discarded), {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)")); + LOG_DEBUG(log, "Flushing buffer with {} rows (discarded), {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)")); return; } @@ -841,7 +841,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc } UInt64 milliseconds = watch.elapsedMilliseconds(); - LOG_TRACE(log, "Flushing buffer with {} rows, {} bytes, age {} seconds, took {} ms {}.", rows, bytes, time_passed, milliseconds, (check_thresholds ? "(bg)" : "(direct)")); + LOG_DEBUG(log, "Flushing buffer with {} rows, {} bytes, age {} seconds, took {} ms {}.", rows, bytes, time_passed, milliseconds, (check_thresholds ? "(bg)" : "(direct)")); } From 19e04396295af4a15e6dddf674aaf16c6e285395 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 12 Apr 2021 09:04:38 +0300 Subject: [PATCH 105/133] Add ability to flush buffer only in background for StorageBuffer Add 3 new engine arguments: - flush_time - flush_rows - flush_bytes That will be checked only for background flush, this maybe useful if INSERT latency is "crucial". 
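For illustration, a Buffer definition using the new optional arguments could look
like the following sketch (it mirrors the added 01811 test; the table names and the
concrete values are placeholders):

    CREATE TABLE dest (key Int) ENGINE = Null();
    CREATE TABLE buf (key Int) ENGINE = Buffer(currentDatabase(), dest,
        /* num_layers= */ 1,
        /* min_time= */ 1, /* max_time= */ 86400,
        /* min_rows= */ 1e9, /* max_rows= */ 1e6,
        /* min_bytes= */ 0, /* max_bytes= */ 4e6,
        /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */ 0);

With thresholds like these, typical small INSERTs never flush synchronously, while
the background thread flushes once more than flush_rows rows have accumulated:
checkThresholdsImpl() consults the flush_* thresholds only when direct == false,
i.e. never on the INSERT path itself.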
---
 .../engines/table-engines/special/buffer.md   | 12 ++-
 src/Common/ProfileEvents.cpp                  |  3 +
 src/Storages/StorageBuffer.cpp                | 80 ++++++++++++++-----
 src/Storages/StorageBuffer.h                  | 17 ++--
 ..._storage_buffer_flush_parameters.reference |  1 +
 .../01811_storage_buffer_flush_parameters.sql | 22 +++++
 .../01817_storage_buffer_parameters.reference |  0
 .../01817_storage_buffer_parameters.sql       | 42 ++++++++++
 8 files changed, 147 insertions(+), 30 deletions(-)
 create mode 100644 tests/queries/0_stateless/01811_storage_buffer_flush_parameters.reference
 create mode 100644 tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql
 create mode 100644 tests/queries/0_stateless/01817_storage_buffer_parameters.reference
 create mode 100644 tests/queries/0_stateless/01817_storage_buffer_parameters.sql

diff --git a/docs/en/engines/table-engines/special/buffer.md b/docs/en/engines/table-engines/special/buffer.md
index bf6c08f8f6c..8245cd19e8c 100644
--- a/docs/en/engines/table-engines/special/buffer.md
+++ b/docs/en/engines/table-engines/special/buffer.md
@@ -18,11 +18,17 @@ Engine parameters:
 - `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` of independent buffers. Recommended value: 16.
 - `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.

+Optional engine parameters:
+
+- `flush_time`, `flush_rows`, `flush_bytes` – Conditions for flushing data from the buffer that are checked only in the background (omitted or zero means no `flush*` parameters).
+
 Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.

-- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
-- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
-- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
+Also, if at least one `flush*` condition is met, a flush is initiated in the background. This is different from `max*`, since `flush*` allows you to configure background flushes separately, avoiding extra latency for `INSERT` (into `Buffer`) queries.
+
+- `min_time`, `max_time`, `flush_time` – Condition for the time in seconds from the moment of the first write to the buffer.
+- `min_rows`, `max_rows`, `flush_rows` – Condition for the number of rows in the buffer.
+- `min_bytes`, `max_bytes`, `flush_bytes` – Condition for the number of bytes in the buffer.

 During the write operation, data is inserted to a `num_layers` number of random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index d0876c5e69c..162d6e035cc 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -146,6 +146,9 @@ M(StorageBufferPassedTimeMaxThreshold, "") \ M(StorageBufferPassedRowsMaxThreshold, "") \ M(StorageBufferPassedBytesMaxThreshold, "") \ + M(StorageBufferPassedTimeFlushThreshold, "") \ + M(StorageBufferPassedRowsFlushThreshold, "") \ + M(StorageBufferPassedBytesFlushThreshold, "") \ M(StorageBufferLayerLockReadersWaitMilliseconds, "Time for waiting for Buffer layer during reading") \ M(StorageBufferLayerLockWritersWaitMilliseconds, "Time for waiting free Buffer layer to write to (can be used to tune Buffer layers)") \ \ diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 5c4b4e7d1d8..7b03622431d 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -40,6 +40,9 @@ namespace ProfileEvents extern const Event StorageBufferPassedTimeMaxThreshold; extern const Event StorageBufferPassedRowsMaxThreshold; extern const Event StorageBufferPassedBytesMaxThreshold; + extern const Event StorageBufferPassedTimeFlushThreshold; + extern const Event StorageBufferPassedRowsFlushThreshold; + extern const Event StorageBufferPassedBytesFlushThreshold; extern const Event StorageBufferLayerLockReadersWaitMilliseconds; extern const Event StorageBufferLayerLockWritersWaitMilliseconds; } @@ -103,6 +106,7 @@ StorageBuffer::StorageBuffer( size_t num_shards_, const Thresholds & min_thresholds_, const Thresholds & max_thresholds_, + const Thresholds & flush_thresholds_, const StorageID & destination_id_, bool allow_materialized_) : IStorage(table_id_) @@ -110,6 +114,7 @@ StorageBuffer::StorageBuffer( , num_shards(num_shards_), buffers(num_shards_) , min_thresholds(min_thresholds_) , max_thresholds(max_thresholds_) + , flush_thresholds(flush_thresholds_) , destination_id(destination_id_) , allow_materialized(allow_materialized_) , log(&Poco::Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")")) @@ -602,7 +607,7 @@ private: { buffer.data = sorted_block.cloneEmpty(); } - else if (storage.checkThresholds(buffer, current_time, sorted_block.rows(), sorted_block.bytes())) + else if (storage.checkThresholds(buffer, /* direct= */true, current_time, sorted_block.rows(), sorted_block.bytes())) { /** If, after inserting the buffer, the constraints are exceeded, then we will reset the buffer. 
* This also protects against unlimited consumption of RAM, since if it is impossible to write to the table, @@ -713,7 +718,7 @@ bool StorageBuffer::supportsPrewhere() const return false; } -bool StorageBuffer::checkThresholds(const Buffer & buffer, time_t current_time, size_t additional_rows, size_t additional_bytes) const +bool StorageBuffer::checkThresholds(const Buffer & buffer, bool direct, time_t current_time, size_t additional_rows, size_t additional_bytes) const { time_t time_passed = 0; if (buffer.first_write_time) @@ -722,11 +727,11 @@ bool StorageBuffer::checkThresholds(const Buffer & buffer, time_t current_time, size_t rows = buffer.data.rows() + additional_rows; size_t bytes = buffer.data.bytes() + additional_bytes; - return checkThresholdsImpl(rows, bytes, time_passed); + return checkThresholdsImpl(direct, rows, bytes, time_passed); } -bool StorageBuffer::checkThresholdsImpl(size_t rows, size_t bytes, time_t time_passed) const +bool StorageBuffer::checkThresholdsImpl(bool direct, size_t rows, size_t bytes, time_t time_passed) const { if (time_passed > min_thresholds.time && rows > min_thresholds.rows && bytes > min_thresholds.bytes) { @@ -752,6 +757,27 @@ bool StorageBuffer::checkThresholdsImpl(size_t rows, size_t bytes, time_t time_p return true; } + if (!direct) + { + if (flush_thresholds.time && time_passed > flush_thresholds.time) + { + ProfileEvents::increment(ProfileEvents::StorageBufferPassedTimeFlushThreshold); + return true; + } + + if (flush_thresholds.rows && rows > flush_thresholds.rows) + { + ProfileEvents::increment(ProfileEvents::StorageBufferPassedRowsFlushThreshold); + return true; + } + + if (flush_thresholds.bytes && bytes > flush_thresholds.bytes) + { + ProfileEvents::increment(ProfileEvents::StorageBufferPassedBytesFlushThreshold); + return true; + } + } + return false; } @@ -785,7 +811,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc if (check_thresholds) { - if (!checkThresholdsImpl(rows, bytes, time_passed)) + if (!checkThresholdsImpl(/* direct= */false, rows, bytes, time_passed)) return; } else @@ -1040,16 +1066,17 @@ void registerStorageBuffer(StorageFactory & factory) * * db, table - in which table to put data from buffer. * num_buckets - level of parallelism. - * min_time, max_time, min_rows, max_rows, min_bytes, max_bytes - conditions for flushing the buffer. + * min_time, max_time, min_rows, max_rows, min_bytes, max_bytes - conditions for flushing the buffer, + * flush_time, flush_rows, flush_bytes - conditions for flushing. */ factory.registerStorage("Buffer", [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; - if (engine_args.size() != 9) - throw Exception("Storage Buffer requires 9 parameters: " - " destination_database, destination_table, num_buckets, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes.", + if (engine_args.size() < 9 || engine_args.size() > 12) + throw Exception("Storage Buffer requires from 9 to 12 parameters: " + " destination_database, destination_table, num_buckets, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes[, flush_time, flush_rows, flush_bytes].", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); // Table and database name arguments accept expressions, evaluate them. @@ -1058,7 +1085,7 @@ void registerStorageBuffer(StorageFactory & factory) // After we evaluated all expressions, check that all arguments are // literals. 
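        // (The loop bound changes from the literal 9 to engine_args.size() so that the
        // optional flush_time/flush_rows/flush_bytes arguments get the same
        // "must be a literal" validation as the nine mandatory ones.)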
- for (size_t i = 0; i < 9; i++) + for (size_t i = 0; i < engine_args.size(); i++) { if (!typeid_cast(engine_args[i].get())) { @@ -1068,17 +1095,29 @@ void registerStorageBuffer(StorageFactory & factory) } } - String destination_database = engine_args[0]->as().value.safeGet(); - String destination_table = engine_args[1]->as().value.safeGet(); + size_t i = 0; - UInt64 num_buckets = applyVisitor(FieldVisitorConvertToNumber(), engine_args[2]->as().value); + String destination_database = engine_args[i++]->as().value.safeGet(); + String destination_table = engine_args[i++]->as().value.safeGet(); - Int64 min_time = applyVisitor(FieldVisitorConvertToNumber(), engine_args[3]->as().value); - Int64 max_time = applyVisitor(FieldVisitorConvertToNumber(), engine_args[4]->as().value); - UInt64 min_rows = applyVisitor(FieldVisitorConvertToNumber(), engine_args[5]->as().value); - UInt64 max_rows = applyVisitor(FieldVisitorConvertToNumber(), engine_args[6]->as().value); - UInt64 min_bytes = applyVisitor(FieldVisitorConvertToNumber(), engine_args[7]->as().value); - UInt64 max_bytes = applyVisitor(FieldVisitorConvertToNumber(), engine_args[8]->as().value); + UInt64 num_buckets = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + + StorageBuffer::Thresholds min; + StorageBuffer::Thresholds max; + StorageBuffer::Thresholds flush; + + min.time = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + max.time = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + min.rows = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + max.rows = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + min.bytes = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + max.bytes = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + if (engine_args.size() > i) + flush.time = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + if (engine_args.size() > i) + flush.rows = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); + if (engine_args.size() > i) + flush.bytes = applyVisitor(FieldVisitorConvertToNumber(), engine_args[i++]->as().value); /// If destination_id is not set, do not write data from the buffer, but simply empty the buffer. StorageID destination_id = StorageID::createEmpty(); @@ -1094,8 +1133,7 @@ void registerStorageBuffer(StorageFactory & factory) args.constraints, args.getContext(), num_buckets, - StorageBuffer::Thresholds{min_time, min_rows, min_bytes}, - StorageBuffer::Thresholds{max_time, max_rows, max_bytes}, + min, max, flush, destination_id, static_cast(args.getLocalContext()->getSettingsRef().insert_allow_materialized_columns)); }, diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index b29bbf179f4..1747c024a74 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -35,6 +35,10 @@ namespace DB * Thresholds can be exceeded. For example, if max_rows = 1 000 000, the buffer already had 500 000 rows, * and a part of 800 000 rows is added, then there will be 1 300 000 rows in the buffer, and then such a block will be written to the subordinate table. * + * There are also separate thresholds for flush, those thresholds are checked only for non-direct flush. + * This maybe useful if you do not want to add extra latency for INSERT queries, + * so you can set max_rows=1e6 and flush_rows=500e3, then each 500e3 rows buffer will be flushed in background only. 
+ * * When you destroy a Buffer table, all remaining data is flushed to the subordinate table. * The data in the buffer is not replicated, not logged to disk, not indexed. With a rough restart of the server, the data is lost. */ @@ -45,12 +49,11 @@ friend class BufferSource; friend class BufferBlockOutputStream; public: - /// Thresholds. struct Thresholds { - time_t time; /// The number of seconds from the insertion of the first row into the block. - size_t rows; /// The number of rows in the block. - size_t bytes; /// The number of (uncompressed) bytes in the block. + time_t time = 0; /// The number of seconds from the insertion of the first row into the block. + size_t rows = 0; /// The number of rows in the block. + size_t bytes = 0; /// The number of (uncompressed) bytes in the block. }; std::string getName() const override { return "Buffer"; } @@ -135,6 +138,7 @@ private: const Thresholds min_thresholds; const Thresholds max_thresholds; + const Thresholds flush_thresholds; StorageID destination_id; bool allow_materialized; @@ -153,8 +157,8 @@ private: /// are exceeded. If reset_block_structure is set - clears inner block /// structure inside buffer (useful in OPTIMIZE and ALTER). void flushBuffer(Buffer & buffer, bool check_thresholds, bool locked = false, bool reset_block_structure = false); - bool checkThresholds(const Buffer & buffer, time_t current_time, size_t additional_rows = 0, size_t additional_bytes = 0) const; - bool checkThresholdsImpl(size_t rows, size_t bytes, time_t time_passed) const; + bool checkThresholds(const Buffer & buffer, bool direct, time_t current_time, size_t additional_rows = 0, size_t additional_bytes = 0) const; + bool checkThresholdsImpl(bool direct, size_t rows, size_t bytes, time_t time_passed) const; /// `table` argument is passed, as it is sometimes evaluated beforehand. It must match the `destination`. 
void writeBlockToDestination(const Block & block, StoragePtr table); @@ -177,6 +181,7 @@ protected: size_t num_shards_, const Thresholds & min_thresholds_, const Thresholds & max_thresholds_, + const Thresholds & flush_thresholds_, const StorageID & destination_id, bool allow_materialized_); }; diff --git a/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.reference b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.reference new file mode 100644 index 00000000000..209e3ef4b62 --- /dev/null +++ b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.reference @@ -0,0 +1 @@ +20 diff --git a/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql new file mode 100644 index 00000000000..dac68ad4ae8 --- /dev/null +++ b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql @@ -0,0 +1,22 @@ +drop table if exists data_01811; +drop table if exists buffer_01811; + +create table data_01811 (key Int) Engine=Memory(); +/* Buffer with flush_rows=1000 */ +create table buffer_01811 (key Int) Engine=Buffer(currentDatabase(), data_01811, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0 +); + +insert into buffer_01811 select * from numbers(10); +insert into buffer_01811 select * from numbers(10); + +-- wait for background buffer flush +select sleep(3) format Null; +select count() from data_01811; + +drop table buffer_01811; +drop table data_01811; diff --git a/tests/queries/0_stateless/01817_storage_buffer_parameters.reference b/tests/queries/0_stateless/01817_storage_buffer_parameters.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01817_storage_buffer_parameters.sql b/tests/queries/0_stateless/01817_storage_buffer_parameters.sql new file mode 100644 index 00000000000..84727bc5d6b --- /dev/null +++ b/tests/queries/0_stateless/01817_storage_buffer_parameters.sql @@ -0,0 +1,42 @@ +drop table if exists data_01817; +drop table if exists buffer_01817; + +create table data_01817 (key Int) Engine=Null(); + +-- w/ flush_* +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0 +); +drop table buffer_01817; + +-- w/o flush_* +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6 +); +drop table buffer_01817; + +-- not enough args +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0 /* max_bytes= 4e6 */ +); -- { serverError 42 } +-- too much args +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0, + 0 +); -- { serverError 42 } + 
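+-- (serverError 42 is NUMBER_OF_ARGUMENTS_DOESNT_MATCH, thrown by the new
+-- "from 9 to 12 parameters" check in registerStorageBuffer)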
+drop table data_01817; From 0ad6205fa6bd5849dce3071235ba7def6a49233c Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 15 Apr 2021 21:34:53 +0300 Subject: [PATCH 106/133] logs for debuging test failures with Replicated and Keeper --- src/Interpreters/DDLWorker.cpp | 13 ++++++++++++ .../ReplicatedMergeTreeCleanupThread.cpp | 13 +++++++++--- ...0953_zookeeper_suetin_deduplication_bug.sh | 8 +------- .../01305_replica_create_drop_zookeeper.sh | 20 +++++-------------- 4 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 6081f06b25f..1c023f757f8 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -372,7 +372,20 @@ void DDLWorker::scheduleTasks(bool reinitialized) } Strings queue_nodes = zookeeper->getChildren(queue_dir, nullptr, queue_updated_event); + size_t size_before_filtering = queue_nodes.size(); filterAndSortQueueNodes(queue_nodes); + /// The following message is too verbose, but it can be useful too debug mysterious test failures in CI + LOG_TRACE(log, "scheduleTasks: initialized={}, size_before_filtering={}, queue_size={}, " + "entries={}..{}, " + "first_failed_task_name={}, current_tasks_size={}," + "last_current_task={}," + "last_skipped_entry_name={}", + initialized, size_before_filtering, queue_nodes.size(), + queue_nodes.empty() ? "none" : queue_nodes.front(), queue_nodes.empty() ? "none" : queue_nodes.back(), + first_failed_task_name ? *first_failed_task_name : "none", current_tasks.size(), + current_tasks.empty() ? "none" : current_tasks.back()->entry_name, + last_skipped_entry_name ? *last_skipped_entry_name : "none"); + if (max_tasks_in_queue < queue_nodes.size()) cleanup_event->set(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 792a77d5e1a..502c6215a9a 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -342,6 +342,15 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks() timed_blocks.begin(), timed_blocks.end(), block_threshold, NodeWithStat::greaterByTime); auto first_outdated_block = std::min(first_outdated_block_fixed_threshold, first_outdated_block_time_threshold); + auto num_nodes_to_delete = timed_blocks.end() - first_outdated_block; + if (!num_nodes_to_delete) + return; + + auto last_outdated_block = timed_blocks.end() - 1; + LOG_TRACE(log, "Will clear {} old blocks from {} (ctime {}) to {} (ctime {})", num_nodes_to_delete, + first_outdated_block->node, first_outdated_block->ctime, + last_outdated_block->node, last_outdated_block->ctime); + zkutil::AsyncResponses try_remove_futures; for (auto it = first_outdated_block; it != timed_blocks.end(); ++it) { @@ -372,9 +381,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks() first_outdated_block++; } - auto num_nodes_to_delete = timed_blocks.end() - first_outdated_block; - if (num_nodes_to_delete) - LOG_TRACE(log, "Cleared {} old blocks from ZooKeeper", num_nodes_to_delete); + LOG_TRACE(log, "Cleared {} old blocks from ZooKeeper", num_nodes_to_delete); } diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index baa2b0cf53f..71ca29bfd96 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -21,15 
+21,12 @@ ORDER BY (engine_id) SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" -sleep 1 $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" -sleep 1 $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 3, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 3 rows count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") - while [[ $count != 2 ]] do sleep 1 @@ -39,9 +36,8 @@ done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 4 rows + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") - - while [[ $count != 2 ]] do sleep 1 @@ -53,12 +49,10 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'h $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 5 rows count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") - while [[ $count != 2 ]] do sleep 1 count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") - done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh index 01bb9af461c..e7b8091284a 100755 --- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh +++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh @@ -8,21 +8,11 @@ set -e function thread() { - db_engine=`$CLICKHOUSE_CLIENT -q "SELECT engine FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'"` - if [[ $db_engine == "Atomic" ]]; then - # Ignore "Replica already exists" exception - while true; do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 NO DELAY; - CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | - grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|already exists' - done - else - while true; do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1; - CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | - grep -vP '(^$)|(^Received exception from server)|(^\d+\. 
)|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time' - done - fi + while true; do + $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC; + CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | + grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time' + done } From 94b228acc9f61631da6f63203c215cf8827a34f5 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 21:51:15 +0300 Subject: [PATCH 107/133] Update PocoHTTPClient.h --- src/IO/S3/PocoHTTPClient.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 44af47237ba..cf8656a2f6c 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -1,5 +1,7 @@ #pragma once +#include + #if USE_AWS_S3 #include From 09571ca91fca2f525d2e5b3a8cea61abece6f61a Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 22:22:40 +0300 Subject: [PATCH 108/133] Updated zlib-ng submodule --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index bf128f84df0..527425a08cb 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit bf128f84df0806ec51c3513804222ae02007c4f3 +Subproject commit 527425a08cbdce33acff00eaf8f83d6bdb6b29ae From 4affe01ffbc3280bd45ea3a1d06a7bc6e45cce3a Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 15 Apr 2021 23:30:41 +0300 Subject: [PATCH 109/133] Backport zlib x86 arm check features constructor --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 527425a08cb..16b42c7a030 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 527425a08cbdce33acff00eaf8f83d6bdb6b29ae +Subproject commit 16b42c7a03097ca3df67d90246a0d9bf826734e1 From 9110a76d00807baf9dc368cd613512298188269b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Apr 2021 00:14:37 +0300 Subject: [PATCH 110/133] Reordered settings to avoid confusion --- src/Core/Settings.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d31073ae932..ff58f5d4e5f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -252,8 +252,6 @@ class IColumn; * Almost all limits apply to each stream individually. \ */ \ \ - M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \ - M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \ M(UInt64, max_rows_to_read, 0, "Limit on read rows from the most 'deep' sources. That is, only in the deepest subquery. 
When reading from a remote server, it is only checked on a remote server.", 0) \ M(UInt64, max_bytes_to_read, 0, "Limit on read bytes (after decompression) from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \ M(OverflowMode, read_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ @@ -464,6 +462,8 @@ class IColumn; \ M(Bool, database_replicated_ddl_output, true, "Obsolete setting, does nothing. Will be removed after 2021-09-08", 0) \ M(HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT, "How to handle errors for Kafka engine. Passible values: default, stream.", 0) \ + M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \ + M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below. From 55b1fc5a21a92de64bbb525111375fd2f66648c2 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 16 Apr 2021 00:18:52 +0300 Subject: [PATCH 111/133] Updated zlib-ng --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 16b42c7a030..28dfdaa8a3c 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 16b42c7a03097ca3df67d90246a0d9bf826734e1 +Subproject commit 28dfdaa8a3c5add48dcaf56086a9306a357e6e6b From 9238d8e54aea2eb2a497d314f10f870f6f9cba5d Mon Sep 17 00:00:00 2001 From: madianjun Date: Fri, 16 Apr 2021 14:28:52 +0800 Subject: [PATCH 112/133] Fix exception message for parts_to_throw_insert --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ee8e15008cb..2008edb8919 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2563,7 +2563,7 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const ProfileEvents::increment(ProfileEvents::RejectedInserts); throw Exception( ErrorCodes::TOO_MANY_PARTS, - "Too many parts ({}). Parts cleaning are processing significantly slower than inserts", + "Too many parts ({}). 
Merges are processing significantly slower than inserts", parts_count_in_partition); } From 75d18a6d278ac519cefe44515c9b4dce5f00d95e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 16 Apr 2021 10:45:53 +0300 Subject: [PATCH 113/133] Updated zlib-ng aarch64 --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 28dfdaa8a3c..4039bb46239 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 28dfdaa8a3c5add48dcaf56086a9306a357e6e6b +Subproject commit 4039bb4623905e73c6e32a0c022f144bab87b2b3 From 29281ea6e062294652210d3a3011ce0998956f28 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 16 Apr 2021 10:54:11 +0300 Subject: [PATCH 114/133] jemalloc: set muzzy_decay_ms/dirty_decay_ms to 5s --- contrib/jemalloc-cmake/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index a82345975d1..830a280465c 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -34,9 +34,9 @@ if (OS_LINUX) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. - set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:1000,dirty_decay_ms:1000") + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") else() - set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:1000,dirty_decay_ms:1000") + set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") endif() # CACHE variable is empty, to allow changing defaults without necessity # to purge cache From 88e2d28666e7d614d9187121fef550f20a1ede5b Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 16 Apr 2021 00:16:35 +0300 Subject: [PATCH 115/133] Improve documentation for CREATE ROW POLICY command. --- .../statements/create/row-policy.md | 71 ++++++++++++++----- .../statements/create/row-policy.md | 65 +++++++++++++---- 2 files changed, 105 insertions(+), 31 deletions(-) diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md index cbe639c6fc5..6f769fb1dca 100644 --- a/docs/en/sql-reference/statements/create/row-policy.md +++ b/docs/en/sql-reference/statements/create/row-policy.md @@ -5,39 +5,78 @@ toc_title: ROW POLICY # CREATE ROW POLICY {#create-row-policy-statement} -Creates [filters for rows](../../../operations/access-rights.md#row-policy-management), which a user can read from a table. +Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table. Syntax: ``` sql CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1 [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...] + [FOR SELECT] USING condition [AS {PERMISSIVE | RESTRICTIVE}] - [FOR SELECT] - [USING condition] [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] ``` `ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md). -## AS Clause {#create-row-policy-as} +## USING Clause {#create-row-policy-using} -Using this section you can create permissive or restrictive policies. 
- -Permissive policy grants access to rows. Permissive policies which apply to the same table are combined together using the boolean `OR` operator. Policies are permissive by default. - -Restrictive policy restricts access to rows. Restrictive policies which apply to the same table are combined together using the boolean `AND` operator. - -Restrictive policies apply to rows that passed the permissive filters. If you set restrictive policies but no permissive policies, the user can’t get any row from the table. +Allows specifying a condition to filter rows. A user will see a row if the condition evaluates to non-zero for that row. ## TO Clause {#create-row-policy-to} -In the section `TO` you can provide a mixed list of roles and users, for example, `CREATE ROW POLICY ... TO accountant, john@localhost`. +In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`. -Keyword `ALL` means all the ClickHouse users including current user. Keywords `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost` +Keyword `ALL` means all the ClickHouse users, including the current user. Keyword `ALL EXCEPT` allows excluding some users from the list of all users, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost` -## Examples {#examples} +!!! note "Note" + If there are no row policies defined for a table then any user can `SELECT` all the rows from the table. + Defining one or more row policies for the table makes access to the table dependent on the row policies, no matter if + those row policies are defined for the current user or not. For example, the following row policy -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost` + `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter` -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira` + forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all! If that isn't desirable, you can fix it by adding one more row policy, for example: + + `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` + +## AS Clause {#create-row-policy-as} + +It's allowed to have more than one policy enabled on the same table for the same user at the same time. +So we need a way to combine the conditions from multiple policies. +By default policies are combined using the boolean `OR` operator. For example, the following policies + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio +``` + +enable the user `peter` to see rows with either `b=1` or `c=2`. + +The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. +By default policies are permissive, which means they are combined using the boolean `OR` operator. + +Alternatively, a policy can be defined as restrictive. Restrictive policies are combined using the boolean `AND` operator.
+Here is the formula: + +``` +row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND (all of the restrictive policies' conditions are non-zero) +``` + +For example, the following policies + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio +``` + +enable the user `peter` to see rows only if both `b=1` AND `c=2`. + + +## Examples {#examples} + +`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` + +`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira` + +`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin` diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md index 88709598906..95fa29ff48a 100644 --- a/docs/ru/sql-reference/statements/create/row-policy.md +++ b/docs/ru/sql-reference/statements/create/row-policy.md @@ -5,7 +5,7 @@ toc_title: "Политика доступа" # CREATE ROW POLICY {#create-row-policy-statement} -Создает [фильтры для строк](../../../operations/access-rights.md#row-policy-management), которые пользователь может прочесть из таблицы. +Создает [политики доступа к строкам](../../../operations/access-rights.md#row-policy-management), т.е. фильтры, которые определяют, какие строки пользователь может читать из таблицы. Синтаксис: @@ -13,33 +13,68 @@ toc_title: "Политика доступа" CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1 [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...] [AS {PERMISSIVE | RESTRICTIVE}] - [FOR SELECT] - [USING condition] + [FOR SELECT] USING condition [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] ``` -Секция `ON CLUSTER` позволяет создавать фильтры для строк на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md). +Секция `ON CLUSTER` позволяет создавать политики на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md). ## Секция USING {#create-row-policy-using} -## Секция AS {#create-row-policy-as} -С помощью данной секции можно создать политику разрешения или ограничения. - -Политика разрешения предоставляет доступ к строкам. Разрешительные политики, которые применяются к одной таблице, объединяются с помощью логического оператора `OR`. Политики являются разрешительными по умолчанию. - -Политика ограничения запрещает доступ к строкам. Ограничительные политики, которые применяются к одной таблице, объединяются логическим оператором `AND`. - -Ограничительные политики применяются к строкам, прошедшим фильтр разрешительной политики. Если вы не зададите разрешительные политики, пользователь не сможет обращаться ни к каким строкам из таблицы. +Секция `USING` указывает условие для фильтрации строк. Пользователь может видеть строку, если это условие, вычисленное для строки, дает ненулевой результат. ## Секция TO {#create-row-policy-to} -В секции `TO` вы можете перечислить как роли, так и пользователей. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`. +В секции `TO` перечисляются пользователи и роли, для которых должна действовать политика. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`. Ключевым словом `ALL` обозначаются все пользователи, включая текущего. Ключевые слова `ALL EXCEPT` позволяют исключить пользователей из списка всех пользователей. Например, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost` +!!!
note "Note" + Если для таблицы не задано ни одной политики доступа к строкам, то любой пользователь может выполнить `SELECT` и получить все строки таблицы. + Если определить хотя бы одну политику для таблицы, то доступ к строкам будет управляться этими политиками, причем для всех пользователей + (даже для тех, для кого политики не определялись). Например, следующая политика + + `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter` + + запретит пользователям `mira` и `peter` видеть строки с `b != 1`, и еще запретит всем остальным пользователям (например, пользователю `paul`) + видеть какие-либо строки вообще из таблицы `mydb.table1`! Если это нежелательно, такое поведение можно исправить, определив дополнительную политику: + + `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` + +## Секция AS {#create-row-policy-as} + +Может быть одновременно активно более одной политики для одной и той же таблицы и одного и того же пользователя. +Поэтому нам нужен способ комбинировать политики. По умолчанию политики комбинируются с использованием логического оператора `OR`. +Например, политики: + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть строки, для которых будет верно `b=1` или `c=2`. + +Секция `AS` указывает, как политики должны комбинироваться с другими политиками. Политики могут быть или разрешительными (`PERMISSIVE`), или ограничительными (`RESTRICTIVE`). По умолчанию политики создаются разрешительными (`PERMISSIVE`); такие политики комбинируются с использованием логического оператора `OR`. +Ограничительные (`RESTRICTIVE`) политики комбинируются с использованием логического оператора `AND`. +Используется следующая формула: + +`строка_видима = (одна или больше permissive-политик дала ненулевой результат проверки условия) И (все restrictive-политики дали ненулевой результат проверки условия)` + +Например, политики + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть только те строки, для которых будет одновременно `b=1` и `c=2`.
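[Editor's aside, not part of the patch: a compact sanity check of the combining rule documented above in both the English and Russian versions of this page. The table, user, and policy names are hypothetical.]

``` sql
-- Permissive conditions are OR-ed together; restrictive conditions are AND-ed on top.
CREATE ROW POLICY perm_a ON mydb.table1 USING a = 1 TO peter;                 -- permissive (default)
CREATE ROW POLICY perm_b ON mydb.table1 USING b = 1 TO peter;                 -- permissive (default)
CREATE ROW POLICY restr_c ON mydb.table1 USING c = 1 AS RESTRICTIVE TO peter;
-- peter now sees exactly the rows where (a = 1 OR b = 1) AND c = 1.
```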
+ ## Примеры -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost` +`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira` +`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira` + +`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin` \ No newline at end of file From dc442b90d08757505fc843d2c84038a4e6a45326 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 16 Apr 2021 13:23:38 +0300 Subject: [PATCH 116/133] Fix flapping tests test_s3_zero_copy_replication, test_s3_zero_copy_on_hybrid_storage --- .../configs/config.d/s3.xml | 1 + .../test_s3_zero_copy_replication/test.py | 25 ++++++++++--------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index 88eb49d9f17..ec28840054a 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -26,6 +26,7 @@ s31 + 0.0 diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index 5bc30ab1d6b..f7078d55c33 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -36,6 +36,15 @@ def get_large_objects_count(cluster, size=100): return counter +def wait_for_large_objects_count(cluster, expected, size=100, timeout=30): + while timeout > 0: + if get_large_objects_count(cluster, size) == expected: + return + timeout -= 1 + time.sleep(1) + assert get_large_objects_count(cluster, size) == expected + + @pytest.mark.parametrize( "policy", ["s3"] ) @@ -67,23 +76,15 @@ def test_s3_zero_copy_replication(cluster, policy): assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" # Based on version 20.x - two parts - assert get_large_objects_count(cluster) == 2 + wait_for_large_objects_count(cluster, 2) node1.query("OPTIMIZE TABLE s3_test") - time.sleep(1) - # Based on version 20.x - after merge, two old parts and one merged - assert get_large_objects_count(cluster) == 3 + wait_for_large_objects_count(cluster, 3) # Based on version 20.x - after cleanup - only one merged part - countdown = 60 - while countdown > 0: - if get_large_objects_count(cluster) == 1: - break - time.sleep(1) - countdown -= 1 - assert get_large_objects_count(cluster) == 1 + wait_for_large_objects_count(cluster, 1, timeout=60) node1.query("DROP TABLE IF EXISTS s3_test NO DELAY") node2.query("DROP TABLE IF EXISTS s3_test NO DELAY") @@ -127,7 +128,7 @@ def test_s3_zero_copy_on_hybrid_storage(cluster): assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')" # Check that after moving partition on node2 no new obects on s3 - assert get_large_objects_count(cluster, 0) == s3_objects + wait_for_large_objects_count(cluster, s3_objects, size=0) assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" From ebf0a3119e6a9425f167dc5ecac1d3477e51800d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 16 Apr 2021 13:15:35 
+0300 Subject: [PATCH 117/133] fix data_type_default_nullable on attach --- src/Databases/DatabaseOnDisk.cpp | 4 ++-- src/Interpreters/InterpreterCreateQuery.cpp | 10 +++++----- src/Interpreters/InterpreterCreateQuery.h | 2 +- src/Interpreters/InterpreterSystemQuery.cpp | 2 +- .../parseColumnsListForTableFunction.cpp | 2 +- .../0_stateless/01269_create_with_null.reference | 3 +++ tests/queries/0_stateless/01269_create_with_null.sql | 12 ++++++++++++ 7 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 358f9030db5..14ad1c7e4c5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -58,7 +58,7 @@ std::pair createTableFromAST( auto table_function = factory.get(ast_create_query.as_table_function, context); ColumnsDescription columns; if (ast_create_query.columns_list && ast_create_query.columns_list->columns) - columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, false); + columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true); StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.table, std::move(columns)); storage->renameInMemory(ast_create_query); return {ast_create_query.table, storage}; @@ -69,7 +69,7 @@ std::pair createTableFromAST( if (!ast_create_query.columns_list || !ast_create_query.columns_list->columns) throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); - ColumnsDescription columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, false); + ColumnsDescription columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true); ConstraintsDescription constraints = InterpreterCreateQuery::getConstraintsDescription(ast_create_query.columns_list->constraints); return diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 6d234f5f846..8db4415822f 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -363,7 +363,7 @@ ASTPtr InterpreterCreateQuery::formatConstraints(const ConstraintsDescription & } ColumnsDescription InterpreterCreateQuery::getColumnsDescription( - const ASTExpressionList & columns_ast, ContextPtr context_, bool sanity_check_compression_codecs) + const ASTExpressionList & columns_ast, ContextPtr context_, bool attach) { /// First, deduce implicit types. @@ -372,6 +372,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( ASTPtr default_expr_list = std::make_shared(); NamesAndTypesList column_names_and_types; + bool make_columns_nullable = !attach && context_->getSettingsRef().data_type_default_nullable; for (const auto & ast : columns_ast.children) { @@ -390,8 +391,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( if (*col_decl.null_modifier) column_type = makeNullable(column_type); } - /// XXX: context_ or context ? 
- else if (context_->getSettingsRef().data_type_default_nullable) + else if (make_columns_nullable) { column_type = makeNullable(column_type); } @@ -436,6 +436,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( if (!default_expr_list->children.empty()) defaults_sample_block = validateColumnsDefaultsAndGetSampleBlock(default_expr_list, column_names_and_types, context_); + bool sanity_check_compression_codecs = !attach && !context_->getSettingsRef().allow_suspicious_codecs; ColumnsDescription res; auto name_type_it = column_names_and_types.begin(); for (auto ast_it = columns_ast.children.begin(); ast_it != columns_ast.children.end(); ++ast_it, ++name_type_it) @@ -511,8 +512,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::setProperties(AS if (create.columns_list->columns) { - bool sanity_check_compression_codecs = !create.attach && !getContext()->getSettingsRef().allow_suspicious_codecs; - properties.columns = getColumnsDescription(*create.columns_list->columns, getContext(), sanity_check_compression_codecs); + properties.columns = getColumnsDescription(*create.columns_list->columns, getContext(), create.attach); } if (create.columns_list->indices) diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index 30db7dbdc8e..f674bf19123 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -53,7 +53,7 @@ public: /// Obtain information about columns, their types, default values and column comments, /// for case when columns in CREATE query is specified explicitly. - static ColumnsDescription getColumnsDescription(const ASTExpressionList & columns, ContextPtr context, bool sanity_check_compression_codecs); + static ColumnsDescription getColumnsDescription(const ASTExpressionList & columns, ContextPtr context, bool attach); static ConstraintsDescription getConstraintsDescription(const ASTExpressionList * constraints); static void prepareOnClusterQuery(ASTCreateQuery & create, ContextPtr context, const String & cluster_name); diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index c13b9d97037..02d5296a9d2 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -429,7 +429,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, auto & create = create_ast->as(); create.attach = true; - auto columns = InterpreterCreateQuery::getColumnsDescription(*create.columns_list->columns, system_context, false); + auto columns = InterpreterCreateQuery::getColumnsDescription(*create.columns_list->columns, system_context, true); auto constraints = InterpreterCreateQuery::getConstraintsDescription(create.columns_list->constraints); auto data_path = database->getTableDataPath(create); diff --git a/src/TableFunctions/parseColumnsListForTableFunction.cpp b/src/TableFunctions/parseColumnsListForTableFunction.cpp index 659aa779ede..08e80ef425a 100644 --- a/src/TableFunctions/parseColumnsListForTableFunction.cpp +++ b/src/TableFunctions/parseColumnsListForTableFunction.cpp @@ -25,7 +25,7 @@ ColumnsDescription parseColumnsListFromString(const std::string & structure, Con if (!columns_list) throw Exception("Could not cast AST to ASTExpressionList", ErrorCodes::LOGICAL_ERROR); - return InterpreterCreateQuery::getColumnsDescription(*columns_list, context, !settings.allow_suspicious_codecs); + return InterpreterCreateQuery::getColumnsDescription(*columns_list, 
context, false); } } diff --git a/tests/queries/0_stateless/01269_create_with_null.reference b/tests/queries/0_stateless/01269_create_with_null.reference index 86be41bc06a..73f834da75a 100644 --- a/tests/queries/0_stateless/01269_create_with_null.reference +++ b/tests/queries/0_stateless/01269_create_with_null.reference @@ -2,3 +2,6 @@ Nullable(Int32) Int32 Nullable(Int32) Int32 CREATE TABLE default.data_null\n(\n `a` Nullable(Int32),\n `b` Int32,\n `c` Nullable(Int32),\n `d` Int32\n)\nENGINE = Memory Nullable(Int32) Int32 Nullable(Int32) Nullable(Int32) CREATE TABLE default.set_null\n(\n `a` Nullable(Int32),\n `b` Int32,\n `c` Nullable(Int32),\n `d` Nullable(Int32)\n)\nENGINE = Memory +CREATE TABLE default.set_null\n(\n `a` Nullable(Int32),\n `b` Int32,\n `c` Nullable(Int32),\n `d` Nullable(Int32)\n)\nENGINE = Memory +CREATE TABLE default.cannot_be_nullable\n(\n `n` Nullable(Int8),\n `a` Array(UInt8)\n)\nENGINE = Memory +CREATE TABLE default.cannot_be_nullable\n(\n `n` Nullable(Int8),\n `a` Array(UInt8)\n)\nENGINE = Memory diff --git a/tests/queries/0_stateless/01269_create_with_null.sql b/tests/queries/0_stateless/01269_create_with_null.sql index 856b6ea75f4..faa6b84e9e4 100644 --- a/tests/queries/0_stateless/01269_create_with_null.sql +++ b/tests/queries/0_stateless/01269_create_with_null.sql @@ -1,5 +1,6 @@ DROP TABLE IF EXISTS data_null; DROP TABLE IF EXISTS set_null; +DROP TABLE IF EXISTS cannot_be_nullable; SET data_type_default_nullable='false'; @@ -45,6 +46,17 @@ INSERT INTO set_null VALUES (NULL, 2, NULL, NULL); SELECT toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d) FROM set_null; SHOW CREATE TABLE set_null; +DETACH TABLE set_null; +ATTACH TABLE set_null; +SHOW CREATE TABLE set_null; + +CREATE TABLE cannot_be_nullable (n Int8, a Array(UInt8)) ENGINE=Memory; -- { serverError 43 } +CREATE TABLE cannot_be_nullable (n Int8, a Array(UInt8) NOT NULL) ENGINE=Memory; +SHOW CREATE TABLE cannot_be_nullable; +DETACH TABLE cannot_be_nullable; +ATTACH TABLE cannot_be_nullable; +SHOW CREATE TABLE cannot_be_nullable; DROP TABLE data_null; DROP TABLE set_null; +DROP TABLE cannot_be_nullable; From 4d323fa556677a769d588aba4aafa76900b134b4 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 16 Apr 2021 13:59:54 +0300 Subject: [PATCH 118/133] MacOS clang build instructions fix --- docs/en/development/build-osx.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index 886e85bbf86..29fdf0f324c 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -62,7 +62,7 @@ $ cd ClickHouse $ rm -rf build $ mkdir build $ cd build -$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER==$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF .. +$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF .. $ cmake --build . --config RelWithDebInfo $ cd .. 
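# Editor's note (an addition, not part of the patch): before configuring, it can be
# worth confirming that Homebrew's LLVM is installed and resolvable, since the fixed
# -DCMAKE_CXX_COMPILER flag above points CMake at it:
$ $(brew --prefix llvm)/bin/clang++ --version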
``` From 440efb6fb684b7890bac0d37977ce4d55b9b02b7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 16 Apr 2021 14:22:23 +0300 Subject: [PATCH 119/133] Update arrayFold.cpp --- src/Functions/array/arrayFold.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index 5fc7a304b03..21a228929ad 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -138,7 +138,8 @@ public: size_t arr_cursor = 0; for (size_t irow = 0; irow < column_first_array->size(); ++irow) // for each row of result { - // Make accumulator column for this row + // Make accumulator column for this row. We initialize it + // with the starting value given as the last argument. ColumnWithTypeAndName accumulator_column = arguments.back(); ColumnPtr acc(accumulator_column.column->cut(irow, 1)); auto accumulator = ColumnWithTypeAndName(acc, From 0ddb396338312fc1c8d97e412f5c58785de0d4d4 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 16 Apr 2021 15:52:48 +0300 Subject: [PATCH 120/133] add Y_IGNORE --- src/Disks/S3/registerDiskS3.cpp | 19 +++++++++++++++++-- src/IO/S3/PocoHTTPClient.h | 8 ++++---- src/IO/S3Common.h | 4 ++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp index a15b6bcf822..303b7224756 100644 --- a/src/Disks/S3/registerDiskS3.cpp +++ b/src/Disks/S3/registerDiskS3.cpp @@ -1,9 +1,17 @@ -#include +#if !defined(ARCADIA_BUILD) + #include +#endif + #include -#include #include #include #include + + +#if USE_AWS_S3 + +#include +#include #include "DiskS3.h" #include "Disks/DiskCacheWrapper.h" #include "Disks/DiskFactory.h" @@ -196,3 +204,10 @@ void registerDiskS3(DiskFactory & factory) } } + +#else + +void registerDiskS3(DiskFactory &) {} + +#endif + diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index cf8656a2f6c..34b8ea801bd 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -8,10 +8,10 @@ #include #include #include -#include -#include -#include -#include +#include // Y_IGNORE +#include // Y_IGNORE +#include // Y_IGNORE +#include // Y_IGNORE namespace Aws::Http::Standard { diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index b071daefee1..53230d49f2b 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -5,8 +5,8 @@ #if USE_AWS_S3 #include -#include -#include +#include // Y_IGNORE +#include // Y_IGNORE #include #include From a809323179c8ad7af20a9072583287c2ed34f9d2 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 16 Apr 2021 21:15:32 +0300 Subject: [PATCH 121/133] Updated zlib version --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index bf128f84df0..4039bb46239 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit bf128f84df0806ec51c3513804222ae02007c4f3 +Subproject commit 4039bb4623905e73c6e32a0c022f144bab87b2b3 From df2e75bf00609c0586270071eb5159b23f9236cf Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 16 Apr 2021 23:56:52 +0300 Subject: [PATCH 122/133] Update version_date.tsv after release 21.4.4.30 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index bf9ef37e2b7..2da4e230e9f 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ 
-1,3 +1,4 @@ +v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 v21.3.6.55-lts 2021-04-12 v21.3.5.42-lts 2021-04-07 From a1f6b0e5600109217c81df494da94e37e0d7d4c4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 17 Apr 2021 00:28:08 +0300 Subject: [PATCH 123/133] Update version_date.tsv after release 21.3.7.62 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 2da4e230e9f..c47d78110db 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,5 +1,6 @@ v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 +v21.3.7.62-stable 2021-04-16 v21.3.6.55-lts 2021-04-12 v21.3.5.42-lts 2021-04-07 v21.3.4.25-lts 2021-03-28 From d2101c05ce37b02b41bbd6e4fc149bfb1a5291d0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 17 Apr 2021 00:40:38 +0300 Subject: [PATCH 124/133] Update version_date.tsv after release 21.2.10.48 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index c47d78110db..b2165a49149 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -6,6 +6,7 @@ v21.3.5.42-lts 2021-04-07 v21.3.4.25-lts 2021-03-28 v21.3.3.14-lts 2021-03-19 v21.3.2.5-lts 2021-03-12 +v21.2.10.48-stable 2021-04-16 v21.2.9.41-stable 2021-04-12 v21.2.8.31-stable 2021-04-07 v21.2.7.11-stable 2021-03-28 From 88445294b4640681dfc13c59987972b1eba18ead Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 17 Apr 2021 01:11:03 +0300 Subject: [PATCH 125/133] Update version_date.tsv after release 20.8.18.32 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index b2165a49149..a69f96970bb 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -49,6 +49,7 @@ v20.9.5.5-stable 2020-11-13 v20.9.4.76-stable 2020-10-29 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 +v20.8.18.32-lts 2021-04-16 v20.8.17.25-lts 2021-04-08 v20.8.16.20-lts 2021-04-06 v20.8.15.11-lts 2021-04-01 From 36fe378a17ac39c3417b9c38966a085508bb6568 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Apr 2021 02:11:21 +0300 Subject: [PATCH 126/133] Fix fairly terrible bug in LowCardinality #16171 --- src/Columns/ColumnLowCardinality.cpp | 4 ++-- src/Columns/ColumnsCommon.h | 2 +- src/DataTypes/Serializations/SerializationLowCardinality.cpp | 2 +- src/Functions/lowCardinalityIndices.cpp | 2 +- src/Functions/lowCardinalityKeys.cpp | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index 9433bf079e2..c4d7e75dd2d 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -122,7 +122,7 @@ namespace else if (auto * data_uint64 = getIndexesData(column)) return mapUniqueIndexImpl(*data_uint64); else - throw Exception("Indexes column for getUniqueIndex must be ColumnUInt, got" + column.getName(), + throw Exception("Indexes column for getUniqueIndex must be ColumnUInt, got " + column.getName(), ErrorCodes::LOGICAL_ERROR); } } @@ -151,7 +151,7 @@ void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n) const auto * low_cardinality_src = typeid_cast(&src); if (!low_cardinality_src) - throw Exception("Expected 
ColumnLowCardinality, got" + src.getName(), ErrorCodes::ILLEGAL_COLUMN); + throw Exception("Expected ColumnLowCardinality, got " + src.getName(), ErrorCodes::ILLEGAL_COLUMN); size_t position = low_cardinality_src->getIndexes().getUInt(n); diff --git a/src/Columns/ColumnsCommon.h b/src/Columns/ColumnsCommon.h index 7655edffa71..71f2884bf86 100644 --- a/src/Columns/ColumnsCommon.h +++ b/src/Columns/ColumnsCommon.h @@ -66,7 +66,7 @@ ColumnPtr selectIndexImpl(const Column & column, const IColumn & indexes, size_t else if (auto * data_uint64 = detail::getIndexesData(indexes)) return column.template indexImpl(*data_uint64, limit); else - throw Exception("Indexes column for IColumn::select must be ColumnUInt, got" + indexes.getName(), + throw Exception("Indexes column for IColumn::select must be ColumnUInt, got " + indexes.getName(), ErrorCodes::LOGICAL_ERROR); } diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index 31058cb6e57..41d9a4100e0 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -466,7 +466,7 @@ namespace else if (auto * data_uint64 = getIndexesData(column)) return mapIndexWithAdditionalKeys(*data_uint64, dict_size); else - throw Exception("Indexes column for mapIndexWithAdditionalKeys must be UInt, got" + column.getName(), + throw Exception("Indexes column for mapIndexWithAdditionalKeys must be UInt, got " + column.getName(), ErrorCodes::LOGICAL_ERROR); } } diff --git a/src/Functions/lowCardinalityIndices.cpp b/src/Functions/lowCardinalityIndices.cpp index 25834b2b3a8..b46fb17d6b1 100644 --- a/src/Functions/lowCardinalityIndices.cpp +++ b/src/Functions/lowCardinalityIndices.cpp @@ -35,7 +35,7 @@ public: { const auto * type = typeid_cast(arguments[0].get()); if (!type) - throw Exception("First first argument of function lowCardinalityIndexes must be ColumnLowCardinality, but got" + throw Exception("First first argument of function lowCardinalityIndexes must be ColumnLowCardinality, but got " + arguments[0]->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return std::make_shared(); diff --git a/src/Functions/lowCardinalityKeys.cpp b/src/Functions/lowCardinalityKeys.cpp index 5bcc2429592..245a016ac6a 100644 --- a/src/Functions/lowCardinalityKeys.cpp +++ b/src/Functions/lowCardinalityKeys.cpp @@ -33,7 +33,7 @@ public: { const auto * type = typeid_cast(arguments[0].get()); if (!type) - throw Exception("First first argument of function lowCardinalityKeys must be ColumnLowCardinality, but got" + throw Exception("First first argument of function lowCardinalityKeys must be ColumnLowCardinality, but got " + arguments[0]->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return type->getDictionaryType(); From ec473a90d2e9891eb9bad841e42c27a685bac5f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Apr 2021 02:14:48 +0300 Subject: [PATCH 127/133] Fixed missing semicolon in exception message --- src/Interpreters/ActionsVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 21abf36c51c..2ad1d0804c4 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -650,7 +650,7 @@ std::optional ActionsMatcher::getNameAndTypeFromAST(const ASTPt return NameAndTypePair(child_column_name, node->result_type); if (!data.only_consts) - throw Exception("Unknown identifier: " + 
child_column_name + " there are columns: " + data.actions_stack.dumpNames(), + throw Exception("Unknown identifier: " + child_column_name + "; there are columns: " + data.actions_stack.dumpNames(), ErrorCodes::UNKNOWN_IDENTIFIER); return {}; From ecf8de7111f689b1072633948927c50103636f21 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Apr 2021 02:22:32 +0300 Subject: [PATCH 128/133] Add a test for #14610 --- .../queries/0_stateless/01818_case_float_value_fangyc.reference | 1 + tests/queries/0_stateless/01818_case_float_value_fangyc.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/queries/0_stateless/01818_case_float_value_fangyc.reference create mode 100644 tests/queries/0_stateless/01818_case_float_value_fangyc.sql diff --git a/tests/queries/0_stateless/01818_case_float_value_fangyc.reference b/tests/queries/0_stateless/01818_case_float_value_fangyc.reference new file mode 100644 index 00000000000..61780798228 --- /dev/null +++ b/tests/queries/0_stateless/01818_case_float_value_fangyc.reference @@ -0,0 +1 @@ +b diff --git a/tests/queries/0_stateless/01818_case_float_value_fangyc.sql b/tests/queries/0_stateless/01818_case_float_value_fangyc.sql new file mode 100644 index 00000000000..3cdb8503e64 --- /dev/null +++ b/tests/queries/0_stateless/01818_case_float_value_fangyc.sql @@ -0,0 +1 @@ +select case 1.1 when 0.1 then 'a' when 1.1 then 'b' when 2.1 then 'c' else 'default' end as f; From 5dc2dfa437be017578b2d7826b164b5daf28c5d0 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 17 Apr 2021 08:09:22 +0000 Subject: [PATCH 129/133] Fix --- programs/odbc-bridge/MainHandler.cpp | 31 +++++----- src/Bridge/XDBCBridgeHelper.h | 5 +- src/Dictionaries/XDBCDictionarySource.cpp | 61 ++++++++++--------- src/Dictionaries/XDBCDictionarySource.h | 2 +- src/Storages/StorageXDBC.cpp | 43 +++++++------ .../integration/test_odbc_interaction/test.py | 33 ++++++++++ 6 files changed, 110 insertions(+), 65 deletions(-) diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 24bcaf63c69..e24b51f6037 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -54,9 +54,10 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { HTMLForm params(request); + LOG_TRACE(log, "Request URI: {}", request.getURI()); + if (mode == "read") params.read(request.getStream()); - LOG_TRACE(log, "Request URI: {}", request.getURI()); if (mode == "read" && !params.has("query")) { @@ -64,11 +65,6 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } - if (!params.has("columns")) - { - processError(response, "No 'columns' in request URL"); - return; - } if (!params.has("connection_string")) { @@ -76,6 +72,16 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } + if (!params.has("sample_block")) + { + processError(response, "No 'sample_block' in request URL"); + return; + } + + std::string format = params.get("format", "RowBinary"); + std::string connection_string = params.get("connection_string"); + LOG_TRACE(log, "Connection string: '{}'", connection_string); + UInt64 max_block_size = DEFAULT_BLOCK_SIZE; if (params.has("max_block_size")) { @@ -88,24 +94,19 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse max_block_size = parse(max_block_size_str); } - std::string columns = params.get("columns"); + 
std::string sample_block_string = params.get("sample_block"); std::unique_ptr sample_block; try { - sample_block = parseColumns(std::move(columns)); + sample_block = parseColumns(std::move(sample_block_string)); } catch (const Exception & ex) { - processError(response, "Invalid 'columns' parameter in request body '" + ex.message() + "'"); - LOG_WARNING(log, ex.getStackTraceString()); + processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'"); + LOG_ERROR(log, ex.getStackTraceString()); return; } - std::string format = params.get("format", "RowBinary"); - - std::string connection_string = params.get("connection_string"); - LOG_TRACE(log, "Connection string: '{}'", connection_string); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try diff --git a/src/Bridge/XDBCBridgeHelper.h b/src/Bridge/XDBCBridgeHelper.h index 8a31171111d..a5f21e28204 100644 --- a/src/Bridge/XDBCBridgeHelper.h +++ b/src/Bridge/XDBCBridgeHelper.h @@ -37,7 +37,7 @@ class IXDBCBridgeHelper : public IBridgeHelper public: explicit IXDBCBridgeHelper(ContextPtr context_) : IBridgeHelper(context_) {} - virtual std::vector> getURLParams(const std::string & cols, UInt64 max_block_size) const = 0; + virtual std::vector> getURLParams(UInt64 max_block_size) const = 0; virtual Poco::URI getColumnsInfoURI() const = 0; @@ -138,12 +138,11 @@ protected: return uri; } - URLParams getURLParams(const std::string & cols, UInt64 max_block_size) const override + URLParams getURLParams(UInt64 max_block_size) const override { std::vector> result; result.emplace_back("connection_string", connection_string); /// already validated - result.emplace_back("columns", cols); result.emplace_back("max_block_size", std::to_string(max_block_size)); return result; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index ac7b9111a11..5774641a90f 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -16,12 +16,9 @@ #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" #include "readInvalidateQuery.h" - #include "registerDictionaries.h" +#include -#if USE_ODBC -# include // Y_IGNORE -#endif namespace DB { @@ -125,7 +122,7 @@ XDBCDictionarySource::XDBCDictionarySource( { bridge_url = bridge_helper->getMainURI(); - auto url_params = bridge_helper->getURLParams(sample_block_.getNamesAndTypesList().toString(), max_block_size); + auto url_params = bridge_helper->getURLParams(max_block_size); for (const auto & [name, value] : url_params) bridge_url.addQueryParameter(name, value); } @@ -151,6 +148,7 @@ XDBCDictionarySource::XDBCDictionarySource(const XDBCDictionarySource & other) { } + std::string XDBCDictionarySource::getUpdateFieldAndDate() { if (update_time != std::chrono::system_clock::from_time_t(0)) @@ -167,52 +165,61 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate() } } + BlockInputStreamPtr XDBCDictionarySource::loadAll() { LOG_TRACE(log, load_all_query); - return loadBase(load_all_query); + return loadFromQuery(bridge_url, sample_block, load_all_query); } + BlockInputStreamPtr XDBCDictionarySource::loadUpdatedAll() { std::string load_query_update = getUpdateFieldAndDate(); LOG_TRACE(log, load_query_update); - return loadBase(load_query_update); + return loadFromQuery(bridge_url, sample_block, load_query_update); } + BlockInputStreamPtr XDBCDictionarySource::loadIds(const std::vector & ids) { const auto query = 
query_builder.composeLoadIdsQuery(ids); - return loadBase(query); + return loadFromQuery(bridge_url, sample_block, query); } + BlockInputStreamPtr XDBCDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); - return loadBase(query); + return loadFromQuery(bridge_url, sample_block, query); } + bool XDBCDictionarySource::supportsSelectiveLoad() const { return true; } + bool XDBCDictionarySource::hasUpdateField() const { return !update_field.empty(); } + DictionarySourcePtr XDBCDictionarySource::clone() const { return std::make_unique(*this); } + std::string XDBCDictionarySource::toString() const { return bridge_helper->getName() + ": " + db + '.' + table + (where.empty() ? "" : ", where: " + where); } + bool XDBCDictionarySource::isModified() const { if (!invalidate_query.empty()) @@ -235,41 +242,38 @@ std::string XDBCDictionarySource::doInvalidateQuery(const std::string & request) bridge_helper->startBridgeSync(); auto invalidate_url = bridge_helper->getMainURI(); - auto url_params = bridge_helper->getURLParams(invalidate_sample_block.getNamesAndTypesList().toString(), max_block_size); + auto url_params = bridge_helper->getURLParams(max_block_size); for (const auto & [name, value] : url_params) invalidate_url.addQueryParameter(name, value); - XDBCBridgeBlockInputStream stream( - invalidate_url, - [request](std::ostream & os) { os << "query=" << request; }, - invalidate_sample_block, - getContext(), - max_block_size, - timeouts, - bridge_helper->getName() + "BlockInputStream"); - - return readInvalidateQuery(stream); + return readInvalidateQuery(*loadFromQuery(invalidate_url, invalidate_sample_block, request)); } -BlockInputStreamPtr XDBCDictionarySource::loadBase(const std::string & query) const + +BlockInputStreamPtr XDBCDictionarySource::loadFromQuery(const Poco::URI url, const Block & required_sample_block, const std::string & query) const { bridge_helper->startBridgeSync(); + + auto write_body_callback = [required_sample_block, query](std::ostream & os) + { + os << "sample_block=" << escapeForFileName(required_sample_block.getNamesAndTypesList().toString()); + os << "&"; + os << "query=" << escapeForFileName(query); + }; + return std::make_shared( - bridge_url, - [query](std::ostream & os) { os << "query=" << query; }, - sample_block, + url, + write_body_callback, + required_sample_block, getContext(), max_block_size, timeouts, bridge_helper->getName() + "BlockInputStream"); } + void registerDictionarySourceXDBC(DictionarySourceFactory & factory) { -#if USE_ODBC - Poco::Data::ODBC::Connector::registerConnector(); -#endif - auto create_table_source = [=](const DictionaryStructure & dict_struct, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, @@ -294,6 +298,7 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory) factory.registerSource("odbc", create_table_source); } + void registerDictionarySourceJDBC(DictionarySourceFactory & factory) { auto create_table_source = [=](const DictionaryStructure & /* dict_struct */, diff --git a/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h index 438f48027e5..bd473e0db8a 100644 --- a/src/Dictionaries/XDBCDictionarySource.h +++ b/src/Dictionaries/XDBCDictionarySource.h @@ -62,7 +62,7 @@ private: // execute invalidate_query. 
expects single cell in result std::string doInvalidateQuery(const std::string & request) const; - BlockInputStreamPtr loadBase(const std::string & query) const; + BlockInputStreamPtr loadFromQuery(const Poco::URI url, const Block & required_sample_block, const std::string & query) const; Poco::Logger * log; diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index c535f6977a6..f94696c716b 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -14,6 +14,8 @@ #include #include #include +#include + namespace DB { @@ -53,24 +55,18 @@ std::string StorageXDBC::getReadMethod() const } std::vector> StorageXDBC::getReadURIParams( - const Names & column_names, - const StorageMetadataPtr & metadata_snapshot, + const Names & /* column_names */, + const StorageMetadataPtr & /* metadata_snapshot */, const SelectQueryInfo & /*query_info*/, ContextPtr /*context*/, QueryProcessingStage::Enum & /*processed_stage*/, size_t max_block_size) const { - NamesAndTypesList cols; - for (const String & name : column_names) - { - auto column_data = metadata_snapshot->getColumns().getPhysical(name); - cols.emplace_back(column_data.name, column_data.type); - } - return bridge_helper->getURLParams(cols.toString(), max_block_size); + return bridge_helper->getURLParams(max_block_size); } std::function StorageXDBC::getReadPOSTDataCallback( - const Names & /*column_names*/, + const Names & column_names, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr local_context, @@ -84,7 +80,21 @@ std::function StorageXDBC::getReadPOSTDataCallback( remote_table_name, local_context); - return [query](std::ostream & os) { os << "query=" << query; }; + NamesAndTypesList cols; + for (const String & name : column_names) + { + auto column_data = metadata_snapshot->getColumns().getPhysical(name); + cols.emplace_back(column_data.name, column_data.type); + } + + auto write_body_callback = [query, cols](std::ostream & os) + { + os << "sample_block=" << escapeForFileName(cols.toString()); + os << "&"; + os << "query=" << escapeForFileName(query); + }; + + return write_body_callback; } Pipe StorageXDBC::read( @@ -106,20 +116,17 @@ BlockOutputStreamPtr StorageXDBC::write(const ASTPtr & /*query*/, const StorageM { bridge_helper->startBridgeSync(); - NamesAndTypesList cols; Poco::URI request_uri = uri; request_uri.setPath("/write"); - for (const String & name : metadata_snapshot->getSampleBlock().getNames()) - { - auto column_data = metadata_snapshot->getColumns().getPhysical(name); - cols.emplace_back(column_data.name, column_data.type); - } - auto url_params = bridge_helper->getURLParams(cols.toString(), 65536); + + auto url_params = bridge_helper->getURLParams(65536); for (const auto & [param, value] : url_params) request_uri.addQueryParameter(param, value); + request_uri.addQueryParameter("db_name", remote_database_name); request_uri.addQueryParameter("table_name", remote_table_name); request_uri.addQueryParameter("format_name", format_name); + request_uri.addQueryParameter("sample_block", metadata_snapshot->getSampleBlock().getNamesAndTypesList().toString()); return std::make_shared( request_uri, diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py index f6026e8dd3b..f7babe19906 100644 --- a/tests/integration/test_odbc_interaction/test.py +++ b/tests/integration/test_odbc_interaction/test.py @@ -505,3 +505,36 @@ def test_concurrent_queries(started_cluster): node1.query('DROP TABLE test_pg_table;') 
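# [Editor's sketch, not part of the test file: a guess at why this patch moves the
#  column list ("sample_block") from the bridge URL into the POST body. The test below
#  builds a table with 1000 columns whose names are "column" repeated 8 times plus an
#  index; the resulting DDL column list alone is roughly 60 kB, far beyond common URL
#  length limits. This reading is the editor's assumption, not stated in the commit.]
# approx_len = len(", ".join("{}{} integer".format("column" * 8, i) for i in range(1000)))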
     cursor.execute('DROP TABLE clickhouse.test_pg_table;')
+
+
+def test_odbc_long_column_names(started_cluster):
+    conn = get_postgres_conn()
+    cursor = conn.cursor()
+
+    column_name = "column" * 8
+    create_table = "CREATE TABLE clickhouse.test_long_column_names ("
+    for i in range(1000):
+        if i != 0:
+            create_table += ", "
+        create_table += "{} integer".format(column_name + str(i))
+    create_table += ")"
+    cursor.execute(create_table)
+    insert = "INSERT INTO clickhouse.test_long_column_names SELECT i" + ", i" * 999 + " FROM generate_series(0, 99) as t(i)"
+    cursor.execute(insert)
+    conn.commit()
+
+    create_table = "CREATE TABLE test_long_column_names ("
+    for i in range(1000):
+        if i != 0:
+            create_table += ", "
+        create_table += "{} UInt32".format(column_name + str(i))
+    create_table += ") ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_column_names')"
+    node1.query(create_table)
+
+    result = node1.query('SELECT * FROM test_long_column_names')
+    expected = node1.query("SELECT number" + ", number" * 999 + " FROM numbers(100)")
+    assert(result == expected)
+
+    cursor.execute("DROP TABLE IF EXISTS clickhouse.test_long_column_names")
+    node1.query("DROP TABLE IF EXISTS test_long_column_names")
+

From 5258cccdf7be404b47d1170ea12c5bd203dc3178 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Sat, 17 Apr 2021 09:55:36 +0000
Subject: [PATCH 130/133] Add test

---
 .../integration/test_odbc_interaction/test.py | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py
index f6026e8dd3b..865ba6b7aba 100644
--- a/tests/integration/test_odbc_interaction/test.py
+++ b/tests/integration/test_odbc_interaction/test.py
@@ -505,3 +505,26 @@ def test_concurrent_queries(started_cluster):
     node1.query('DROP TABLE test_pg_table;')
     cursor.execute('DROP TABLE clickhouse.test_pg_table;')
+
+
+def test_odbc_long_text(started_cluster):
+    conn = get_postgres_conn()
+    cursor = conn.cursor()
+    cursor.execute("drop table if exists clickhouse.test_long_text")
+    cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)")
+
+    # sample test from issue 9363
+    text_from_issue = """BEGIN These examples only show the order that data is arranged in. The values from different columns are stored separately, and data from the same column is stored together. Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+. Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. The higher the load on the system, the more important it is to customize the system set up to match the requirements of the usage scenario, and the more fine grained this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, under a high load, the system will handle all the scenarios equally poorly, or will work well for just one or few of possible scenarios. Key Properties of OLAP Scenario¶ The vast majority of requests are for read access. Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all. Data is added to the DB but is not modified. For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns. Tables are "wide," meaning they contain a large number of columns. Queries are relatively rare (usually hundreds of queries per server or less per second). For simple queries, latencies around 50 ms are allowed. Column values are fairly small: numbers and short strings (for example, 60 bytes per URL). Requires high throughput when processing a single query (up to billions of rows per second per server). Transactions are not necessary. Low requirements for data consistency. There is one large table per query. All tables are small, except for one. A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server"s RAM. It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn"t make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases. Why Column-Oriented Databases Work Better in the OLAP Scenario¶ Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster in processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually. END"""
+    cursor.execute("""insert into clickhouse.test_long_text (flen, field1) values (3248, '{}')""".format(text_from_issue))
+
+    node1.query('''
+        DROP TABLE IF EXISTS test_long_text;
+        CREATE TABLE test_long_text (flen UInt32, field1 String)
+        ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_text')''')
+    result = node1.query("select field1 from test_long_text;")
+    assert(result.strip() == text_from_issue)
+
+    long_text = "text" * 1000000
+    cursor.execute("""insert into clickhouse.test_long_text (flen, field1) values (400000, '{}')""".format(long_text))
+    result = node1.query("select field1 from test_long_text where flen=400000;")
+    assert(result.strip() == long_text)

From 660efccbe296cb467e6cd6546a6cf9b6403d656d Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Sat, 17 Apr 2021 19:00:37 +0300
Subject: [PATCH 131/133] Improve documentation for CREATE ROW POLICY command #2.

---
 .../statements/create/row-policy.md | 29 +++++++++-------
 .../statements/create/row-policy.md | 34 +++++++++++--------
 2 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md
index 6f769fb1dca..5a1fa218fad 100644
--- a/docs/en/sql-reference/statements/create/row-policy.md
+++ b/docs/en/sql-reference/statements/create/row-policy.md
@@ -17,8 +17,6 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste
     [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]
 ```
 
-`ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
-
 ## USING Clause {#create-row-policy-using}
 
 Allows to specify a condition to filter rows. An user will see a row if the condition is calculated to non-zero for the row.
@@ -30,20 +28,20 @@ In the section `TO` you can provide a list of users and roles this policy should
 
 Keyword `ALL` means all the ClickHouse users including current user. Keyword `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
 
 !!! note "Note"
-    If there are no row policies defined for a table then any user can `SELECT` all the row from the table.
-    Defining one or more row policies for the table makes the access to the table depending on the row policies no matter if
-    those row policies are defined for the current user or not. For example, the following row policy
-
+    If there are no row policies defined for a table then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table depend on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy
+
     `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`
 
-    forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all! If that isn't desirable you can fix it by adding one more row policy, for example:
+    forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
+
+    If that's not desirable it can be fixed by adding one more row policy, like the following:
 
     `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
 
 ## AS Clause {#create-row-policy-as}
 
-It's allowed to have more than one policy enabled on the same table for the same user at the one time.
-So we need a way to combine the conditions from multiple policies.
+It's allowed to have more than one policy enabled on the same table for the same user at the same time. So we need a way to combine the conditions from multiple policies.
+
 By default policies are combined using the boolean `OR` operator. For example, the following policies
 
 ``` sql
 CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
 CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
 ```
 
 enables the user `peter` to see rows with either `b=1` or `c=2`.
 
-The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive.
-By default policies are permissive, which means they are combined using the boolean `OR` operator.
+The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default policies are permissive, which means they are combined using the boolean `OR` operator.
 
 A policy can be defined as restrictive as an alternative. Restrictive policies are combined using the boolean `AND` operator.
 
-Here is the formula:
+
+Here is the general formula:
 
 ```
-row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND (all of the restrictive policies's conditions are non-zero)`
+row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND
+                 (all of the restrictive policies' conditions are non-zero)
 ```
 
 For example, the following policies
 
@@ -72,6 +71,10 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
 
 enables the user `peter` to see rows only if both `b=1` AND `c=2`.
 
+## ON CLUSTER Clause {#create-row-policy-on-cluster}
+
+Allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
 ## Examples
 
diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md
index 95fa29ff48a..6fe1dc45815 100644
--- a/docs/ru/sql-reference/statements/create/row-policy.md
+++ b/docs/ru/sql-reference/statements/create/row-policy.md
@@ -17,9 +17,7 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```
 
-Секция `ON CLUSTER` позволяет создавать политики на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md).
-
-## USING Clause {#create-row-policy-using}
+## Секция USING {#create-row-policy-using}
 
 Секция `USING` указывает условие для фильтрации строк. Пользователь может видеть строку, если это условие, вычисленное для строки, дает ненулевой результат.
 
@@ -30,22 +28,21 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste
 
 Ключевым словом `ALL` обозначаются все пользователи, включая текущего. Ключевые слова `ALL EXCEPT` позволяют исключить пользователей из списка всех пользователей. Например, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
 
 !!! note "Note"
-    Если для таблицы не задано ни одной политики доступа к строкам, то любой пользователь может выполнить `SELECT` и получить все строки таблицы.
-    Если определить хотя бы одну политику для таблицы, до доступ к строкам будет управляться этими политиками, причем для всех пользователей
-    (даже для тех, для кого политики не определялись). Например, следующая политика
+    Если для таблицы не задано ни одной политики доступа к строкам, то любой пользователь может выполнить команду SELECT и получить все строки таблицы. Если определить хотя бы одну политику для таблицы, то доступ к строкам будет управляться этими политиками, причем для всех пользователей (даже для тех, для кого политики не определялись). Например, следующая политика
 
     `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`
 
-    запретит пользователям `mira` и `peter` видеть строки с `b != 1`, и еще запретит всем остальным пользователям (например, пользователю `paul`)
-    видеть какие-либо строки вообще из таблицы `mydb.table1`! Если это нежелательно, такое поведение можно исправить, определив дополнительную политику:
+    запретит пользователям `mira` и `peter` видеть строки с `b != 1`, и еще запретит всем остальным пользователям (например, пользователю `paul`) видеть какие-либо строки вообще из таблицы `mydb.table1`.
+
+    Если это нежелательно, такое поведение можно исправить, определив дополнительную политику:
 
     `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
 
 ## Секция AS {#create-row-policy-as}
 
-Может быть одновременно активно более одной политики для одной и той же таблицы и одного и того же пользователя.
-Поэтому нам нужен способ комбинировать политики. По умолчанию политики комбинируются с использованием логического оператора `OR`.
-Например, политики:
+Может быть одновременно активно более одной политики для одной и той же таблицы и одного и того же пользователя. Поэтому нам нужен способ комбинировать политики.
+
+По умолчанию политики комбинируются с использованием логического оператора `OR`. Например, политики:
 
 ``` sql
 CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
@@ -55,10 +52,15 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
 
 разрешат пользователю с именем `peter` видеть строки, для которых будет верно `b=1` или `c=2`.
 
 Секция `AS` указывает, как политики должны комбинироваться с другими политиками. Политики могут быть или разрешительными (`PERMISSIVE`), или ограничительными (`RESTRICTIVE`). По умолчанию политики создаются разрешительными (`PERMISSIVE`); такие политики комбинируются с использованием логического оператора `OR`.
 
-Ограничительные (`RESTRICTIVE`) политики комбинируются с использованием логического оператора `AND`.
-Используется следующая формула:
-`строка_видима = (одна или больше permissive-политик дала ненулевой результат проверки условия) И (все restrictive-политики дали ненулевой результат проверки условия)`
+Ограничительные (`RESTRICTIVE`) политики комбинируются с использованием логического оператора `AND`.
+
+Общая формула выглядит так:
+
+```
+строка_видима = (одна или больше permissive-политик дала ненулевой результат проверки условия) И
+                (все restrictive-политики дали ненулевой результат проверки условия)
+```
 
 Например, политики
 
 ``` sql
 CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 AS PERMISSIVE TO peter, antonio
 CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
 ```
 
 разрешат пользователю с именем `peter` видеть только те строки, для которых будет одновременно `b=1` и `c=2`.
 
+## Секция ON CLUSTER {#create-row-policy-on-cluster}
+
+Секция `ON CLUSTER` позволяет создавать политики на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md).
+
 ## Примеры
 
 `CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost`

From 8ac953f45914385b342f9471b7043155122c0aaf Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 17 Apr 2021 19:31:35 +0300
Subject: [PATCH 132/133] Make function `unhex` case insensitive for
 compatibility

---
 src/Functions/FunctionsCoding.cpp                              | 2 +-
 .../queries/0_stateless/01820_unhex_case_insensitive.reference | 1 +
 tests/queries/0_stateless/01820_unhex_case_insensitive.sql     | 2 ++
 3 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 tests/queries/0_stateless/01820_unhex_case_insensitive.reference
 create mode 100644 tests/queries/0_stateless/01820_unhex_case_insensitive.sql

diff --git a/src/Functions/FunctionsCoding.cpp b/src/Functions/FunctionsCoding.cpp
index cf85b4512d5..c1e20a657b2 100644
--- a/src/Functions/FunctionsCoding.cpp
+++ b/src/Functions/FunctionsCoding.cpp
@@ -20,7 +20,7 @@ void registerFunctionsCoding(FunctionFactory & factory)
     factory.registerFunction();
     factory.registerFunction();
     factory.registerFunction(FunctionFactory::CaseInsensitive);
-    factory.registerFunction<FunctionUnhex>();
+    factory.registerFunction<FunctionUnhex>(FunctionFactory::CaseInsensitive);
     factory.registerFunction(FunctionFactory::CaseInsensitive);
     factory.registerFunction();
     factory.registerFunction();
diff --git a/tests/queries/0_stateless/01820_unhex_case_insensitive.reference b/tests/queries/0_stateless/01820_unhex_case_insensitive.reference
new file mode 100644
index 00000000000..e692ee54787
--- /dev/null
+++ b/tests/queries/0_stateless/01820_unhex_case_insensitive.reference
@@ -0,0 +1 @@
+012	MySQL
diff --git a/tests/queries/0_stateless/01820_unhex_case_insensitive.sql b/tests/queries/0_stateless/01820_unhex_case_insensitive.sql
new file mode 100644
index 00000000000..99d8031eeda
--- /dev/null
+++ b/tests/queries/0_stateless/01820_unhex_case_insensitive.sql
@@ -0,0 +1,2 @@
+-- MySQL has function `unhex`, so we will make our function `unhex` also case insensitive for compatibility.
+SELECT unhex('303132'), UNHEX('4D7953514C');

From fe0b76b1052fecd80783ca3f9929725b47dc89d2 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Sat, 17 Apr 2021 22:26:45 +0300
Subject: [PATCH 133/133] Fix documentation for the GRANT command.

---
 docs/en/sql-reference/statements/grant.md | 12 ++++++------
 docs/ru/sql-reference/statements/grant.md | 12 ++++++------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md
index 0a5c737b550..89f35b5f701 100644
--- a/docs/en/sql-reference/statements/grant.md
+++ b/docs/en/sql-reference/statements/grant.md
@@ -91,7 +91,7 @@ Hierarchy of privileges:
         - `ALTER ADD CONSTRAINT`
         - `ALTER DROP CONSTRAINT`
         - `ALTER TTL`
-        - `ALTER MATERIALIZE TTL` 
+        - `ALTER MATERIALIZE TTL`
         - `ALTER SETTINGS`
         - `ALTER MOVE PARTITION`
         - `ALTER FETCH PARTITION`
@@ -102,9 +102,9 @@ Hierarchy of privileges:
 - [CREATE](#grant-create)
     - `CREATE DATABASE`
     - `CREATE TABLE`
+        - `CREATE TEMPORARY TABLE`
     - `CREATE VIEW`
     - `CREATE DICTIONARY`
-    - `CREATE TEMPORARY TABLE`
 - [DROP](#grant-drop)
     - `DROP DATABASE`
     - `DROP TABLE`
@@ -150,7 +150,7 @@ Hierarchy of privileges:
     - `SYSTEM RELOAD`
         - `SYSTEM RELOAD CONFIG`
         - `SYSTEM RELOAD DICTIONARY`
-        - `SYSTEM RELOAD EMBEDDED DICTIONARIES` 
+        - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
     - `SYSTEM MERGES`
     - `SYSTEM TTL MERGES`
     - `SYSTEM FETCHES`
@@ -276,7 +276,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries
     - `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
     - `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
     - `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
-    - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL` 
+    - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
     - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
     - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
     - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART`
@@ -304,9 +304,9 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A
 
 - `CREATE`. Level: `GROUP`
     - `CREATE DATABASE`. Level: `DATABASE`
     - `CREATE TABLE`. Level: `TABLE`
+        - `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
     - `CREATE VIEW`. Level: `VIEW`
     - `CREATE DICTIONARY`. Level: `DICTIONARY`
-    - `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
 
 **Notes**
 
@@ -401,7 +401,7 @@ Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) quer
 
 - `SYSTEM RELOAD`. Level: `GROUP`
     - `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
     - `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
-    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: R`ELOAD EMBEDDED DICTIONARIES`
+    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES`
 - `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
 - `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
 - `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md
index 7b2d26902ef..093e6eb3b93 100644
--- a/docs/ru/sql-reference/statements/grant.md
+++ b/docs/ru/sql-reference/statements/grant.md
@@ -93,7 +93,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
         - `ALTER ADD CONSTRAINT`
         - `ALTER DROP CONSTRAINT`
         - `ALTER TTL`
-        - `ALTER MATERIALIZE TTL` 
+        - `ALTER MATERIALIZE TTL`
         - `ALTER SETTINGS`
         - `ALTER MOVE PARTITION`
         - `ALTER FETCH PARTITION`
@@ -104,9 +104,9 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
 - [CREATE](#grant-create)
     - `CREATE DATABASE`
     - `CREATE TABLE`
+        - `CREATE TEMPORARY TABLE`
     - `CREATE VIEW`
     - `CREATE DICTIONARY`
-    - `CREATE TEMPORARY TABLE`
 - [DROP](#grant-drop)
     - `DROP DATABASE`
     - `DROP TABLE`
@@ -152,7 +152,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
     - `SYSTEM RELOAD`
         - `SYSTEM RELOAD CONFIG`
         - `SYSTEM RELOAD DICTIONARY`
-        - `SYSTEM RELOAD EMBEDDED DICTIONARIES` 
+        - `SYSTEM RELOAD EMBEDDED DICTIONARIES`
     - `SYSTEM MERGES`
     - `SYSTEM TTL MERGES`
     - `SYSTEM FETCHES`
@@ -279,7 +279,7 @@ GRANT INSERT(x,y) ON db.table TO john
     - `ALTER ADD CONSTRAINT`. Уровень: `TABLE`. Алиасы: `ADD CONSTRAINT`
     - `ALTER DROP CONSTRAINT`. Уровень: `TABLE`. Алиасы: `DROP CONSTRAINT`
     - `ALTER TTL`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY TTL`, `MODIFY TTL`
-    - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL` 
+    - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL`
     - `ALTER SETTINGS`. Уровень: `TABLE`. Алиасы: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
     - `ALTER MOVE PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
     - `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `FETCH PARTITION`
@@ -307,9 +307,9 @@ GRANT INSERT(x,y) ON db.table TO john
 
 - `CREATE`. Уровень: `GROUP`
     - `CREATE DATABASE`. Уровень: `DATABASE`
     - `CREATE TABLE`. Уровень: `TABLE`
+        - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL`
     - `CREATE VIEW`. Уровень: `VIEW`
     - `CREATE DICTIONARY`. Уровень: `DICTIONARY`
-    - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL`
 
 **Дополнительно**
 
@@ -407,7 +407,7 @@ GRANT INSERT(x,y) ON db.table TO john
 
 - `SYSTEM RELOAD`. Уровень: `GROUP`
     - `SYSTEM RELOAD CONFIG`. Уровень: `GLOBAL`. Алиасы: `RELOAD CONFIG`
     - `SYSTEM RELOAD DICTIONARY`. Уровень: `GLOBAL`. Алиасы: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
-    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES` 
+    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES`
 - `SYSTEM MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
 - `SYSTEM TTL MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
 - `SYSTEM FETCHES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
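
The core of the XDBC change in this series is that `loadFromQuery` now ships both the query and the serialized sample block in the POST body, each run through `escapeForFileName`, instead of packing the column list into URL parameters; that is why arbitrarily long column lists survive the round trip to the bridge (see `test_odbc_long_column_names`). A minimal Python sketch of that body format, where the `escape` helper is a URL-encoding stand-in for ClickHouse's `escapeForFileName` and is an assumption, not its actual implementation:

```python
from urllib.parse import parse_qs, quote


def escape(text: str) -> str:
    # Stand-in for escapeForFileName: any escaping works for this sketch
    # as long as it round-trips through a form-urlencoded body.
    return quote(text, safe="")


def compose_post_body(sample_block: str, query: str) -> str:
    # Same shape as write_body_callback in XDBCDictionarySource::loadFromQuery:
    #   sample_block=<escaped>&query=<escaped>
    return "sample_block=" + escape(sample_block) + "&query=" + escape(query)


body = compose_post_body("x UInt32, y String", "SELECT x, y FROM long_table")
decoded = {k: v[0] for k, v in parse_qs(body).items()}  # parse_qs also unescapes
assert decoded["query"] == "SELECT x, y FROM long_table"
assert decoded["sample_block"] == "x UInt32, y String"
```

Because the body is the only size-sensitive channel left, the bridge URL stays short no matter how wide the table is, which is the design choice the long-column-names test exercises.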
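The row-policy combination rule documented in patch 131 reduces to a single boolean expression. A sketch with plain booleans standing in for evaluated `USING` conditions; it assumes at least one policy is defined for the table, matching the note in the documentation:

```python
def row_is_visible(permissive_conditions, restrictive_conditions):
    # One or more permissive conditions must be non-zero for the row ...
    # ... AND every restrictive condition must be non-zero.
    return any(permissive_conditions) and all(restrictive_conditions)


# pol1: USING b=1 (permissive), pol2: USING c=2 (restrictive), user peter.
# Row with b=1 and c=2 is visible; row with b=1 and c=3 is not.
assert row_is_visible([True], [True]) is True
assert row_is_visible([True], [False]) is False
```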
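The new SQL test in patch 132 pins down what the `unhex` change buys: the lower-case and upper-case spellings must both resolve and decode the same bytes. A standalone sketch of the expected decoding, using Python's `bytes.fromhex` as a reference model for `unhex`; the literals and the expected output `012` / `MySQL` come from the test and reference files above:

```python
# '303132' and '4D7953514C' are the hex literals from the new SQL test.
assert bytes.fromhex("303132") == b"012"
assert bytes.fromhex("4D7953514C") == b"MySQL"

# After registering with FunctionFactory::CaseInsensitive, both spellings
# hit the same implementation in ClickHouse:
#   SELECT unhex('303132'), UNHEX('4D7953514C');  -- returns: 012  MySQL
```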