Merge branch 'master' into refactor-secret-finder

Yakov Olkhovskiy 2024-09-17 12:16:19 +00:00
commit fd0c7a1c18
216 changed files with 3663 additions and 1361 deletions

6
.gitmodules vendored
View File

@ -170,9 +170,6 @@
[submodule "contrib/fast_float"]
path = contrib/fast_float
url = https://github.com/fastfloat/fast_float
[submodule "contrib/libpq"]
path = contrib/libpq
url = https://github.com/ClickHouse/libpq
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse/NuRaft
@ -369,3 +366,6 @@
[submodule "contrib/numactl"]
path = contrib/numactl
url = https://github.com/ClickHouse/numactl.git
[submodule "contrib/postgres"]
path = contrib/postgres
url = https://github.com/ClickHouse/postgres.git

View File

@ -145,8 +145,13 @@ add_contrib (isa-l-cmake isa-l)
add_contrib (libhdfs3-cmake libhdfs3) # requires: google-protobuf, krb5, isa-l
add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arrow, libhdfs3
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
option(ENABLE_LIBPQXX "Enable PostgreSQL" ${ENABLE_LIBRARIES})
if (ENABLE_LIBPQXX)
add_contrib (postgres-cmake postgres)
add_contrib (libpqxx-cmake libpqxx)
endif()
add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)

1
contrib/libpq vendored

@ -1 +0,0 @@
Subproject commit 2446f2c85650b56df9d4ebc4c2ea7f4b01beee57

View File

@ -1,78 +0,0 @@
if (NOT ENABLE_LIBPQXX)
return()
endif()
set(LIBPQ_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpq")
set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${LIBPQ_SOURCE_DIR}/common/scram-common.c"
"${LIBPQ_SOURCE_DIR}/common/sha2.c"
"${LIBPQ_SOURCE_DIR}/common/sha1.c"
"${LIBPQ_SOURCE_DIR}/common/md5.c"
"${LIBPQ_SOURCE_DIR}/common/md5_common.c"
"${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/cryptohash.c"
"${LIBPQ_SOURCE_DIR}/common/saslprep.c"
"${LIBPQ_SOURCE_DIR}/common/unicode_norm.c"
"${LIBPQ_SOURCE_DIR}/common/ip.c"
"${LIBPQ_SOURCE_DIR}/common/jsonapi.c"
"${LIBPQ_SOURCE_DIR}/common/wchar.c"
"${LIBPQ_SOURCE_DIR}/common/base64.c"
"${LIBPQ_SOURCE_DIR}/common/link-canary.c"
"${LIBPQ_SOURCE_DIR}/common/fe_memutils.c"
"${LIBPQ_SOURCE_DIR}/common/string.c"
"${LIBPQ_SOURCE_DIR}/common/pg_get_line.c"
"${LIBPQ_SOURCE_DIR}/common/stringinfo.c"
"${LIBPQ_SOURCE_DIR}/common/psprintf.c"
"${LIBPQ_SOURCE_DIR}/common/encnames.c"
"${LIBPQ_SOURCE_DIR}/common/logging.c"
"${LIBPQ_SOURCE_DIR}/port/snprintf.c"
"${LIBPQ_SOURCE_DIR}/port/strlcpy.c"
"${LIBPQ_SOURCE_DIR}/port/strerror.c"
"${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c"
"${LIBPQ_SOURCE_DIR}/port/getpeereid.c"
"${LIBPQ_SOURCE_DIR}/port/chklocale.c"
"${LIBPQ_SOURCE_DIR}/port/noblock.c"
"${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c"
"${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
"${LIBPQ_SOURCE_DIR}/port/thread.c"
"${LIBPQ_SOURCE_DIR}/port/path.c"
)
add_library(_libpq ${SRCS})
add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)
target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include")
target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs")
# NOTE: this is a dirty hack to avoid and instead pg_config.h should be shipped
# for different OS'es like for jemalloc, not one generic for all OS'es like
# now.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()
target_link_libraries (_libpq PRIVATE OpenSSL::SSL)
add_library(ch_contrib::libpq ALIAS _libpq)

View File

@ -1,10 +1,3 @@
option(ENABLE_LIBPQXX "Enalbe libpqxx" ${ENABLE_LIBRARIES})
if (NOT ENABLE_LIBPQXX)
message(STATUS "Not using libpqxx")
return()
endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx")
set (SRCS

1
contrib/postgres vendored Submodule

@ -0,0 +1 @@
Subproject commit 665ff8c164d56d012e359735efe4d400c0564b44

View File

@ -0,0 +1,78 @@
# Build description for libpq which is part of the PostgreSQL sources
set(POSTGRES_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres")
set(LIBPQ_SOURCE_DIR "${POSTGRES_SOURCE_DIR}/src/interfaces/libpq")
set(LIBPQ_CMAKE_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres-cmake")
set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${POSTGRES_SOURCE_DIR}/src/common/scram-common.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha2.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha1.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5_common.c"
"${POSTGRES_SOURCE_DIR}/src/common/hmac_openssl.c"
"${POSTGRES_SOURCE_DIR}/src/common/cryptohash.c"
"${POSTGRES_SOURCE_DIR}/src/common/saslprep.c"
"${POSTGRES_SOURCE_DIR}/src/common/unicode_norm.c"
"${POSTGRES_SOURCE_DIR}/src/common/ip.c"
"${POSTGRES_SOURCE_DIR}/src/common/jsonapi.c"
"${POSTGRES_SOURCE_DIR}/src/common/wchar.c"
"${POSTGRES_SOURCE_DIR}/src/common/base64.c"
"${POSTGRES_SOURCE_DIR}/src/common/link-canary.c"
"${POSTGRES_SOURCE_DIR}/src/common/fe_memutils.c"
"${POSTGRES_SOURCE_DIR}/src/common/string.c"
"${POSTGRES_SOURCE_DIR}/src/common/pg_get_line.c"
"${POSTGRES_SOURCE_DIR}/src/common/stringinfo.c"
"${POSTGRES_SOURCE_DIR}/src/common/psprintf.c"
"${POSTGRES_SOURCE_DIR}/src/common/encnames.c"
"${POSTGRES_SOURCE_DIR}/src/common/logging.c"
"${POSTGRES_SOURCE_DIR}/src/port/snprintf.c"
"${POSTGRES_SOURCE_DIR}/src/port/strlcpy.c"
"${POSTGRES_SOURCE_DIR}/src/port/strerror.c"
"${POSTGRES_SOURCE_DIR}/src/port/inet_net_ntop.c"
"${POSTGRES_SOURCE_DIR}/src/port/getpeereid.c"
"${POSTGRES_SOURCE_DIR}/src/port/chklocale.c"
"${POSTGRES_SOURCE_DIR}/src/port/noblock.c"
"${POSTGRES_SOURCE_DIR}/src/port/pg_strong_random.c"
"${POSTGRES_SOURCE_DIR}/src/port/pgstrcasecmp.c"
"${POSTGRES_SOURCE_DIR}/src/port/thread.c"
"${POSTGRES_SOURCE_DIR}/src/port/path.c"
)
add_library(_libpq ${SRCS})
add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)
target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${POSTGRES_SOURCE_DIR}/src/include")
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_CMAKE_SOURCE_DIR}") # pre-generated headers
# NOTE: this is a dirty hack; ideally pg_config.h should be shipped per OS
# (as is done for jemalloc), not as one generic header for all OSes like now.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()
target_link_libraries (_libpq PRIVATE OpenSSL::SSL)
add_library(ch_contrib::libpq ALIAS _libpq)

View File

@ -0,0 +1,941 @@
/* src/include/pg_config.h. Generated from pg_config.h.in by configure. */
/* src/include/pg_config.h.in. Generated from configure.in by autoheader. */
/* Define to the type of arg 1 of 'accept' */
#define ACCEPT_TYPE_ARG1 int
/* Define to the type of arg 2 of 'accept' */
#define ACCEPT_TYPE_ARG2 struct sockaddr *
/* Define to the type of arg 3 of 'accept' */
#define ACCEPT_TYPE_ARG3 size_t
/* Define to the return type of 'accept' */
#define ACCEPT_TYPE_RETURN int
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* The normal alignment of `double', in bytes. */
#define ALIGNOF_DOUBLE 4
/* The normal alignment of `int', in bytes. */
#define ALIGNOF_INT 4
/* The normal alignment of `long', in bytes. */
#define ALIGNOF_LONG 4
/* The normal alignment of `long long int', in bytes. */
#define ALIGNOF_LONG_LONG_INT 4
/* The normal alignment of `short', in bytes. */
#define ALIGNOF_SHORT 2
/* Size of a disk block --- this also limits the size of a tuple. You can set
it bigger if you need bigger tuples (although TOAST should reduce the need
to have large tuples, since fields can be spread across multiple tuples).
BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ is
currently 2^15 (32768). This is determined by the 15-bit widths of the
lp_off and lp_len fields in ItemIdData (see include/storage/itemid.h).
Changing BLCKSZ requires an initdb. */
#define BLCKSZ 8192
/* Define to the default TCP port number on which the server listens and to
which clients will try to connect. This can be overridden at run-time, but
it's convenient if your clients have the right default compiled in.
(--with-pgport=PORTNUM) */
#define DEF_PGPORT 5432
/* Define to the default TCP port number as a string constant. */
#define DEF_PGPORT_STR "5432"
/* Define to build with GSSAPI support. (--with-gssapi) */
//#define ENABLE_GSS 0
/* Define to 1 if you want National Language Support. (--enable-nls) */
/* #undef ENABLE_NLS */
/* Define to 1 to build client libraries as thread-safe code.
(--enable-thread-safety) */
#define ENABLE_THREAD_SAFETY 1
/* Define to nothing if C supports flexible array members, and to 1 if it does
not. That way, with a declaration like `struct s { int n; double
d[FLEXIBLE_ARRAY_MEMBER]; };', the struct hack can be used with pre-C99
compilers. When computing the size of such an object, don't use 'sizeof
(struct s)' as it overestimates the size. Use 'offsetof (struct s, d)'
instead. Don't use 'offsetof (struct s, d[0])', as this doesn't work with
MSVC and with C++ compilers. */
#define FLEXIBLE_ARRAY_MEMBER /**/
/* float4 values are passed by value if 'true', by reference if 'false' */
#define FLOAT4PASSBYVAL true
/* float8, int8, and related values are passed by value if 'true', by
reference if 'false' */
#define FLOAT8PASSBYVAL false
/* Define to 1 if gettimeofday() takes only 1 argument. */
/* #undef GETTIMEOFDAY_1ARG */
#ifdef GETTIMEOFDAY_1ARG
# define gettimeofday(a,b) gettimeofday(a)
#endif
/* Define to 1 if you have the `append_history' function. */
/* #undef HAVE_APPEND_HISTORY */
/* Define to 1 if you want to use atomics if available. */
#define HAVE_ATOMICS 1
/* Define to 1 if you have the <atomic.h> header file. */
/* #undef HAVE_ATOMIC_H */
/* Define to 1 if you have the `cbrt' function. */
#define HAVE_CBRT 1
/* Define to 1 if you have the `class' function. */
/* #undef HAVE_CLASS */
/* Define to 1 if you have the <crtdefs.h> header file. */
/* #undef HAVE_CRTDEFS_H */
/* Define to 1 if you have the `crypt' function. */
#define HAVE_CRYPT 1
/* Define to 1 if you have the <crypt.h> header file. */
#define HAVE_CRYPT_H 1
/* Define to 1 if you have the declaration of `fdatasync', and to 0 if you
don't. */
#define HAVE_DECL_FDATASYNC 1
/* Define to 1 if you have the declaration of `F_FULLFSYNC', and to 0 if you
don't. */
#define HAVE_DECL_F_FULLFSYNC 0
/* Define to 1 if you have the declaration of `posix_fadvise', and to 0 if you
don't. */
#define HAVE_DECL_POSIX_FADVISE 1
/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
don't. */
#define HAVE_DECL_SNPRINTF 1
/* Define to 1 if you have the declaration of `strlcat', and to 0 if you
don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCAT 1
#endif
/* Define to 1 if you have the declaration of `strlcpy', and to 0 if you
don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCPY 1
#endif
/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you
don't. */
#define HAVE_DECL_SYS_SIGLIST 1
/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
don't. */
#define HAVE_DECL_VSNPRINTF 1
/* Define to 1 if you have the <dld.h> header file. */
/* #undef HAVE_DLD_H */
/* Define to 1 if you have the `dlopen' function. */
#define HAVE_DLOPEN 1
/* Define to 1 if you have the <editline/history.h> header file. */
/* #undef HAVE_EDITLINE_HISTORY_H */
/* Define to 1 if you have the <editline/readline.h> header file. */
#define HAVE_EDITLINE_READLINE_H 1
/* Define to 1 if you have the `fdatasync' function. */
#define HAVE_FDATASYNC 1
/* Define to 1 if you have the `fls' function. */
/* #undef HAVE_FLS */
/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */
/* Define to 1 if you have the `fp_class' function. */
/* #undef HAVE_FP_CLASS */
/* Define to 1 if you have the `fp_class_d' function. */
/* #undef HAVE_FP_CLASS_D */
/* Define to 1 if you have the <fp_class.h> header file. */
/* #undef HAVE_FP_CLASS_H */
/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
#define HAVE_FSEEKO 1
/* Define to 1 if your compiler understands __func__. */
#define HAVE_FUNCNAME__FUNC 1
/* Define to 1 if your compiler understands __FUNCTION__. */
/* #undef HAVE_FUNCNAME__FUNCTION */
/* Define to 1 if you have __atomic_compare_exchange_n(int *, int *, int). */
/* #undef HAVE_GCC__ATOMIC_INT32_CAS */
/* Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64).
*/
/* #undef HAVE_GCC__ATOMIC_INT64_CAS */
/* Define to 1 if you have __sync_lock_test_and_set(char *) and friends. */
#define HAVE_GCC__SYNC_CHAR_TAS 1
/* Define to 1 if you have __sync_compare_and_swap(int *, int, int). */
/* #undef HAVE_GCC__SYNC_INT32_CAS */
/* Define to 1 if you have __sync_lock_test_and_set(int *) and friends. */
#define HAVE_GCC__SYNC_INT32_TAS 1
/* Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64). */
/* #undef HAVE_GCC__SYNC_INT64_CAS */
/* Define to 1 if you have the `getaddrinfo' function. */
#define HAVE_GETADDRINFO 1
/* Define to 1 if you have the `gethostbyname_r' function. */
#define HAVE_GETHOSTBYNAME_R 1
/* Define to 1 if you have the `getifaddrs' function. */
#define HAVE_GETIFADDRS 1
/* Define to 1 if you have the `getopt' function. */
#define HAVE_GETOPT 1
/* Define to 1 if you have the <getopt.h> header file. */
#define HAVE_GETOPT_H 1
/* Define to 1 if you have the `getopt_long' function. */
#define HAVE_GETOPT_LONG 1
/* Define to 1 if you have the `getpeereid' function. */
/* #undef HAVE_GETPEEREID */
/* Define to 1 if you have the `getpeerucred' function. */
/* #undef HAVE_GETPEERUCRED */
/* Define to 1 if you have the `getpwuid_r' function. */
#define HAVE_GETPWUID_R 1
/* Define to 1 if you have the `getrlimit' function. */
#define HAVE_GETRLIMIT 1
/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1
/* Define to 1 if you have the `gettimeofday' function. */
/* #undef HAVE_GETTIMEOFDAY */
/* Define to 1 if you have the <gssapi/gssapi.h> header file. */
//#define HAVE_GSSAPI_GSSAPI_H 0
/* Define to 1 if you have the <gssapi.h> header file. */
/* #undef HAVE_GSSAPI_H */
/* Define to 1 if you have the <history.h> header file. */
/* #undef HAVE_HISTORY_H */
/* Define to 1 if you have the `history_truncate_file' function. */
#define HAVE_HISTORY_TRUNCATE_FILE 1
/* Define to 1 if you have the <ieeefp.h> header file. */
/* #undef HAVE_IEEEFP_H */
/* Define to 1 if you have the <ifaddrs.h> header file. */
#define HAVE_IFADDRS_H 1
/* Define to 1 if you have the `inet_aton' function. */
#define HAVE_INET_ATON 1
/* Define to 1 if the system has the type `int64'. */
/* #undef HAVE_INT64 */
/* Define to 1 if the system has the type `int8'. */
/* #undef HAVE_INT8 */
/* Define to 1 if the system has the type `intptr_t'. */
#define HAVE_INTPTR_T 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the global variable 'int opterr'. */
#define HAVE_INT_OPTERR 1
/* Define to 1 if you have the global variable 'int optreset'. */
/* #undef HAVE_INT_OPTRESET */
/* Define to 1 if you have the global variable 'int timezone'. */
#define HAVE_INT_TIMEZONE 1
/* Define to 1 if you have support for IPv6. */
#define HAVE_IPV6 1
/* Define to 1 if you have isinf(). */
#define HAVE_ISINF 1
/* Define to 1 if you have the <langinfo.h> header file. */
#define HAVE_LANGINFO_H 1
/* Define to 1 if you have the <ldap.h> header file. */
//#define HAVE_LDAP_H 0
/* Define to 1 if you have the `crypto' library (-lcrypto). */
#define HAVE_LIBCRYPTO 1
/* Define to 1 if you have the `ldap' library (-lldap). */
//#define HAVE_LIBLDAP 0
/* Define to 1 if you have the `m' library (-lm). */
#define HAVE_LIBM 1
/* Define to 1 if you have the `pam' library (-lpam). */
#define HAVE_LIBPAM 1
/* Define if you have a function readline library */
#define HAVE_LIBREADLINE 1
/* Define to 1 if you have the `selinux' library (-lselinux). */
/* #undef HAVE_LIBSELINUX */
/* Define to 1 if you have the `ssl' library (-lssl). */
#define HAVE_LIBSSL 0
/* Define to 1 if you have the `wldap32' library (-lwldap32). */
/* #undef HAVE_LIBWLDAP32 */
/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1
/* Define to 1 if you have the `xslt' library (-lxslt). */
#define HAVE_LIBXSLT 1
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if constants of type 'long long int' should have the suffix LL.
*/
#define HAVE_LL_CONSTANTS 1
/* Define to 1 if the system has the type `locale_t'. */
#define HAVE_LOCALE_T 1
/* Define to 1 if `long int' works and is 64 bits. */
/* #undef HAVE_LONG_INT_64 */
/* Define to 1 if the system has the type `long long int'. */
#define HAVE_LONG_LONG_INT 1
/* Define to 1 if `long long int' works and is 64 bits. */
#define HAVE_LONG_LONG_INT_64 1
/* Define to 1 if you have the <mbarrier.h> header file. */
/* #undef HAVE_MBARRIER_H */
/* Define to 1 if you have the `mbstowcs_l' function. */
/* #undef HAVE_MBSTOWCS_L */
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if the system has the type `MINIDUMP_TYPE'. */
/* #undef HAVE_MINIDUMP_TYPE */
/* Define to 1 if you have the `mkdtemp' function. */
#define HAVE_MKDTEMP 1
/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1
/* Define to 1 if you have the <netinet/tcp.h> header file. */
#define HAVE_NETINET_TCP_H 1
/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1
/* Define to 1 if you have the <ossp/uuid.h> header file. */
/* #undef HAVE_OSSP_UUID_H */
/* Define to 1 if you have the <pam/pam_appl.h> header file. */
/* #undef HAVE_PAM_PAM_APPL_H */
/* Define to 1 if you have the `poll' function. */
#define HAVE_POLL 1
/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1
/* Define to 1 if you have the `posix_fadvise' function. */
#define HAVE_POSIX_FADVISE 1
/* Define to 1 if you have the declaration of `preadv', and to 0 if you don't. */
/* #undef HAVE_DECL_PREADV */
/* Define to 1 if you have the declaration of `pwritev', and to 0 if you don't. */
/* #define HAVE_DECL_PWRITEV */
/* Define to 1 if you have the `X509_get_signature_info' function. */
/* #undef HAVE_X509_GET_SIGNATURE_INFO */
/* Define to 1 if you have the POSIX signal interface. */
#define HAVE_POSIX_SIGNALS 1
/* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */
/* #undef HAVE_PPC_LWARX_MUTEX_HINT */
/* Define to 1 if you have the `pstat' function. */
/* #undef HAVE_PSTAT */
/* Define to 1 if the PS_STRINGS thing exists. */
/* #undef HAVE_PS_STRINGS */
/* Define to 1 if you have the `pthread_is_threaded_np' function. */
/* #undef HAVE_PTHREAD_IS_THREADED_NP */
/* Define to 1 if you have the <pwd.h> header file. */
#define HAVE_PWD_H 1
/* Define to 1 if you have the `random' function. */
#define HAVE_RANDOM 1
/* Define to 1 if you have the <readline.h> header file. */
/* #undef HAVE_READLINE_H */
/* Define to 1 if you have the <readline/history.h> header file. */
#define HAVE_READLINE_HISTORY_H 1
/* Define to 1 if you have the <readline/readline.h> header file. */
/* #undef HAVE_READLINE_READLINE_H */
/* Define to 1 if you have the `readlink' function. */
#define HAVE_READLINK 1
/* Define to 1 if you have the `rint' function. */
#define HAVE_RINT 1
/* Define to 1 if you have the global variable
'rl_completion_append_character'. */
/* #undef HAVE_RL_COMPLETION_APPEND_CHARACTER */
/* Define to 1 if you have the `rl_completion_matches' function. */
#define HAVE_RL_COMPLETION_MATCHES 1
/* Define to 1 if you have the `rl_filename_completion_function' function. */
#define HAVE_RL_FILENAME_COMPLETION_FUNCTION 1
/* Define to 1 if you have the `rl_reset_screen_size' function. */
/* #undef HAVE_RL_RESET_SCREEN_SIZE */
/* Define to 1 if you have the <security/pam_appl.h> header file. */
#define HAVE_SECURITY_PAM_APPL_H 1
/* Define to 1 if you have the `setproctitle' function. */
/* #undef HAVE_SETPROCTITLE */
/* Define to 1 if you have the `setsid' function. */
#define HAVE_SETSID 1
/* Define to 1 if you have the `shm_open' function. */
#define HAVE_SHM_OPEN 1
/* Define to 1 if you have the `sigprocmask' function. */
#define HAVE_SIGPROCMASK 1
/* Define to 1 if you have sigsetjmp(). */
#define HAVE_SIGSETJMP 1
/* Define to 1 if the system has the type `sig_atomic_t'. */
#define HAVE_SIG_ATOMIC_T 1
/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1
/* Define to 1 if you have spinlocks. */
#define HAVE_SPINLOCKS 1
/* Define to 1 if you have the `srandom' function. */
#define HAVE_SRANDOM 1
/* Define to 1 if you have the `SSL_CTX_set_num_tickets' function. */
/* #define HAVE_SSL_CTX_SET_NUM_TICKETS */
/* Define to 1 if you have the `SSL_get_current_compression' function. */
#define HAVE_SSL_GET_CURRENT_COMPRESSION 0
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the <strings.h> header file. */
//#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
/* #undef HAVE_STRLCAT */
/* Define to 1 if you have the `strlcpy' function. */
/* #undef HAVE_STRLCPY */
/* Define to 1 if you have the `strtoll' function. */
#define HAVE_STRTOLL 1
#if (!OS_DARWIN)
#define HAVE_STRCHRNUL 1
#endif
/* Define to 1 if you have the `strtoq' function. */
/* #undef HAVE_STRTOQ */
/* Define to 1 if you have the `strtoull' function. */
#define HAVE_STRTOULL 1
/* Define to 1 if you have the `strtouq' function. */
/* #undef HAVE_STRTOUQ */
/* Define to 1 if the system has the type `struct addrinfo'. */
#define HAVE_STRUCT_ADDRINFO 1
/* Define to 1 if the system has the type `struct cmsgcred'. */
/* #undef HAVE_STRUCT_CMSGCRED */
/* Define to 1 if the system has the type `struct option'. */
#define HAVE_STRUCT_OPTION 1
/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */
/* #undef HAVE_STRUCT_SOCKADDR_SA_LEN */
/* Define to 1 if the system has the type `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE 1
/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1
/* Define to 1 if `ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */
/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY */
/* Define to 1 if `__ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN */
/* Define to 1 if `tm_zone' is a member of `struct tm'. */
#define HAVE_STRUCT_TM_TM_ZONE 1
/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1
/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */
/* Define to 1 if you have the syslog interface. */
#define HAVE_SYSLOG 1
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/ipc.h> header file. */
#define HAVE_SYS_IPC_H 1
/* Define to 1 if you have the <sys/personality.h> header file. */
/* #undef HAVE_SYS_PERSONALITY_H */
/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1
/* Define to 1 if you have the <sys/pstat.h> header file. */
/* #undef HAVE_SYS_PSTAT_H */
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/sem.h> header file. */
#define HAVE_SYS_SEM_H 1
/* Define to 1 if you have the <sys/shm.h> header file. */
#define HAVE_SYS_SHM_H 1
/* Define to 1 if you have the <sys/signalfd.h> header file. */
/* #undef HAVE_SYS_SIGNALFD_H */
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/sockio.h> header file. */
/* #undef HAVE_SYS_SOCKIO_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/tas.h> header file. */
/* #undef HAVE_SYS_TAS_H */
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/ucred.h> header file. */
#if (OS_DARWIN || OS_FREEBSD)
#define HAVE_SYS_UCRED_H 1
#endif
/* Define to 1 if you have the <sys/un.h> header file. */
#define HAVE_SYS_UN_H 1
#define _GNU_SOURCE 1 /* Needed for glibc struct ucred */
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use
`HAVE_STRUCT_TM_TM_ZONE' instead. */
#define HAVE_TM_ZONE 1
/* Define to 1 if you have the `towlower' function. */
#define HAVE_TOWLOWER 1
/* Define to 1 if you have the external array `tzname'. */
#define HAVE_TZNAME 1
/* Define to 1 if you have the <ucred.h> header file. */
/* #undef HAVE_UCRED_H */
/* Define to 1 if the system has the type `uint64'. */
/* #undef HAVE_UINT64 */
/* Define to 1 if the system has the type `uint8'. */
/* #undef HAVE_UINT8 */
/* Define to 1 if the system has the type `uintptr_t'. */
#define HAVE_UINTPTR_T 1
/* Define to 1 if the system has the type `union semun'. */
/* #undef HAVE_UNION_SEMUN */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have unix sockets. */
#define HAVE_UNIX_SOCKETS 1
/* Define to 1 if you have the `unsetenv' function. */
#define HAVE_UNSETENV 1
/* Define to 1 if the system has the type `unsigned long long int'. */
#define HAVE_UNSIGNED_LONG_LONG_INT 1
/* Define to 1 if you have the `utime' function. */
#define HAVE_UTIME 1
/* Define to 1 if you have the `utimes' function. */
#define HAVE_UTIMES 1
/* Define to 1 if you have the <utime.h> header file. */
#define HAVE_UTIME_H 1
/* Define to 1 if you have BSD UUID support. */
/* #undef HAVE_UUID_BSD */
/* Define to 1 if you have E2FS UUID support. */
/* #undef HAVE_UUID_E2FS */
/* Define to 1 if you have the <uuid.h> header file. */
#define HAVE_UUID_H 1
/* Define to 1 if you have OSSP UUID support. */
#define HAVE_UUID_OSSP 1
/* Define to 1 if you have the <uuid/uuid.h> header file. */
/* #undef HAVE_UUID_UUID_H */
/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
/* Define to 1 if you have the <wchar.h> header file. */
#define HAVE_WCHAR_H 1
/* Define to 1 if you have the `wcstombs' function. */
#define HAVE_WCSTOMBS 1
/* Define to 1 if you have the `wcstombs_l' function. */
/* #undef HAVE_WCSTOMBS_L */
/* Define to 1 if you have the <wctype.h> header file. */
#define HAVE_WCTYPE_H 1
/* Define to 1 if you have the <winldap.h> header file. */
/* #undef HAVE_WINLDAP_H */
/* Define to 1 if your compiler understands __builtin_bswap32. */
/* #undef HAVE__BUILTIN_BSWAP32 */
/* Define to 1 if your compiler understands __builtin_constant_p. */
#define HAVE__BUILTIN_CONSTANT_P 1
/* Define to 1 if your compiler understands __builtin_frame_address. */
/* #undef HAVE__BUILTIN_FRAME_ADDRESS */
/* Define to 1 if your compiler understands __builtin_types_compatible_p. */
#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1
/* Define to 1 if your compiler understands __builtin_unreachable. */
/* #undef HAVE__BUILTIN_UNREACHABLE */
/* Define to 1 if you have __cpuid. */
/* #undef HAVE__CPUID */
/* Define to 1 if you have __get_cpuid. */
/* #undef HAVE__GET_CPUID */
/* Define to 1 if your compiler understands _Static_assert. */
/* #undef HAVE__STATIC_ASSERT */
/* Define to 1 if your compiler understands __VA_ARGS__ in macros. */
#define HAVE__VA_ARGS 1
/* Define to the appropriate snprintf length modifier for 64-bit ints. */
#define INT64_MODIFIER "ll"
/* Define to 1 if `locale_t' requires <xlocale.h>. */
/* #undef LOCALE_T_IN_XLOCALE */
/* Define as the maximum alignment requirement of any C data type. */
#define MAXIMUM_ALIGNOF 4
/* Define bytes to use libc memset(). */
#define MEMSET_LOOP_LIMIT 1024
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "pgsql-bugs@postgresql.org"
/* Define to the full name of this package. */
#define PACKAGE_NAME "PostgreSQL"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "PostgreSQL 9.5.4"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "postgresql"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "9.5.4"
/* Define to the name of a signed 128-bit integer type. */
/* #undef PG_INT128_TYPE */
/* Define to the name of a signed 64-bit integer type. */
#define PG_INT64_TYPE long long int
/* Define to the name of the default PostgreSQL service principal in Kerberos
(GSSAPI). (--with-krb-srvnam=NAME) */
#define PG_KRB_SRVNAM "postgres"
/* PostgreSQL major version as a string */
#define PG_MAJORVERSION "9.5"
/* Define to gnu_printf if compiler supports it, else printf. */
#define PG_PRINTF_ATTRIBUTE printf
/* Define to 1 if "static inline" works without unwanted warnings from
compilations where static inline functions are defined but not called. */
#define PG_USE_INLINE 1
/* PostgreSQL version as a string */
#define PG_VERSION "9.5.4"
/* PostgreSQL version as a number */
#define PG_VERSION_NUM 90504
/* A string containing the version number, platform, and C compiler */
#define PG_VERSION_STR "PostgreSQL 9.5.4 on i686-pc-linux-gnu, compiled by gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-55), 32-bit"
/* Define to 1 to allow profiling output to be saved separately for each
process. */
/* #undef PROFILE_PID_DIR */
/* RELSEG_SIZE is the maximum number of blocks allowed in one disk file. Thus,
the maximum size of a single file is RELSEG_SIZE * BLCKSZ; relations bigger
than that are divided into multiple files. RELSEG_SIZE * BLCKSZ must be
less than your OS' limit on file size. This is often 2 GB or 4GB in a
32-bit operating system, unless you have large file support enabled. By
default, we make the limit 1 GB to avoid any possible integer-overflow
problems within the OS. A limit smaller than necessary only means we divide
a large relation into more chunks than necessary, so it seems best to err
in the direction of a small limit. A power-of-2 value is recommended to
save a few cycles in md.c, but is not absolutely required. Changing
RELSEG_SIZE requires an initdb. */
#define RELSEG_SIZE 131072
/* The size of `long', as computed by sizeof. */
#define SIZEOF_LONG 4
/* The size of `off_t', as computed by sizeof. */
#define SIZEOF_OFF_T 8
/* The size of `size_t', as computed by sizeof. */
#define SIZEOF_SIZE_T 4
/* The size of `void *', as computed by sizeof. */
#define SIZEOF_VOID_P 4
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if strerror_r() returns a int. */
/* #undef STRERROR_R_INT */
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
/* Define to 1 to build with assertion checks. (--enable-cassert) */
/* #undef USE_ASSERT_CHECKING */
/* Define to 1 to build with Bonjour support. (--with-bonjour) */
/* #undef USE_BONJOUR */
/* Define to 1 if you want float4 values to be passed by value.
(--enable-float4-byval) */
#define USE_FLOAT4_BYVAL 1
/* Define to 1 if you want float8, int8, etc values to be passed by value.
(--enable-float8-byval) */
/* #undef USE_FLOAT8_BYVAL */
/* Define to 1 if you want 64-bit integer timestamp and interval support.
(--enable-integer-datetimes) */
#define USE_INTEGER_DATETIMES 1
/* Define to 1 to build with LDAP support. (--with-ldap) */
//#define USE_LDAP 0
/* Define to 1 to build with XML support. (--with-libxml) */
#define USE_LIBXML 1
/* Define to 1 to use XSLT support when building contrib/xml2.
(--with-libxslt) */
#define USE_LIBXSLT 1
/* Define to select named POSIX semaphores. */
/* #undef USE_NAMED_POSIX_SEMAPHORES */
/* Define to build with OpenSSL support. (--with-openssl) */
#define USE_OPENSSL 0
#define USE_OPENSSL_RANDOM 0
#define FRONTEND 1
/* Define to 1 to build with PAM support. (--with-pam) */
#define USE_PAM 1
/* Use replacement snprintf() functions. */
/* #undef USE_REPL_SNPRINTF */
/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */
#define USE_SLICING_BY_8_CRC32C 1
/* Define to 1 use Intel SSE 4.2 CRC instructions. */
/* #undef USE_SSE42_CRC32C */
/* Define to 1 to use Intel SSSE 4.2 CRC instructions with a runtime check. */
/* #undef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK */
/* Define to select SysV-style semaphores. */
#define USE_SYSV_SEMAPHORES 1
/* Define to select SysV-style shared memory. */
#define USE_SYSV_SHARED_MEMORY 1
/* Define to select unnamed POSIX semaphores. */
/* #undef USE_UNNAMED_POSIX_SEMAPHORES */
/* Define to select Win32-style semaphores. */
/* #undef USE_WIN32_SEMAPHORES */
/* Define to select Win32-style shared memory. */
/* #undef USE_WIN32_SHARED_MEMORY */
/* Define to 1 if `wcstombs_l' requires <xlocale.h>. */
/* #undef WCSTOMBS_L_IN_XLOCALE */
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Size of a WAL file block. This need have no particular relation to BLCKSZ.
XLOG_BLCKSZ must be a power of 2, and if your system supports O_DIRECT I/O,
XLOG_BLCKSZ must be a multiple of the alignment requirement for direct-I/O
buffers, else direct I/O may fail. Changing XLOG_BLCKSZ requires an initdb.
*/
#define XLOG_BLCKSZ 8192
/* XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
and larger than XLOG_BLCKSZ (preferably, a great deal larger than
XLOG_BLCKSZ). Changing XLOG_SEG_SIZE requires an initdb. */
#define XLOG_SEG_SIZE (16 * 1024 * 1024)
/* Number of bits in a file offset, on hosts where this is settable. */
#define _FILE_OFFSET_BITS 64
/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */
/* #undef _LARGEFILE_SOURCE */
/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to the type of a signed integer type wide enough to hold a pointer,
if such a type exists, and if the system does not define it. */
/* #undef intptr_t */
/* Define to empty if the C compiler does not understand signed types. */
/* #undef signed */
/* Define to the type of an unsigned integer type wide enough to hold a
pointer, if such a type exists, and if the system does not define it. */
/* #undef uintptr_t */

View File

@ -0,0 +1,7 @@
/*
 * src/include/pg_config_ext.h.in. This is generated manually, not by
 * autoheader, since we want to limit which symbols get defined here.
 */
/* Define to the name of a signed 64-bit integer type. */
#define PG_INT64_TYPE long long int

View File

@ -0,0 +1,34 @@
#if defined(OS_DARWIN)
/* src/include/port/darwin.h */
#define __darwin__ 1
#if HAVE_DECL_F_FULLFSYNC /* not present before macOS 10.3 */
#define HAVE_FSYNC_WRITETHROUGH
#endif
#else
/* src/include/port/linux.h */
/*
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
* assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
* private ones. But someday that code might get upgraded, and we'd have
* to have a kernel version test here.
*/
#define HAVE_LINUX_EIDRM_BUG
/*
* Set the default wal_sync_method to fdatasync. With recent Linux versions,
* xlogdefs.h's normal rules will prefer open_datasync, which (a) doesn't
* perform better and (b) causes outright failures on ext4 data=journal
* filesystems, because those don't support O_DIRECT.
*/
#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
#endif

View File

@ -0,0 +1,12 @@
#define PGBINDIR "/bin"
#define PGSHAREDIR "/share"
#define SYSCONFDIR "/etc"
#define INCLUDEDIR "/include"
#define PKGINCLUDEDIR "/include"
#define INCLUDEDIRSERVER "/include/server"
#define LIBDIR "/lib"
#define PKGLIBDIR "/lib"
#define LOCALEDIR "/share/locale"
#define DOCDIR "/doc"
#define HTMLDIR "/doc"
#define MANDIR "/man"

View File

@ -13,16 +13,17 @@ Here is a complete list of available database engines. Follow the links for more
- [Atomic](../../engines/database-engines/atomic.md)
- [MySQL](../../engines/database-engines/mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)
- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)
- [MySQL](../../engines/database-engines/mysql.md)
- [PostgreSQL](../../engines/database-engines/postgresql.md)
- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)
- [Replicated](../../engines/database-engines/replicated.md)
- [SQLite](../../engines/database-engines/sqlite.md)

View File

@ -155,6 +155,12 @@ Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.htm
Sets a comma-separated list of PostgreSQL database tables, which will be replicated via the [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) database engine.
Each table can have a subset of replicated columns in brackets. If the subset of columns is omitted, all columns of the table will be replicated.
``` sql
materialized_postgresql_tables_list = 'table1(co1, col2),table2,table3(co3, col5, col7)'
```
Default value: empty list, meaning the whole PostgreSQL database will be replicated.
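For context, a hedged sketch of how this setting is typically supplied when creating a `MaterializedPostgreSQL` database (the host, database name, credentials, and table names below are placeholders, not values from this commit):
``` sql
CREATE DATABASE postgres_db
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'postgres_database', 'postgres_user', 'postgres_password')
SETTINGS materialized_postgresql_tables_list = 'table1,table2(col1, col2)';
```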
### `materialized_postgresql_schema` {#materialized-postgresql-schema}

View File

@ -112,7 +112,7 @@ Example:
```
The NATS server configuration can be added using the ClickHouse config file.
More specifically you can add Redis password for NATS engine:
More specifically you can add Redis password for NATS engine:
``` xml
<nats>
@ -167,7 +167,7 @@ If you want to change the target table by using `ALTER`, we recommend disabling
- `_subject` - NATS message subject. Data type: `String`.
Additional virtual columns when `kafka_handle_error_mode='stream'`:
Additional virtual columns when `nats_handle_error_mode='stream'`:
- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message happened during failed parsing. Data type: `Nullable(String)`.

View File

@ -97,7 +97,7 @@ If you want to change the target table by using `ALTER`, we recommend disabling
- `_filename` - Name of the log file. Data type: `LowCardinality(String)`.
- `_offset` - Offset in the log file. Data type: `UInt64`.
Additional virtual columns when `kafka_handle_error_mode='stream'`:
Additional virtual columns when `handle_error_mode='stream'`:
- `_raw_record` - Raw record that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message happened during failed parsing. Data type: `Nullable(String)`.

View File

@ -826,17 +826,17 @@ Result:
## JSONAsObject {#jsonasobject}
In this format, a single JSON object is interpreted as a single [Object('json')](/docs/en/sql-reference/data-types/json.md) value. If the input has several JSON objects (comma separated), they are interpreted as separate rows. If the input data is enclosed in square brackets, it is interpreted as an array of JSONs.
In this format, a single JSON object is interpreted as a single [JSON](/docs/en/sql-reference/data-types/newjson.md) value. If the input has several JSON objects (comma separated), they are interpreted as separate rows. If the input data is enclosed in square brackets, it is interpreted as an array of JSONs.
This format can only be parsed for a table with a single field of type [Object('json')](/docs/en/sql-reference/data-types/json.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized).
This format can only be parsed for a table with a single field of type [JSON](/docs/en/sql-reference/data-types/newjson.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized).
**Examples**
Query:
``` sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_as_object (json Object('json')) ENGINE = Memory;
SET allow_experimental_json_type = 1;
CREATE TABLE json_as_object (json JSON) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json stucture":1}
SELECT * FROM json_as_object FORMAT JSONEachRow;
```
@ -844,9 +844,9 @@ SELECT * FROM json_as_object FORMAT JSONEachRow;
Result:
``` response
{"json":{"any json stucture":0,"foo":{"bar":{"x":"y"},"baz":1}}}
{"json":{"any json stucture":0,"foo":{"bar":{"x":""},"baz":0}}}
{"json":{"any json stucture":1,"foo":{"bar":{"x":""},"baz":0}}}
{"json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
{"json":{}}
{"json":{"any json stucture":"1"}}
```
**An array of JSON objects**
@ -854,35 +854,34 @@ Result:
Query:
``` sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_square_brackets (field Object('json')) ENGINE = Memory;
SET allow_experimental_json_type = 1;
CREATE TABLE json_square_brackets (field JSON) ENGINE = Memory;
INSERT INTO json_square_brackets FORMAT JSONAsObject [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}];
SELECT * FROM json_square_brackets FORMAT JSONEachRow;
```
Result:
```response
{"field":{"id":1,"name":"name1"}}
{"field":{"id":2,"name":"name2"}}
{"field":{"id":"1","name":"name1"}}
{"field":{"id":"2","name":"name2"}}
```
**Columns with default values**
```sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_as_object (json Object('json'), time DateTime MATERIALIZED now()) ENGINE = Memory;
SET allow_experimental_json_type = 1;
CREATE TABLE json_as_object (json JSON, time DateTime MATERIALIZED now()) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json stucture":1}
SELECT * FROM json_as_object FORMAT JSONEachRow
SELECT time, json FROM json_as_object FORMAT JSONEachRow
```
```response
{"json":{"any json stucture":0,"foo":{"bar":{"x":"y"},"baz":1}},"time":"2024-07-25 17:02:45"}
{"json":{"any json stucture":0,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:47"}
{"json":{"any json stucture":1,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:50"}
{"time":"2024-09-16 12:18:10","json":{}}
{"time":"2024-09-16 12:18:13","json":{"any json stucture":"1"}}
{"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
```
## JSONCompact {#jsoncompact}

View File

@ -3150,3 +3150,15 @@ Default value: "default"
**See Also**
- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
## max_authentication_methods_per_user {#max_authentication_methods_per_user}
The maximum number of authentication methods a user can be created with or altered to.
Changing this setting does not affect existing users. Authentication-related CREATE/ALTER queries will fail if they exceed the limit specified by this setting; CREATE/ALTER queries that do not involve authentication will still succeed.
Type: UInt64
Default value: 100
Zero means unlimited.
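As an illustration of the behaviour described above (a hedged example; the limit value of 2 and the user name are assumptions, not defaults), a server running with `max_authentication_methods_per_user = 2` would reject the following query because it specifies three authentication methods:
``` sql
CREATE USER user1 IDENTIFIED WITH plaintext_password BY '1', plaintext_password BY '2', plaintext_password BY '3';
```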

View File

@ -12,9 +12,10 @@ Syntax:
``` sql
ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
[, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
[NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'}]
[NOT IDENTIFIED | IDENTIFIED | ADD IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'}]
[[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
[VALID UNTIL datetime]
[RESET AUTHENTICATION METHODS TO NEW]
[DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
[GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...]
@ -62,3 +63,31 @@ Allows the user with `john` account to grant his privileges to the user with `ja
``` sql
ALTER USER john GRANTEES jack;
```
Adds new authentication methods to the user while keeping the existing ones:
``` sql
ALTER USER user1 ADD IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
```
Notes:
1. Older versions of ClickHouse might not support the syntax of multiple authentication methods. Therefore, if a ClickHouse server contains such users and is downgraded to a version that does not support it, those users become unusable and some user-related operations will break. To downgrade gracefully, set all users to a single authentication method before downgrading. Alternatively, if the server was downgraded without the proper procedure, the faulty users should be dropped.
2. `no_password` cannot co-exist with other authentication methods for security reasons.
Because of that, it is not possible to `ADD` a `no_password` authentication method. The query below will throw an error:
``` sql
ALTER USER user1 ADD IDENTIFIED WITH no_password
```
If you want to drop all authentication methods for a user and rely on `no_password`, you must use the replacing form shown below.
Resets authentication methods and adds the ones specified in the query (the effect of a leading `IDENTIFIED` without the `ADD` keyword):
``` sql
ALTER USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
```
Resets authentication methods and keeps the most recently added one:
``` sql
ALTER USER user1 RESET AUTHENTICATION METHODS TO NEW
```
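To verify which authentication methods remain after queries like the ones above, the user definition can be inspected; a minimal example for the same hypothetical `user1`:
``` sql
SHOW CREATE USER user1;
```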

View File

@ -15,6 +15,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'} | {WITH http SERVER 'server_name' [SCHEME 'Basic']}]
[HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
[VALID UNTIL datetime]
[RESET AUTHENTICATION METHODS TO NEW]
[IN access_storage_type]
[DEFAULT ROLE role [,...]]
[DEFAULT DATABASE database | NONE]
@ -144,6 +145,17 @@ In ClickHouse Cloud, by default, passwords must meet the following complexity re
The available password types are: `plaintext_password`, `sha256_password`, `double_sha1_password`.
7. Multiple authentication methods can be specified:
```sql
CREATE USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
```
Notes:
1. Older versions of ClickHouse might not support the syntax of multiple authentication methods. Therefore, if a ClickHouse server contains such users and is downgraded to a version that does not support it, those users become unusable and some user-related operations will break. To downgrade gracefully, set all users to a single authentication method before downgrading. Alternatively, if the server was downgraded without the proper procedure, the faulty users should be dropped.
2. `no_password` cannot co-exist with other authentication methods for security reasons. Therefore, `no_password` can only be specified if it is the only authentication method in the query.
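For completeness, a minimal sketch of the only form in which `no_password` is accepted, i.e. as the sole authentication method (the user name is a placeholder):
```sql
CREATE USER user2 IDENTIFIED WITH no_password;
```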
## User Host
User host is a host from which a connection to ClickHouse server could be established. The host can be specified in the `HOST` query section in the following ways:

View File

@ -82,7 +82,7 @@ AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
if (res)
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = user = std::make_unique<User>();
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query, /* allow_no_password = */ true, /* allow_plaintext_password = */ true);
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query, /* allow_no_password = */ true, /* allow_plaintext_password = */ true, /* max_number_of_authentication_methods = zero is unlimited*/ 0);
}
else if (auto * create_role_query = query->as<ASTCreateRoleQuery>())
{

View File

@ -14,11 +14,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int SUPPORT_IS_DISABLED;
}
namespace
{
@ -84,12 +79,140 @@ namespace
return false;
}
#endif
}
bool checkKerberosAuthentication(
const GSSAcceptorContext * gss_acceptor_context,
const AuthenticationData & authentication_method,
const ExternalAuthenticators & external_authenticators)
{
return authentication_method.getType() == AuthenticationType::KERBEROS
&& external_authenticators.checkKerberosCredentials(authentication_method.getKerberosRealm(), *gss_acceptor_context);
}
bool checkMySQLAuthentication(
const MySQLNative41Credentials * mysql_credentials,
const AuthenticationData & authentication_method)
{
switch (authentication_method.getType())
{
case AuthenticationType::PLAINTEXT_PASSWORD:
return checkPasswordPlainTextMySQL(
mysql_credentials->getScramble(),
mysql_credentials->getScrambledPassword(),
authentication_method.getPasswordHashBinary());
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
return checkPasswordDoubleSHA1MySQL(
mysql_credentials->getScramble(),
mysql_credentials->getScrambledPassword(),
authentication_method.getPasswordHashBinary());
default:
return false;
}
}
bool checkBasicAuthentication(
const BasicCredentials * basic_credentials,
const AuthenticationData & authentication_method,
const ExternalAuthenticators & external_authenticators,
SettingsChanges & settings)
{
switch (authentication_method.getType())
{
case AuthenticationType::NO_PASSWORD:
{
return true; // N.B. even if the password is not empty!
}
case AuthenticationType::PLAINTEXT_PASSWORD:
{
return checkPasswordPlainText(basic_credentials->getPassword(), authentication_method.getPasswordHashBinary());
}
case AuthenticationType::SHA256_PASSWORD:
{
return checkPasswordSHA256(
basic_credentials->getPassword(), authentication_method.getPasswordHashBinary(), authentication_method.getSalt());
}
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
{
return checkPasswordDoubleSHA1(basic_credentials->getPassword(), authentication_method.getPasswordHashBinary());
}
case AuthenticationType::LDAP:
{
return external_authenticators.checkLDAPCredentials(authentication_method.getLDAPServerName(), *basic_credentials);
}
case AuthenticationType::BCRYPT_PASSWORD:
{
return checkPasswordBcrypt(basic_credentials->getPassword(), authentication_method.getPasswordHashBinary());
}
case AuthenticationType::HTTP:
{
if (authentication_method.getHTTPAuthenticationScheme() == HTTPAuthenticationScheme::BASIC)
{
return external_authenticators.checkHTTPBasicCredentials(
authentication_method.getHTTPAuthenticationServerName(), *basic_credentials, settings);
}
break;
}
default:
break;
}
return false;
}
bool checkSSLCertificateAuthentication(
const SSLCertificateCredentials * ssl_certificate_credentials,
const AuthenticationData & authentication_method)
{
if (AuthenticationType::SSL_CERTIFICATE != authentication_method.getType())
{
return false;
}
for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
{
for (const auto & subject : authentication_method.getSSLCertificateSubjects().at(type))
{
if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
return true;
// Wildcard support (1 only)
if (subject.contains('*'))
{
auto prefix = std::string_view(subject).substr(0, subject.find('*'));
auto suffix = std::string_view(subject).substr(subject.find('*') + 1);
auto slashes = std::count(subject.begin(), subject.end(), '/');
for (const auto & certificate_subject : ssl_certificate_credentials->getSSLCertificateSubjects().at(type))
{
bool matches_wildcard = certificate_subject.starts_with(prefix) && certificate_subject.ends_with(suffix);
// '*' must not represent a '/' in URI, so check if the number of '/' are equal
bool matches_slashes = slashes == count(certificate_subject.begin(), certificate_subject.end(), '/');
if (matches_wildcard && matches_slashes)
return true;
}
}
}
}
return false;
}
#if USE_SSH
bool checkSshAuthentication(
const SshCredentials * ssh_credentials,
const AuthenticationData & authentication_method)
{
return AuthenticationType::SSH_KEY == authentication_method.getType()
&& checkSshSignature(authentication_method.getSSHKeys(), ssh_credentials->getSignature(), ssh_credentials->getOriginal());
}
#endif
}
bool Authentication::areCredentialsValid(
const Credentials & credentials,
const AuthenticationData & auth_data,
const AuthenticationData & authentication_method,
const ExternalAuthenticators & external_authenticators,
SettingsChanges & settings)
{
@ -98,225 +221,35 @@ bool Authentication::areCredentialsValid(
if (const auto * gss_acceptor_context = typeid_cast<const GSSAcceptorContext *>(&credentials))
{
switch (auth_data.getType())
{
case AuthenticationType::NO_PASSWORD:
case AuthenticationType::PLAINTEXT_PASSWORD:
case AuthenticationType::SHA256_PASSWORD:
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::LDAP:
case AuthenticationType::HTTP:
throw Authentication::Require<BasicCredentials>("ClickHouse Basic Authentication");
case AuthenticationType::JWT:
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
case AuthenticationType::KERBEROS:
return external_authenticators.checkKerberosCredentials(auth_data.getKerberosRealm(), *gss_acceptor_context);
case AuthenticationType::SSL_CERTIFICATE:
throw Authentication::Require<BasicCredentials>("ClickHouse X.509 Authentication");
case AuthenticationType::SSH_KEY:
#if USE_SSH
throw Authentication::Require<SshCredentials>("SSH Keys Authentication");
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
case AuthenticationType::MAX:
break;
}
return checkKerberosAuthentication(gss_acceptor_context, authentication_method, external_authenticators);
}
if (const auto * mysql_credentials = typeid_cast<const MySQLNative41Credentials *>(&credentials))
{
switch (auth_data.getType())
{
case AuthenticationType::NO_PASSWORD:
return true; // N.B. even if the password is not empty!
case AuthenticationType::PLAINTEXT_PASSWORD:
return checkPasswordPlainTextMySQL(mysql_credentials->getScramble(), mysql_credentials->getScrambledPassword(), auth_data.getPasswordHashBinary());
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
return checkPasswordDoubleSHA1MySQL(mysql_credentials->getScramble(), mysql_credentials->getScrambledPassword(), auth_data.getPasswordHashBinary());
case AuthenticationType::SHA256_PASSWORD:
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::LDAP:
case AuthenticationType::KERBEROS:
case AuthenticationType::HTTP:
throw Authentication::Require<BasicCredentials>("ClickHouse Basic Authentication");
case AuthenticationType::SSL_CERTIFICATE:
throw Authentication::Require<BasicCredentials>("ClickHouse X.509 Authentication");
case AuthenticationType::JWT:
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
case AuthenticationType::SSH_KEY:
#if USE_SSH
throw Authentication::Require<SshCredentials>("SSH Keys Authentication");
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
case AuthenticationType::MAX:
break;
}
return checkMySQLAuthentication(mysql_credentials, authentication_method);
}
if (const auto * basic_credentials = typeid_cast<const BasicCredentials *>(&credentials))
{
switch (auth_data.getType())
{
case AuthenticationType::NO_PASSWORD:
return true; // N.B. even if the password is not empty!
case AuthenticationType::PLAINTEXT_PASSWORD:
return checkPasswordPlainText(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
case AuthenticationType::SHA256_PASSWORD:
return checkPasswordSHA256(basic_credentials->getPassword(), auth_data.getPasswordHashBinary(), auth_data.getSalt());
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
return checkPasswordDoubleSHA1(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
case AuthenticationType::LDAP:
return external_authenticators.checkLDAPCredentials(auth_data.getLDAPServerName(), *basic_credentials);
case AuthenticationType::KERBEROS:
throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());
case AuthenticationType::SSL_CERTIFICATE:
throw Authentication::Require<BasicCredentials>("ClickHouse X.509 Authentication");
case AuthenticationType::SSH_KEY:
#if USE_SSH
throw Authentication::Require<SshCredentials>("SSH Keys Authentication");
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
case AuthenticationType::JWT:
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
case AuthenticationType::BCRYPT_PASSWORD:
return checkPasswordBcrypt(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
case AuthenticationType::HTTP:
switch (auth_data.getHTTPAuthenticationScheme())
{
case HTTPAuthenticationScheme::BASIC:
return external_authenticators.checkHTTPBasicCredentials(
auth_data.getHTTPAuthenticationServerName(), *basic_credentials, settings);
}
case AuthenticationType::MAX:
break;
}
return checkBasicAuthentication(basic_credentials, authentication_method, external_authenticators, settings);
}
if (const auto * ssl_certificate_credentials = typeid_cast<const SSLCertificateCredentials *>(&credentials))
{
switch (auth_data.getType())
{
case AuthenticationType::NO_PASSWORD:
case AuthenticationType::PLAINTEXT_PASSWORD:
case AuthenticationType::SHA256_PASSWORD:
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::LDAP:
case AuthenticationType::HTTP:
throw Authentication::Require<BasicCredentials>("ClickHouse Basic Authentication");
case AuthenticationType::JWT:
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
case AuthenticationType::KERBEROS:
throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());
case AuthenticationType::SSL_CERTIFICATE:
{
for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
{
for (const auto & subject : auth_data.getSSLCertificateSubjects().at(type))
{
if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
return true;
// Wildcard support (only a single '*' is supported)
if (subject.contains('*'))
{
auto prefix = std::string_view(subject).substr(0, subject.find('*'));
auto suffix = std::string_view(subject).substr(subject.find('*') + 1);
auto slashes = std::count(subject.begin(), subject.end(), '/');
for (const auto & certificate_subject : ssl_certificate_credentials->getSSLCertificateSubjects().at(type))
{
bool matches_wildcard = certificate_subject.starts_with(prefix) && certificate_subject.ends_with(suffix);
// '*' must not match a '/' in the URI, so check that the numbers of '/' characters are equal
bool matches_slashes = slashes == count(certificate_subject.begin(), certificate_subject.end(), '/');
if (matches_wildcard && matches_slashes)
return true;
}
}
}
}
return false;
}
case AuthenticationType::SSH_KEY:
#if USE_SSH
throw Authentication::Require<SshCredentials>("SSH Keys Authentication");
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
case AuthenticationType::MAX:
break;
}
return checkSSLCertificateAuthentication(ssl_certificate_credentials, authentication_method);
}
#if USE_SSH
if (const auto * ssh_credentials = typeid_cast<const SshCredentials *>(&credentials))
{
switch (auth_data.getType())
{
case AuthenticationType::NO_PASSWORD:
case AuthenticationType::PLAINTEXT_PASSWORD:
case AuthenticationType::SHA256_PASSWORD:
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::LDAP:
case AuthenticationType::HTTP:
throw Authentication::Require<BasicCredentials>("ClickHouse Basic Authentication");
case AuthenticationType::JWT:
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
case AuthenticationType::KERBEROS:
throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());
case AuthenticationType::SSL_CERTIFICATE:
throw Authentication::Require<SSLCertificateCredentials>("ClickHouse X.509 Authentication");
case AuthenticationType::SSH_KEY:
return checkSshSignature(auth_data.getSSHKeys(), ssh_credentials->getSignature(), ssh_credentials->getOriginal());
case AuthenticationType::MAX:
break;
}
return checkSshAuthentication(ssh_credentials, authentication_method);
}
#endif
if ([[maybe_unused]] const auto * always_allow_credentials = typeid_cast<const AlwaysAllowCredentials *>(&credentials))
return true;
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "areCredentialsValid(): authentication type {} not supported", toString(auth_data.getType()));
return false;
}
}

View File

@ -24,7 +24,7 @@ struct Authentication
/// returned by the authentication server
static bool areCredentialsValid(
const Credentials & credentials,
const AuthenticationData & auth_data,
const AuthenticationData & authentication_method,
const ExternalAuthenticators & external_authenticators,
SettingsChanges & settings);

View File

@ -375,7 +375,8 @@ std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
break;
}
case AuthenticationType::NO_PASSWORD: [[fallthrough]];
case AuthenticationType::NO_PASSWORD:
break;
case AuthenticationType::MAX:
throw Exception(ErrorCodes::LOGICAL_ERROR, "AST: Unexpected authentication type {}", toString(auth_type));
}

View File

@ -30,7 +30,6 @@ namespace ErrorCodes
extern const int IP_ADDRESS_NOT_ALLOWED;
extern const int LOGICAL_ERROR;
extern const int NOT_IMPLEMENTED;
extern const int AUTHENTICATION_FAILED;
}
@ -525,15 +524,32 @@ std::optional<AuthResult> IAccessStorage::authenticateImpl(
if (!isAddressAllowed(*user, address))
throwAddressNotAllowed(address);
auto auth_type = user->auth_data.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
throwAuthenticationTypeNotAllowed(auth_type);
bool skipped_not_allowed_authentication_methods = false;
if (!areCredentialsValid(*user, credentials, external_authenticators, auth_result.settings))
throwInvalidCredentials();
for (const auto & auth_method : user->authentication_methods)
{
auto auth_type = auth_method.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
{
skipped_not_allowed_authentication_methods = true;
continue;
}
return auth_result;
if (areCredentialsValid(user->getName(), user->valid_until, auth_method, credentials, external_authenticators, auth_result.settings))
{
auth_result.authentication_data = auth_method;
return auth_result;
}
}
if (skipped_not_allowed_authentication_methods)
{
LOG_INFO(log, "Skipped the check for not allowed authentication methods,"
"check allow_no_password and allow_plaintext_password settings in the server configuration");
}
throwInvalidCredentials();
}
}
@ -543,9 +559,10 @@ std::optional<AuthResult> IAccessStorage::authenticateImpl(
return std::nullopt;
}
bool IAccessStorage::areCredentialsValid(
const User & user,
const std::string & user_name,
time_t valid_until,
const AuthenticationData & authentication_method,
const Credentials & credentials,
const ExternalAuthenticators & external_authenticators,
SettingsChanges & settings) const
@ -553,21 +570,20 @@ bool IAccessStorage::areCredentialsValid(
if (!credentials.isReady())
return false;
if (credentials.getUserName() != user.getName())
if (credentials.getUserName() != user_name)
return false;
if (user.valid_until)
if (valid_until)
{
const time_t now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
if (now > user.valid_until)
if (now > valid_until)
return false;
}
return Authentication::areCredentialsValid(credentials, user.auth_data, external_authenticators, settings);
return Authentication::areCredentialsValid(credentials, authentication_method, external_authenticators, settings);
}
bool IAccessStorage::isAddressAllowed(const User & user, const Poco::Net::IPAddress & address) const
{
return user.allowed_client_hosts.contains(address);
@ -747,14 +763,6 @@ void IAccessStorage::throwAddressNotAllowed(const Poco::Net::IPAddress & address
throw Exception(ErrorCodes::IP_ADDRESS_NOT_ALLOWED, "Connections from {} are not allowed", address.toString());
}
void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_type)
{
throw Exception(
ErrorCodes::AUTHENTICATION_FAILED,
"Authentication type {} is not allowed, check the setting allow_{} in the server configuration",
toString(auth_type), AuthenticationTypeInfo::get(auth_type).name);
}
void IAccessStorage::throwInvalidCredentials()
{
throw Exception(ErrorCodes::WRONG_PASSWORD, "Invalid credentials");

View File

@ -1,6 +1,7 @@
#pragma once
#include <Access/IAccessEntity.h>
#include <Access/AuthenticationData.h>
#include <Core/Types.h>
#include <Core/UUID.h>
#include <Parsers/IParser.h>
@ -34,6 +35,7 @@ struct AuthResult
UUID user_id;
/// Session settings received from authentication server (if any)
SettingsChanges settings{};
AuthenticationData authentication_data {};
};
/// Contains entities, i.e. instances of classes derived from IAccessEntity.
@ -227,7 +229,9 @@ protected:
bool allow_no_password,
bool allow_plaintext_password) const;
virtual bool areCredentialsValid(
const User & user,
const std::string & user_name,
time_t valid_until,
const AuthenticationData & authentication_method,
const Credentials & credentials,
const ExternalAuthenticators & external_authenticators,
SettingsChanges & settings) const;
@ -248,7 +252,6 @@ protected:
[[noreturn]] void throwReadonlyCannotRemove(AccessEntityType type, const String & name) const;
[[noreturn]] static void throwAddressNotAllowed(const Poco::Net::IPAddress & address);
[[noreturn]] static void throwInvalidCredentials();
[[noreturn]] static void throwAuthenticationTypeNotAllowed(AuthenticationType auth_type);
[[noreturn]] void throwBackupNotAllowed() const;
[[noreturn]] void throwRestoreNotAllowed() const;

View File

@ -468,8 +468,8 @@ std::optional<AuthResult> LDAPAccessStorage::authenticateImpl(
// User does not exist, so we create one, and will add it if authentication is successful.
new_user = std::make_shared<User>();
new_user->setName(credentials.getUserName());
new_user->auth_data = AuthenticationData(AuthenticationType::LDAP);
new_user->auth_data.setLDAPServerName(ldap_server_name);
new_user->authentication_methods.emplace_back(AuthenticationType::LDAP);
new_user->authentication_methods.back().setLDAPServerName(ldap_server_name);
user = new_user;
}
@ -504,7 +504,7 @@ std::optional<AuthResult> LDAPAccessStorage::authenticateImpl(
}
if (id)
return AuthResult{ .user_id = *id };
return AuthResult{ .user_id = *id, .authentication_data = AuthenticationData(AuthenticationType::LDAP) };
return std::nullopt;
}

View File

@ -16,7 +16,8 @@ bool User::equal(const IAccessEntity & other) const
if (!IAccessEntity::equal(other))
return false;
const auto & other_user = typeid_cast<const User &>(other);
return (auth_data == other_user.auth_data) && (allowed_client_hosts == other_user.allowed_client_hosts)
return (authentication_methods == other_user.authentication_methods)
&& (allowed_client_hosts == other_user.allowed_client_hosts)
&& (access == other_user.access) && (granted_roles == other_user.granted_roles) && (default_roles == other_user.default_roles)
&& (settings == other_user.settings) && (grantees == other_user.grantees) && (default_database == other_user.default_database)
&& (valid_until == other_user.valid_until);

View File

@ -15,7 +15,7 @@ namespace DB
*/
struct User : public IAccessEntity
{
AuthenticationData auth_data;
std::vector<AuthenticationData> authentication_methods;
AllowedClientHosts allowed_client_hosts = AllowedClientHosts::AnyHostTag{};
AccessRights access;
GrantedRoles granted_roles;

View File

@ -155,18 +155,18 @@ namespace
if (has_password_plaintext)
{
user->auth_data = AuthenticationData{AuthenticationType::PLAINTEXT_PASSWORD};
user->auth_data.setPassword(config.getString(user_config + ".password"));
user->authentication_methods.emplace_back(AuthenticationType::PLAINTEXT_PASSWORD);
user->authentication_methods.back().setPassword(config.getString(user_config + ".password"));
}
else if (has_password_sha256_hex)
{
user->auth_data = AuthenticationData{AuthenticationType::SHA256_PASSWORD};
user->auth_data.setPasswordHashHex(config.getString(user_config + ".password_sha256_hex"));
user->authentication_methods.emplace_back(AuthenticationType::SHA256_PASSWORD);
user->authentication_methods.back().setPasswordHashHex(config.getString(user_config + ".password_sha256_hex"));
}
else if (has_password_double_sha1_hex)
{
user->auth_data = AuthenticationData{AuthenticationType::DOUBLE_SHA1_PASSWORD};
user->auth_data.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex"));
user->authentication_methods.emplace_back(AuthenticationType::DOUBLE_SHA1_PASSWORD);
user->authentication_methods.back().setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex"));
}
else if (has_ldap)
{
@ -178,19 +178,19 @@ namespace
if (ldap_server_name.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty for user {}.", user_name);
user->auth_data = AuthenticationData{AuthenticationType::LDAP};
user->auth_data.setLDAPServerName(ldap_server_name);
user->authentication_methods.emplace_back(AuthenticationType::LDAP);
user->authentication_methods.back().setLDAPServerName(ldap_server_name);
}
else if (has_kerberos)
{
const auto realm = config.getString(user_config + ".kerberos.realm", "");
user->auth_data = AuthenticationData{AuthenticationType::KERBEROS};
user->auth_data.setKerberosRealm(realm);
user->authentication_methods.emplace_back(AuthenticationType::KERBEROS);
user->authentication_methods.back().setKerberosRealm(realm);
}
else if (has_certificates)
{
user->auth_data = AuthenticationData{AuthenticationType::SSL_CERTIFICATE};
user->authentication_methods.emplace_back(AuthenticationType::SSL_CERTIFICATE);
/// Fill list of allowed certificates.
Poco::Util::AbstractConfiguration::Keys keys;
@ -200,14 +200,14 @@ namespace
if (key.starts_with("common_name"))
{
String value = config.getString(certificates_config + "." + key);
user->auth_data.addSSLCertificateSubject(SSLCertificateSubjects::Type::CN, std::move(value));
user->authentication_methods.back().addSSLCertificateSubject(SSLCertificateSubjects::Type::CN, std::move(value));
}
else if (key.starts_with("subject_alt_name"))
{
String value = config.getString(certificates_config + "." + key);
if (value.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected ssl_certificates.subject_alt_name to not be empty");
user->auth_data.addSSLCertificateSubject(SSLCertificateSubjects::Type::SAN, std::move(value));
user->authentication_methods.back().addSSLCertificateSubject(SSLCertificateSubjects::Type::SAN, std::move(value));
}
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown certificate pattern type: {}", key);
@ -216,7 +216,7 @@ namespace
else if (has_ssh_keys)
{
#if USE_SSH
user->auth_data = AuthenticationData{AuthenticationType::SSH_KEY};
user->authentication_methods.emplace_back(AuthenticationType::SSH_KEY);
Poco::Util::AbstractConfiguration::Keys entries;
config.keys(ssh_keys_config, entries);
@ -253,26 +253,33 @@ namespace
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown ssh_key entry pattern type: {}", entry);
}
user->auth_data.setSSHKeys(std::move(keys));
user->authentication_methods.back().setSSHKeys(std::move(keys));
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
}
else if (has_http_auth)
{
user->auth_data = AuthenticationData{AuthenticationType::HTTP};
user->auth_data.setHTTPAuthenticationServerName(config.getString(http_auth_config + ".server"));
user->authentication_methods.emplace_back(AuthenticationType::HTTP);
user->authentication_methods.back().setHTTPAuthenticationServerName(config.getString(http_auth_config + ".server"));
auto scheme = config.getString(http_auth_config + ".scheme");
user->auth_data.setHTTPAuthenticationScheme(parseHTTPAuthenticationScheme(scheme));
user->authentication_methods.back().setHTTPAuthenticationScheme(parseHTTPAuthenticationScheme(scheme));
}
else
{
user->authentication_methods.emplace_back();
}
auto auth_type = user->auth_data.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
for (const auto & authentication_method : user->authentication_methods)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Authentication type {} is not allowed, check the setting allow_{} in the server configuration",
toString(auth_type), AuthenticationTypeInfo::get(auth_type).name);
auto auth_type = authentication_method.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Authentication type {} is not allowed, check the setting allow_{} in the server configuration",
toString(auth_type), AuthenticationTypeInfo::get(auth_type).name);
}
}
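The same allow_no_password / allow_plaintext_password policy is applied per authentication method in several places in this commit; a compact sketch of that check as a standalone predicate, with an illustrative helper name:
static bool isAuthenticationTypeAllowed(AuthenticationType type, bool allow_no_password, bool allow_plaintext_password)
{
    /// Only these two types can be globally disabled in the server configuration.
    if (type == AuthenticationType::NO_PASSWORD)
        return allow_no_password;
    if (type == AuthenticationType::PLAINTEXT_PASSWORD)
        return allow_plaintext_password;
    return true;
}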
const auto profile_name_config = user_config + ".profile";

View File

@ -1875,11 +1875,11 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
if (const auto * create_user_query = parsed_query->as<ASTCreateUserQuery>())
{
if (!create_user_query->attach && create_user_query->auth_data)
if (!create_user_query->attach && !create_user_query->authentication_methods.empty())
{
if (const auto * auth_data = create_user_query->auth_data->as<ASTAuthenticationData>())
for (const auto & authentication_method : create_user_query->authentication_methods)
{
auto password = auth_data->getPassword();
auto password = authentication_method->getPassword();
if (password)
client_context->getAccessControl().checkPasswordComplexityRules(*password);

View File

@ -2545,6 +2545,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
response.responses[i]->error = failed_multi->error_codes[i];
}
response.error = failed_multi->global_error;
storage.uncommitted_state.commit(zxid);
return response_ptr;
}
@ -2901,7 +2902,19 @@ void KeeperStorage<Container>::preprocessRequest(
if (check_acl && !request_processor->checkAuth(*this, session_id, false))
{
uncommitted_state.deltas.emplace_back(new_last_zxid, Coordination::Error::ZNOAUTH);
/// Multi requests handle failures using FailedMultiDelta
if (zk_request->getOpNum() == Coordination::OpNum::Multi || zk_request->getOpNum() == Coordination::OpNum::MultiRead)
{
const auto & multi_request = dynamic_cast<const Coordination::ZooKeeperMultiRequest &>(*zk_request);
std::vector<Coordination::Error> response_errors;
response_errors.resize(multi_request.requests.size(), Coordination::Error::ZOK);
uncommitted_state.deltas.emplace_back(
new_last_zxid, KeeperStorage<Container>::FailedMultiDelta{std::move(response_errors), Coordination::Error::ZNOAUTH});
}
else
{
uncommitted_state.deltas.emplace_back(new_last_zxid, Coordination::Error::ZNOAUTH);
}
return;
}

View File

@ -522,6 +522,7 @@ public:
struct FailedMultiDelta
{
std::vector<Coordination::Error> error_codes;
Coordination::Error global_error{Coordination::Error::ZOK};
};
// Denotes end of a subrequest in multi request

View File

@ -2280,6 +2280,62 @@ TYPED_TEST(CoordinationTest, TestPreprocessWhenCloseSessionIsPrecommitted)
}
}
TYPED_TEST(CoordinationTest, TestMultiRequestWithNoAuth)
{
using namespace Coordination;
using namespace DB;
ChangelogDirTest snapshots("./snapshots");
this->setSnapshotDirectory("./snapshots");
using Storage = typename TestFixture::Storage;
ChangelogDirTest rocks("./rocksdb");
this->setRocksDBDirectory("./rocksdb");
ResponsesQueue queue(std::numeric_limits<size_t>::max());
SnapshotsQueue snapshots_queue{1};
int64_t session_without_auth = 1;
int64_t session_with_auth = 2;
size_t term = 0;
auto state_machine = std::make_shared<KeeperStateMachine<Storage>>(queue, snapshots_queue, this->keeper_context, nullptr);
state_machine->init();
auto & storage = state_machine->getStorageUnsafe();
auto auth_req = std::make_shared<ZooKeeperAuthRequest>();
auth_req->scheme = "digest";
auth_req->data = "test_user:test_password";
// Add auth data to the session
auto auth_entry = getLogEntryFromZKRequest(term, session_with_auth, state_machine->getNextZxid(), auth_req);
state_machine->pre_commit(1, auth_entry->get_buf());
state_machine->commit(1, auth_entry->get_buf());
std::string node_with_acl = "/node_with_acl";
{
auto create_req = std::make_shared<ZooKeeperCreateRequest>();
create_req->path = node_with_acl;
create_req->data = "notmodified";
create_req->acls = {{.permissions = ACL::Read, .scheme = "auth", .id = ""}};
auto create_entry = getLogEntryFromZKRequest(term, session_with_auth, state_machine->getNextZxid(), create_req);
state_machine->pre_commit(3, create_entry->get_buf());
state_machine->commit(3, create_entry->get_buf());
ASSERT_TRUE(storage.container.contains(node_with_acl));
}
Requests ops;
ops.push_back(zkutil::makeSetRequest(node_with_acl, "modified", -1));
ops.push_back(zkutil::makeCheckRequest("/nonexistentnode", -1));
auto multi_req = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
auto multi_entry = getLogEntryFromZKRequest(term, session_without_auth, state_machine->getNextZxid(), multi_req);
state_machine->pre_commit(4, multi_entry->get_buf());
state_machine->commit(4, multi_entry->get_buf());
auto node_it = storage.container.find(node_with_acl);
ASSERT_FALSE(node_it == storage.container.end());
ASSERT_TRUE(node_it->value.getData() == "notmodified");
}
TYPED_TEST(CoordinationTest, TestSetACLWithAuthSchemeForAclWhenAuthIsPrecommitted)
{
using namespace Coordination;

View File

@ -890,16 +890,19 @@ public:
Messaging::MessageTransport & mt,
const Poco::Net::SocketAddress & address)
{
AuthenticationType user_auth_type;
try
{
user_auth_type = session.getAuthenticationTypeOrLogInFailure(user_name);
if (type_to_method.find(user_auth_type) != type_to_method.end())
const auto user_authentication_types = session.getAuthenticationTypesOrLogInFailure(user_name);
for (auto user_authentication_type : user_authentication_types)
{
type_to_method[user_auth_type]->authenticate(user_name, session, mt, address);
mt.send(Messaging::AuthenticationOk(), true);
LOG_DEBUG(log, "Authentication for user {} was successful.", user_name);
return;
if (type_to_method.find(user_authentication_type) != type_to_method.end())
{
type_to_method[user_authentication_type]->authenticate(user_name, session, mt, address);
mt.send(Messaging::AuthenticationOk(), true);
LOG_DEBUG(log, "Authentication for user {} was successful.", user_name);
return;
}
}
}
catch (const Exception&)
@ -913,7 +916,7 @@ public:
mt.send(Messaging::ErrorOrNoticeResponse(Messaging::ErrorOrNoticeResponse::ERROR, "0A000", "Authentication method is not supported"),
true);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Authentication method is not supported: {}", user_auth_type);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "None of the authentication methods registered for the user are supported");
}
};
}

View File

@ -119,6 +119,7 @@ namespace DB
M(UInt64, max_part_num_to_warn, 100000lu, "If the number of parts is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
M(UInt64, max_table_num_to_throw, 0lu, "If number of tables is greater than this value, server will throw an exception. 0 means no limitation. View, remote tables, dictionary, system tables are not counted. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
M(UInt64, max_database_num_to_throw, 0lu, "If number of databases is greater than this value, server will throw an exception. 0 means no limitation.", 0) \
M(UInt64, max_authentication_methods_per_user, 100, "The maximum number of authentication methods a user can be created with or altered to have. Changing this setting does not affect existing users. Zero means unlimited.", 0) \
M(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent thread can be allocated before applying CPU pressure. Zero means unlimited.", 0) \
M(UInt64, concurrent_threads_soft_limit_ratio_to_cores, 0, "Same as concurrent_threads_soft_limit_num, but with ratio to cores.", 0) \
\

View File

@ -972,7 +972,6 @@ class IColumn;
\
M(Bool, allow_experimental_database_materialized_mysql, false, "Allow to create database with Engine=MaterializedMySQL(...).", 0) \
M(Bool, allow_experimental_database_materialized_postgresql, false, "Allow to create database with Engine=MaterializedPostgreSQL(...).", 0) \
\
/** Experimental feature for moving data between shards. */ \
M(Bool, allow_experimental_query_deduplication, false, "Experimental data deduplication for SELECT queries based on part UUIDs", 0) \

View File

@ -4,6 +4,7 @@
#include <Common/ZooKeeper/KeeperException.h>
#include <Core/ServerUUID.h>
#include <Core/Settings.h>
#include <base/sleep.h>
#include <filesystem>
namespace fs = std::filesystem;
@ -249,6 +250,8 @@ String DatabaseReplicatedDDLWorker::enqueueQueryImpl(const ZooKeeperPtr & zookee
}
else if (code != Coordination::Error::ZNODEEXISTS)
zkutil::KeeperMultiException::check(code, ops, res);
sleepForMilliseconds(50);
}
if (counter_path.empty())

View File

@ -13,6 +13,7 @@
#include <DataTypes/DataTypeDateTime64.h>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/join.hpp>
#include <Common/quoteString.h>
#include <Core/PostgreSQL/Utils.h>
#include <base/FnTraits.h>
@ -292,7 +293,7 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList(
template<typename T>
PostgreSQLTableStructure fetchPostgreSQLTableStructure(
T & tx, const String & postgres_table, const String & postgres_schema, bool use_nulls, bool with_primary_key, bool with_replica_identity_index)
T & tx, const String & postgres_table, const String & postgres_schema, bool use_nulls, bool with_primary_key, bool with_replica_identity_index, const Strings & columns)
{
PostgreSQLTableStructure table;
@ -302,6 +303,10 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
? " AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'public')"
: fmt::format(" AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = {})", quoteString(postgres_schema));
std::string columns_part;
if (!columns.empty())
columns_part = fmt::format(" AND attname IN ('{}')", boost::algorithm::join(columns, "','"));
std::string query = fmt::format(
"SELECT attname AS name, " /// column name
"format_type(atttypid, atttypmod) AS type, " /// data type
@ -312,9 +317,9 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
"attnum as att_num, "
"attgenerated as generated " /// if column has GENERATED
"FROM pg_attribute "
"WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) "
"WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) {}"
"AND NOT attisdropped AND attnum > 0 "
"ORDER BY attnum ASC", where);
"ORDER BY attnum ASC", where, columns_part);
auto postgres_table_with_schema = postgres_schema.empty() ? postgres_table : doubleQuoteString(postgres_schema) + '.' + doubleQuoteString(postgres_table);
table.physical_columns = readNamesAndTypesList(tx, postgres_table_with_schema, query, use_nulls, false);
@ -415,7 +420,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
PostgreSQLTableStructure fetchPostgreSQLTableStructure(pqxx::connection & connection, const String & postgres_table, const String & postgres_schema, bool use_nulls)
{
pqxx::ReadTransaction tx(connection);
auto result = fetchPostgreSQLTableStructure(tx, postgres_table, postgres_schema, use_nulls, false, false);
auto result = fetchPostgreSQLTableStructure(tx, postgres_table, postgres_schema, use_nulls, false, false, {});
tx.commit();
return result;
}
@ -433,17 +438,17 @@ std::set<String> fetchPostgreSQLTablesList(pqxx::connection & connection, const
template
PostgreSQLTableStructure fetchPostgreSQLTableStructure(
pqxx::ReadTransaction & tx, const String & postgres_table, const String & postgres_schema,
bool use_nulls, bool with_primary_key, bool with_replica_identity_index);
bool use_nulls, bool with_primary_key, bool with_replica_identity_index, const Strings & columns);
template
PostgreSQLTableStructure fetchPostgreSQLTableStructure(
pqxx::ReplicationTransaction & tx, const String & postgres_table, const String & postgres_schema,
bool use_nulls, bool with_primary_key, bool with_replica_identity_index);
bool use_nulls, bool with_primary_key, bool with_replica_identity_index, const Strings & columns);
template
PostgreSQLTableStructure fetchPostgreSQLTableStructure(
pqxx::nontransaction & tx, const String & postgres_table, const String & postrges_schema,
bool use_nulls, bool with_primary_key, bool with_replica_identity_index);
bool use_nulls, bool with_primary_key, bool with_replica_identity_index, const Strings & columns);
std::set<String> fetchPostgreSQLTablesList(pqxx::work & tx, const String & postgres_schema);
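A hypothetical usage sketch of the new `columns` parameter; the connection string, table, and column names are made up for illustration, and the call mirrors the explicit instantiations above:
pqxx::connection connection{"postgresql://user:password@localhost:5432/db"};
pqxx::ReadTransaction tx(connection);
/// Fetch the structure of only the listed columns, including primary-key information.
auto structure = fetchPostgreSQLTableStructure(
    tx, "events", "public",
    /* use_nulls */ true,
    /* with_primary_key */ true,
    /* with_replica_identity_index */ false,
    /* columns */ {"id", "created_at"});
tx.commit();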

View File

@ -48,7 +48,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
template<typename T>
PostgreSQLTableStructure fetchPostgreSQLTableStructure(
T & tx, const String & postgres_table, const String & postgres_schema, bool use_nulls = true,
bool with_primary_key = false, bool with_replica_identity_index = false);
bool with_primary_key = false, bool with_replica_identity_index = false, const Strings & columns = {});
template<typename T>
std::set<String> fetchPostgreSQLTablesList(T & tx, const String & postgres_schema);

View File

@ -350,6 +350,12 @@ public:
return delegate;
}
UInt32 getRefCount(const String & path) const override
{
auto wrapped_path = wrappedPath(path);
return delegate->getRefCount(wrapped_path);
}
#if USE_AWS_S3
std::shared_ptr<const S3::Client> getS3StorageClient() const override
{

View File

@ -27,7 +27,8 @@ class FunctionStringOrArrayToT : public IFunction
{
public:
static constexpr auto name = Name::name;
static FunctionPtr create(ContextPtr)
static FunctionPtr create(ContextPtr) { return createImpl(); }
static FunctionPtr createImpl()
{
return std::make_shared<FunctionStringOrArrayToT>();
}
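A brief sketch of why `createImpl()` is added next to `create(ContextPtr)`: call sites that have no Context at hand can now construct the function directly. The wrapper below is illustrative only and is not part of the commit:
#include <Functions/array/length.h>
namespace DB
{
/// FunctionLength is FunctionStringOrArrayToT<LengthImpl, NameLength, UInt64, false>,
/// so it inherits the static createImpl() shown above.
FunctionPtr makeLengthFunction()
{
    return FunctionLength::createImpl();
}
}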

View File

@ -1,4 +1,4 @@
#include <Functions/IFunction.h>
#include <Functions/array/arrayResize.h>
#include <Functions/FunctionFactory.h>
#include <Functions/GatherUtils/GatherUtils.h>
#include <DataTypes/DataTypeArray.h>
@ -21,117 +21,99 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
class FunctionArrayResize : public IFunction
DataTypePtr FunctionArrayResize::getReturnTypeImpl(const DataTypes & arguments) const
{
public:
static constexpr auto name = "arrayResize";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayResize>(); }
const size_t number_of_arguments = arguments.size();
String getName() const override { return name; }
if (number_of_arguments < 2 || number_of_arguments > 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
getName(), number_of_arguments);
bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; }
if (arguments[0]->onlyNull())
return arguments[0];
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
const auto * array_type = typeid_cast<const DataTypeArray *>(arguments[0].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for function {} must be an array but it has type {}.",
getName(), arguments[0]->getName());
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
if (WhichDataType(array_type->getNestedType()).isNothing())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function {} cannot resize {}", getName(), array_type->getName());
if (!isInteger(removeNullable(arguments[1])) && !arguments[1]->onlyNull())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} for function {} must be integer but it has type {}.",
toString(1), getName(), arguments[1]->getName());
if (number_of_arguments == 2)
return arguments[0];
else /* if (number_of_arguments == 3) */
return std::make_shared<DataTypeArray>(getLeastSupertype(DataTypes{array_type->getNestedType(), arguments[2]}));
}
ColumnPtr FunctionArrayResize::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type, size_t input_rows_count) const
{
if (return_type->onlyNull())
return return_type->createColumnConstWithDefaultValue(input_rows_count);
auto result_column = return_type->createColumn();
auto array_column = arguments[0].column;
auto size_column = arguments[1].column;
if (!arguments[0].type->equals(*return_type))
array_column = castColumn(arguments[0], return_type);
const DataTypePtr & return_nested_type = typeid_cast<const DataTypeArray &>(*return_type).getNestedType();
size_t size = array_column->size();
ColumnPtr appended_column;
if (arguments.size() == 3)
{
const size_t number_of_arguments = arguments.size();
appended_column = arguments[2].column;
if (!arguments[2].type->equals(*return_nested_type))
appended_column = castColumn(arguments[2], return_nested_type);
}
else
appended_column = return_nested_type->createColumnConstWithDefaultValue(size);
if (number_of_arguments < 2 || number_of_arguments > 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
getName(), number_of_arguments);
std::unique_ptr<GatherUtils::IArraySource> array_source;
std::unique_ptr<GatherUtils::IValueSource> value_source;
if (arguments[0]->onlyNull())
return arguments[0];
bool is_const = false;
const auto * array_type = typeid_cast<const DataTypeArray *>(arguments[0].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for function {} must be an array but it has type {}.",
getName(), arguments[0]->getName());
if (WhichDataType(array_type->getNestedType()).isNothing())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function {} cannot resize {}", getName(), array_type->getName());
if (!isInteger(removeNullable(arguments[1])) && !arguments[1]->onlyNull())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} for function {} must be integer but it has type {}.",
toString(1), getName(), arguments[1]->getName());
if (number_of_arguments == 2)
return arguments[0];
else /* if (number_of_arguments == 3) */
return std::make_shared<DataTypeArray>(getLeastSupertype(DataTypes{array_type->getNestedType(), arguments[2]}));
if (const auto * const_array_column = typeid_cast<const ColumnConst *>(array_column.get()))
{
is_const = true;
array_column = const_array_column->getDataColumnPtr();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type, size_t input_rows_count) const override
if (const auto * argument_column_array = typeid_cast<const ColumnArray *>(array_column.get()))
array_source = GatherUtils::createArraySource(*argument_column_array, is_const, size);
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "First arguments for function {} must be array.", getName());
bool is_appended_const = false;
if (const auto * const_appended_column = typeid_cast<const ColumnConst *>(appended_column.get()))
{
if (return_type->onlyNull())
return return_type->createColumnConstWithDefaultValue(input_rows_count);
auto result_column = return_type->createColumn();
auto array_column = arguments[0].column;
auto size_column = arguments[1].column;
if (!arguments[0].type->equals(*return_type))
array_column = castColumn(arguments[0], return_type);
const DataTypePtr & return_nested_type = typeid_cast<const DataTypeArray &>(*return_type).getNestedType();
size_t size = array_column->size();
ColumnPtr appended_column;
if (arguments.size() == 3)
{
appended_column = arguments[2].column;
if (!arguments[2].type->equals(*return_nested_type))
appended_column = castColumn(arguments[2], return_nested_type);
}
else
appended_column = return_nested_type->createColumnConstWithDefaultValue(size);
std::unique_ptr<GatherUtils::IArraySource> array_source;
std::unique_ptr<GatherUtils::IValueSource> value_source;
bool is_const = false;
if (const auto * const_array_column = typeid_cast<const ColumnConst *>(array_column.get()))
{
is_const = true;
array_column = const_array_column->getDataColumnPtr();
}
if (const auto * argument_column_array = typeid_cast<const ColumnArray *>(array_column.get()))
array_source = GatherUtils::createArraySource(*argument_column_array, is_const, size);
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "First arguments for function {} must be array.", getName());
bool is_appended_const = false;
if (const auto * const_appended_column = typeid_cast<const ColumnConst *>(appended_column.get()))
{
is_appended_const = true;
appended_column = const_appended_column->getDataColumnPtr();
}
value_source = GatherUtils::createValueSource(*appended_column, is_appended_const, size);
auto sink = GatherUtils::createArraySink(typeid_cast<ColumnArray &>(*result_column), size);
if (isColumnConst(*size_column))
GatherUtils::resizeConstantSize(*array_source, *value_source, *sink, size_column->getInt(0));
else
GatherUtils::resizeDynamicSize(*array_source, *value_source, *sink, *size_column);
return result_column;
is_appended_const = true;
appended_column = const_appended_column->getDataColumnPtr();
}
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForNulls() const override { return false; }
};
value_source = GatherUtils::createValueSource(*appended_column, is_appended_const, size);
auto sink = GatherUtils::createArraySink(typeid_cast<ColumnArray &>(*result_column), size);
if (isColumnConst(*size_column))
GatherUtils::resizeConstantSize(*array_source, *value_source, *sink, size_column->getInt(0));
else
GatherUtils::resizeDynamicSize(*array_source, *value_source, *sink, *size_column);
return result_column;
}
REGISTER_FUNCTION(ArrayResize)
{

View File

@ -0,0 +1,28 @@
#pragma once
#include <Functions/IFunction.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
class FunctionArrayResize : public IFunction
{
public:
static constexpr auto name = "arrayResize";
static FunctionPtr createImpl() { return std::make_shared<FunctionArrayResize>(); }
static FunctionPtr create(ContextPtr) { return createImpl(); }
String getName() const override { return name; }
bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override;
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type, size_t input_rows_count) const override;
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForNulls() const override { return false; }
};
}

View File

@ -1,4 +1,4 @@
#include <Functions/IFunction.h>
#include <Functions/array/emptyArrayToSingle.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <DataTypes/DataTypeArray.h>
@ -20,35 +20,6 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
/** emptyArrayToSingle(arr) - replace empty arrays with arrays of one element with a default value.
*/
class FunctionEmptyArrayToSingle : public IFunction
{
public:
static constexpr auto name = "emptyArrayToSingle";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionEmptyArrayToSingle>(); }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 1; }
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
const DataTypeArray * array_type = checkAndGetDataType<DataTypeArray>(arguments[0].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument for function {} must be array.", getName());
return arguments[0];
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override;
};
namespace
{
namespace FunctionEmptyArrayToSingleImpl
@ -366,6 +337,14 @@ namespace
}
}
DataTypePtr FunctionEmptyArrayToSingle::getReturnTypeImpl(const DataTypes & arguments) const
{
const DataTypeArray * array_type = checkAndGetDataType<DataTypeArray>(arguments[0].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument for function {} must be array.", getName());
return arguments[0];
}
ColumnPtr FunctionEmptyArrayToSingle::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
{

View File

@ -0,0 +1,29 @@
#pragma once
#include <Functions/IFunction.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
/** emptyArrayToSingle(arr) - replace empty arrays with arrays of one element with a default value.
*/
class FunctionEmptyArrayToSingle : public IFunction
{
public:
static constexpr auto name = "emptyArrayToSingle";
static FunctionPtr createImpl() { return std::make_shared<FunctionEmptyArrayToSingle>(); }
static FunctionPtr create(ContextPtr) { return createImpl(); }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 1; }
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override;
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override;
};
}

View File

@ -1,65 +1,7 @@
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringOrArrayToT.h>
#include <Functions/array/length.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
/** Calculates the length of a string in bytes.
*/
struct LengthImpl
{
static constexpr auto is_fixed_to_constant = true;
static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray<UInt64> & res, size_t input_rows_count)
{
for (size_t i = 0; i < input_rows_count; ++i)
res[i] = offsets[i] - 1 - offsets[i - 1];
}
static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t n, UInt64 & res, size_t)
{
res = n;
}
static void vectorFixedToVector(const ColumnString::Chars & /*data*/, size_t /*n*/, PaddedPODArray<UInt64> & /*res*/, size_t)
{
}
static void array(const ColumnString::Offsets & offsets, PaddedPODArray<UInt64> & res, size_t input_rows_count)
{
for (size_t i = 0; i < input_rows_count; ++i)
res[i] = offsets[i] - offsets[i - 1];
}
[[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to UUID argument");
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv6 argument");
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv4 argument");
}
};
struct NameLength
{
static constexpr auto name = "length";
};
using FunctionLength = FunctionStringOrArrayToT<LengthImpl, NameLength, UInt64, false>;
REGISTER_FUNCTION(Length)
{

View File

@ -0,0 +1,66 @@
#pragma once
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringOrArrayToT.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
/** Calculates the length of a string in bytes.
*/
struct LengthImpl
{
static constexpr auto is_fixed_to_constant = true;
static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray<UInt64> & res, size_t input_rows_count)
{
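/// Each string in a ColumnString is stored with a terminating zero byte and the offsets point
/// past it, hence the extra -1 when computing the length in bytes.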
for (size_t i = 0; i < input_rows_count; ++i)
res[i] = offsets[i] - 1 - offsets[i - 1];
}
static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t n, UInt64 & res, size_t)
{
res = n;
}
static void vectorFixedToVector(const ColumnString::Chars & /*data*/, size_t /*n*/, PaddedPODArray<UInt64> & /*res*/, size_t)
{
}
static void array(const ColumnString::Offsets & offsets, PaddedPODArray<UInt64> & res, size_t input_rows_count)
{
for (size_t i = 0; i < input_rows_count; ++i)
res[i] = offsets[i] - offsets[i - 1];
}
[[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to UUID argument");
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv6 argument");
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray<UInt64> &, size_t)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv4 argument");
}
};
struct NameLength
{
static constexpr auto name = "length";
};
using FunctionLength = FunctionStringOrArrayToT<LengthImpl, NameLength, UInt64, false>;
}

View File

@ -50,13 +50,6 @@ private:
return executeNonconstant(input);
}
[[maybe_unused]] String toString() const
{
WriteBufferFromOwnString buf;
buf << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() << "\n";
return buf.str();
}
private:
ColumnWithTypeAndName executeLiteral(std::string_view literal) const
{
@ -231,9 +224,7 @@ public:
const auto & instruction = instructions[i];
try
{
// std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl;
concat_args[i] = instruction.execute();
// std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl;
}
catch (const fmt::v9::format_error & e)
{
@ -358,7 +349,14 @@ private:
REGISTER_FUNCTION(Printf)
{
factory.registerFunction<FunctionPrintf>();
factory.registerFunction<FunctionPrintf>(
FunctionDocumentation{.description=R"(
The `printf` function formats the given string with the values (strings, integers, floating-point numbers, etc.) listed in the arguments, similar to the printf function in C++.
The format string can contain format specifiers starting with the `%` character.
Anything that is not a `%` character followed by a format specifier is treated as literal text and copied verbatim into the output.
A literal `%` character can be escaped as `%%`.)", .examples{{"printf", "select printf('%%%s %s %d', 'Hello', 'World', 2024);", "%Hello World 2024"}}, .categories{"String"}
});
}
}

View File

@ -6,6 +6,7 @@
#include <Access/ReplicatedAccessStorage.h>
#include <Access/User.h>
#include <Common/logger_useful.h>
#include <Core/ServerSettings.h>
#include <Interpreters/Access/InterpreterSetRoleQuery.h>
#include <Interpreters/Context.h>
#include <Interpreters/executeDDLQueryOnCluster.h>
@ -33,15 +34,18 @@ namespace
void updateUserFromQueryImpl(
User & user,
const ASTCreateUserQuery & query,
const std::optional<AuthenticationData> auth_data,
const std::vector<AuthenticationData> authentication_methods,
const std::shared_ptr<ASTUserNameWithHost> & override_name,
const std::optional<RolesOrUsersSet> & override_default_roles,
const std::optional<SettingsProfileElements> & override_settings,
const std::optional<RolesOrUsersSet> & override_grantees,
const std::optional<time_t> & valid_until,
bool reset_authentication_methods,
bool replace_authentication_methods,
bool allow_implicit_no_password,
bool allow_no_password,
bool allow_plaintext_password)
bool allow_plaintext_password,
std::size_t max_number_of_authentication_methods)
{
if (override_name)
user.setName(override_name->toString());
@ -50,25 +54,77 @@ namespace
else if (query.names->size() == 1)
user.setName(query.names->front()->toString());
if (!query.attach && !query.alter && !auth_data && !allow_implicit_no_password)
if (!query.attach && !query.alter && authentication_methods.empty() && !allow_implicit_no_password)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Authentication type NO_PASSWORD must "
"be explicitly specified, check the setting allow_implicit_no_password "
"in the server configuration");
if (auth_data)
user.auth_data = *auth_data;
if (auth_data || !query.alter)
// If the user does not have an authentication method and none has been specified in the query,
// add a default one.
if (user.authentication_methods.empty() && authentication_methods.empty())
{
auto auth_type = user.auth_data.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
user.authentication_methods.emplace_back();
}
// An explicit IDENTIFIED WITH clause drops the existing authentication methods in favor of the new ones.
if (replace_authentication_methods)
{
user.authentication_methods.clear();
}
// Resetting authentication methods drops the existing ones and keeps only the most recent.
if (reset_authentication_methods)
{
auto backup_authentication_method = user.authentication_methods.back();
user.authentication_methods.clear();
user.authentication_methods.emplace_back(backup_authentication_method);
}
// max_number_of_authentication_methods == 0 means unlimited
if (!authentication_methods.empty() && max_number_of_authentication_methods != 0)
{
// Only check whether the user exceeds the allowed number of authentication methods when the create/alter query includes
// authentication information. Otherwise, bypass this check so that alters unrelated to authentication are not blocked.
auto number_of_authentication_methods = user.authentication_methods.size() + authentication_methods.size();
if (number_of_authentication_methods > max_number_of_authentication_methods)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Authentication type {} is not allowed, check the setting allow_{} in the server configuration",
toString(auth_type),
AuthenticationTypeInfo::get(auth_type).name);
"User can not be created/updated because it exceeds the allowed quantity of authentication methods per user. "
"Check the `max_authentication_methods_per_user` setting");
}
}
for (const auto & authentication_method : authentication_methods)
{
user.authentication_methods.emplace_back(authentication_method);
}
bool has_no_password_authentication_method = std::find_if(user.authentication_methods.begin(),
user.authentication_methods.end(),
[](const AuthenticationData & authentication_data)
{
return authentication_data.getType() == AuthenticationType::NO_PASSWORD;
}) != user.authentication_methods.end();
if (has_no_password_authentication_method && user.authentication_methods.size() > 1)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Authentication method 'no_password' cannot co-exist with other authentication methods");
}
if (!query.alter)
{
for (const auto & authentication_method : user.authentication_methods)
{
auto auth_type = authentication_method.getType();
if (((auth_type == AuthenticationType::NO_PASSWORD) && !allow_no_password) ||
((auth_type == AuthenticationType::PLAINTEXT_PASSWORD) && !allow_plaintext_password))
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Authentication type {} is not allowed, check the setting allow_{} in the server configuration",
toString(auth_type),
AuthenticationTypeInfo::get(auth_type).name);
}
}
}
@ -156,9 +212,14 @@ BlockIO InterpreterCreateUserQuery::execute()
bool no_password_allowed = access_control.isNoPasswordAllowed();
bool plaintext_password_allowed = access_control.isPlaintextPasswordAllowed();
std::optional<AuthenticationData> auth_data;
if (query.auth_data)
auth_data = AuthenticationData::fromAST(*query.auth_data, getContext(), !query.attach);
std::vector<AuthenticationData> authentication_methods;
if (!query.authentication_methods.empty())
{
for (const auto & authentication_method_ast : query.authentication_methods)
{
authentication_methods.push_back(AuthenticationData::fromAST(*authentication_method_ast, getContext(), !query.attach));
}
}
std::optional<time_t> valid_until;
if (query.valid_until)
@ -207,8 +268,10 @@ BlockIO InterpreterCreateUserQuery::execute()
{
auto updated_user = typeid_cast<std::shared_ptr<User>>(entity->clone());
updateUserFromQueryImpl(
*updated_user, query, auth_data, {}, default_roles_from_query, settings_from_query, grantees_from_query,
valid_until, implicit_no_password_allowed, no_password_allowed, plaintext_password_allowed);
*updated_user, query, authentication_methods, {}, default_roles_from_query, settings_from_query, grantees_from_query,
valid_until, query.reset_authentication_methods_to_new, query.replace_authentication_methods,
implicit_no_password_allowed, no_password_allowed,
plaintext_password_allowed, getContext()->getServerSettings().max_authentication_methods_per_user);
return updated_user;
};
@ -227,8 +290,10 @@ BlockIO InterpreterCreateUserQuery::execute()
{
auto new_user = std::make_shared<User>();
updateUserFromQueryImpl(
*new_user, query, auth_data, name, default_roles_from_query, settings_from_query, RolesOrUsersSet::AllTag{},
valid_until, implicit_no_password_allowed, no_password_allowed, plaintext_password_allowed);
*new_user, query, authentication_methods, name, default_roles_from_query, settings_from_query, RolesOrUsersSet::AllTag{},
valid_until, query.reset_authentication_methods_to_new, query.replace_authentication_methods,
implicit_no_password_allowed, no_password_allowed,
plaintext_password_allowed, getContext()->getServerSettings().max_authentication_methods_per_user);
new_users.emplace_back(std::move(new_user));
}
@ -265,17 +330,41 @@ BlockIO InterpreterCreateUserQuery::execute()
}
void InterpreterCreateUserQuery::updateUserFromQuery(User & user, const ASTCreateUserQuery & query, bool allow_no_password, bool allow_plaintext_password)
void InterpreterCreateUserQuery::updateUserFromQuery(
User & user,
const ASTCreateUserQuery & query,
bool allow_no_password,
bool allow_plaintext_password,
std::size_t max_number_of_authentication_methods)
{
std::optional<AuthenticationData> auth_data;
if (query.auth_data)
auth_data = AuthenticationData::fromAST(*query.auth_data, {}, !query.attach);
std::vector<AuthenticationData> authentication_methods;
if (!query.authentication_methods.empty())
{
for (const auto & authentication_method_ast : query.authentication_methods)
{
authentication_methods.emplace_back(AuthenticationData::fromAST(*authentication_method_ast, {}, !query.attach));
}
}
std::optional<time_t> valid_until;
if (query.valid_until)
valid_until = getValidUntilFromAST(query.valid_until, {});
updateUserFromQueryImpl(user, query, auth_data, {}, {}, {}, {}, valid_until, allow_no_password, allow_plaintext_password, true);
updateUserFromQueryImpl(
user,
query,
authentication_methods,
{},
{},
{},
{},
valid_until,
query.reset_authentication_methods_to_new,
query.replace_authentication_methods,
allow_no_password,
allow_plaintext_password,
true,
max_number_of_authentication_methods);
}
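
As an aside, the validation added above (the per-user method cap plus the rule that no_password cannot be combined with other methods) can be exercised in isolation. The helper below is a hypothetical standalone sketch using plain strings instead of AuthenticationData; only the setting name `max_authentication_methods_per_user` and the general error conditions mirror the diff.

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical approximation of the checks in updateUserFromQueryImpl above:
// drop the old methods if the query replaces them, enforce the per-user cap,
// append the new methods, and reject no_password mixed with anything else.
void applyAuthenticationMethods(
    std::vector<std::string> & user_methods,
    const std::vector<std::string> & query_methods,
    bool replace_existing,
    std::size_t max_methods_per_user)
{
    if (replace_existing)
        user_methods.clear();

    if (user_methods.size() + query_methods.size() > max_methods_per_user)
        throw std::invalid_argument("too many authentication methods, check `max_authentication_methods_per_user`");

    user_methods.insert(user_methods.end(), query_methods.begin(), query_methods.end());

    bool has_no_password = std::find(user_methods.begin(), user_methods.end(), "no_password") != user_methods.end();
    if (has_no_password && user_methods.size() > 1)
        throw std::invalid_argument("'no_password' cannot co-exist with other authentication methods");
}

int main()
{
    std::vector<std::string> stored{"sha256_password"};
    applyAuthenticationMethods(stored, {"bcrypt_password"}, /*replace_existing=*/false, /*max_methods_per_user=*/100);
    return stored.size() == 2 ? 0 : 1;
}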
void registerInterpreterCreateUserQuery(InterpreterFactory & factory)

View File

@ -17,7 +17,12 @@ public:
BlockIO execute() override;
static void updateUserFromQuery(User & user, const ASTCreateUserQuery & query, bool allow_no_password, bool allow_plaintext_password);
static void updateUserFromQuery(
User & user,
const ASTCreateUserQuery & query,
bool allow_no_password,
bool allow_plaintext_password,
std::size_t max_number_of_authentication_methods);
private:
ASTPtr query_ptr;

View File

@ -64,8 +64,10 @@ namespace
query->default_roles = user.default_roles.toASTWithNames(*access_control);
}
if (user.auth_data.getType() != AuthenticationType::NO_PASSWORD)
query->auth_data = user.auth_data.toAST();
for (const auto & authentication_method : user.authentication_methods)
{
query->authentication_methods.push_back(authentication_method.toAST());
}
if (user.valid_until)
{

View File

@ -2028,8 +2028,9 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split
return {std::move(first_actions), std::move(second_actions), std::move(split_nodes_mapping)};
}
ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet & array_joined_columns) const
ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const Names & array_joined_columns) const
{
std::unordered_set<std::string_view> array_joined_columns_set(array_joined_columns.begin(), array_joined_columns.end());
struct Frame
{
const Node * node = nullptr;
@ -2072,7 +2073,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
if (cur.next_child_to_visit == cur.node->children.size())
{
bool depend_on_array_join = false;
if (cur.node->type == ActionType::INPUT && array_joined_columns.contains(cur.node->result_name))
if (cur.node->type == ActionType::INPUT && array_joined_columns_set.contains(cur.node->result_name))
depend_on_array_join = true;
for (const auto * child : cur.node->children)

View File

@ -340,7 +340,7 @@ public:
SplitResult split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping = false, bool avoid_duplicate_inputs = false) const;
/// Splits actions into two parts. Returned first half may be swapped with ARRAY JOIN.
SplitResult splitActionsBeforeArrayJoin(const NameSet & array_joined_columns) const;
SplitResult splitActionsBeforeArrayJoin(const Names & array_joined_columns) const;
/// Splits actions into two parts. First part has minimal size sufficient for calculation of column_name.
/// Outputs of initial actions must contain column_name.

View File

@ -0,0 +1,13 @@
#pragma once
#include <Core/Names.h>
namespace DB
{
struct ArrayJoin
{
Names columns;
bool is_left = false;
};
}

View File

@ -6,6 +6,9 @@
#include <Columns/ColumnMap.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/array/length.h>
#include <Functions/array/arrayResize.h>
#include <Functions/array/emptyArrayToSingle.h>
#include <Interpreters/Context.h>
#include <Interpreters/ArrayJoinAction.h>
@ -59,26 +62,31 @@ ColumnWithTypeAndName convertArrayJoinColumn(const ColumnWithTypeAndName & src_c
return array_col;
}
ArrayJoinAction::ArrayJoinAction(const NameSet & array_joined_columns_, bool array_join_is_left, ContextPtr context)
: columns(array_joined_columns_)
, is_left(array_join_is_left)
, is_unaligned(context->getSettingsRef().enable_unaligned_array_join)
, max_block_size(context->getSettingsRef().max_block_size)
ArrayJoinAction::ArrayJoinAction(const Names & columns_, bool is_left_, bool is_unaligned_, size_t max_block_size_)
: columns(columns_.begin(), columns_.end())
, is_left(is_left_)
, is_unaligned(is_unaligned_)
, max_block_size(max_block_size_)
{
if (columns.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "No arrays to join");
if (is_unaligned)
{
function_length = FunctionFactory::instance().get("length", context);
function_greatest = FunctionFactory::instance().get("greatest", context);
function_array_resize = FunctionFactory::instance().get("arrayResize", context);
function_length = std::make_unique<FunctionToOverloadResolverAdaptor>(FunctionLength::createImpl());
function_array_resize = std::make_unique<FunctionToOverloadResolverAdaptor>(FunctionArrayResize::createImpl());
}
else if (is_left)
function_builder = FunctionFactory::instance().get("emptyArrayToSingle", context);
function_builder = std::make_unique<FunctionToOverloadResolverAdaptor>(FunctionEmptyArrayToSingle::createImpl());
}
void ArrayJoinAction::prepare(ColumnsWithTypeAndName & sample) const
void ArrayJoinAction::prepare(const Names & columns, ColumnsWithTypeAndName & sample)
{
NameSet columns_set(columns.begin(), columns.end());
prepare(columns_set, sample);
}
void ArrayJoinAction::prepare(const NameSet & columns, ColumnsWithTypeAndName & sample)
{
for (auto & current : sample)
{
@ -103,6 +111,35 @@ ArrayJoinResultIteratorPtr ArrayJoinAction::execute(Block block)
return std::make_unique<ArrayJoinResultIterator>(this, std::move(block));
}
static void updateMaxLength(ColumnUInt64 & max_length, UInt64 length)
{
for (auto & value : max_length.getData())
value = std::max(value, length);
}
static void updateMaxLength(ColumnUInt64 & max_length, const IColumn & length)
{
if (const auto * length_const = typeid_cast<const ColumnConst *>(&length))
{
updateMaxLength(max_length, length_const->getUInt(0));
return;
}
const auto * length_uint64 = typeid_cast<const ColumnUInt64 *>(&length);
if (!length_uint64)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected UInt64 for array length, got {}", length.getName());
auto & max_length_data = max_length.getData();
const auto & length_data = length_uint64->getData();
size_t num_rows = max_length_data.size();
if (num_rows != length_data.size())
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Different column sizes in ARRAY JOIN: {} and {}", num_rows, length_data.size());
for (size_t row = 0; row < num_rows; ++row)
max_length_data[row] = std::max(max_length_data[row], length_data[row]);
}
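
For reference, the row-wise maximum computed by updateMaxLength above replaces the earlier approach of evaluating the `greatest` function per column. A minimal sketch with plain vectors standing in for ColumnUInt64 (an assumption; this is not the ClickHouse column API) looks like this:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch: keep a running per-row maximum of array lengths.
void updateMaxLengthSketch(std::vector<uint64_t> & max_length, const std::vector<uint64_t> & length)
{
    if (max_length.size() != length.size())
        throw std::logic_error("different column sizes in ARRAY JOIN");

    for (std::size_t row = 0; row < max_length.size(); ++row)
        max_length[row] = std::max(max_length[row], length[row]);
}

int main()
{
    std::vector<uint64_t> max_len{1, 1, 1};
    updateMaxLengthSketch(max_len, {0, 5, 2});
    return max_len[1] == 5 ? 0 : 1;
}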
ArrayJoinResultIterator::ArrayJoinResultIterator(const ArrayJoinAction * array_join_, Block block_)
: array_join(array_join_), block(std::move(block_)), total_rows(block.rows()), current_row(0)
@ -111,7 +148,6 @@ ArrayJoinResultIterator::ArrayJoinResultIterator(const ArrayJoinAction * array_j
bool is_unaligned = array_join->is_unaligned;
bool is_left = array_join->is_left;
const auto & function_length = array_join->function_length;
const auto & function_greatest = array_join->function_greatest;
const auto & function_array_resize = array_join->function_array_resize;
const auto & function_builder = array_join->function_builder;
@ -125,11 +161,7 @@ ArrayJoinResultIterator::ArrayJoinResultIterator(const ArrayJoinAction * array_j
/// Resize all array joined columns to the longest one, (at least 1 if LEFT ARRAY JOIN), padded with default values.
auto rows = block.rows();
auto uint64 = std::make_shared<DataTypeUInt64>();
ColumnWithTypeAndName column_of_max_length{{}, uint64, {}};
if (is_left)
column_of_max_length = ColumnWithTypeAndName(uint64->createColumnConst(rows, 1u), uint64, {});
else
column_of_max_length = ColumnWithTypeAndName(uint64->createColumnConst(rows, 0u), uint64, {});
auto max_length = ColumnUInt64::create(rows, (is_left ? 1u : 0u));
for (const auto & name : columns)
{
@ -138,11 +170,10 @@ ArrayJoinResultIterator::ArrayJoinResultIterator(const ArrayJoinAction * array_j
ColumnWithTypeAndName array_col = convertArrayJoinColumn(src_col);
ColumnsWithTypeAndName tmp_block{array_col}; //, {{}, uint64, {}}};
auto len_col = function_length->build(tmp_block)->execute(tmp_block, uint64, rows);
ColumnsWithTypeAndName tmp_block2{column_of_max_length, {len_col, uint64, {}}};
column_of_max_length.column = function_greatest->build(tmp_block2)->execute(tmp_block2, uint64, rows);
updateMaxLength(*max_length, *len_col);
}
ColumnWithTypeAndName column_of_max_length{std::move(max_length), uint64, {}};
for (const auto & name : columns)
{
auto & src_col = block.getByName(name);

View File

@ -33,14 +33,14 @@ public:
/// For unaligned [LEFT] ARRAY JOIN
FunctionOverloadResolverPtr function_length;
FunctionOverloadResolverPtr function_greatest;
FunctionOverloadResolverPtr function_array_resize;
/// For LEFT ARRAY JOIN.
FunctionOverloadResolverPtr function_builder;
ArrayJoinAction(const NameSet & array_joined_columns_, bool array_join_is_left, ContextPtr context);
void prepare(ColumnsWithTypeAndName & sample) const;
ArrayJoinAction(const Names & columns_, bool is_left_, bool is_unaligned_, size_t max_block_size_);
static void prepare(const NameSet & columns, ColumnsWithTypeAndName & sample);
static void prepare(const Names & columns, ColumnsWithTypeAndName & sample);
ArrayJoinResultIteratorPtr execute(Block block);
};

View File

@ -1059,16 +1059,16 @@ std::string ExpressionActionsChain::dumpChain() const
return ss.str();
}
ExpressionActionsChain::ArrayJoinStep::ArrayJoinStep(ArrayJoinActionPtr array_join_, ColumnsWithTypeAndName required_columns_)
ExpressionActionsChain::ArrayJoinStep::ArrayJoinStep(const Names & array_join_columns_, ColumnsWithTypeAndName required_columns_)
: Step({})
, array_join(std::move(array_join_))
, array_join_columns(array_join_columns_.begin(), array_join_columns_.end())
, result_columns(std::move(required_columns_))
{
for (auto & column : result_columns)
{
required_columns.emplace_back(NameAndTypePair(column.name, column.type));
if (array_join->columns.contains(column.name))
if (array_join_columns.contains(column.name))
{
const auto & array = getArrayJoinDataType(column.type);
column.type = array->getNestedType();
@ -1085,12 +1085,12 @@ void ExpressionActionsChain::ArrayJoinStep::finalize(const NameSet & required_ou
for (const auto & column : result_columns)
{
if (array_join->columns.contains(column.name) || required_output_.contains(column.name))
if (array_join_columns.contains(column.name) || required_output_.contains(column.name))
new_result_columns.emplace_back(column);
}
for (const auto & column : required_columns)
{
if (array_join->columns.contains(column.name) || required_output_.contains(column.name))
if (array_join_columns.contains(column.name) || required_output_.contains(column.name))
new_required_columns.emplace_back(column);
}

View File

@ -3,6 +3,7 @@
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Interpreters/ActionsDAG.h>
#include <Interpreters/ArrayJoin.h>
#include <Interpreters/ExpressionActionsSettings.h>
#include <variant>
@ -22,9 +23,6 @@ class TableJoin;
class IJoin;
using JoinPtr = std::shared_ptr<IJoin>;
class ArrayJoinAction;
using ArrayJoinActionPtr = std::shared_ptr<ArrayJoinAction>;
class ExpressionActions;
using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
@ -223,11 +221,11 @@ struct ExpressionActionsChain : WithContext
struct ArrayJoinStep : public Step
{
ArrayJoinActionPtr array_join;
const NameSet array_join_columns;
NamesAndTypesList required_columns;
ColumnsWithTypeAndName result_columns;
ArrayJoinStep(ArrayJoinActionPtr array_join_, ColumnsWithTypeAndName required_columns_);
ArrayJoinStep(const Names & array_join_columns_, ColumnsWithTypeAndName required_columns_);
NamesAndTypesList getRequiredColumns() const override { return required_columns; }
ColumnsWithTypeAndName getResultColumns() const override { return result_columns; }

View File

@ -215,7 +215,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAG & acti
auto array_join = addMultipleArrayJoinAction(actions, is_array_join_left);
auto sample_columns = actions.getResultColumns();
array_join->prepare(sample_columns);
ArrayJoinAction::prepare(array_join.columns, sample_columns);
actions = ActionsDAG(sample_columns);
NamesAndTypesList new_columns_after_array_join;
@ -889,9 +889,11 @@ const ASTSelectQuery * SelectQueryExpressionAnalyzer::getAggregatingQuery() cons
}
/// "Big" ARRAY JOIN.
ArrayJoinActionPtr ExpressionAnalyzer::addMultipleArrayJoinAction(ActionsDAG & actions, bool array_join_is_left) const
ArrayJoin ExpressionAnalyzer::addMultipleArrayJoinAction(ActionsDAG & actions, bool array_join_is_left) const
{
NameSet result_columns;
Names result_columns;
result_columns.reserve(syntax->array_join_result_to_source.size());
for (const auto & result_source : syntax->array_join_result_to_source)
{
/// Assign new names to columns, if needed.
@ -902,19 +904,19 @@ ArrayJoinActionPtr ExpressionAnalyzer::addMultipleArrayJoinAction(ActionsDAG & a
}
/// Make ARRAY JOIN (replace arrays with their insides) for the columns in these new names.
result_columns.insert(result_source.first);
result_columns.push_back(result_source.first);
}
return std::make_shared<ArrayJoinAction>(result_columns, array_join_is_left, getContext());
return {std::move(result_columns), array_join_is_left};
}
ArrayJoinActionPtr SelectQueryExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, ActionsAndProjectInputsFlagPtr & before_array_join, bool only_types)
std::optional<ArrayJoin> SelectQueryExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, ActionsAndProjectInputsFlagPtr & before_array_join, bool only_types)
{
const auto * select_query = getSelectQuery();
auto [array_join_expression_list, is_array_join_left] = select_query->arrayJoinExpressionList();
if (!array_join_expression_list)
return nullptr;
return {};
ExpressionActionsChain::Step & step = chain.lastStep(sourceColumns());
@ -923,7 +925,7 @@ ArrayJoinActionPtr SelectQueryExpressionAnalyzer::appendArrayJoin(ExpressionActi
auto array_join = addMultipleArrayJoinAction(step.actions()->dag, is_array_join_left);
before_array_join = chain.getLastActions();
chain.steps.push_back(std::make_unique<ExpressionActionsChain::ArrayJoinStep>(array_join, step.getResultColumns()));
chain.steps.push_back(std::make_unique<ExpressionActionsChain::ArrayJoinStep>(array_join.columns, step.getResultColumns()));
chain.addStep();

View File

@ -174,7 +174,7 @@ protected:
/// Find global subqueries in the GLOBAL IN/JOIN sections. Fills in external_tables.
void initGlobalSubqueriesAndExternalTables(bool do_global, bool is_explain);
ArrayJoinActionPtr addMultipleArrayJoinAction(ActionsDAG & actions, bool is_left) const;
ArrayJoin addMultipleArrayJoinAction(ActionsDAG & actions, bool is_left) const;
void getRootActions(const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAG & actions, bool only_consts = false);
@ -234,7 +234,7 @@ struct ExpressionAnalysisResult
bool use_grouping_set_key = false;
ActionsAndProjectInputsFlagPtr before_array_join;
ArrayJoinActionPtr array_join;
std::optional<ArrayJoin> array_join;
ActionsAndProjectInputsFlagPtr before_join;
ActionsAndProjectInputsFlagPtr converting_join_columns;
JoinPtr join;
@ -388,7 +388,7 @@ private:
*/
/// Before aggregation:
ArrayJoinActionPtr appendArrayJoin(ExpressionActionsChain & chain, ActionsAndProjectInputsFlagPtr & before_array_join, bool only_types);
std::optional<ArrayJoin> appendArrayJoin(ExpressionActionsChain & chain, ActionsAndProjectInputsFlagPtr & before_array_join, bool only_types);
bool appendJoinLeftKeys(ExpressionActionsChain & chain, bool only_types);
JoinPtr appendJoin(ExpressionActionsChain & chain, ActionsAndProjectInputsFlagPtr & converting_join_columns);

View File

@ -86,6 +86,7 @@
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Interpreters/Aggregator.h>
#include <Interpreters/ArrayJoinAction.h>
#include <Interpreters/HashTablesStatistics.h>
#include <Interpreters/IJoin.h>
#include <QueryPipeline/SizeLimits.h>
@ -1676,7 +1677,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
if (expressions.array_join)
{
QueryPlanStepPtr array_join_step
= std::make_unique<ArrayJoinStep>(query_plan.getCurrentDataStream(), expressions.array_join);
= std::make_unique<ArrayJoinStep>(
query_plan.getCurrentDataStream(),
*expressions.array_join,
settings.enable_unaligned_array_join,
settings.max_block_size);
array_join_step->setStepDescription("ARRAY JOIN");
query_plan.addStep(std::move(array_join_step));

View File

@ -304,21 +304,30 @@ Session::~Session()
LOG_DEBUG(log, "{} Logout, user_id: {}", toString(auth_id), toString(*user_id));
if (auto session_log = getSessionLog())
{
session_log->addLogOut(auth_id, user, getClientInfo());
session_log->addLogOut(auth_id, user, user_authenticated_with, getClientInfo());
}
}
}
AuthenticationType Session::getAuthenticationType(const String & user_name) const
std::unordered_set<AuthenticationType> Session::getAuthenticationTypes(const String & user_name) const
{
return global_context->getAccessControl().read<User>(user_name)->auth_data.getType();
std::unordered_set<AuthenticationType> authentication_types;
const auto user_to_query = global_context->getAccessControl().read<User>(user_name);
for (const auto & authentication_method : user_to_query->authentication_methods)
{
authentication_types.insert(authentication_method.getType());
}
return authentication_types;
}
AuthenticationType Session::getAuthenticationTypeOrLogInFailure(const String & user_name) const
std::unordered_set<AuthenticationType> Session::getAuthenticationTypesOrLogInFailure(const String & user_name) const
{
try
{
return getAuthenticationType(user_name);
return getAuthenticationTypes(user_name);
}
catch (const Exception & e)
{
@ -354,6 +363,7 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So
{
auto auth_result = global_context->getAccessControl().authenticate(credentials_, address.host(), getClientInfo().getLastForwardedFor());
user_id = auth_result.user_id;
user_authenticated_with = auth_result.authentication_data;
settings_from_auth_server = auth_result.settings;
LOG_DEBUG(log, "{} Authenticated with global context as user {}",
toString(auth_id), toString(*user_id));
@ -698,7 +708,8 @@ void Session::recordLoginSuccess(ContextPtr login_context) const
settings,
access->getAccess(),
getClientInfo(),
user);
user,
user_authenticated_with);
}
notified_session_log_about_login = true;

View File

@ -43,10 +43,10 @@ public:
Session & operator=(const Session &) = delete;
/// Provides information about the authentication type of a specified user.
AuthenticationType getAuthenticationType(const String & user_name) const;
std::unordered_set<AuthenticationType> getAuthenticationTypes(const String & user_name) const;
/// Same as getAuthenticationType, but adds LoginFailure event in case of error.
AuthenticationType getAuthenticationTypeOrLogInFailure(const String & user_name) const;
std::unordered_set<AuthenticationType> getAuthenticationTypesOrLogInFailure(const String & user_name) const;
/// Sets the current user, checks the credentials and that the specified address is allowed to connect from.
/// The function throws an exception if there is no such user or password is wrong.
@ -113,6 +113,7 @@ private:
mutable UserPtr user;
std::optional<UUID> user_id;
AuthenticationData user_authenticated_with;
ContextMutablePtr session_context;
mutable bool query_context_created = false;

View File

@ -214,7 +214,8 @@ void SessionLog::addLoginSuccess(const UUID & auth_id,
const Settings & settings,
const ContextAccessPtr & access,
const ClientInfo & client_info,
const UserPtr & login_user)
const UserPtr & login_user,
const AuthenticationData & user_authenticated_with)
{
SessionLogElement log_entry(auth_id, SESSION_LOGIN_SUCCESS);
log_entry.client_info = client_info;
@ -222,9 +223,11 @@ void SessionLog::addLoginSuccess(const UUID & auth_id,
if (login_user)
{
log_entry.user = login_user->getName();
log_entry.user_identified_with = login_user->auth_data.getType();
log_entry.user_identified_with = user_authenticated_with.getType();
}
log_entry.external_auth_server = login_user ? login_user->auth_data.getLDAPServerName() : "";
log_entry.external_auth_server = user_authenticated_with.getLDAPServerName();
log_entry.session_id = session_id;
@ -256,15 +259,19 @@ void SessionLog::addLoginFailure(
add(std::move(log_entry));
}
void SessionLog::addLogOut(const UUID & auth_id, const UserPtr & login_user, const ClientInfo & client_info)
void SessionLog::addLogOut(
const UUID & auth_id,
const UserPtr & login_user,
const AuthenticationData & user_authenticated_with,
const ClientInfo & client_info)
{
auto log_entry = SessionLogElement(auth_id, SESSION_LOGOUT);
if (login_user)
{
log_entry.user = login_user->getName();
log_entry.user_identified_with = login_user->auth_data.getType();
log_entry.user_identified_with = user_authenticated_with.getType();
}
log_entry.external_auth_server = login_user ? login_user->auth_data.getLDAPServerName() : "";
log_entry.external_auth_server = user_authenticated_with.getLDAPServerName();
log_entry.client_info = client_info;
add(std::move(log_entry));

View File

@ -22,6 +22,7 @@ class ContextAccess;
struct User;
using UserPtr = std::shared_ptr<const User>;
using ContextAccessPtr = std::shared_ptr<const ContextAccess>;
class AuthenticationData;
/** A struct which will be inserted as row into session_log table.
*
@ -71,17 +72,21 @@ struct SessionLogElement
class SessionLog : public SystemLog<SessionLogElement>
{
using SystemLog<SessionLogElement>::SystemLog;
public:
void addLoginSuccess(const UUID & auth_id,
const String & session_id,
const Settings & settings,
const ContextAccessPtr & access,
const ClientInfo & client_info,
const UserPtr & login_user);
const UserPtr & login_user,
const AuthenticationData & user_authenticated_with);
void addLoginFailure(const UUID & auth_id, const ClientInfo & info, const std::optional<String> & user, const Exception & reason);
void addLogOut(const UUID & auth_id, const UserPtr & login_user, const ClientInfo & client_info);
void addLogOut(
const UUID & auth_id,
const UserPtr & login_user,
const AuthenticationData & user_authenticated_with,
const ClientInfo & client_info);
};
}

View File

@ -44,7 +44,7 @@ void ASTAuthenticationData::formatImpl(const FormatSettings & settings, FormatSt
{
if (type && *type == AuthenticationType::NO_PASSWORD)
{
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " NOT IDENTIFIED"
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " no_password"
<< (settings.hilite ? IAST::hilite_none : "");
return;
}
@ -160,12 +160,9 @@ void ASTAuthenticationData::formatImpl(const FormatSettings & settings, FormatSt
auth_type_name = AuthenticationTypeInfo::get(*type).name;
}
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " IDENTIFIED" << (settings.hilite ? IAST::hilite_none : "");
if (!auth_type_name.empty())
{
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " WITH " << auth_type_name
<< (settings.hilite ? IAST::hilite_none : "");
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " " << auth_type_name << (settings.hilite ? IAST::hilite_none : "");
}
if (!prefix.empty())

View File

@ -19,9 +19,25 @@ namespace
<< quoteString(new_name);
}
void formatAuthenticationData(const ASTAuthenticationData & auth_data, const IAST::FormatSettings & settings)
void formatAuthenticationData(const std::vector<std::shared_ptr<ASTAuthenticationData>> & authentication_methods, const IAST::FormatSettings & settings)
{
auth_data.format(settings);
// safe because this method is only called if authentication_methods is not empty
// if the first type is present, include the `WITH` keyword
if (authentication_methods[0]->type)
{
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " WITH" << (settings.hilite ? IAST::hilite_none : "");
}
for (std::size_t i = 0; i < authentication_methods.size(); i++)
{
authentication_methods[i]->format(settings);
bool is_last = i == authentication_methods.size() - 1;
if (!is_last)
{
settings.ostr << ",";
}
}
}
void formatValidUntil(const IAST & valid_until, const IAST::FormatSettings & settings)
@ -165,6 +181,7 @@ ASTPtr ASTCreateUserQuery::clone() const
{
auto res = std::make_shared<ASTCreateUserQuery>(*this);
res->children.clear();
res->authentication_methods.clear();
if (names)
res->names = std::static_pointer_cast<ASTUserNamesWithHost>(names->clone());
@ -181,10 +198,11 @@ ASTPtr ASTCreateUserQuery::clone() const
if (settings)
res->settings = std::static_pointer_cast<ASTSettingsProfileElements>(settings->clone());
if (auth_data)
for (const auto & authentication_method : authentication_methods)
{
res->auth_data = std::static_pointer_cast<ASTAuthenticationData>(auth_data->clone());
res->children.push_back(res->auth_data);
auto ast_clone = std::static_pointer_cast<ASTAuthenticationData>(authentication_method->clone());
res->authentication_methods.push_back(ast_clone);
res->children.push_back(ast_clone);
}
return res;
@ -223,8 +241,24 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState &
if (new_name)
formatRenameTo(*new_name, format);
if (auth_data)
formatAuthenticationData(*auth_data, format);
if (authentication_methods.empty())
{
// If identification (auth method) is missing from the query, we should serialize it in the form of `NO_PASSWORD`, unless it is an ALTER query
if (!alter)
{
format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " IDENTIFIED WITH no_password" << (format.hilite ? IAST::hilite_none : "");
}
}
else
{
if (add_identified_with)
{
format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " ADD" << (format.hilite ? IAST::hilite_none : "");
}
format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " IDENTIFIED" << (format.hilite ? IAST::hilite_none : "");
formatAuthenticationData(authentication_methods, format);
}
if (valid_until)
formatValidUntil(*valid_until, format);
@ -247,6 +281,9 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState &
if (grantees)
formatGrantees(*grantees, format);
if (reset_authentication_methods_to_new)
format.ostr << (format.hilite ? hilite_keyword : "") << " RESET AUTHENTICATION METHODS TO NEW" << (format.hilite ? hilite_none : "");
}
}
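
To summarize the serialization rules added to formatImpl above: no methods on a non-ALTER query prints `IDENTIFIED WITH no_password`; otherwise the methods are comma-separated after `IDENTIFIED`, optionally preceded by `ADD` and followed by `RESET AUTHENTICATION METHODS TO NEW`. The following is a rough standalone sketch of that logic (ordinary strings, not the AST formatting machinery; clause order is approximate):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Rough sketch of the identification clause produced above.
std::string formatIdentification(
    const std::vector<std::string> & methods, bool alter, bool add, bool reset)
{
    std::string out;
    if (methods.empty())
    {
        if (!alter)
            out += " IDENTIFIED WITH no_password";
    }
    else
    {
        if (add)
            out += " ADD";
        out += " IDENTIFIED WITH";
        for (std::size_t i = 0; i < methods.size(); ++i)
            out += (i == 0 ? " " : ", ") + methods[i];
    }
    if (reset)
        out += " RESET AUTHENTICATION METHODS TO NEW";
    return out;
}

int main()
{
    std::cout << formatIdentification({"plaintext_password BY 'a'", "sha256_password BY 'b'"}, true, true, false) << '\n';
    // prints: " ADD IDENTIFIED WITH plaintext_password BY 'a', sha256_password BY 'b'"
}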

View File

@ -42,12 +42,15 @@ public:
bool if_exists = false;
bool if_not_exists = false;
bool or_replace = false;
bool reset_authentication_methods_to_new = false;
bool add_identified_with = false;
bool replace_authentication_methods = false;
std::shared_ptr<ASTUserNamesWithHost> names;
std::optional<String> new_name;
String storage_name;
std::shared_ptr<ASTAuthenticationData> auth_data;
std::vector<std::shared_ptr<ASTAuthenticationData>> authentication_methods;
std::optional<AllowedClientHosts> hosts;
std::optional<AllowedClientHosts> add_hosts;

View File

@ -43,21 +43,16 @@ namespace
});
}
bool parseAuthenticationData(IParserBase::Pos & pos, Expected & expected, std::shared_ptr<ASTAuthenticationData> & auth_data)
bool parseAuthenticationData(
IParserBase::Pos & pos,
Expected & expected,
std::shared_ptr<ASTAuthenticationData> & auth_data,
bool is_type_specifier_mandatory,
bool is_type_specifier_allowed,
bool should_parse_no_password)
{
return IParserBase::wrapParseImpl(pos, [&]
{
if (ParserKeyword{Keyword::NOT_IDENTIFIED}.ignore(pos, expected))
{
auth_data = std::make_shared<ASTAuthenticationData>();
auth_data->type = AuthenticationType::NO_PASSWORD;
return true;
}
if (!ParserKeyword{Keyword::IDENTIFIED}.ignore(pos, expected))
return false;
std::optional<AuthenticationType> type;
bool expect_password = false;
@ -68,51 +63,65 @@ namespace
bool expect_public_ssh_key = false;
bool expect_http_auth_server = false;
if (ParserKeyword{Keyword::WITH}.ignore(pos, expected))
auto parse_non_password_based_type = [&](auto check_type)
{
for (auto check_type : collections::range(AuthenticationType::MAX))
if (ParserKeyword{AuthenticationTypeInfo::get(check_type).keyword}.ignore(pos, expected))
{
if (ParserKeyword{AuthenticationTypeInfo::get(check_type).keyword}.ignore(pos, expected))
{
type = check_type;
type = check_type;
if (check_type == AuthenticationType::LDAP)
expect_ldap_server_name = true;
else if (check_type == AuthenticationType::KERBEROS)
expect_kerberos_realm = true;
else if (check_type == AuthenticationType::SSL_CERTIFICATE)
expect_ssl_cert_subjects = true;
else if (check_type == AuthenticationType::SSH_KEY)
expect_public_ssh_key = true;
else if (check_type == AuthenticationType::HTTP)
expect_http_auth_server = true;
else if (check_type != AuthenticationType::NO_PASSWORD)
expect_password = true;
if (check_type == AuthenticationType::LDAP)
expect_ldap_server_name = true;
else if (check_type == AuthenticationType::KERBEROS)
expect_kerberos_realm = true;
else if (check_type == AuthenticationType::SSL_CERTIFICATE)
expect_ssl_cert_subjects = true;
else if (check_type == AuthenticationType::SSH_KEY)
expect_public_ssh_key = true;
else if (check_type == AuthenticationType::HTTP)
expect_http_auth_server = true;
else if (check_type != AuthenticationType::NO_PASSWORD)
expect_password = true;
return true;
}
return false;
};
{
const auto first_authentication_type_element_to_check
= should_parse_no_password ? AuthenticationType::NO_PASSWORD : AuthenticationType::PLAINTEXT_PASSWORD;
for (auto check_type : collections::range(first_authentication_type_element_to_check, AuthenticationType::MAX))
{
if (parse_non_password_based_type(check_type))
break;
}
}
}
if (!type)
if (!type)
{
if (ParserKeyword{Keyword::SHA256_HASH}.ignore(pos, expected))
{
if (ParserKeyword{Keyword::SHA256_HASH}.ignore(pos, expected))
{
type = AuthenticationType::SHA256_PASSWORD;
expect_hash = true;
}
else if (ParserKeyword{Keyword::DOUBLE_SHA1_HASH}.ignore(pos, expected))
{
type = AuthenticationType::DOUBLE_SHA1_PASSWORD;
expect_hash = true;
}
else if (ParserKeyword{Keyword::BCRYPT_HASH}.ignore(pos, expected))
{
type = AuthenticationType::BCRYPT_PASSWORD;
expect_hash = true;
}
else
return false;
type = AuthenticationType::SHA256_PASSWORD;
expect_hash = true;
}
else if (ParserKeyword{Keyword::DOUBLE_SHA1_HASH}.ignore(pos, expected))
{
type = AuthenticationType::DOUBLE_SHA1_PASSWORD;
expect_hash = true;
}
else if (ParserKeyword{Keyword::BCRYPT_HASH}.ignore(pos, expected))
{
type = AuthenticationType::BCRYPT_PASSWORD;
expect_hash = true;
}
else if (is_type_specifier_mandatory)
return false;
}
else if (!is_type_specifier_allowed)
{
return false;
}
/// If authentication type is not specified, then the default password type is used
@ -219,6 +228,69 @@ namespace
}
bool parseIdentifiedWith(
IParserBase::Pos & pos,
Expected & expected,
std::vector<std::shared_ptr<ASTAuthenticationData>> & authentication_methods,
bool should_parse_no_password)
{
return IParserBase::wrapParseImpl(pos, [&]
{
if (!ParserKeyword{Keyword::IDENTIFIED}.ignore(pos, expected))
return false;
// Parse first authentication method which doesn't come with a leading comma
{
bool is_type_specifier_mandatory = ParserKeyword{Keyword::WITH}.ignore(pos, expected);
std::shared_ptr<ASTAuthenticationData> ast_authentication_data;
if (!parseAuthenticationData(pos, expected, ast_authentication_data, is_type_specifier_mandatory, is_type_specifier_mandatory, should_parse_no_password))
{
return false;
}
authentication_methods.push_back(ast_authentication_data);
}
// Need to save the current position, consume the comma, and only advance the real position if another authentication
// method follows the comma. Otherwise, the position should stay unchanged so that other parsers can process it and
// possibly report an error on a trailing comma.
IParserBase::Pos aux_pos = pos;
while (ParserToken{TokenType::Comma}.ignore(aux_pos, expected))
{
std::shared_ptr<ASTAuthenticationData> ast_authentication_data;
if (!parseAuthenticationData(aux_pos, expected, ast_authentication_data, false, true, should_parse_no_password))
{
break;
}
pos = aux_pos;
authentication_methods.push_back(ast_authentication_data);
}
return !authentication_methods.empty();
});
}
bool parseIdentifiedOrNotIdentified(IParserBase::Pos & pos, Expected & expected, std::vector<std::shared_ptr<ASTAuthenticationData>> & authentication_methods)
{
return IParserBase::wrapParseImpl(pos, [&]
{
if (ParserKeyword{Keyword::NOT_IDENTIFIED}.ignore(pos, expected))
{
authentication_methods.emplace_back(std::make_shared<ASTAuthenticationData>());
authentication_methods.back()->type = AuthenticationType::NO_PASSWORD;
return true;
}
return parseIdentifiedWith(pos, expected, authentication_methods, true);
});
}
bool parseHostsWithoutPrefix(IParserBase::Pos & pos, Expected & expected, AllowedClientHosts & hosts)
{
AllowedClientHosts res_hosts;
@ -411,6 +483,27 @@ namespace
return until_p.parse(pos, valid_until, expected);
});
}
bool parseAddIdentifiedWith(IParserBase::Pos & pos, Expected & expected, std::vector<std::shared_ptr<ASTAuthenticationData>> & auth_data)
{
return IParserBase::wrapParseImpl(pos, [&]
{
if (!ParserKeyword{Keyword::ADD}.ignore(pos, expected))
{
return false;
}
return parseIdentifiedWith(pos, expected, auth_data, false);
});
}
bool parseResetAuthenticationMethods(IParserBase::Pos & pos, Expected & expected)
{
return IParserBase::wrapParseImpl(pos, [&]
{
return ParserKeyword{Keyword::RESET_AUTHENTICATION_METHODS_TO_NEW}.ignore(pos, expected);
});
}
}
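
As a usage note, the parser changes above are meant to accept statements along the following lines. These samples are illustrative only; they follow the grammar implemented by parseIdentifiedWith, parseAddIdentifiedWith and parseResetAuthenticationMethods, and are not taken from the test suite.

#include <string>
#include <vector>

// Illustrative statements for the new grammar (assumed, not asserted here):
static const std::vector<std::string> sample_create_user_queries =
{
    // several methods in a single IDENTIFIED WITH clause, separated by commas
    "CREATE USER u1 IDENTIFIED WITH plaintext_password BY 'a', sha256_password BY 'b'",
    // ALTER USER may append methods instead of replacing them
    "ALTER USER u1 ADD IDENTIFIED WITH bcrypt_password BY 'c'",
    // ALTER USER may also reset the stored methods (reset_authentication_methods_to_new above)
    "ALTER USER u1 RESET AUTHENTICATION METHODS TO NEW",
};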
@ -456,7 +549,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
std::optional<AllowedClientHosts> hosts;
std::optional<AllowedClientHosts> add_hosts;
std::optional<AllowedClientHosts> remove_hosts;
std::shared_ptr<ASTAuthenticationData> auth_data;
std::vector<std::shared_ptr<ASTAuthenticationData>> auth_data;
std::shared_ptr<ASTRolesOrUsersSet> default_roles;
std::shared_ptr<ASTSettingsProfileElements> settings;
std::shared_ptr<ASTRolesOrUsersSet> grantees;
@ -464,19 +557,28 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ASTPtr valid_until;
String cluster;
String storage_name;
bool reset_authentication_methods_to_new = false;
bool parsed_identified_with = false;
bool parsed_add_identified_with = false;
while (true)
{
if (!auth_data)
if (auth_data.empty() && !reset_authentication_methods_to_new)
{
std::shared_ptr<ASTAuthenticationData> new_auth_data;
if (parseAuthenticationData(pos, expected, new_auth_data))
parsed_identified_with = parseIdentifiedOrNotIdentified(pos, expected, auth_data);
if (!parsed_identified_with && alter)
{
auth_data = std::move(new_auth_data);
continue;
parsed_add_identified_with = parseAddIdentifiedWith(pos, expected, auth_data);
}
}
if (!reset_authentication_methods_to_new && alter && auth_data.empty())
{
reset_authentication_methods_to_new = parseResetAuthenticationMethods(pos, expected);
}
if (!valid_until)
{
parseValidUntil(pos, expected, valid_until);
@ -564,7 +666,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->cluster = std::move(cluster);
query->names = std::move(names);
query->new_name = std::move(new_name);
query->auth_data = std::move(auth_data);
query->authentication_methods = std::move(auth_data);
query->hosts = std::move(hosts);
query->add_hosts = std::move(add_hosts);
query->remove_hosts = std::move(remove_hosts);
@ -574,9 +676,14 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->default_database = std::move(default_database);
query->valid_until = std::move(valid_until);
query->storage_name = std::move(storage_name);
query->reset_authentication_methods_to_new = reset_authentication_methods_to_new;
query->add_identified_with = parsed_add_identified_with;
query->replace_authentication_methods = parsed_identified_with;
if (query->auth_data)
query->children.push_back(query->auth_data);
for (const auto & authentication_method : query->authentication_methods)
{
query->children.push_back(authentication_method);
}
if (query->valid_until)
query->children.push_back(query->valid_until);

View File

@ -407,6 +407,7 @@ namespace DB
MR_MACROS(REPLACE_PARTITION, "REPLACE PARTITION") \
MR_MACROS(REPLACE, "REPLACE") \
MR_MACROS(RESET_SETTING, "RESET SETTING") \
MR_MACROS(RESET_AUTHENTICATION_METHODS_TO_NEW, "RESET AUTHENTICATION METHODS TO NEW") \
MR_MACROS(RESPECT_NULLS, "RESPECT NULLS") \
MR_MACROS(RESTORE, "RESTORE") \
MR_MACROS(RESTRICT, "RESTRICT") \

View File

@ -87,7 +87,7 @@ TEST_P(ParserTest, parseQuery)
{
if (input_text.starts_with("ATTACH"))
{
auto salt = (dynamic_cast<const ASTCreateUserQuery *>(ast.get())->auth_data)->getSalt().value_or("");
auto salt = (dynamic_cast<const ASTCreateUserQuery *>(ast.get())->authentication_methods.back())->getSalt().value_or("");
EXPECT_TRUE(re2::RE2::FullMatch(salt, expected_ast));
}
else
@ -283,6 +283,18 @@ INSTANTIATE_TEST_SUITE_P(ParserCreateUserQuery, ParserTest,
"CREATE USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'",
"CREATE USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'"
},
{
"CREATE USER user1 IDENTIFIED WITH no_password",
"CREATE USER user1 IDENTIFIED WITH no_password"
},
{
"CREATE USER user1",
"CREATE USER user1 IDENTIFIED WITH no_password"
},
{
"CREATE USER user1 IDENTIFIED WITH plaintext_password BY 'abc123', plaintext_password BY 'def123', sha256_password BY 'ghi123'",
"CREATE USER user1 IDENTIFIED WITH plaintext_password BY 'abc123', plaintext_password BY 'def123', sha256_password BY 'ghi123'"
},
{
"CREATE USER user1 IDENTIFIED WITH sha256_hash BY '7A37B85C8918EAC19A9089C0FA5A2AB4DCE3F90528DCDEEC108B23DDF3607B99' SALT 'salt'",
"CREATE USER user1 IDENTIFIED WITH sha256_hash BY '7A37B85C8918EAC19A9089C0FA5A2AB4DCE3F90528DCDEEC108B23DDF3607B99' SALT 'salt'"
@ -291,6 +303,10 @@ INSTANTIATE_TEST_SUITE_P(ParserCreateUserQuery, ParserTest,
"ALTER USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'",
"ALTER USER user1 IDENTIFIED WITH sha256_password BY 'qwe123'"
},
{
"ALTER USER user1 IDENTIFIED WITH plaintext_password BY 'abc123', plaintext_password BY 'def123', sha256_password BY 'ghi123'",
"ALTER USER user1 IDENTIFIED WITH plaintext_password BY 'abc123', plaintext_password BY 'def123', sha256_password BY 'ghi123'"
},
{
"ALTER USER user1 IDENTIFIED WITH sha256_hash BY '7A37B85C8918EAC19A9089C0FA5A2AB4DCE3F90528DCDEEC108B23DDF3607B99' SALT 'salt'",
"ALTER USER user1 IDENTIFIED WITH sha256_hash BY '7A37B85C8918EAC19A9089C0FA5A2AB4DCE3F90528DCDEEC108B23DDF3607B99' SALT 'salt'"
@ -298,6 +314,10 @@ INSTANTIATE_TEST_SUITE_P(ParserCreateUserQuery, ParserTest,
{
"CREATE USER user1 IDENTIFIED WITH sha256_password BY 'qwe123' SALT 'EFFD7F6B03B3EA68B8F86C1E91614DD50E42EB31EF7160524916444D58B5E264'",
"throws Syntax error"
},
{
"ALTER USER user1 IDENTIFIED WITH plaintext_password BY 'abc123' IDENTIFIED WITH plaintext_password BY 'def123'",
"throws Only one identified with is permitted"
}
})));

View File

@ -63,7 +63,7 @@ TEST_P(ParserKQLTest, parseKQLQuery)
{
if (input_text.starts_with("ATTACH"))
{
auto salt = (dynamic_cast<const ASTCreateUserQuery *>(ast.get())->auth_data)->getSalt().value_or("");
auto salt = (dynamic_cast<const ASTCreateUserQuery *>(ast.get())->authentication_methods.back())->getSalt().value_or("");
EXPECT_TRUE(re2::RE2::FullMatch(salt, expected_ast));
}
else

View File

@ -1674,11 +1674,12 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_
PlannerActionsVisitor actions_visitor(planner_context);
std::unordered_set<std::string> array_join_expressions_output_nodes;
NameSet array_join_column_names;
Names array_join_column_names;
array_join_column_names.reserve(array_join_node.getJoinExpressions().getNodes().size());
for (auto & array_join_expression : array_join_node.getJoinExpressions().getNodes())
{
const auto & array_join_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(array_join_expression);
array_join_column_names.insert(array_join_column_identifier);
array_join_column_names.push_back(array_join_column_identifier);
auto & array_join_expression_column = array_join_expression->as<ColumnNode &>();
auto expression_dag_index_nodes = actions_visitor.visit(array_join_action_dag, array_join_expression_column.getExpressionOrThrow());
@ -1727,8 +1728,13 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_
drop_unused_columns_before_array_join_transform_step->setStepDescription("DROP unused columns before ARRAY JOIN");
plan.addStep(std::move(drop_unused_columns_before_array_join_transform_step));
auto array_join_action = std::make_shared<ArrayJoinAction>(array_join_column_names, array_join_node.isLeft(), planner_context->getQueryContext());
auto array_join_step = std::make_unique<ArrayJoinStep>(plan.getCurrentDataStream(), std::move(array_join_action));
const auto & settings = planner_context->getQueryContext()->getSettingsRef();
auto array_join_step = std::make_unique<ArrayJoinStep>(
plan.getCurrentDataStream(),
ArrayJoin{std::move(array_join_column_names), array_join_node.isLeft()},
settings.enable_unaligned_array_join,
settings.max_block_size);
array_join_step->setStepDescription("ARRAY JOIN");
plan.addStep(std::move(array_join_step));

View File

@ -24,27 +24,30 @@ static ITransformingStep::Traits getTraits()
};
}
ArrayJoinStep::ArrayJoinStep(const DataStream & input_stream_, ArrayJoinActionPtr array_join_)
ArrayJoinStep::ArrayJoinStep(const DataStream & input_stream_, ArrayJoin array_join_, bool is_unaligned_, size_t max_block_size_)
: ITransformingStep(
input_stream_,
ArrayJoinTransform::transformHeader(input_stream_.header, array_join_),
ArrayJoinTransform::transformHeader(input_stream_.header, array_join_.columns),
getTraits())
, array_join(std::move(array_join_))
, is_unaligned(is_unaligned_)
, max_block_size(max_block_size_)
{
}
void ArrayJoinStep::updateOutputStream()
{
output_stream = createOutputStream(
input_streams.front(), ArrayJoinTransform::transformHeader(input_streams.front().header, array_join), getDataStreamTraits());
input_streams.front(), ArrayJoinTransform::transformHeader(input_streams.front().header, array_join.columns), getDataStreamTraits());
}
void ArrayJoinStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
auto array_join_actions = std::make_shared<ArrayJoinAction>(array_join.columns, array_join.is_left, is_unaligned, max_block_size);
pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type)
{
bool on_totals = stream_type == QueryPipelineBuilder::StreamType::Totals;
return std::make_shared<ArrayJoinTransform>(header, array_join, on_totals);
return std::make_shared<ArrayJoinTransform>(header, array_join_actions, on_totals);
});
}
@ -53,8 +56,8 @@ void ArrayJoinStep::describeActions(FormatSettings & settings) const
String prefix(settings.offset, ' ');
bool first = true;
settings.out << prefix << (array_join->is_left ? "LEFT " : "") << "ARRAY JOIN ";
for (const auto & column : array_join->columns)
settings.out << prefix << (array_join.is_left ? "LEFT " : "") << "ARRAY JOIN ";
for (const auto & column : array_join.columns)
{
if (!first)
settings.out << ", ";
@ -68,10 +71,10 @@ void ArrayJoinStep::describeActions(FormatSettings & settings) const
void ArrayJoinStep::describeActions(JSONBuilder::JSONMap & map) const
{
map.add("Left", array_join->is_left);
map.add("Left", array_join.is_left);
auto columns_array = std::make_unique<JSONBuilder::JSONArray>();
for (const auto & column : array_join->columns)
for (const auto & column : array_join.columns)
columns_array->add(column);
map.add("Columns", std::move(columns_array));

View File

@ -1,5 +1,6 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Interpreters/ArrayJoin.h>
namespace DB
{
@ -10,7 +11,7 @@ using ArrayJoinActionPtr = std::shared_ptr<ArrayJoinAction>;
class ArrayJoinStep : public ITransformingStep
{
public:
explicit ArrayJoinStep(const DataStream & input_stream_, ArrayJoinActionPtr array_join_);
ArrayJoinStep(const DataStream & input_stream_, ArrayJoin array_join_, bool is_unaligned_, size_t max_block_size_);
String getName() const override { return "ArrayJoin"; }
void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
@ -18,12 +19,15 @@ public:
void describeActions(JSONBuilder::JSONMap & map) const override;
void describeActions(FormatSettings & settings) const override;
const ArrayJoinActionPtr & arrayJoin() const { return array_join; }
const Names & getColumns() const { return array_join.columns; }
bool isLeft() const { return array_join.is_left; }
private:
void updateOutputStream() override;
ArrayJoinActionPtr array_join;
ArrayJoin array_join;
bool is_unaligned = false;
size_t max_block_size = DEFAULT_BLOCK_SIZE;
};
}

View File

@ -520,13 +520,14 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
if (auto * array_join = typeid_cast<ArrayJoinStep *>(child.get()))
{
const auto & array_join_actions = array_join->arrayJoin();
const auto & keys = array_join_actions->columns;
const auto & keys = array_join->getColumns();
std::unordered_set<std::string_view> keys_set(keys.begin(), keys.end());
const auto & array_join_header = array_join->getInputStreams().front().header;
Names allowed_inputs;
for (const auto & column : array_join_header)
if (!keys.contains(column.name))
if (!keys_set.contains(column.name))
allowed_inputs.push_back(column.name);
if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs))

View File

@ -24,11 +24,11 @@ size_t tryLiftUpArrayJoin(QueryPlan::Node * parent_node, QueryPlan::Nodes & node
if (!(expression_step || filter_step) || !array_join_step)
return 0;
const auto & array_join = array_join_step->arrayJoin();
const auto & array_join_columns = array_join_step->getColumns();
const auto & expression = expression_step ? expression_step->getExpression()
: filter_step->getExpression();
auto split_actions = expression.splitActionsBeforeArrayJoin(array_join->columns);
auto split_actions = expression.splitActionsBeforeArrayJoin(array_join_columns);
/// No actions can be moved before ARRAY JOIN.
if (split_actions.first.trivial())

View File

@ -231,13 +231,15 @@ void buildSortingDAG(QueryPlan::Node & node, std::optional<ActionsDAG> & dag, Fi
{
/// Should ignore limit because ARRAY JOIN can reduce the number of rows in case of an empty array.
/// But in case of LEFT ARRAY JOIN the number of result rows is never smaller.
if (!array_join->arrayJoin()->is_left)
if (!array_join->isLeft())
limit = 0;
const auto & array_joined_columns = array_join->arrayJoin()->columns;
const auto & array_joined_columns = array_join->getColumns();
if (dag)
{
std::unordered_set<std::string_view> keys_set(array_joined_columns.begin(), array_joined_columns.end());
/// Remove array joined columns from outputs.
/// Types are changed after ARRAY JOIN, and we can't use these columns anyway.
ActionsDAG::NodeRawConstPtrs outputs;
@ -245,7 +247,7 @@ void buildSortingDAG(QueryPlan::Node & node, std::optional<ActionsDAG> & dag, Fi
for (const auto & output : dag->getOutputs())
{
if (!array_joined_columns.contains(output->result_name))
if (!keys_set.contains(output->result_name))
outputs.push_back(output);
}

View File

@ -10,20 +10,26 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
Block ArrayJoinTransform::transformHeader(Block header, const ArrayJoinActionPtr & array_join)
template <typename Container>
Block transformHeaderImpl(Block header, const Container & array_join_columns)
{
auto columns = header.getColumnsWithTypeAndName();
array_join->prepare(columns);
ArrayJoinAction::prepare(array_join_columns, columns);
Block res{std::move(columns)};
res.setColumns(res.mutateColumns());
return res;
}
Block ArrayJoinTransform::transformHeader(Block header, const Names & array_join_columns)
{
return transformHeaderImpl(std::move(header), array_join_columns);
}
ArrayJoinTransform::ArrayJoinTransform(
const Block & header_,
ArrayJoinActionPtr array_join_,
bool /*on_totals_*/)
: IInflatingTransform(header_, transformHeader(header_, array_join_))
: IInflatingTransform(header_, transformHeaderImpl(header_, array_join_->columns))
, array_join(std::move(array_join_))
{
/// TODO

View File

@ -22,7 +22,7 @@ public:
String getName() const override { return "ArrayJoinTransform"; }
static Block transformHeader(Block header, const ArrayJoinActionPtr & array_join);
static Block transformHeader(Block header, const Names & array_join_columns);
protected:
void consume(Chunk chunk) override;

View File

@ -17,9 +17,7 @@ void WriteBufferFromHTTPServerResponse::startSendHeaders()
{
headers_started_sending = true;
if (response.getChunkedTransferEncoding())
setChunked();
else if (response.getContentLength() == Poco::Net::HTTPMessage::UNKNOWN_CONTENT_LENGTH)
if (!response.getChunkedTransferEncoding() && response.getContentLength() == Poco::Net::HTTPMessage::UNKNOWN_CONTENT_LENGTH)
{
/// In case there is no Content-Length we cannot use keep-alive,
/// since there is no way to know when the server sends all the
@ -134,6 +132,8 @@ WriteBufferFromHTTPServerResponse::WriteBufferFromHTTPServerResponse(
, response(response_)
, is_http_method_head(is_http_method_head_)
{
if (response.getChunkedTransferEncoding())
setChunked();
}

View File

@ -376,11 +376,16 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl
{
try
{
// For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible
// (if password is specified using double SHA1). Otherwise, SHA256 plugin is used.
if (session->getAuthenticationTypeOrLogInFailure(user_name) == DB::AuthenticationType::SHA256_PASSWORD)
const auto user_authentication_types = session->getAuthenticationTypesOrLogInFailure(user_name);
for (const auto user_authentication_type : user_authentication_types)
{
authPluginSSL();
// For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible
// (if password is specified using double SHA1). Otherwise, SHA256 plugin is used.
if (user_authentication_type == DB::AuthenticationType::SHA256_PASSWORD)
{
authPluginSSL();
}
}
std::optional<String> auth_response = auth_plugin_name == auth_plugin->getName() ? std::make_optional<String>(initial_auth_response) : std::nullopt;

View File

@ -90,15 +90,15 @@ static inline void trySendExceptionToClient(
void StaticRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
applyHTTPResponseHeaders(response, http_response_headers_override);
if (request.getVersion() == Poco::Net::HTTPServerRequest::HTTP_1_1)
response.setChunkedTransferEncoding(true);
auto out = responseWriteBuffer(request, response);
try
{
applyHTTPResponseHeaders(response, http_response_headers_override);
if (request.getVersion() == Poco::Net::HTTPServerRequest::HTTP_1_1)
response.setChunkedTransferEncoding(true);
/// Workaround. Poco does not detect 411 Length Required case.
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST && !request.getChunkedTransferEncoding() && !request.hasContentLength())
throw Exception(ErrorCodes::HTTP_LENGTH_REQUIRED,

View File

@ -1592,7 +1592,17 @@ void TCPHandler::receiveHello()
/// Perform handshake for SSH authentication
if (is_ssh_based_auth)
{
if (session->getAuthenticationTypeOrLogInFailure(user) != AuthenticationType::SSH_KEY)
const auto authentication_types = session->getAuthenticationTypesOrLogInFailure(user);
bool user_supports_ssh_authentication = std::find_if(
authentication_types.begin(),
authentication_types.end(),
[](auto authentication_type)
{
return authentication_type == AuthenticationType::SSH_KEY;
}) != authentication_types.end();
if (!user_supports_ssh_authentication)
throw Exception(ErrorCodes::AUTHENTICATION_FAILED, "Expected authentication with SSH key");
if (client_tcp_protocol_version < DBMS_MIN_REVISION_WITH_SSH_AUTHENTICATION)

View File

@ -22,12 +22,12 @@ namespace DB
namespace ErrorCodes
{
extern const int NETWORK_ERROR;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_SEEK_THROUGH_FILE;
extern const int SEEK_POSITION_OUT_OF_BOUND;
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_FILE_SIZE;
extern const int HDFS_ERROR;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_SEEK_THROUGH_FILE;
extern const int SEEK_POSITION_OUT_OF_BOUND;
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_FILE_SIZE;
}
@ -125,9 +125,12 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
if (bytes_read < 0)
{
throw Exception(ErrorCodes::NETWORK_ERROR,
throw Exception(
ErrorCodes::HDFS_ERROR,
"Fail to read from HDFS: {}, file path: {}. Error: {}",
hdfs_uri, hdfs_file_path, std::string(hdfsGetLastError()));
hdfs_uri,
hdfs_file_path,
std::string(hdfsGetLastError()));
}
if (bytes_read)

View File

@ -348,11 +348,10 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
auto * materialized_storage = storage->as <StorageMaterializedPostgreSQL>();
try
{
auto [postgres_table_schema, postgres_table_name] = getSchemaAndTableName(table_name);
auto table_structure = fetchPostgreSQLTableStructure(tx, postgres_table_name, postgres_table_schema, true, true, true);
if (!table_structure.physical_columns)
auto table_structure = fetchTableStructure(tx, table_name);
if (!table_structure->physical_columns)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No columns");
auto storage_info = StorageInfo(materialized_storage->getNested(), table_structure.physical_columns->attributes);
auto storage_info = StorageInfo(materialized_storage->getNested(), table_structure->physical_columns->attributes);
nested_storages.emplace(table_name, std::move(storage_info));
}
catch (Exception & e)
@ -399,9 +398,7 @@ ASTPtr PostgreSQLReplicationHandler::getCreateNestedTableQuery(StorageMaterializ
postgres::Connection connection(connection_info);
pqxx::nontransaction tx(connection.getRef());
auto [postgres_table_schema, postgres_table_name] = getSchemaAndTableName(table_name);
auto table_structure = std::make_unique<PostgreSQLTableStructure>(fetchPostgreSQLTableStructure(tx, postgres_table_name, postgres_table_schema, true, true, true));
auto table_structure = fetchTableStructure(tx, table_name);
auto table_override = tryGetTableOverride(current_database_name, table_name);
return storage->getCreateNestedTableQuery(std::move(table_structure), table_override ? table_override->as<ASTTableOverride>() : nullptr);
}
@ -415,16 +412,35 @@ StorageInfo PostgreSQLReplicationHandler::loadFromSnapshot(postgres::Connection
std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name);
tx->exec(query_str);
auto table_structure = fetchTableStructure(*tx, table_name);
PostgreSQLTableStructurePtr table_structure;
try
{
table_structure = fetchTableStructure(*tx, table_name);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
table_structure = std::make_unique<PostgreSQLTableStructure>();
}
if (!table_structure->physical_columns)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No table attributes");
auto table_attributes = table_structure->physical_columns->attributes;
auto columns = getTableAllowedColumns(table_name);
/// Load from snapshot, which will show table state before creation of replication slot.
/// Already connected to needed database, no need to add it to query.
auto quoted_name = doubleQuoteWithSchema(table_name);
query_str = fmt::format("SELECT * FROM ONLY {}", quoted_name);
if (columns.empty())
query_str = fmt::format("SELECT * FROM ONLY {}", quoted_name);
else
{
/// We should not use the column list from getTableAllowedColumns because it may have a broken column order
Strings allowed_columns;
for (const auto & column : table_structure->physical_columns->columns)
allowed_columns.push_back(column.name);
query_str = fmt::format("SELECT {} FROM ONLY {}", boost::algorithm::join(allowed_columns, ","), quoted_name);
}
LOG_DEBUG(log, "Loading PostgreSQL table {}.{}", postgres_database, quoted_name);
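(Illustrative aside, not part of the diff.) A minimal standalone sketch of how the snapshot query above is put together when a column list is available; the schema, table, and column names are invented, and only fmt::format and boost::algorithm::join are reused from the real code.

#include <boost/algorithm/string/join.hpp>
#include <fmt/format.h>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    /// Column names come from the fetched table structure (see the comment in the hunk above);
    /// the values used here are invented.
    std::vector<std::string> allowed_columns{"id", "name"};
    std::string quoted_name = "\"public\".\"measurements\"";

    std::string query_str = allowed_columns.empty()
        ? fmt::format("SELECT * FROM ONLY {}", quoted_name)
        : fmt::format("SELECT {} FROM ONLY {}",
                      boost::algorithm::join(allowed_columns, ","), quoted_name);

    std::cout << query_str << '\n';   /// SELECT id,name FROM ONLY "public"."measurements"
}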
@ -700,6 +716,37 @@ void PostgreSQLReplicationHandler::setSetting(const SettingChange & setting)
}
/// Allowed columns for table from materialized_postgresql_tables_list setting
Strings PostgreSQLReplicationHandler::getTableAllowedColumns(const std::string & table_name) const
{
Strings result;
if (tables_list.empty())
return result;
size_t table_pos = tables_list.find(table_name);
if (table_pos == std::string::npos)
{
return result;
}
if (table_pos + table_name.length() + 1 > tables_list.length())
{
return result;
}
String column_list = tables_list.substr(table_pos + table_name.length() + 1);
column_list.erase(std::remove(column_list.begin(), column_list.end(), '"'), column_list.end());
boost::trim(column_list);
if (column_list.empty() || column_list[0] != '(')
return result;
size_t end_bracket_pos = column_list.find(')');
column_list = column_list.substr(1, end_bracket_pos - 1);
splitInto<','>(result, column_list);
return result;
}
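(Illustrative aside, not part of the diff.) A rough standalone sketch of what getTableAllowedColumns extracts from the setting value; it mirrors the substring logic above with plain std:: calls (trimming omitted), and the tables_list value is a made-up example in the already-quoted shape documented further down in this file.

#include <algorithm>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

/// Simplified stand-in for the method above: same substring logic, with the
/// ClickHouse helpers (splitInto, boost::trim) replaced by std equivalents.
std::vector<std::string> allowedColumns(const std::string & tables_list, const std::string & table_name)
{
    std::vector<std::string> result;

    size_t table_pos = tables_list.find(table_name);
    if (table_pos == std::string::npos
        || table_pos + table_name.length() + 1 > tables_list.length())
        return result;

    std::string column_list = tables_list.substr(table_pos + table_name.length() + 1);
    column_list.erase(std::remove(column_list.begin(), column_list.end(), '"'), column_list.end());
    if (column_list.empty() || column_list.front() != '(')
        return result;

    column_list = column_list.substr(1, column_list.find(')') - 1);

    std::istringstream ss(column_list);
    for (std::string column; std::getline(ss, column, ',');)
        result.push_back(column);
    return result;
}

int main()
{
    /// Hypothetical, already-quoted list.
    const std::string tables_list = R"x("public"."table1", "public"."table2"("id","name"))x";

    for (const auto & column : allowedColumns(tables_list, "table2"))
        std::cout << column << '\n';   /// prints "id", then "name"
}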
void PostgreSQLReplicationHandler::shutdownFinal()
{
try
@ -749,11 +796,27 @@ std::set<String> PostgreSQLReplicationHandler::fetchRequiredTables()
Strings expected_tables;
if (!tables_list.empty())
{
splitInto<','>(expected_tables, tables_list);
if (expected_tables.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse tables list: {}", tables_list);
for (auto & table_name : expected_tables)
boost::trim(table_name);
/// Removing columns `table(col1, col2)` from tables_list
String cleared_tables_list = tables_list;
while (true)
{
size_t start_bracket_pos = cleared_tables_list.find('(');
size_t end_bracket_pos = cleared_tables_list.find(')');
if (start_bracket_pos == std::string::npos || end_bracket_pos == std::string::npos)
{
break;
}
cleared_tables_list = cleared_tables_list.substr(0, start_bracket_pos) + cleared_tables_list.substr(end_bracket_pos + 1);
}
splitInto<','>(expected_tables, cleared_tables_list);
if (expected_tables.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse tables list: {}", tables_list);
for (auto & table_name : expected_tables)
{
boost::trim(table_name);
}
}
/// Try to fetch tables list from publication if there is not tables list.
@ -864,18 +927,50 @@ std::set<String> PostgreSQLReplicationHandler::fetchRequiredTables()
/// `schema1.table1, schema2.table2, ...` -> `"schema1"."table1", "schema2"."table2", ...`
/// or
/// `table1, table2, ...` + setting `schema` -> `"schema"."table1", "schema"."table2", ...`
/// or
/// `table1, table2(id,name), ...` + setting `schema` -> `"schema"."table1", "schema"."table2"("id","name"), ...`
if (!tables_list.empty())
{
Strings tables_names;
splitInto<','>(tables_names, tables_list);
if (tables_names.empty())
Strings parts;
splitInto<','>(parts, tables_list);
if (parts.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty list of tables");
bool is_column = false;
WriteBufferFromOwnString buf;
for (auto & table_name : tables_names)
for (auto & part : parts)
{
boost::trim(table_name);
buf << doubleQuoteWithSchema(table_name);
boost::trim(part);
size_t bracket_pos = part.find('(');
if (bracket_pos != std::string::npos)
{
is_column = true;
std::string table_name = part.substr(0, bracket_pos);
boost::trim(table_name);
buf << doubleQuoteWithSchema(table_name);
part = part.substr(bracket_pos + 1);
boost::trim(part);
buf << '(';
buf << doubleQuoteString(part);
}
else if (part.back() == ')')
{
is_column = false;
part = part.substr(0, part.size() - 1);
boost::trim(part);
buf << doubleQuoteString(part);
buf << ')';
}
else if (is_column)
{
buf << doubleQuoteString(part);
}
else
{
buf << doubleQuoteWithSchema(part);
}
buf << ",";
}
tables_list = buf.str();
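(Illustrative aside, not part of the diff.) A self-contained sketch of the quoting pass above applied to a made-up setting value; doubleQuoteWithSchema and doubleQuoteString are replaced by local stand-ins hard-coded to a hypothetical public schema, and the trailing separator matches what the loop above produces.

#include <boost/algorithm/string/trim.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

/// Local stand-ins for the handler's helpers; the real ones also handle
/// explicitly qualified "schema.table" names.
static std::string doubleQuote(const std::string & s) { return "\"" + s + "\""; }
static std::string quoteWithSchema(const std::string & table) { return doubleQuote("public") + "." + doubleQuote(table); }

int main()
{
    /// Hypothetical raw setting value.
    const std::string tables_list = "table1, table2(id,name)";

    std::vector<std::string> parts;
    std::istringstream ss(tables_list);
    for (std::string part; std::getline(ss, part, ',');)
        parts.push_back(part);

    bool is_column = false;
    std::string buf;
    for (auto & part : parts)
    {
        boost::trim(part);
        size_t bracket_pos = part.find('(');
        if (bracket_pos != std::string::npos)
        {
            is_column = true;
            std::string table_name = part.substr(0, bracket_pos);
            boost::trim(table_name);
            buf += quoteWithSchema(table_name);
            part = part.substr(bracket_pos + 1);
            boost::trim(part);
            buf += "(";
            buf += doubleQuote(part);
        }
        else if (part.back() == ')')
        {
            is_column = false;
            part = part.substr(0, part.size() - 1);
            boost::trim(part);
            buf += doubleQuote(part);
            buf += ")";
        }
        else if (is_column)
            buf += doubleQuote(part);
        else
            buf += quoteWithSchema(part);
        buf += ",";
    }

    /// "public"."table1","public"."table2"("id","name"),
    std::cout << buf << '\n';
}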
@ -902,23 +997,28 @@ std::set<String> PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::
}
template<typename T>
PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure(
pqxx::ReplicationTransaction & tx, const std::string & table_name) const
T & tx, const std::string & table_name) const
{
PostgreSQLTableStructure structure;
try
{
auto [schema, table] = getSchemaAndTableName(table_name);
structure = fetchPostgreSQLTableStructure(tx, table, schema, true, true, true);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
auto [schema, table] = getSchemaAndTableName(table_name);
structure = fetchPostgreSQLTableStructure(tx, table, schema, true, true, true, getTableAllowedColumns(table_name));
return std::make_unique<PostgreSQLTableStructure>(std::move(structure));
}
template
PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure(
pqxx::ReadTransaction & tx, const std::string & table_name) const;
template
PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure(
pqxx::ReplicationTransaction & tx, const std::string & table_name) const;
template
PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure(
pqxx::nontransaction & tx, const std::string & table_name) const;
void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPostgreSQL * materialized_storage, const String & postgres_table_name)
{

View File

@ -57,6 +57,8 @@ public:
void setSetting(const SettingChange & setting);
Strings getTableAllowedColumns(const std::string & table_name) const;
void cleanupFunc();
private:
@ -94,7 +96,8 @@ private:
StorageInfo loadFromSnapshot(postgres::Connection & connection, std::string & snapshot_name, const String & table_name, StorageMaterializedPostgreSQL * materialized_storage);
PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const String & table_name) const;
template<typename T>
PostgreSQLTableStructurePtr fetchTableStructure(T & tx, const String & table_name) const;
String doubleQuoteWithSchema(const String & table_name) const;

View File

@ -16,6 +16,7 @@
#include <Parsers/Access/ASTRolesOrUsersSet.h>
#include <Poco/JSON/JSON.h>
#include <Poco/JSON/Object.h>
#include <Poco/JSON/Array.h>
#include <Poco/JSON/Stringifier.h>
#include <Poco/JSONString.h>
@ -48,13 +49,15 @@ ColumnsDescription StorageSystemUsers::getColumnsDescription()
{"name", std::make_shared<DataTypeString>(), "User name."},
{"id", std::make_shared<DataTypeUUID>(), "User ID."},
{"storage", std::make_shared<DataTypeString>(), "Path to the storage of users. Configured in the access_control_path parameter."},
{"auth_type", std::make_shared<DataTypeEnum8>(getAuthenticationTypeEnumValues()),
"Shows the authentication type. "
{"auth_type", std::make_shared<DataTypeArray>(std::make_shared<DataTypeEnum8>(getAuthenticationTypeEnumValues())),
"Shows the authentication types. "
"There are multiple ways of user identification: "
"with no password, with plain text password, with SHA256-encoded password, "
"with double SHA-1-encoded password or with bcrypt-encoded password."
},
{"auth_params", std::make_shared<DataTypeString>(), "Authentication parameters in the JSON format depending on the auth_type."},
{"auth_params", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()),
"Authentication parameters in the JSON format depending on the auth_type."
},
{"host_ip", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()),
"IP addresses of hosts that are allowed to connect to the ClickHouse server."
},
@ -97,8 +100,10 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr conte
auto & column_name = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_id = assert_cast<ColumnUUID &>(*res_columns[column_index++]).getData();
auto & column_storage = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_auth_type = assert_cast<ColumnInt8 &>(*res_columns[column_index++]).getData();
auto & column_auth_params = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_auth_type = assert_cast<ColumnInt8 &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
auto & column_auth_type_offsets = assert_cast<ColumnArray &>(*res_columns[column_index++]).getOffsets();
auto & column_auth_params = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
auto & column_auth_params_offsets = assert_cast<ColumnArray &>(*res_columns[column_index++]).getOffsets();
auto & column_host_ip = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
auto & column_host_ip_offsets = assert_cast<ColumnArray &>(*res_columns[column_index++]).getOffsets();
auto & column_host_names = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
@ -122,7 +127,7 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr conte
auto add_row = [&](const String & name,
const UUID & id,
const String & storage_name,
const AuthenticationData & auth_data,
const std::vector<AuthenticationData> & authentication_methods,
const AllowedClientHosts & allowed_hosts,
const RolesOrUsersSet & default_roles,
const RolesOrUsersSet & grantees,
@ -131,11 +136,8 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr conte
column_name.insertData(name.data(), name.length());
column_id.push_back(id.toUnderType());
column_storage.insertData(storage_name.data(), storage_name.length());
column_auth_type.push_back(static_cast<Int8>(auth_data.getType()));
if (auth_data.getType() == AuthenticationType::LDAP ||
auth_data.getType() == AuthenticationType::KERBEROS ||
auth_data.getType() == AuthenticationType::SSL_CERTIFICATE)
for (const auto & auth_data : authentication_methods)
{
Poco::JSON::Object auth_params_json;
@ -167,16 +169,15 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr conte
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(auth_params_json, oss);
const auto str = oss.str();
const auto authentication_params_str = oss.str();
column_auth_params.insertData(str.data(), str.size());
}
else
{
static constexpr std::string_view empty_json{"{}"};
column_auth_params.insertData(empty_json.data(), empty_json.length());
column_auth_params.insertData(authentication_params_str.data(), authentication_params_str.size());
column_auth_type.insertValue(static_cast<Int8>(auth_data.getType()));
}
column_auth_params_offsets.push_back(column_auth_params.size());
column_auth_type_offsets.push_back(column_auth_type.size());
if (allowed_hosts.containsAnyHost())
{
static constexpr std::string_view str{"::/0"};
@ -247,7 +248,7 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr conte
if (!storage)
continue;
add_row(user->getName(), id, storage->getStorageName(), user->auth_data, user->allowed_client_hosts,
add_row(user->getName(), id, storage->getStorageName(), user->authentication_methods, user->allowed_client_hosts,
user->default_roles, user->grantees, user->default_database);
}
}
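(Illustrative aside, not part of the diff.) In the hunk above, auth_type and auth_params become Array columns, filled as a flattened nested data column plus an offsets column; the sketch below shows that convention with plain std::vector rather than ClickHouse's ColumnArray API, and the enum values are made up.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    /// Flattened values of all rows, in row order (stand-in for the nested data column).
    std::vector<int8_t> auth_type_data;
    /// offsets[i] = index one past the last element belonging to row i
    /// (stand-in for the ColumnArray offsets column).
    std::vector<std::size_t> auth_type_offsets;

    /// Row 0: a user with two authentication methods (values invented).
    auth_type_data.push_back(1);
    auth_type_data.push_back(4);
    auth_type_offsets.push_back(auth_type_data.size());   /// -> 2

    /// Row 1: a user with a single method.
    auth_type_data.push_back(0);
    auth_type_offsets.push_back(auth_type_data.size());   /// -> 3

    /// Reconstruct row 1: elements in [offsets[0], offsets[1]).
    for (std::size_t i = auth_type_offsets[0]; i < auth_type_offsets[1]; ++i)
        std::cout << static_cast<int>(auth_type_data[i]) << '\n';   /// prints 0
}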

View File

@ -105,7 +105,7 @@ setup_logs_replication
clickhouse-client --query "SHOW DATABASES"
clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@ -62,7 +62,7 @@ start_server
setup_logs_replication
clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

View File

@ -89,7 +89,6 @@ class Client:
command = self.command[:]
if stdin is None:
command += ["--multiquery"]
stdin = sql
else:
command += ["--query", sql]

View File

@ -19,6 +19,7 @@ import urllib.parse
import shlex
import urllib3
import requests
from pathlib import Path
try:
# Please, add modules that required for specific tests only here.
@ -52,6 +53,7 @@ from helpers.client import QueryRuntimeException
import docker
from .client import Client
from .random_settings import write_random_settings_config
from .retry_decorator import retry
from .config_cluster import *
@ -60,6 +62,9 @@ HELPERS_DIR = p.dirname(__file__)
CLICKHOUSE_ROOT_DIR = p.join(p.dirname(__file__), "../../..")
LOCAL_DOCKER_COMPOSE_DIR = p.join(CLICKHOUSE_ROOT_DIR, "tests/integration/compose/")
DEFAULT_ENV_NAME = ".env"
DEFAULT_BASE_CONFIG_DIR = os.environ.get(
"CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/"
)
SANITIZER_SIGN = "=================="
@ -444,9 +449,7 @@ class ClickHouseCluster:
self.base_dir = p.dirname(base_path)
self.name = name if name is not None else extract_test_name(base_path)
self.base_config_dir = base_config_dir or os.environ.get(
"CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/"
)
self.base_config_dir = base_config_dir or DEFAULT_BASE_CONFIG_DIR
self.server_bin_path = p.realpath(
server_bin_path
or os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")
@ -1741,6 +1744,7 @@ class ClickHouseCluster:
copy_common_configs=True,
config_root_name="clickhouse",
extra_configs=[],
randomize_settings=True,
) -> "ClickHouseInstance":
"""Add an instance to the cluster.
@ -1845,6 +1849,7 @@ class ClickHouseCluster:
mem_limit=mem_limit,
config_root_name=config_root_name,
extra_configs=extra_configs,
randomize_settings=randomize_settings,
)
docker_compose_yml_dir = get_docker_compose_path()
@ -3463,6 +3468,7 @@ class ClickHouseInstance:
mem_limit=None,
config_root_name="clickhouse",
extra_configs=[],
randomize_settings=True,
):
self.name = name
self.base_cmd = cluster.base_cmd
@ -3531,6 +3537,7 @@ class ClickHouseInstance:
self.with_coredns = with_coredns
self.coredns_config_dir = p.abspath(p.join(base_path, "coredns_config"))
self.use_old_analyzer = use_old_analyzer
self.randomize_settings = randomize_settings
self.main_config_name = main_config_name
self.users_config_name = users_config_name
@ -4602,6 +4609,10 @@ class ClickHouseInstance:
if len(self.custom_dictionaries_paths):
write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir)
if self.randomize_settings and self.base_config_dir == DEFAULT_BASE_CONFIG_DIR:
# If custom main config is used, do not apply random settings to it
write_random_settings_config(Path(users_d_dir) / "0_random_settings.xml")
version = None
version_parts = self.tag.split(".")
if version_parts[0].isdigit() and version_parts[1].isdigit():

View File

@ -363,6 +363,7 @@ def check_tables_are_synchronized(
postgres_database="postgres_database",
materialized_database="test_database",
schema_name="",
columns=["*"],
):
assert_nested_table_is_created(
instance, table_name, materialized_database, schema_name
@ -378,7 +379,7 @@ def check_tables_are_synchronized(
result_query = f"select * from {table_path} order by {order_by};"
expected = instance.query(
f"select * from `{postgres_database}`.`{table_name}` order by {order_by};"
f"select {','.join(columns)} from `{postgres_database}`.`{table_name}` order by {order_by};"
)
result = instance.query(result_query)

View File

@ -0,0 +1,28 @@
import random
def randomize_settings():
yield "max_joined_block_size_rows", random.randint(8000, 100000)
if random.random() < 0.5:
yield "max_block_size", random.randint(8000, 100000)
def write_random_settings_config(destination):
with open(destination, "w") as f:
f.write(
"""
<clickhouse>
<profiles>
<default>
"""
)
for setting, value in randomize_settings():
f.write(f"<{setting}>{value}</{setting}>\n")
f.write(
"""
</default>
</profiles>
</clickhouse>
"""
)

View File

@ -42,9 +42,18 @@ def test_access_control_on_cluster():
ch1.query_with_retry(
"CREATE USER IF NOT EXISTS Alex ON CLUSTER 'cluster'", retry_count=5
)
assert ch1.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"
assert ch2.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"
assert ch3.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"
assert (
ch2.query("SHOW CREATE USER Alex")
== "CREATE USER Alex IDENTIFIED WITH no_password\n"
)
assert (
ch1.query("SHOW CREATE USER Alex")
== "CREATE USER Alex IDENTIFIED WITH no_password\n"
)
assert (
ch3.query("SHOW CREATE USER Alex")
== "CREATE USER Alex IDENTIFIED WITH no_password\n"
)
ch2.query_with_retry(
"GRANT ON CLUSTER 'cluster' SELECT ON *.* TO Alex", retry_count=3

View File

@ -1236,7 +1236,10 @@ def test_system_users_required_privileges():
instance.query("GRANT SELECT ON test.* TO u2 WITH GRANT OPTION")
instance.query(f"RESTORE ALL FROM {backup_name}", user="u2")
assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 DEFAULT ROLE r1\n"
assert (
instance.query("SHOW CREATE USER u1")
== "CREATE USER u1 IDENTIFIED WITH no_password DEFAULT ROLE r1\n"
)
assert instance.query("SHOW GRANTS FOR u1") == TSV(
["GRANT SELECT ON test.* TO u1", "GRANT r1 TO u1"]
)

View File

@ -769,7 +769,8 @@ def test_system_users():
)
assert (
node1.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS custom_a = 123\n"
node1.query("SHOW CREATE USER u1")
== "CREATE USER u1 IDENTIFIED WITH no_password SETTINGS custom_a = 123\n"
)
assert node1.query("SHOW GRANTS FOR u1") == "GRANT SELECT ON default.tbl TO u1\n"

View File

@ -46,7 +46,7 @@ def test_create():
def check():
assert (
instance.query("SHOW CREATE USER u1")
== "CREATE USER u1 SETTINGS PROFILE `s1`\n"
== "CREATE USER u1 IDENTIFIED WITH no_password SETTINGS PROFILE `s1`\n"
)
assert (
instance.query("SHOW CREATE USER u2")
@ -99,7 +99,7 @@ def test_alter():
def check():
assert (
instance.query("SHOW CREATE USER u1")
== "CREATE USER u1 SETTINGS PROFILE `s1`\n"
== "CREATE USER u1 IDENTIFIED WITH no_password SETTINGS PROFILE `s1`\n"
)
assert (
instance.query("SHOW CREATE USER u2")
@ -147,7 +147,10 @@ def test_drop():
instance.query("DROP SETTINGS PROFILE s1")
def check():
assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1\n"
assert (
instance.query("SHOW CREATE USER u1")
== "CREATE USER u1 IDENTIFIED WITH no_password\n"
)
assert (
instance.query("SHOW CREATE SETTINGS PROFILE s2")
== "CREATE SETTINGS PROFILE `s2`\n"

View File

@ -18,12 +18,16 @@ def started_cluster():
def test_enabling_access_management():
instance.query("DROP USER IF EXISTS Alex")
instance.query("CREATE USER Alex", user="default")
assert (
instance.query("SHOW CREATE USER Alex", user="default") == "CREATE USER Alex\n"
instance.query("SHOW CREATE USER Alex", user="default")
== "CREATE USER Alex IDENTIFIED WITH no_password\n"
)
assert (
instance.query("SHOW CREATE USER Alex", user="readonly") == "CREATE USER Alex\n"
instance.query("SHOW CREATE USER Alex", user="readonly")
== "CREATE USER Alex IDENTIFIED WITH no_password\n"
)
assert "Not enough privileges" in instance.query_and_get_error(
"SHOW CREATE USER Alex", user="xyz"
@ -35,3 +39,5 @@ def test_enabling_access_management():
assert "Not enough privileges" in instance.query_and_get_error(
"CREATE USER Robin", user="xyz"
)
instance.query("DROP USER IF EXISTS Alex")

Some files were not shown because too many files have changed in this diff.