Merge branch 'master' into mjoin

chertus 2019-09-27 16:33:07 +03:00
commit c6f0fd09bd
43 changed files with 206 additions and 1262 deletions


@ -21,4 +21,4 @@ ClickHouse is an open-source column-oriented database management system that all
* [ClickHouse Meetup in Tokyo](https://clickhouse.connpass.com/event/147001/) on November 14.
* [ClickHouse Meetup in Istanbul](https://www.eventbrite.com/e/clickhouse-meetup-istanbul-create-blazing-fast-experiences-w-clickhouse-tickets-73101120419) on November 19.
* [ClickHouse Meetup in Ankara](https://www.eventbrite.com/e/clickhouse-meetup-ankara-create-blazing-fast-experiences-w-clickhouse-tickets-73100530655) on November 21.
* [ClickHouse Meetup in Singapore](https://www.meetup.com/Singapore-Clickhouse-Meetup-Group/events/265085331/) on November 23.


@ -64,6 +64,15 @@ if (CMAKE_CROSSCOMPILING)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
# FIXME: broken dependencies
set (USE_SNAPPY OFF)
set (ENABLE_SSL OFF)
set (ENABLE_PROTOBUF OFF)
set (ENABLE_PARQUET OFF)
set (ENABLE_READLINE OFF)
set (ENABLE_ICU OFF)
set (ENABLE_FASTOPS OFF)
endif ()
# Don't know why but CXX_STANDARD doesn't work for cross-compilation


@ -65,7 +65,7 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
endif ()
add_subdirectory (${INTERNAL_ZLIB_NAME})
# todo: make pull to Dead2/zlib-ng and remove:
# TODO: make pull to Dead2/zlib-ng and remove:
# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
@ -125,11 +125,17 @@ if (USE_INTERNAL_SSL_LIBRARY)
endif ()
if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY)
add_subdirectory (mariadb-connector-c-cmake)
target_include_directories(mysqlclient BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
if(OPENSSL_INCLUDE_DIR)
target_include_directories(mysqlclient BEFORE PRIVATE ${OPENSSL_INCLUDE_DIR})
set(CLIENT_PLUGIN_CACHING_SHA2_PASSWORD STATIC)
set(CLIENT_PLUGIN_SHA256_PASSWORD STATIC)
set(CLIENT_PLUGIN_REMOTE_IO OFF)
set(CLIENT_PLUGIN_DIALOG OFF)
set(CLIENT_PLUGIN_CLIENT_ED25519 OFF)
set(CLIENT_PLUGIN_MYSQL_CLEAR_PASSWORD OFF)
set(SKIP_TESTS 1)
if (GLIBC_COMPATIBILITY)
set(LIBM glibc-compatibility)
endif()
add_subdirectory (mariadb-connector-c)
endif ()
if (USE_INTERNAL_RDKAFKA_LIBRARY)

@ -1 +1 @@
Subproject commit c6503d3acc85ca1a7f5e7e38b605d7c9410aac1e
Subproject commit 18016300b00825a3fcbc6fb2aa37ac3e51416f71


@ -1,74 +0,0 @@
set(MARIADB_CLIENT_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c)
set(MARIADB_CLIENT_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c)
set(SRCS
#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/bmove_upp.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/get_password.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_alloc.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_array.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_charset.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_compress.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_context.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_default.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_dtoa.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_errmsg.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_hash.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_init.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_io.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_list.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_ll2str.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_loaddata.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_net.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_password.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_pvio.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_async.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_charset.c
#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_dyncol.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_lib.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_stmt.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_sha1.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_stmt_codec.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_string.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_time.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_tls.c
${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/openssl_crypt.c
#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/gnutls.c
#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/ma_schannel.c
#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/schannel.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/auth_gssapi_client.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/dialog.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_client.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_errmsg.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/mariadb_cleartext.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/my_auth.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/old_password.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sha256_pw.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/caching_sha2_pw.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_client.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_errmsg.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/aurora.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/replication.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/io/remote_io.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_npipe.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_shmem.c
${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_socket.c
#${MARIADB_CLIENT_SOURCE_DIR}/plugins/trace/trace_example.c
${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libmariadb/ma_client_plugin.c
)
if(OPENSSL_LIBRARIES)
list(APPEND SRCS ${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/openssl.c)
endif()
add_library(mysqlclient ${SRCS})
if(OPENSSL_LIBRARIES)
target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES})
target_compile_definitions(mysqlclient PRIVATE -D HAVE_OPENSSL -D HAVE_TLS)
endif()
target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include)
target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/common/include)
target_include_directories(mysqlclient PUBLIC ${MARIADB_CLIENT_SOURCE_DIR}/include)
target_compile_definitions(mysqlclient PRIVATE -D THREAD)


@ -1 +0,0 @@
#include <mysql.h>


@ -1 +0,0 @@
#include <mysqld_error.h>


@ -1,269 +0,0 @@
/*
* Include file constants (processed in LibmysqlIncludeFiles.txt)
*/
#define HAVE_ALLOCA_H 1
/* #undef HAVE_BIGENDIAN */
#define HAVE_SETLOCALE 1
#define HAVE_NL_LANGINFO 1
#define HAVE_ARPA_INET_H 1
#define HAVE_CRYPT_H 1
#define HAVE_DIRENT_H 1
#define HAVE_DLFCN_H 1
#define HAVE_EXECINFO_H 1
#define HAVE_FCNTL_H 1
#define HAVE_FENV_H 1
#define HAVE_FLOAT_H 1
/* #undef HAVE_FPU_CONTROL_H */
#define HAVE_GRP_H 1
/* #undef HAVE_IEEEFP_H */
#define HAVE_LIMITS_H 1
#define HAVE_MALLOC_H 1
#define HAVE_MEMORY_H 1
#define HAVE_NETINET_IN_H 1
#define HAVE_PATHS_H 1
#define HAVE_PWD_H 1
#define HAVE_SCHED_H 1
/* #undef HAVE_SELECT_H */
#define HAVE_STDDEF_H 1
#define HAVE_STDINT_H 1
#define HAVE_STDLIB_H 1
#define HAVE_STRING_H 1
#define HAVE_STRINGS_H 1
/* #undef HAVE_SYNCH_H */
/* #undef HAVE_SYS_FPU_H */
#define HAVE_SYS_IOCTL_H 1
#define HAVE_SYS_IPC_H 1
#define HAVE_SYS_MMAN_H 1
#define HAVE_SYS_PRCTL_H 1
#define HAVE_SYS_SELECT_H 1
#define HAVE_SYS_SHM_H 1
#define HAVE_SYS_SOCKET_H 1
#define HAVE_SYS_STAT_H 1
/* #undef HAVE_SYS_STREAM_H */
#define HAVE_SYS_TIMEB_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_SYS_UN_H 1
/* #undef HAVE_SYSENT_H */
#define HAVE_TERMIO_H 1
#define HAVE_TERMIOS_H 1
#define HAVE_UNISTD_H 1
#define HAVE_UTIME_H 1
#define HAVE_UCONTEXT_H 1
/*
* function definitions - processed in LibmysqlFunctions.txt
*/
#define HAVE_ACCESS 1
/* #undef HAVE_AIOWAIT */
#define HAVE_ALARM 1
/* #undef HAVE_ALLOCA */
#define HAVE_BCMP 1
/* #undef HAVE_BFILL */
/* #undef HAVE_BMOVE */
#define HAVE_BZERO 1
#define HAVE_CLOCK_GETTIME 1
/* #undef HAVE_COMPRESS */
/* #undef HAVE_CRYPT */
#define HAVE_DLERROR 1
#define HAVE_DLOPEN 1
#define HAVE_FCHMOD 1
#define HAVE_FCNTL 1
/* #undef HAVE_FCONVERT */
#define HAVE_FDATASYNC 1
#define HAVE_FESETROUND 1
#define HAVE_FINITE 1
#define HAVE_FSEEKO 1
#define HAVE_FSYNC 1
#define HAVE_GETADDRINFO 1
#define HAVE_GETCWD 1
#define HAVE_GETHOSTBYADDR_R 1
#define HAVE_GETHOSTBYNAME_R 1
/* #undef HAVE_GETHRTIME */
#define HAVE_GETNAMEINFO 1
#define HAVE_GETPAGESIZE 1
#define HAVE_GETPASS 1
/* #undef HAVE_GETPASSPHRASE */
#define HAVE_GETPWNAM 1
#define HAVE_GETPWUID 1
#define HAVE_GETRLIMIT 1
#define HAVE_GETRUSAGE 1
#define HAVE_GETWD 1
#define HAVE_GMTIME_R 1
#define HAVE_INITGROUPS 1
#define HAVE_LDIV 1
#define HAVE_LOCALTIME_R 1
#define HAVE_LOG2 1
#define HAVE_LONGJMP 1
#define HAVE_LSTAT 1
#define HAVE_MADVISE 1
#define HAVE_MALLINFO 1
#define HAVE_MEMALIGN 1
#define HAVE_MEMCPY 1
#define HAVE_MEMMOVE 1
#define HAVE_MKSTEMP 1
#define HAVE_MLOCK 1
#define HAVE_MLOCKALL 1
#define HAVE_MMAP 1
#define HAVE_MMAP64 1
#define HAVE_PERROR 1
#define HAVE_POLL 1
#define HAVE_PREAD 1
/* #undef HAVE_PTHREAD_ATTR_CREATE */
#define HAVE_PTHREAD_ATTR_GETSTACKSIZE 1
/* #undef HAVE_PTHREAD_ATTR_SETPRIO */
#define HAVE_PTHREAD_ATTR_SETSCHEDPARAM 1
#define HAVE_PTHREAD_ATTR_SETSCOPE 1
#define HAVE_PTHREAD_ATTR_SETSTACKSIZE 1
/* #undef HAVE_PTHREAD_CONDATTR_CREATE */
/* #undef HAVE_PTHREAD_INIT */
#define HAVE_PTHREAD_KEY_DELETE 1
#define HAVE_PTHREAD_KILL 1
#define HAVE_PTHREAD_RWLOCK_RDLOCK 1
/* #undef HAVE_PTHREAD_SETPRIO_NP */
#define HAVE_PTHREAD_SETSCHEDPARAM 1
#define HAVE_PTHREAD_SIGMASK 1
/* #undef HAVE_PTHREAD_THREADMASK */
/* #undef HAVE_PTHREAD_YIELD_NP */
#define HAVE_READDIR_R 1
#define HAVE_READLINK 1
#define HAVE_REALPATH 1
#define HAVE_RENAME 1
#define HAVE_SCHED_YIELD 1
#define HAVE_SELECT 1
/* #undef HAVE_SETFD */
/* #undef HAVE_SETFILEPOINTER */
#define HAVE_SIGNAL 1
#define HAVE_SIGACTION 1
/* #undef HAVE_SIGTHREADMASK */
#define HAVE_SIGWAIT 1
#define HAVE_SLEEP 1
#define HAVE_SNPRINTF 1
/* #undef HAVE_SQLITE */
#define HAVE_STPCPY 1
#define HAVE_STRERROR 1
/* #undef HAVE_STRLCPY */
#define HAVE_STRNLEN 1
#define HAVE_STRPBRK 1
#define HAVE_STRSEP 1
#define HAVE_STRSTR 1
#define HAVE_STRTOK_R 1
#define HAVE_STRTOL 1
#define HAVE_STRTOLL 1
#define HAVE_STRTOUL 1
#define HAVE_STRTOULL 1
/* #undef HAVE_TELL */
/* #undef HAVE_THR_SETCONCURRENCY */
/* #undef HAVE_THR_YIELD */
#define HAVE_VASPRINTF 1
#define HAVE_VSNPRINTF 1
/*
* types and sizes
*/
/* Types we may use */
#define SIZEOF_CHAR 1
#if defined(SIZEOF_CHAR)
# define HAVE_CHAR 1
#endif
#define SIZEOF_CHARP 8
#if defined(SIZEOF_CHARP)
# define HAVE_CHARP 1
#endif
#define SIZEOF_SHORT 2
#if defined(SIZEOF_SHORT)
# define HAVE_SHORT 1
#endif
#define SIZEOF_INT 4
#if defined(SIZEOF_INT)
# define HAVE_INT 1
#endif
#define SIZEOF_LONG 8
#if defined(SIZEOF_LONG)
# define HAVE_LONG 1
#endif
#define SIZEOF_LONG_LONG 8
#if defined(SIZEOF_LONG_LONG)
# define HAVE_LONG_LONG 1
#endif
#define SIZEOF_SIGSET_T 128
#if defined(SIZEOF_SIGSET_T)
# define HAVE_SIGSET_T 1
#endif
#define SIZEOF_SIZE_T 8
#if defined(SIZEOF_SIZE_T)
# define HAVE_SIZE_T 1
#endif
/* #undef SIZEOF_UCHAR */
#if defined(SIZEOF_UCHAR)
# define HAVE_UCHAR 1
#endif
#define SIZEOF_UINT 4
#if defined(SIZEOF_UINT)
# define HAVE_UINT 1
#endif
#define SIZEOF_ULONG 8
#if defined(SIZEOF_ULONG)
# define HAVE_ULONG 1
#endif
/* #undef SIZEOF_INT8 */
#if defined(SIZEOF_INT8)
# define HAVE_INT8 1
#endif
/* #undef SIZEOF_UINT8 */
#if defined(SIZEOF_UINT8)
# define HAVE_UINT8 1
#endif
/* #undef SIZEOF_INT16 */
#if defined(SIZEOF_INT16)
# define HAVE_INT16 1
#endif
/* #undef SIZEOF_UINT16 */
#if defined(SIZEOF_UINT16)
# define HAVE_UINT16 1
#endif
/* #undef SIZEOF_INT32 */
#if defined(SIZEOF_INT32)
# define HAVE_INT32 1
#endif
/* #undef SIZEOF_UINT32 */
#if defined(SIZEOF_UINT32)
# define HAVE_UINT32 1
#endif
/* #undef SIZEOF_U_INT32_T */
#if defined(SIZEOF_U_INT32_T)
# define HAVE_U_INT32_T 1
#endif
/* #undef SIZEOF_INT64 */
#if defined(SIZEOF_INT64)
# define HAVE_INT64 1
#endif
/* #undef SIZEOF_UINT64 */
#if defined(SIZEOF_UINT64)
# define HAVE_UINT64 1
#endif
/* #undef SIZEOF_SOCKLEN_T */
#if defined(SIZEOF_SOCKLEN_T)
# define HAVE_SOCKLEN_T 1
#endif
#define SOCKET_SIZE_TYPE socklen_t
#define MARIADB_DEFAULT_CHARSET "latin1"



@ -1,36 +0,0 @@
/* Copyright Abandoned 1996, 1999, 2001 MySQL AB
This file is public domain and comes with NO WARRANTY of any kind */
/* Version numbers for protocol & mysqld */
#ifndef _mariadb_version_h_
#define _mariadb_version_h_
#ifdef _CUSTOMCONFIG_
#include <custom_conf.h>
#else
#define PROTOCOL_VERSION 10
#define MARIADB_CLIENT_VERSION_STR "10.3.6"
#define MARIADB_BASE_VERSION "mariadb-10.3"
#define MARIADB_VERSION_ID 100306
#define MYSQL_VERSION_ID 100306
#define MARIADB_PORT 3306
#define MARIADB_UNIX_ADDR "/var/run/mysqld/mysqld.sock"
#define MYSQL_CONFIG_NAME "my"
#define MARIADB_PACKAGE_VERSION "3.0.6"
#define MARIADB_PACKAGE_VERSION_ID 30006
#define MARIADB_SYSTEM_TYPE "Linux"
#define MARIADB_MACHINE_TYPE "x86_64"
#define MARIADB_PLUGINDIR "lib/mariadb/plugin"
/* mysqld compile time options */
#ifndef MYSQL_CHARSET
#define MYSQL_CHARSET ""
#endif
#endif
/* Source information */
#define CC_SOURCE_REVISION "a0fd36cc5a5313414a5a2ebe9322577a29b4782a"
#endif /* _mariadb_version_h_ */


@ -1,502 +0,0 @@
/* Copyright (C) 2010 - 2012 Sergei Golubchik and Monty Program Ab
2015-2016 MariaDB Corporation AB
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not see <http://www.gnu.org/licenses>
or write to the Free Software Foundation, Inc.,
51 Franklin St., Fifth Floor, Boston, MA 02110, USA */
/**
@file
Support code for the client side (libmariadb) plugins
Client plugins are somewhat different from server plugins, they are simpler.
They do not need to be installed or in any way explicitly loaded on the
client, they are loaded automatically on demand.
One client plugin per shared object, soname *must* match the plugin name.
There is no reference counting and no unloading either.
*/
#if _MSC_VER
/* Silence warnings about variable 'unused' being used. */
#define FORCE_INIT_OF_VARS 1
#endif
#include <ma_global.h>
#include <ma_sys.h>
#include <ma_common.h>
#include <ma_string.h>
#include <ma_pthread.h>
#include "errmsg.h"
#include <mysql/client_plugin.h>
struct st_client_plugin_int {
struct st_client_plugin_int *next;
void *dlhandle;
struct st_mysql_client_plugin *plugin;
};
static my_bool initialized= 0;
static MA_MEM_ROOT mem_root;
static uint valid_plugins[][2]= {
{MYSQL_CLIENT_AUTHENTICATION_PLUGIN, MYSQL_CLIENT_AUTHENTICATION_PLUGIN_INTERFACE_VERSION},
{MARIADB_CLIENT_PVIO_PLUGIN, MARIADB_CLIENT_PVIO_PLUGIN_INTERFACE_VERSION},
{MARIADB_CLIENT_TRACE_PLUGIN, MARIADB_CLIENT_TRACE_PLUGIN_INTERFACE_VERSION},
{MARIADB_CLIENT_CONNECTION_PLUGIN, MARIADB_CLIENT_CONNECTION_PLUGIN_INTERFACE_VERSION},
{0, 0}
};
/*
Loaded plugins are stored in a linked list.
The list is append-only, the elements are added to the head (like in a stack).
The elements are added under a mutex, but the list can be read and traversed
without any mutex because once an element is added to the list, it stays
there. The main purpose of a mutex is to prevent two threads from
loading the same plugin twice in parallel.
*/
struct st_client_plugin_int *plugin_list[MYSQL_CLIENT_MAX_PLUGINS + MARIADB_CLIENT_MAX_PLUGINS];
#ifdef THREAD
static pthread_mutex_t LOCK_load_client_plugin;
#endif
extern struct st_mysql_client_plugin mysql_native_password_client_plugin;
extern struct st_mysql_client_plugin mysql_old_password_client_plugin;
extern struct st_mysql_client_plugin pvio_socket_client_plugin;
extern struct st_mysql_client_plugin sha256_password_client_plugin;
extern struct st_mysql_client_plugin caching_sha2_password_client_plugin;
struct st_mysql_client_plugin *mysql_client_builtins[]=
{
(struct st_mysql_client_plugin *)&mysql_native_password_client_plugin,
(struct st_mysql_client_plugin *)&mysql_old_password_client_plugin,
(struct st_mysql_client_plugin *)&pvio_socket_client_plugin,
(struct st_mysql_client_plugin *)&sha256_password_client_plugin,
(struct st_mysql_client_plugin *)&caching_sha2_password_client_plugin,
0
};
static int is_not_initialized(MYSQL *mysql, const char *name)
{
if (initialized)
return 0;
my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD,
SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD),
name, "not initialized");
return 1;
}
static int get_plugin_nr(uint type)
{
uint i= 0;
for(; valid_plugins[i][1]; i++)
if (valid_plugins[i][0] == type)
return i;
return -1;
}
static const char *check_plugin_version(struct st_mysql_client_plugin *plugin, unsigned int version)
{
if (plugin->interface_version < version ||
(plugin->interface_version >> 8) > (version >> 8))
return "Incompatible client plugin interface";
return 0;
}
/**
finds a plugin in the list
@param name plugin name to search for
@param type plugin type
@note this does NOT necessarily need a mutex, take care!
@retval a pointer to a found plugin or 0
*/
static struct st_mysql_client_plugin *find_plugin(const char *name, int type)
{
struct st_client_plugin_int *p;
int plugin_nr= get_plugin_nr(type);
DBUG_ASSERT(initialized);
if (plugin_nr == -1)
return 0;
if (!name)
return plugin_list[plugin_nr]->plugin;
for (p= plugin_list[plugin_nr]; p; p= p->next)
{
if (strcmp(p->plugin->name, name) == 0)
return p->plugin;
}
return NULL;
}
/**
verifies the plugin and adds it to the list
@param mysql MYSQL structure (for error reporting)
@param plugin plugin to install
@param dlhandle a handle to the shared object (returned by dlopen)
or 0 if the plugin was not dynamically loaded
@param argc number of arguments in the 'va_list args'
@param args arguments passed to the plugin initialization function
@retval a pointer to an installed plugin or 0
*/
static struct st_mysql_client_plugin *
add_plugin(MYSQL *mysql, struct st_mysql_client_plugin *plugin, void *dlhandle,
int argc, va_list args)
{
const char *errmsg;
struct st_client_plugin_int plugin_int, *p;
char errbuf[1024];
int plugin_nr;
DBUG_ASSERT(initialized);
plugin_int.plugin= plugin;
plugin_int.dlhandle= dlhandle;
if ((plugin_nr= get_plugin_nr(plugin->type)) == -1)
{
errmsg= "Unknown client plugin type";
goto err1;
}
if ((errmsg= check_plugin_version(plugin, valid_plugins[plugin_nr][1])))
goto err1;
/* Call the plugin initialization function, if any */
if (plugin->init && plugin->init(errbuf, sizeof(errbuf), argc, args))
{
errmsg= errbuf;
goto err1;
}
p= (struct st_client_plugin_int *)
ma_memdup_root(&mem_root, (char *)&plugin_int, sizeof(plugin_int));
if (!p)
{
errmsg= "Out of memory";
goto err2;
}
#ifdef THREAD
safe_mutex_assert_owner(&LOCK_load_client_plugin);
#endif
p->next= plugin_list[plugin_nr];
plugin_list[plugin_nr]= p;
return plugin;
err2:
if (plugin->deinit)
plugin->deinit();
err1:
my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN,
ER(CR_AUTH_PLUGIN_CANNOT_LOAD), plugin->name, errmsg);
if (dlhandle)
(void)dlclose(dlhandle);
return NULL;
}
/**
Loads plugins which are specified in the environment variable
LIBMYSQL_PLUGINS.
Multiple plugins must be separated by a semicolon. This function doesn't
return or log an error.
The function is called by mysql_client_plugin_init
@todo
Support extended syntax, passing parameters to plugins, for example
LIBMYSQL_PLUGINS="plugin1(param1,param2);plugin2;..."
or
LIBMYSQL_PLUGINS="plugin1=int:param1,str:param2;plugin2;..."
*/
static void load_env_plugins(MYSQL *mysql)
{
char *plugs, *free_env, *s= getenv("LIBMYSQL_PLUGINS");
if (ma_check_env_str(s))
return;
free_env= strdup(s);
plugs= s= free_env;
do {
if ((s= strchr(plugs, ';')))
*s= '\0';
mysql_load_plugin(mysql, plugs, -1, 0);
plugs= s + 1;
} while (s);
free(free_env);
}
/********** extern functions to be used by libmariadb *********************/
/**
Initializes the client plugin layer.
This function must be called before any other client plugin function.
@retval 0 successful
@retval != 0 error occurred
*/
int mysql_client_plugin_init()
{
MYSQL mysql;
struct st_mysql_client_plugin **builtin;
va_list unused;
LINT_INIT_STRUCT(unused);
if (initialized)
return 0;
memset(&mysql, 0, sizeof(mysql)); /* dummy mysql for set_mysql_extended_error */
pthread_mutex_init(&LOCK_load_client_plugin, MY_MUTEX_INIT_SLOW);
ma_init_alloc_root(&mem_root, 128, 128);
memset(&plugin_list, 0, sizeof(plugin_list));
initialized= 1;
pthread_mutex_lock(&LOCK_load_client_plugin);
for (builtin= mysql_client_builtins; *builtin; builtin++)
add_plugin(&mysql, *builtin, 0, 0, unused);
pthread_mutex_unlock(&LOCK_load_client_plugin);
load_env_plugins(&mysql);
return 0;
}
/**
Deinitializes the client plugin layer.
Unloads all client plugins and frees any associated resources.
*/
void mysql_client_plugin_deinit()
{
int i;
struct st_client_plugin_int *p;
if (!initialized)
return;
for (i=0; i < MYSQL_CLIENT_MAX_PLUGINS; i++)
for (p= plugin_list[i]; p; p= p->next)
{
if (p->plugin->deinit)
p->plugin->deinit();
if (p->dlhandle)
(void)dlclose(p->dlhandle);
}
memset(&plugin_list, 0, sizeof(plugin_list));
initialized= 0;
ma_free_root(&mem_root, MYF(0));
pthread_mutex_destroy(&LOCK_load_client_plugin);
}
/************* public facing functions, for client consumption *********/
/* see <mysql/client_plugin.h> for a full description */
struct st_mysql_client_plugin * STDCALL
mysql_client_register_plugin(MYSQL *mysql,
struct st_mysql_client_plugin *plugin)
{
va_list unused;
LINT_INIT_STRUCT(unused);
if (is_not_initialized(mysql, plugin->name))
return NULL;
pthread_mutex_lock(&LOCK_load_client_plugin);
/* make sure the plugin wasn't loaded meanwhile */
if (find_plugin(plugin->name, plugin->type))
{
my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD,
SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD),
plugin->name, "it is already loaded");
plugin= NULL;
}
else
plugin= add_plugin(mysql, plugin, 0, 0, unused);
pthread_mutex_unlock(&LOCK_load_client_plugin);
return plugin;
}
/* see <mysql/client_plugin.h> for a full description */
struct st_mysql_client_plugin * STDCALL
mysql_load_plugin_v(MYSQL *mysql, const char *name, int type,
int argc, va_list args)
{
const char *errmsg;
#ifdef _WIN32
char errbuf[1024];
#endif
char dlpath[FN_REFLEN+1];
void *sym, *dlhandle = NULL;
struct st_mysql_client_plugin *plugin;
char *env_plugin_dir= getenv("MARIADB_PLUGIN_DIR");
CLEAR_CLIENT_ERROR(mysql);
if (is_not_initialized(mysql, name))
return NULL;
pthread_mutex_lock(&LOCK_load_client_plugin);
/* make sure the plugin wasn't loaded meanwhile */
if (type >= 0 && find_plugin(name, type))
{
errmsg= "it is already loaded";
goto err;
}
/* Compile dll path */
snprintf(dlpath, sizeof(dlpath) - 1, "%s/%s%s",
mysql->options.extension && mysql->options.extension->plugin_dir ?
mysql->options.extension->plugin_dir : (env_plugin_dir) ? env_plugin_dir :
MARIADB_PLUGINDIR, name, SO_EXT);
/* Open new dll handle */
if (!(dlhandle= dlopen((const char *)dlpath, RTLD_NOW)))
{
#ifdef _WIN32
char winmsg[255];
size_t len;
winmsg[0] = 0;
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
GetLastError(),
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
winmsg, 255, NULL);
len= strlen(winmsg);
while (len > 0 && (winmsg[len - 1] == '\n' || winmsg[len - 1] == '\r'))
len--;
if (len)
winmsg[len] = 0;
snprintf(errbuf, sizeof(errbuf), "%s Library path is '%s'", winmsg, dlpath);
errmsg= errbuf;
#else
errmsg= dlerror();
#endif
goto err;
}
if (!(sym= dlsym(dlhandle, plugin_declarations_sym)))
{
errmsg= "not a plugin";
(void)dlclose(dlhandle);
goto err;
}
plugin= (struct st_mysql_client_plugin*)sym;
if (type >=0 && type != plugin->type)
{
errmsg= "type mismatch";
goto err;
}
if (strcmp(name, plugin->name))
{
errmsg= "name mismatch";
goto err;
}
if (type < 0 && find_plugin(name, plugin->type))
{
errmsg= "it is already loaded";
goto err;
}
plugin= add_plugin(mysql, plugin, dlhandle, argc, args);
pthread_mutex_unlock(&LOCK_load_client_plugin);
return plugin;
err:
if (dlhandle)
dlclose(dlhandle);
pthread_mutex_unlock(&LOCK_load_client_plugin);
my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN,
ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, errmsg);
return NULL;
}
/* see <mysql/client_plugin.h> for a full description */
struct st_mysql_client_plugin * STDCALL
mysql_load_plugin(MYSQL *mysql, const char *name, int type, int argc, ...)
{
struct st_mysql_client_plugin *p;
va_list args;
va_start(args, argc);
p= mysql_load_plugin_v(mysql, name, type, argc, args);
va_end(args);
return p;
}
/* see <mysql/client_plugin.h> for a full description */
struct st_mysql_client_plugin * STDCALL
mysql_client_find_plugin(MYSQL *mysql, const char *name, int type)
{
struct st_mysql_client_plugin *p;
int plugin_nr= get_plugin_nr(type);
if (is_not_initialized(mysql, name))
return NULL;
if (plugin_nr == -1)
{
my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN,
ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, "invalid type");
}
if ((p= find_plugin(name, type)))
return p;
/* not found, load it */
return mysql_load_plugin(mysql, name, type, 0);
}


@ -114,6 +114,7 @@
M(SelectedRanges, "Number of (non-adjacent) ranges in all data parts selected to read from a MergeTree table.") \
M(SelectedMarks, "Number of marks (index granules) selected to read from a MergeTree table.") \
\
M(Merge, "Number of launched background merges.") \
M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \
M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge.") \
M(MergesTimeMilliseconds, "Total time spent for background merges.")\


@ -39,6 +39,7 @@ namespace ProfileEvents
extern const Event MergedRows;
extern const Event MergedUncompressedBytes;
extern const Event MergesTimeMilliseconds;
extern const Event Merge;
}
namespace CurrentMetrics
@ -508,7 +509,10 @@ public:
{
ProfileEvents::increment(ProfileEvents::MergedUncompressedBytes, value.read_bytes);
if (stage.is_first)
{
ProfileEvents::increment(ProfileEvents::MergedRows, value.read_rows);
ProfileEvents::increment(ProfileEvents::Merge);
}
updateWatch();
merge_entry->bytes_read_uncompressed += value.read_bytes;


@ -0,0 +1,14 @@
DROP TABLE IF EXISTS new_table_test;
DROP TABLE IF EXISTS check_table_test;
CREATE TABLE new_table_test(name String) ENGINE = MergeTree ORDER BY name;
INSERT INTO new_table_test VALUES ('test');
CREATE TABLE check_table_test(value1 UInt64, value2 UInt64) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO check_table_test (value1) SELECT value FROM system.events WHERE event = 'Merge';
OPTIMIZE TABLE new_table_test FINAL;
INSERT INTO check_table_test (value2) SELECT value FROM system.events WHERE event = 'Merge';
SELECT count() FROM check_table_test WHERE value2 > value1;
DROP TABLE new_table_test;
DROP TABLE check_table_test;
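To inspect the new counter interactively, a minimal sketch (querying `system.events` the same way the test above does):
```sql
SELECT event, value, description
FROM system.events
WHERE event = 'Merge'
```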


@ -377,52 +377,90 @@ By default: 1,000,000. It only works when reading from MergeTree engines.
ClickHouse uses multiple threads when reading from [MergeTree*](../table_engines/mergetree.md) tables. This setting turns on/off the uniform distribution of reading tasks over the working threads. The algorithm of the uniform distribution aims to make execution time for all the threads approximately equal in a `SELECT` query.
**Possible values**
Possible values:
- 0 — Do not use uniform read distribution.
- 1 — Use uniform read distribution.
**Default value**: 1.
Default value: 1.
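For example, a sketch that disables it for the current session (assuming the setting described here is `merge_tree_uniform_read_distribution`):
```sql
SET merge_tree_uniform_read_distribution = 0
```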
## merge_tree_min_rows_for_concurrent_read {#setting-merge_tree_min_rows_for_concurrent_read}
If the number of rows to be read from a file of a [MergeTree*](../table_engines/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read`, then ClickHouse tries to perform concurrent reading from this file on several threads.
**Possible values**
Possible values:
Any positive integer.
- Any positive integer.
Default value: 163840.
## merge_tree_min_bytes_for_concurrent_read {#setting-merge_tree_min_bytes_for_concurrent_read}
If the number of bytes to read from one file of a [MergeTree*](../table_engines/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to perform concurrent reading from this file on several threads.
Possible values:
- Any positive integer.
Default value: 240 ✕ 1024 ✕ 1024.
**Default value**: 163840.
## merge_tree_min_rows_for_seek {#setting-merge_tree_min_rows_for_seek}
If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file, but reads the data sequentially.
**Possible values**
Possible values:
Any positive integer.
- Any positive integer.
Default value: 0.
## merge_tree_min_bytes_for_seek {#setting-merge_tree_min_bytes_for_seek}
If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse does not seek through the file, but reads the data sequentially.
Possible values:
- Any positive integer.
Default value: 0.
**Default value**: 0.
## merge_tree_coarse_index_granularity {#setting-merge_tree_coarse_index_granularity}
When searching data, ClickHouse checks the data marks in the index file. If ClickHouse finds that required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.
**Possible values**
Possible values:
Any positive even integer.
- Any positive even integer.
**Default value**: 8.
Default value: 8.
## merge_tree_max_rows_to_use_cache {#setting-merge_tree_max_rows_to_use_cache}
If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it does not use the cash of uncompressed blocks. The [uncompressed_cache_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it does not use the cache of uncompressed blocks. The [uncompressed_cache_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
**Possible values**
The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries reading a large amount of data.
Any positive integer.
Possible values:
- Any positive integer.
Default value: 128 ✕ 8192.
## merge_tree_max_bytes_to_use_cache {#setting-merge_tree_max_bytes_to_use_cache}
If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it does not use the cache of uncompressed blocks. The [uncompressed_cache_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries reading a large amount of data.
Possible values:
- Any positive integer.
Default value: 1920 ✕ 1024 ✕ 1024.
**Default value**: 1048576.
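As an illustration, these thresholds can be overridden per session; a minimal sketch using the default values quoted above:
```sql
SET merge_tree_min_rows_for_concurrent_read = 163840;
SET merge_tree_max_bytes_to_use_cache = 2013265920; -- 1920 * 1024 * 1024
```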
## min_bytes_to_use_direct_io {#settings-min_bytes_to_use_direct_io}


@ -251,6 +251,8 @@ Columns:
- `value` ([Int64](../data_types/int_uint.md)) — Metric value.
- `description` ([String](../data_types/string.md)) — Metric description.
You can find the list of supported metrics in the [dbms/src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/CurrentMetrics.cpp) source file of ClickHouse.
**Example**
```sql


@ -58,7 +58,7 @@ Multiple path components can have globs. To be processed, a file should exist
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in range from N to M including both borders.
Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)).
Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function.
**Example**
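A minimal sketch of such globs with the HDFS engine (an assumption; the namenode address and file names are illustrative):
```sql
CREATE TABLE hdfs_glob_example (name String, value UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/some_dir/file_{1..9}', 'TSV')
```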


@ -78,11 +78,14 @@ For a description of parameters, see the [CREATE query description](../../query_
For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl)
- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`:
- `index_granularity` — The granularity of an index. The number of data rows between the "marks" of an index. By default, 8192. For the list of available parameters, see [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Storages/MergeTree/MergeTreeSettings.h).
- `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage).
- `index_granularity_bytes` — Maximum size of a data granule in bytes. Default value: 10Mb. To restrict the granule size only by the number of rows, set it to 0 (not recommended). See [Data Storage](#mergetree-data-storage).
- `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting to improve the efficiency of `SELECT` queries.
- `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in "Server configuration parameters".
- `min_merge_bytes_to_use_direct_io` — The minimum data volume of a merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
<a name="mergetree_setting-merge_with_ttl_timeout"></a>
- `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day).
- `write_final_mark` — Enables or disables writing the final index mark at the end of data part. Default value: 1. Don't turn it off.
**Example of Sections Setting**
@ -126,7 +129,7 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
The `MergeTree` engine is configured in the same way as in the example above for the main engine configuration method.
</details>
## Data Storage
## Data Storage {#mergetree-data-storage}
A table consists of data parts sorted by primary key.
@ -134,9 +137,9 @@ When data is inserted in a table, separate data parts are created and each of th
Data belonging to different partitions are separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.
For each data part, ClickHouse creates an index file that contains the primary key value for each index row ("mark"). Index row numbers are defined as `n * index_granularity`. The maximum value `n` is equal to the integer part of dividing the total number of rows by the `index_granularity`. For each column, the "marks" are also written for the same index rows as the primary key. These "marks" allow you to find the data directly in the columns.
Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn't split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for that row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it is in the primary key or not, ClickHouse also stores the same marks. These marks allow finding data directly in the columns.
You can use a single large table and continually add data to it in small chunks; this is what the `MergeTree` engine is intended for.
The size of a granule is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.
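A sketch of pinning these limits explicitly at table creation (table and column names are illustrative; 10485760 bytes is the 10Mb default mentioned above):
```sql
CREATE TABLE granule_example (key UInt64, payload String)
ENGINE = MergeTree
ORDER BY key
-- up to 8192 rows or ~10 MB per granule, whichever is reached first
SETTINGS index_granularity = 8192, index_granularity_bytes = 10485760
```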
## Primary Keys and Indexes in Queries {#primary-keys-and-indexes-in-queries}
@ -159,9 +162,9 @@ If the data query specifies:
The examples above show that it is always more effective to use an index than a full scan.
A sparse index allows extra data to be read. When reading a single range of the primary key, up to `index_granularity * 2` extra rows in each data block can be read. In most cases, ClickHouse performance does not degrade when `index_granularity = 8192`.
A sparse index allows extra data to be read. When reading a single range of the primary key, up to `index_granularity * 2` extra rows in each data block can be read.
Sparse indexes allow you to work with a very large number of table rows, because such indexes are always stored in the computer's RAM.
Sparse indexes allow you to work with a very large number of table rows, because in most cases such indexes fit in the computer's RAM.
ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.


@ -107,8 +107,6 @@ It is not possible to set default values for elements in nested data structures.
### Constraints {#constraints}
WARNING: This feature is experimental. Correct work is not guaranteed on non-MergeTree family engines.
Along with column descriptions, constraints can be defined:
```sql
@ -125,15 +123,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
Adding a large amount of constraints can negatively affect the performance of big `INSERT` queries.
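For instance, a minimal sketch of a table with a single constraint (all names are illustrative):
```sql
CREATE TABLE constraint_example
(
    id UInt64,
    price Float64,
    CONSTRAINT price_is_positive CHECK price >= 0  -- INSERTs violating the check are rejected
)
ENGINE = MergeTree
ORDER BY id
```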
### TTL expression
### TTL Expression
Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl).
## Column Compression Codecs
### Column Compression Codecs
By default, ClickHouse applies to columns the compression method, defined in [server settings](../operations/server_settings/settings.md#compression). Also, you can define compression method for each individual column in the `CREATE TABLE` query.
By default, ClickHouse applies the compression method, defined in [server settings](../operations/server_settings/settings.md#compression), to columns. You can also define the compression method for each individual column in the `CREATE TABLE` query.
```
```sql
CREATE TABLE codec_example
(
dt Date CODEC(ZSTD),
@ -146,12 +144,12 @@ ENGINE = <Engine>
...
```
If a codec is specified, the default codec doesn't apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codecs combination for you project, pass benchmarks, similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article.
If a codec is specified, the default codec doesn't apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for your project, run benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article.
!!!warning
You cannot decompress ClickHouse database files with external utilities, for example, `lz4`. Use the special utility, [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor).
!!!warning "Warning"
You can't decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/yandex/ClickHouse/tree/master/dbms/programs/compressor) utility.
Compression is supported for the table engines:
Compression is supported for the following table engines:
- [MergeTree](../operations/table_engines/mergetree.md) family
- [Log](../operations/table_engines/log_family.md) family
@ -160,9 +158,9 @@ Compression is supported for the table engines:
ClickHouse supports common purpose codecs and specialized codecs.
### Specialized codecs {#create-query-specialized-codecs}
#### Specialized Codecs {#create-query-specialized-codecs}
These codecs are designed to make compression more effective using specifities of the data. Some of this codecs don't compress data by itself, but they prepare data to be compressed better by common purpose codecs.
These codecs are designed to make compression more effective by using specific features of data. Some of these codecs don't compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.
Specialized codecs:
@ -182,7 +180,7 @@ CREATE TABLE codec_example
ENGINE = MergeTree()
```
### Common purpose codecs {#create-query-common-purpose-codecs}
#### Common purpose codecs {#create-query-common-purpose-codecs}
Codecs:
@ -191,7 +189,7 @@ Codecs:
- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: [1, 12]. Recommended level range: [4, 9].
- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: [1, 22]. Default value: 1.
High compression levels useful for asymmetric scenarios, like compress once, decompress a lot of times. Greater levels stands for better compression and higher CPU usage.
High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.
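For example, a sketch that trades CPU time for compression on a rarely read column (names are illustrative):
```sql
CREATE TABLE archive_example
(
    d Date,
    blob String CODEC(ZSTD(22))  -- maximum ZSTD level: slow to write, compact to store
)
ENGINE = MergeTree
ORDER BY d
```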
## Temporary Tables


@ -0,0 +1 @@
../../en/development/build_cross.md


@ -55,7 +55,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2
- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in the range from `N` to `M`, including both borders.
Constructions with `{}` are similar to the [remote](remote.md) table function.
Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function.
**Example**


@ -45,7 +45,7 @@ FROM (
└─────────────────────────────────────────────────────────────────────────┘
```
You can visualize a histogram with the [bar](../other_functions.md#function-bar) function, for example:
You can visualize a histogram with the [bar](../functions/other_functions.md#function-bar) function, for example:
```sql
WITH histogram(5)(rand() % 100) AS hist


@ -105,9 +105,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
### Constraints {#constraints}
WARNING: This feature is experimental, supported for MergeTree (correct operation on other table engine types is not guaranteed).
Along with column declarations, you can declare constraints on the values in table columns:
```sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@ -127,56 +125,70 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
Defines the storage time for values. Can be specified only for MergeTree-family tables. For details, see [TTL for columns and tables](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl).
## Column Compression Codecs
### Column Compression Codecs
In addition to the default data compression defined by the [server configuration settings](../operations/server_settings/settings.md#compression), you can set compression for each individual column.
Supported compression algorithms:
- `NONE` — No compression.
- `LZ4` — The lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies fast LZ4 compression.
- `LZ4HC[(level)]` — The LZ4 HC (high compression) algorithm with a configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: [1, 12]. Recommended level range: [4, 9].
- `ZSTD[(level)]` — The [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with a configurable `level`. Possible levels: [1, 22]. Default value: 1.
- `Delta(delta_bytes)` — A compression approach in which source values are replaced by the difference of two neighboring values. Up to `delta_bytes` bytes are used for storing the differences, i.e. `delta_bytes` is the maximum size of the source values. Possible `delta_bytes` values: 1, 2, 4, 8. The default `delta_bytes` value is `sizeof(type)` if the computed size is 1, 2, 4, or 8; in all other cases, it is 1.
- `DoubleDelta` — Compresses values down to 1 bit by storing the differences. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series. Can be used with any fixed-size data type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte values: 5-bit prefixes instead of 4-bit prefixes. For details, see the "Compressing Time Stamps" section of [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `Gorilla` — Compresses values down to 1 bit. Effective for storing series of slowly changing floating-point numbers, because the best compression is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For details, see the "Compressing Values" section of [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
High compression levels are useful for asymmetric scenarios, such as those requiring one-time compression and repeated decompression. Higher levels provide better compression at the cost of higher computational resource consumption.
!!! warning "Warning"
ClickHouse database files can't be decompressed with external utilities like `lz4`. Use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utility.
Usage example:
By default, ClickHouse applies the compression method defined in the [server configuration](../operations/server_settings/settings.md#compression) to a column. You can also define the compression method for each individual column in the `CREATE TABLE` query.
```sql
CREATE TABLE codec_example
(
dt Date CODEC(ZSTD), /* the default compression level is used */
dt Date CODEC(ZSTD),
ts DateTime CODEC(LZ4HC),
float_value Float32 CODEC(NONE),
double_value Float64 CODEC(LZ4HC(9))
)
ENGINE = MergeTree
PARTITION BY tuple()
ORDER BY dt
```
Codecs can be combined. If a column is given its own codec sequence, the general table codec is not applied (it must be specified explicitly in the sequence if needed). The example below shows an optimization for storing time-series metrics.
As a rule, values of the same metric `path` do not differ much from one another, so it is more efficient to use delta compression instead of writing the whole number:
```sql
CREATE TABLE timeseries_example
(
dt Date,
ts DateTime,
path String,
value Float32 CODEC(Delta, ZSTD)
)
ENGINE = MergeTree
PARTITION BY dt
ORDER BY (path, ts)
ENGINE = <Engine>
...
```
If a codec is set for a column, the default codec is not applied. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To choose the best codec combination for your project, run comparative tests like those described in the Altinity article [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse).
!!!warning "Warning"
You can't decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/yandex/ClickHouse/tree/master/dbms/programs/compressor) utility.
Compression is supported for the following table engines:
- [MergeTree family](../operations/table_engines/mergetree.md)
- [Log family](../operations/table_engines/log_family.md)
- [Set](../operations/table_engines/set.md)
- [Join](../operations/table_engines/join.md)
ClickHouse поддерживает кодеки общего назначения и специализированные кодеки.
#### Специализированные кодеки {#create-query-specialized-codecs}
Эти кодеки разработаны для того, чтобы, используя особенности данных сделать сжатие более эффективным. Некоторые из этих кодеков не сжимают данные самостоятельно. Они готовят данные для кодеков общего назначения, которые сжимают подготовленные данные эффективнее, чем неподготовленные.
Специализированные кодеки:
- `Delta(delta_bytes)` — Метод, в котором исходные значения заменяются разностью двух соседних значений, за исключением первого значения, которое остаётся неизменным. Для хранения разниц используется до `delta_bytes`, т.е. `delta_bytes` — это максимальный размер исходных данных. Возможные значения `delta_bytes`: 1, 2, 4, 8. Значение по умолчанию для `delta_bytes` равно `sizeof(type)`, если результат 1, 2, 4, or 8. Во всех других случаях — 1.
- `DoubleDelta` — Computes the delta of deltas and stores it in a compact binary form (see the sketch after this list). The best compression ratio is achieved for monotonic sequences with a constant stride, such as time-series data. Can be used with any fixed-size data type. Implements the algorithm used in the Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte values: 5-bit prefixes instead of 4-bit prefixes. For details, see "Compressing Time Stamps" in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `Gorilla` — Computes the XOR between the current and the previous value and stores it in a compact binary form. Efficient for storing series of slowly changing floating-point values, because the best compression ratio is achieved when neighboring values are identical. Implements the algorithm used in the Gorilla TSDB, adapting it to work with 64-bit values. For details, see "Compressing Values" in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `T64` — A compression approach that crops unused high bits of integer values (including `Enum`, `Date`, and `DateTime`). At each step of the algorithm, the codec takes a block of 64 values, puts them into a 64×64 bit matrix, transposes it, crops the unused bits, and returns the rest as a sequence. Unused bits are the bits that do not differ between the minimum and maximum values over the whole data part.
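To make the `Delta` and `DoubleDelta` transforms concrete, here is a sketch of the arithmetic only (not the codecs' actual bit layout), using the built-in `arrayDifference` function on a series with a constant stride. `Delta` stores the first value unchanged and then only the neighboring differences; `DoubleDelta` goes one step further and stores the differences of those differences, which collapse to zeros for a constant stride (`arrayDifference` pads the first position with 0, whereas the codecs keep the first raw value):

```sql
SELECT
    [100, 103, 106, 109, 112] AS raw,
    arrayDifference(raw) AS delta,          -- [0, 3, 3, 3, 3]
    arrayDifference(delta) AS double_delta  -- [0, 3, 0, 0, 0]
```

The long run of zeros in `double_delta` is exactly what makes a general-purpose codec applied afterwards so effective on such sequences.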
The `DoubleDelta` and `Gorilla` codecs are used in the Gorilla TSDB as components of its compression algorithm. The Gorilla approach is effective in scenarios where the data is a set of values that change slowly over time: timestamps compress well with the `DoubleDelta` codec, and values with the `Gorilla` codec. For example, to get an efficiently stored table, create it with the following configuration:
```sql
CREATE TABLE codec_example
(
timestamp DateTime CODEC(DoubleDelta),
slow_values Float32 CODEC(Gorilla)
)
ENGINE = MergeTree()
```
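As a quick, purely illustrative sanity check, you could fill this table with a synthetic slowly changing series; both the one-second timestamp stride and the smooth `sin` curve match the workload these codecs target:

```sql
INSERT INTO codec_example
SELECT
    toDateTime('2019-01-01 00:00:00') + number AS timestamp,
    toFloat32(sin(number / 1000)) AS slow_values
FROM numbers(1000000)
```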
#### General-Purpose Codecs {#create-query-common-purpose-codecs}
Codecs:

- `NONE` — No compression.
- `LZ4` — The lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies fast LZ4 compression.
- `LZ4HC[(level)]` — The LZ4 HC (high compression) algorithm with a configurable compression level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: [1, 12]. Recommended level range: [4, 9].
- `ZSTD[(level)]` — The [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with a configurable compression `level`. Possible levels: [1, 22]. Default level: 1.
High compression levels are useful for asymmetric scenarios, such as compress once, decompress many times. Higher levels give better compression at the cost of higher CPU usage.
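For example, a write-once, read-many archive table might trade insert-time CPU for a better ratio. A sketch with an illustrative table name and level choice:

```sql
CREATE TABLE archive_example
(
    dt Date,
    payload String CODEC(ZSTD(19))
)
ENGINE = MergeTree
PARTITION BY tuple()
ORDER BY dt
```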
## Temporary Tables

View File

@ -117,7 +117,7 @@ SELECT visibleWidth(NULL)
The function throws an exception if the table does not exist.
For elements of a nested data structure, the function checks the existence of the column. For the nested data structure itself, the function returns 0.
## bar
## bar {#function-bar}
Allows building a unicode-art diagram.

View File

@ -92,7 +92,7 @@ FROM
└───────────┴───────────┘
```
### FROM Clause
### FROM Clause {#select-from}
If the FROM clause is omitted, data will be read from the `system.one` table.
The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs).

View File

@ -210,6 +210,7 @@ nav:
- 'Overview of ClickHouse Architecture': 'development/architecture.md'
- 'How to Build ClickHouse on Linux': 'development/build.md'
- 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md'
- 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross.md'
- 'How to Write C++ code': 'development/style.md'
- 'How to Run ClickHouse Tests': 'development/tests.md'
- 'Third-Party Libraries Used': 'development/contrib.md'

View File

@ -211,6 +211,7 @@ nav:
- 'Overview of ClickHouse Architecture': 'development/architecture.md'
- 'How to Build ClickHouse on Linux': 'development/build.md'
- 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md'
- 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross.md'
- 'How to Write C++ code': 'development/style.md'
- 'How to Run ClickHouse Tests': 'development/tests.md'
- 'Third-Party Libraries Used': 'development/contrib.md'

View File

@ -209,6 +209,7 @@ nav:
- 'Overview of ClickHouse Architecture': 'development/architecture.md'
- 'How to Build ClickHouse on Linux': 'development/build.md'
- 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md'
- 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross.md'
- 'How to Write C++ code': 'development/style.md'
- 'How to Run ClickHouse Tests': 'development/tests.md'
- 'Third-Party Libraries Used': 'development/contrib.md'

View File

@ -0,0 +1 @@
../../en/development/build_cross.md

View File

@ -10,7 +10,7 @@ The most powerful table engine in ClickHouse is the `MergeTree` engine and
This lets you create a small sparse index that makes data retrieval fast.
- Partitioning can be used if a [key](custom_partitioning_key.md) is specified.
- Partitioning can be used if a [partitioning key](custom_partitioning_key.md) is specified.
On the same data set and with the same result set, some partitioned operations in ClickHouse are faster than regular ones. When a query specifies the partitioning key, ClickHouse automatically prunes partition data, which also effectively improves query performance.

View File

@ -29,7 +29,8 @@ add_library (mysqlxx
target_include_directories (mysqlxx PUBLIC include)
if (USE_INTERNAL_MYSQL_LIBRARY)
target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include)
target_include_directories (mysqlxx PUBLIC ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include)
else ()
set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS})

View File

@ -6,14 +6,14 @@ if(ENABLE_MYSQL)
option(USE_INTERNAL_MYSQL_LIBRARY "Set to FALSE to use system mysqlclient library instead of bundled" OFF)
endif()
if(USE_INTERNAL_MYSQL_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/README.md")
if(USE_INTERNAL_MYSQL_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/README")
message(WARNING "submodule contrib/mariadb-connector-c is missing. to fix try run: \n git submodule update --init --recursive")
set(USE_INTERNAL_MYSQL_LIBRARY 0)
endif()
if (USE_INTERNAL_MYSQL_LIBRARY)
set (MYSQLCLIENT_LIBRARIES mysqlclient)
set (MYSQLCLIENT_LIBRARIES mariadbclient)
set (USE_MYSQL 1)
set (MYSQLXX_LIBRARY mysqlxx)
else ()

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,6 +1,6 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#include <mariadb/mysqld_error.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#include <mysqld_error.h>
#else
#include <mysql/mysql.h>
#include <mysql/mysqld_error.h>

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -1,5 +1,5 @@
#if __has_include(<mariadb/mysql.h>)
#include <mariadb/mysql.h>
#if __has_include(<mysql.h>)
#include <mysql.h>
#else
#include <mysql/mysql.h>
#endif

View File

@ -231,6 +231,8 @@ function make_rpm {
echo "Requires: clickhouse-common-static = $VERSION_FULL-2" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Requires: tzdata" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Requires: initscripts" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Obsoletes: clickhouse-server-common < $VERSION_FULL" >> ${PACKAGE}-$VERSION_FULL-2.spec
cat ${PACKAGE}-$VERSION_FULL-2.spec_tmp >> ${PACKAGE}-$VERSION_FULL-2.spec
rpm_pack

View File

@ -16,7 +16,7 @@ server {
rewrite ^/docs/$ https://clickhouse.yandex/docs/en/ permanent;
rewrite ^/reference_en.html$ https://clickhouse.yandex/docs/en/single/ permanent;
rewrite ^/reference_ru.html$ https://clickhouse.yandex/docs/ru/single/ permanent;
rewrite ^/presentations/(.*)$ https://yandex.github.io/clickhouse-presentations/$1 permanent;
rewrite ^/presentations/(.*)$ https://clickhouse.github.io/clickhouse-presentations/$1 permanent;
include /usr/share/nginx/html/docs/redirects.conf;