Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 00:52:02 +00:00)

Commit 5348cd5bd4: Merge branch 'ClickHouse:master' into Check
.gitmodules (vendored): 3 changes

@@ -250,3 +250,6 @@
 [submodule "contrib/magic_enum"]
     path = contrib/magic_enum
     url = https://github.com/Neargye/magic_enum
+[submodule "contrib/sysroot"]
+    path = contrib/sysroot
+    url = https://github.com/ClickHouse-Extras/sysroot.git
@@ -1,4 +1,4 @@
-### ClickHouse release v21.10, 2021-10-08
+### ClickHouse release v21.10, 2021-10-14

 #### Backward Incompatible Change
@@ -336,6 +336,10 @@ if (COMPILER_GCC OR COMPILER_CLANG)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
 endif ()

+if (COMPILER_GCC)
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
+endif ()
+
 # Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
 option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@@ -1,14 +1,14 @@
 [![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/ClickHouse/raw/master/website/images/logo-400x240.png)](https://clickhouse.com)

-ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real time.
+ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.

 ## Useful Links

-* [Official website](https://clickhouse.com/) has quick high-level overview of ClickHouse on main page.
-* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
+* [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page.
+* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
 * [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
-* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
+* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
 * [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
 * [Code Browser](https://clickhouse.com/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
 * [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.
@@ -13,6 +13,7 @@
 #include <Common/StackTrace.h>
 #include <Common/getNumberOfPhysicalCPUCores.h>
 #include <Core/ServerUUID.h>
+#include <Common/hex.h>

 #if !defined(ARCADIA_BUILD)
 #    include "Common/config_version.h"
@@ -64,41 +65,6 @@ void setExtras()
     sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str()));
 }

-void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
-{
-    auto * logger = &Poco::Logger::get("SentryWriter");
-    size_t size = 1024;
-    char buffer[size];
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wformat-nonliteral"
-#endif
-    if (vsnprintf(buffer, size, message, args) >= 0)
-    {
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-        switch (level)
-        {
-            case SENTRY_LEVEL_DEBUG:
-                logger->debug(buffer);
-                break;
-            case SENTRY_LEVEL_INFO:
-                logger->information(buffer);
-                break;
-            case SENTRY_LEVEL_WARNING:
-                logger->warning(buffer);
-                break;
-            case SENTRY_LEVEL_ERROR:
-                logger->error(buffer);
-                break;
-            case SENTRY_LEVEL_FATAL:
-                logger->fatal(buffer);
-                break;
-        }
-    }
-}
-
 }
@@ -107,13 +73,13 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
     bool enabled = false;
     bool debug = config.getBool("send_crash_reports.debug", false);
     auto * logger = &Poco::Logger::get("SentryWriter");

     if (config.getBool("send_crash_reports.enabled", false))
     {
         if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
         {
             enabled = true;
         }
     }

     if (enabled)
     {
         server_data_path = config.getString("path", "");
@@ -126,7 +92,6 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)

     sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
     sentry_options_set_release(options, VERSION_STRING_SHORT);
-    sentry_options_set_logger(options, &sentry_logger, nullptr);
     if (debug)
     {
         sentry_options_set_debug(options, 1);
@@ -199,34 +164,34 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta
     if (stack_size > 0)
     {
         ssize_t offset = stack_trace.getOffset();
-        char instruction_addr[100];
+
+        char instruction_addr[19]
+        {
+            '0', 'x',
+            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
+            '\0'
+        };
+
         StackTrace::Frames frames;
         StackTrace::symbolize(stack_trace.getFramePointers(), offset, stack_size, frames);

         for (ssize_t i = stack_size - 1; i >= offset; --i)
         {
             const StackTrace::Frame & current_frame = frames[i];
             sentry_value_t sentry_frame = sentry_value_new_object();
             UInt64 frame_ptr = reinterpret_cast<UInt64>(current_frame.virtual_addr);

-            if (std::snprintf(instruction_addr, sizeof(instruction_addr), "0x%" PRIx64, frame_ptr) >= 0)
-            {
-                sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));
-            }
+            writeHexUIntLowercase(frame_ptr, instruction_addr + 2);
+            sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));

             if (current_frame.symbol.has_value())
             {
                 sentry_value_set_by_key(sentry_frame, "function", sentry_value_new_string(current_frame.symbol.value().c_str()));
             }

             if (current_frame.file.has_value())
             {
                 sentry_value_set_by_key(sentry_frame, "filename", sentry_value_new_string(current_frame.file.value().c_str()));
             }

             if (current_frame.line.has_value())
             {
                 sentry_value_set_by_key(sentry_frame, "lineno", sentry_value_new_int32(current_frame.line.value()));
             }

             sentry_value_append(sentry_frames, sentry_frame);
         }
@@ -1,4 +1,4 @@
-#include <cstddef>
+#include <stddef.h>

 #include <emmintrin.h>
@@ -13,7 +13,6 @@ endif ()
 if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386)
     message (FATAL_ERROR "32bit platforms are not supported")
 endif ()

 if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
     set (ARCH_PPC64LE 1)
 endif ()
@@ -1,14 +1,7 @@
-if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.12")
-    macro(add_glob cur_list)
-        file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS ${ARGN})
-        list(APPEND ${cur_list} ${__tmp})
-    endmacro()
-else ()
-    macro(add_glob cur_list)
-        file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${ARGN})
-        list(APPEND ${cur_list} ${__tmp})
-    endmacro()
-endif ()
+macro(add_glob cur_list)
+    file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS ${ARGN})
+    list(APPEND ${cur_list} ${__tmp})
+endmacro()

 macro(add_headers_and_sources prefix common_path)
     add_glob(${prefix}_headers ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)
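A minimal usage sketch of the two macros above (the "dbms" target and "Core" path are illustrative, not taken from this diff); add_headers_and_sources fills the <prefix>_headers list (and, in the full macro, <prefix>_sources) via add_glob:

    # Glob Core/*.h (and Core/*.cpp in the full macro) into dbms_* lists,
    # then build a library from the result.
    add_headers_and_sources(dbms Core)
    add_library(dbms ${dbms_headers} ${dbms_sources})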
@@ -64,6 +64,7 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
         ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
         ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
         ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
         ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "aarch64" ) OR
         ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR
         ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" )
     )
@@ -53,12 +53,7 @@ endif ()
 if (NOT OPENSSL_FOUND AND NOT MISSING_INTERNAL_SSL_LIBRARY)
     set (USE_INTERNAL_SSL_LIBRARY 1)
     set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/boringssl")

-    if (ARCH_AMD64)
-        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
-    elseif (ARCH_AARCH64)
-        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
-    endif ()
+    set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
     set (OPENSSL_CRYPTO_LIBRARY crypto)
     set (OPENSSL_SSL_LIBRARY ssl)
     set (OPENSSL_FOUND 1)
cmake/freebsd/toolchain-aarch64.cmake (new file, 22 lines)

@@ -0,0 +1,22 @@
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-aarch64")

set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake

set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE)

set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)

set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
@@ -5,7 +5,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")

 # We need builtins from Clang's RT even without libcxx - for ubsan+int128.
 # See https://bugs.llvm.org/show_bug.cgi?id=16404
-if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
+if (COMPILER_CLANG AND NOT CMAKE_CROSSCOMPILING)
     execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
 else ()
     set (BUILTINS_LIBRARY "-lgcc")
@@ -1,17 +1,24 @@
 set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)

 set (CMAKE_SYSTEM_NAME "Linux")
 set (CMAKE_SYSTEM_PROCESSOR "aarch64")
 set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
 set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
 set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
-set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc")

-# We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9.
-set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE)
-set (CMAKE_RANLIB "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ranlib" CACHE FILEPATH "" FORCE)
+set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64")

-set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
-set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
-set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
+set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
+
+find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
+
+set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
+set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
+
+set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

 set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
cmake/linux/toolchain-ppc64le.cmake (new file, 32 lines)

@@ -0,0 +1,32 @@
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)

set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "ppc64le")
set (CMAKE_C_COMPILER_TARGET "ppc64le-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "ppc64le-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "ppc64le-linux-gnu")

set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")

set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")

find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")

set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)

set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)

set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
cmake/linux/toolchain-x86_64.cmake (new file, 32 lines)

@@ -0,0 +1,32 @@
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)

set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "x86_64")
set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu")

set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64")

set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")

find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")

set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)

set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)

set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
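All three sysroot toolchain files above rely on find_program locating llvm-ar/llvm-ranlib on the host. A small defensive sketch (the guard itself is an assumption for illustration, not part of this commit):

    # Fail early with a clear message if the LLVM binutils were not found,
    # instead of letting CMAKE_AR expand to NOTFOUND later in the build.
    if (NOT LLVM_AR_PATH OR NOT LLVM_RANLIB_PATH)
        message (FATAL_ERROR "llvm-ar/llvm-ranlib not found; install LLVM or extend the NAMES lists above")
    endif ()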
@@ -20,40 +20,30 @@ endif ()
 if (CMAKE_CROSSCOMPILING)
     if (OS_DARWIN)
         # FIXME: broken dependencies
         set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
         set (ENABLE_GRPC OFF CACHE INTERNAL "") # no protobuf -> no grpc

         set (USE_SNAPPY OFF CACHE INTERNAL "")
         set (ENABLE_PARQUET OFF CACHE INTERNAL "") # no snappy and protobuf -> no parquet
         set (ENABLE_ORC OFF CACHE INTERNAL "") # no arrow (parquet) -> no orc

         set (ENABLE_ICU OFF CACHE INTERNAL "")
         set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
     elseif (OS_LINUX OR OS_ANDROID)
         if (ARCH_AARCH64)
             # FIXME: broken dependencies
             set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
             set (ENABLE_GRPC OFF CACHE INTERNAL "")

             set (ENABLE_PARQUET OFF CACHE INTERNAL "")
             set (ENABLE_ORC OFF CACHE INTERNAL "")

             set (ENABLE_MYSQL OFF CACHE INTERNAL "")
             set (USE_SENTRY OFF CACHE INTERNAL "")
         elseif (ARCH_PPC64LE)
             set (ENABLE_GRPC OFF CACHE INTERNAL "")
             set (USE_SENTRY OFF CACHE INTERNAL "")
         endif ()
     elseif (OS_FREEBSD)
         # FIXME: broken dependencies
         set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
         set (ENABLE_PARQUET OFF CACHE INTERNAL "")
-        set (ENABLE_ORC OFF CACHE INTERNAL "")
         set (ENABLE_GRPC OFF CACHE INTERNAL "")

+        set (ENABLE_ORC OFF CACHE INTERNAL "") # no protobuf -> no parquet -> no orc

         set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
     else ()
         message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
     endif ()

     # Don't know why but CXX_STANDARD doesn't work for cross-compilation
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20")

     message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
 endif ()
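The disables above use set(... CACHE INTERNAL ""), which wins over any later option() declaration for the same variable. A short sketch of that CMake behavior (ENABLE_GRPC reused from the diff; the option() line is illustrative):

    set (ENABLE_GRPC OFF CACHE INTERNAL "")       # pinned for the cross target
    option (ENABLE_GRPC "Build with gRPC" ON)     # no-op: the cache entry already exists
    message (STATUS "ENABLE_GRPC=${ENABLE_GRPC}") # prints OFF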
(deleted file)

@@ -1,2 +0,0 @@
-wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
-tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz --strip-components=1
@@ -89,4 +89,3 @@ if (LINKER_NAME)

     message(STATUS "Using custom linker by name: ${LINKER_NAME}")
 endif ()
contrib/CMakeLists.txt (vendored): 5 changes

@@ -163,7 +163,7 @@ endif ()
 if(USE_INTERNAL_SNAPPY_LIBRARY)
     set(SNAPPY_BUILD_TESTS 0 CACHE INTERNAL "")

-    add_subdirectory(snappy)
+    add_subdirectory(snappy-cmake)

     set (SNAPPY_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/snappy")
 endif()
@@ -215,6 +215,7 @@ function(add_llvm)

     # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
     set (CMAKE_INSTALL_RPATH "ON")
     set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
@@ -277,7 +278,7 @@ if (USE_FASTOPS)
 endif()

 if (USE_AMQPCPP OR USE_CASSANDRA)
-    add_subdirectory (libuv)
+    add_subdirectory (libuv-cmake)
 endif()
 if (USE_AMQPCPP)
     add_subdirectory (amqpcpp-cmake)
@@ -54,7 +54,7 @@ target_link_libraries (${THRIFT_LIBRARY} PRIVATE boost::headers_only)
 set(ORC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++")
 set(ORC_INCLUDE_DIR "${ORC_SOURCE_DIR}/include")
 set(ORC_SOURCE_SRC_DIR "${ORC_SOURCE_DIR}/src")
-set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap")
+# set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap")

 set(ORC_BUILD_SRC_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src")
 set(ORC_BUILD_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include")
@@ -101,7 +101,14 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
     set(CXX11_FLAGS "-std=c++0x")
 endif ()

 include("${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake")
+set (ORC_CXX_HAS_INITIALIZER_LIST 1)
+set (ORC_CXX_HAS_NOEXCEPT 1)
+set (ORC_CXX_HAS_NULLPTR 1)
+set (ORC_CXX_HAS_OVERRIDE 1)
+set (ORC_CXX_HAS_UNIQUE_PTR 1)
+set (ORC_CXX_HAS_CSTDINT 1)
+set (ORC_CXX_HAS_THREAD_LOCAL 1)

 include(orc_check.cmake)
 configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh")
 configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh")
@@ -1,130 +1,14 @@
 # Not changed part of contrib/orc/c++/src/CMakeLists.txt
+set (HAS_PREAD 1)
+set (HAS_STRPTIME 1)
+set (HAS_STOLL 1)
+set (INT64_IS_LL 1)
+set (HAS_DIAGNOSTIC_PUSH 1)
+set (HAS_STD_ISNAN 1)
+set (HAS_STD_MUTEX 1)
+set (NEEDS_REDUNDANT_MOVE 1)
+set (HAS_PRE_1970 1)
+set (HAS_POST_2038 1)
+set (NEEDS_Z_PREFIX 0)

-INCLUDE(CheckCXXSourceCompiles)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<fcntl.h>
-  #include<unistd.h>
-  int main(int,char*[]){
-    int f = open(\"/x/y\", O_RDONLY);
-    char buf[100];
-    return pread(f, buf, 100, 1000) == 0;
-  }"
-  HAS_PREAD
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<time.h>
-  int main(int,char*[]){
-    struct tm time2020;
-    return !strptime(\"2020-02-02 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2020);
-  }"
-  HAS_STRPTIME
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<string>
-  int main(int,char* argv[]){
-    return static_cast<int>(std::stoll(argv[0]));
-  }"
-  HAS_STOLL
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<stdint.h>
-  #include<stdio.h>
-  int main(int,char*[]){
-    int64_t x = 1; printf(\"%lld\",x);
-  }"
-  INT64_IS_LL
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #ifdef __clang__
-    #pragma clang diagnostic push
-    #pragma clang diagnostic ignored \"-Wdeprecated\"
-    #pragma clang diagnostic pop
-  #elif defined(__GNUC__)
-    #pragma GCC diagnostic push
-    #pragma GCC diagnostic ignored \"-Wdeprecated\"
-    #pragma GCC diagnostic pop
-  #elif defined(_MSC_VER)
-    #pragma warning( push )
-    #pragma warning( disable : 4996 )
-    #pragma warning( pop )
-  #else
-    unknownCompiler!
-  #endif
-  int main(int, char *[]) {}"
-  HAS_DIAGNOSTIC_PUSH
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<cmath>
-  int main(int, char *[]) {
-    return std::isnan(1.0f);
-  }"
-  HAS_STD_ISNAN
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<mutex>
-  int main(int, char *[]) {
-    std::mutex test_mutex;
-    std::lock_guard<std::mutex> lock_mutex(test_mutex);
-  }"
-  HAS_STD_MUTEX
-)
-
-CHECK_CXX_SOURCE_COMPILES("
-  #include<string>
-  std::string func() {
-    std::string var = \"test\";
-    return std::move(var);
-  }
-  int main(int, char *[]) {}"
-  NEEDS_REDUNDANT_MOVE
-)
-
-INCLUDE(CheckCXXSourceRuns)
-
-CHECK_CXX_SOURCE_RUNS("
-  #include<time.h>
-  int main(int, char *[]) {
-    time_t t = -14210715; // 1969-07-20 12:34:45
-    struct tm *ptm = gmtime(&t);
-    return !(ptm && ptm->tm_year == 69);
-  }"
-  HAS_PRE_1970
-)
-
-CHECK_CXX_SOURCE_RUNS("
-  #include<stdlib.h>
-  #include<time.h>
-  int main(int, char *[]) {
-    setenv(\"TZ\", \"America/Los_Angeles\", 1);
-    tzset();
-    struct tm time2037;
-    struct tm time2038;
-    strptime(\"2037-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2037);
-    strptime(\"2038-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2038);
-    return mktime(&time2038) - mktime(&time2037) != 31536000;
-  }"
-  HAS_POST_2038
-)
-
-set(CMAKE_REQUIRED_INCLUDES ${ZLIB_INCLUDE_DIR})
-set(CMAKE_REQUIRED_LIBRARIES zlib)
-CHECK_CXX_SOURCE_COMPILES("
-  #define Z_PREFIX
-  #include<zlib.h>
-  z_stream strm;
-  int main(int, char *[]) {
-    deflateReset(&strm);
-  }"
-  NEEDS_Z_PREFIX
-)
-
-# See https://cmake.org/cmake/help/v3.14/policy/CMP0075.html. Without unsetting it breaks thrift.
-set(CMAKE_REQUIRED_INCLUDES)
-set(CMAKE_REQUIRED_LIBRARIES)
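The rewrite above replaces configure-time probes with hard-coded answers, which is what makes cross-compilation work: CHECK_CXX_SOURCE_RUNS must execute a test binary, and that binary is built for the target being cross-compiled, not the host. A sketch of the general pattern (the guard is hypothetical; this commit simply pins the values unconditionally):

    include (CheckCXXSourceRuns)
    if (CMAKE_CROSSCOMPILING)
        set (HAS_POST_2038 1)  # assumed to hold on every supported target
    else ()
        CHECK_CXX_SOURCE_RUNS ("int main() { return 0; }" HAS_POST_2038)  # placeholder probe
    endif ()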
contrib/boost (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit 66d17f060c4867aeea99fa2a20cfdae89ae2a2ec
+Subproject commit 79358a3106aab6af464430ed67c7efafebf5cd6f
contrib/cctz (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit c0f1bcb97fd2782f7c3f972fadd5aad5affac4b8
+Subproject commit 9edd0861d8328b2ae77e8fb5f4d7dcd1cf33b42b
contrib/grpc (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit 60c986e15cae70aade721d26badabab1f822fdd6
+Subproject commit 7eac189a6badddac593580ec2ad1478bd2656fc7
@@ -187,12 +187,12 @@ function(protobuf_generate_grpc)

     add_custom_command(
         OUTPUT ${_generated_srcs}
-        COMMAND protobuf::protoc
+        COMMAND $<TARGET_FILE:protoc>
         ARGS --${protobuf_generate_grpc_LANGUAGE}_out ${_dll_export_decl}${protobuf_generate_grpc_PROTOC_OUT_DIR}
             --grpc_out ${_dll_export_decl}${protobuf_generate_grpc_PROTOC_OUT_DIR}
             --plugin=protoc-gen-grpc=$<TARGET_FILE:${protobuf_generate_grpc_PLUGIN}>
             ${_dll_desc_out} ${_protobuf_include_path} ${_abs_file}
-        DEPENDS ${_abs_file} protobuf::protoc ${protobuf_generate_grpc_PLUGIN}
+        DEPENDS ${_abs_file} protoc ${protobuf_generate_grpc_PLUGIN}
         COMMENT "Running ${protobuf_generate_grpc_LANGUAGE} protocol buffer compiler on ${_proto}"
         VERBATIM)
 endforeach()
@@ -161,7 +161,7 @@
  * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
  * segment (DSS).
  */
-#define JEMALLOC_DSS
+/* #undef JEMALLOC_DSS */

 /* Support memory filling (junk/zero). */
 #define JEMALLOC_FILL
@@ -81,7 +81,7 @@
 /* #undef JEMALLOC_HAVE_ISSETUGID */

 /* Defined if pthread_atfork(3) is available. */
-#define JEMALLOC_HAVE_PTHREAD_ATFORK
+/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */

 /* Defined if pthread_setname_np(3) is available. */
 #define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
@@ -284,7 +284,7 @@
 #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

 /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
-/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
+#define JEMALLOC_DEFINE_MADVISE_FREE

 /*
  * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
contrib/libc-headers (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit a720b7105a610acbd7427eea475a5b6810c151eb
+Subproject commit aa5429bf67a346e48ad60efd88bcefc286644bf3
contrib/libcxx (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit 2fa892f69acbaa40f8a18c6484854a6183a34482
+Subproject commit 61e60294b1de01483caa9f5d00f437c99b674de6
(deleted file)

@@ -1,10 +0,0 @@
-#include <exception>
-#include <stdexcept>
-
-int main() {
-    try {
-        throw 2;
-    } catch (int) {
-        std::throw_with_nested(std::runtime_error("test"));
-    }
-}
(deleted file)

@@ -1,7 +0,0 @@
-#include <chrono>
-
-using std::chrono::steady_clock;
-
-void foo(const steady_clock &clock) {
-    return;
-}
@@ -1,4 +1,4 @@
-OPTION(ENABLE_SSE "enable SSE4.2 buildin function" ON)
+OPTION(ENABLE_SSE "enable SSE4.2 builtin function" ON)

 INCLUDE (CheckFunctionExists)
 CHECK_FUNCTION_EXISTS(dladdr HAVE_DLADDR)
@@ -22,7 +22,7 @@ ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP)

 TRY_COMPILE(STRERROR_R_RETURN_INT
     ${CMAKE_CURRENT_BINARY_DIR}
-    "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp"
+    "${CMAKE_CURRENT_SOURCE_DIR}/CMake/CMakeTestCompileStrerror.c"
     CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
     OUTPUT_VARIABLE OUTPUT)
@@ -34,17 +34,8 @@ ELSE(STRERROR_R_RETURN_INT)
     MESSAGE(STATUS "Checking whether strerror_r returns an int -- no")
 ENDIF(STRERROR_R_RETURN_INT)

-TRY_COMPILE(HAVE_STEADY_CLOCK
-    ${CMAKE_CURRENT_BINARY_DIR}
-    "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp"
-    CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
-    OUTPUT_VARIABLE OUTPUT)
-
-TRY_COMPILE(HAVE_NESTED_EXCEPTION
-    ${CMAKE_CURRENT_BINARY_DIR}
-    "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp"
-    CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
-    OUTPUT_VARIABLE OUTPUT)
+set(HAVE_STEADY_CLOCK 1)
+set(HAVE_NESTED_EXCEPTION 1)

 SET(HAVE_BOOST_CHRONO 0)
 SET(HAVE_BOOST_ATOMIC 0)
contrib/libuv (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit e2e9b7e9f978ce8a1367b5fe781d97d1ce9f94ab
+Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae
contrib/libuv-cmake/CMakeLists.txt (new file, 160 lines)

@@ -0,0 +1,160 @@
# This file is a modified version of contrib/libuv/CMakeLists.txt

include(CMakeDependentOption)

set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/libuv")
set (BINARY_DIR "${CMAKE_BINARY_DIR}/contrib/libuv")

if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
    list(APPEND uv_cflags -fvisibility=hidden --std=gnu89)
    list(APPEND uv_cflags -Wall -Wextra -Wstrict-prototypes)
    list(APPEND uv_cflags -Wno-unused-parameter)
endif()

set(uv_sources
    src/fs-poll.c
    src/idna.c
    src/inet.c
    src/random.c
    src/strscpy.c
    src/threadpool.c
    src/timer.c
    src/uv-common.c
    src/uv-data-getter-setters.c
    src/version.c
    src/unix/async.c
    src/unix/core.c
    src/unix/dl.c
    src/unix/fs.c
    src/unix/getaddrinfo.c
    src/unix/getnameinfo.c
    src/unix/loop-watcher.c
    src/unix/loop.c
    src/unix/pipe.c
    src/unix/poll.c
    src/unix/process.c
    src/unix/random-devurandom.c
    src/unix/signal.c
    src/unix/stream.c
    src/unix/tcp.c
    src/unix/thread.c
    src/unix/tty.c
    src/unix/udp.c)

if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "Android|Linux|OS/390")
    list(APPEND uv_sources src/unix/proctitle.c)
endif()

if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD")
    list(APPEND uv_sources src/unix/freebsd.c)
endif()

if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|NetBSD|OpenBSD")
    list(APPEND uv_sources src/unix/posix-hrtime.c src/unix/bsd-proctitle.c)
endif()

if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|NetBSD|OpenBSD")
    list(APPEND uv_sources src/unix/bsd-ifaddrs.c src/unix/kqueue.c)
endif()

if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
    list(APPEND uv_sources src/unix/random-getrandom.c)
endif()

if(APPLE OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
    list(APPEND uv_sources src/unix/random-getentropy.c)
endif()

if(APPLE)
    list(APPEND uv_defines _DARWIN_UNLIMITED_SELECT=1 _DARWIN_USE_64_BIT_INODE=1)
    list(APPEND uv_sources
        src/unix/darwin-proctitle.c
        src/unix/darwin.c
        src/unix/fsevents.c)
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
    list(APPEND uv_libraries dl rt)
    list(APPEND uv_sources
        src/unix/linux-core.c
        src/unix/linux-inotify.c
        src/unix/linux-syscalls.c
        src/unix/procfs-exepath.c
        src/unix/random-getrandom.c
        src/unix/random-sysctl-linux.c
        src/unix/sysinfo-loadavg.c)
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
    list(APPEND uv_sources src/unix/netbsd.c)
    list(APPEND uv_libraries kvm)
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
    list(APPEND uv_sources src/unix/openbsd.c)
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
    list(APPEND uv_defines PATH_MAX=255)
    list(APPEND uv_defines _AE_BIMODAL)
    list(APPEND uv_defines _ALL_SOURCE)
    list(APPEND uv_defines _LARGE_TIME_API)
    list(APPEND uv_defines _OPEN_MSGQ_EXT)
    list(APPEND uv_defines _OPEN_SYS_FILE_EXT)
    list(APPEND uv_defines _OPEN_SYS_IF_EXT)
    list(APPEND uv_defines _OPEN_SYS_SOCK_EXT3)
    list(APPEND uv_defines _OPEN_SYS_SOCK_IPV6)
    list(APPEND uv_defines _UNIX03_SOURCE)
    list(APPEND uv_defines _UNIX03_THREADS)
    list(APPEND uv_defines _UNIX03_WITHDRAWN)
    list(APPEND uv_defines _XOPEN_SOURCE_EXTENDED)
    list(APPEND uv_sources
        src/unix/pthread-fixes.c
        src/unix/pthread-barrier.c
        src/unix/os390.c
        src/unix/os390-syscalls.c)
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
    list(APPEND uv_defines __EXTENSIONS__ _XOPEN_SOURCE=500)
    list(APPEND uv_libraries kstat nsl sendfile socket)
    list(APPEND uv_sources src/unix/no-proctitle.c src/unix/sunos.c)
endif()

set(uv_sources_tmp "")
foreach(file ${uv_sources})
    list(APPEND uv_sources_tmp "${SOURCE_DIR}/${file}")
endforeach(file)
set(uv_sources "${uv_sources_tmp}")

list(APPEND uv_defines CLICKHOUSE_GLIBC_COMPATIBILITY)

add_library(uv ${uv_sources})
target_compile_definitions(uv
    INTERFACE USING_UV_SHARED=1
    PRIVATE ${uv_defines} BUILDING_UV_SHARED=1)
target_compile_options(uv PRIVATE ${uv_cflags})
target_include_directories(uv PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(uv ${uv_libraries})

add_library(uv_a STATIC ${uv_sources})
target_compile_definitions(uv_a PRIVATE ${uv_defines})
target_compile_options(uv_a PRIVATE ${uv_cflags})
target_include_directories(uv_a PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(uv_a ${uv_libraries})

if(UNIX)
    # Now for some gibbering horrors from beyond the stars...
    foreach(x ${uv_libraries})
        set(LIBS "${LIBS} -l${x}")
    endforeach(x)
    file(STRINGS ${SOURCE_DIR}/configure.ac configure_ac REGEX ^AC_INIT)
    string(REGEX MATCH [0-9]+[.][0-9]+[.][0-9]+ PACKAGE_VERSION "${configure_ac}")
    string(REGEX MATCH ^[0-9]+ UV_VERSION_MAJOR "${PACKAGE_VERSION}")
    # The version in the filename is mirroring the behaviour of autotools.
    set_target_properties(uv PROPERTIES VERSION ${UV_VERSION_MAJOR}.0.0
                          SOVERSION ${UV_VERSION_MAJOR})
endif()
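A hypothetical consumer sketch for the two targets defined above (the demo target is illustrative, not part of the commit): linking either uv or uv_a is sufficient, since both publish the include directory as PUBLIC.

    add_executable (uv_demo demo.c)
    target_link_libraries (uv_demo PRIVATE uv_a)  # static variant; use "uv" for the shared library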
contrib/llvm (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit f30bbecef78b75b527e257c1304d0be2f2f95975
+Subproject commit 20607e61728e97c969e536644c3c0c1bb1a50672
contrib/openldap-cmake/freebsd_aarch64/include/lber_types.h (new file, 63 lines)

@@ -0,0 +1,63 @@
/* include/lber_types.h. Generated from lber_types.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 1998-2020 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

/*
 * LBER types
 */

#ifndef _LBER_TYPES_H
#define _LBER_TYPES_H

#include <ldap_cdefs.h>

LDAP_BEGIN_DECL

/* LBER boolean, enum, integers (32 bits or larger) */
#define LBER_INT_T int

/* LBER tags (32 bits or larger) */
#define LBER_TAG_T long

/* LBER socket descriptor */
#define LBER_SOCKET_T int

/* LBER lengths (32 bits or larger) */
#define LBER_LEN_T long

/* ------------------------------------------------------------ */

/* booleans, enumerations, and integers */
typedef LBER_INT_T ber_int_t;

/* signed and unsigned versions */
typedef signed LBER_INT_T ber_sint_t;
typedef unsigned LBER_INT_T ber_uint_t;

/* tags */
typedef unsigned LBER_TAG_T ber_tag_t;

/* "socket" descriptors */
typedef LBER_SOCKET_T ber_socket_t;

/* lengths */
typedef unsigned LBER_LEN_T ber_len_t;

/* signed lengths */
typedef signed LBER_LEN_T ber_slen_t;

LDAP_END_DECL

#endif /* _LBER_TYPES_H */
contrib/openldap-cmake/freebsd_aarch64/include/ldap_config.h
Normal file
74
contrib/openldap-cmake/freebsd_aarch64/include/ldap_config.h
Normal file
@ -0,0 +1,74 @@
|
||||
/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
|
||||
/* $OpenLDAP$ */
|
||||
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
|
||||
*
|
||||
* Copyright 1998-2020 The OpenLDAP Foundation.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted only as authorized by the OpenLDAP
|
||||
* Public License.
|
||||
*
|
||||
* A copy of this license is available in file LICENSE in the
|
||||
* top-level directory of the distribution or, alternatively, at
|
||||
* <http://www.OpenLDAP.org/license.html>.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file works in conjunction with OpenLDAP configure system.
|
||||
* If you do no like the values below, adjust your configure options.
|
||||
*/
|
||||
|
||||
#ifndef _LDAP_CONFIG_H
|
||||
#define _LDAP_CONFIG_H
|
||||
|
||||
/* directory separator */
|
||||
#ifndef LDAP_DIRSEP
|
||||
#ifndef _WIN32
|
||||
#define LDAP_DIRSEP "/"
|
||||
#else
|
||||
#define LDAP_DIRSEP "\\"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* directory for temporary files */
|
||||
#if defined(_WIN32)
|
||||
# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */
|
||||
#elif defined( _P_tmpdir )
|
||||
# define LDAP_TMPDIR _P_tmpdir
|
||||
#elif defined( P_tmpdir )
|
||||
# define LDAP_TMPDIR P_tmpdir
|
||||
#elif defined( _PATH_TMPDIR )
|
||||
# define LDAP_TMPDIR _PATH_TMPDIR
|
||||
#else
|
||||
# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
|
||||
#endif
|
||||
|
||||
/* directories */
|
||||
#ifndef LDAP_BINDIR
|
||||
#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
|
||||
#endif
|
||||
#ifndef LDAP_SBINDIR
|
||||
#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
|
||||
#endif
|
||||
#ifndef LDAP_DATADIR
|
||||
#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
|
||||
#endif
|
||||
#ifndef LDAP_SYSCONFDIR
|
||||
#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
|
||||
#endif
|
||||
#ifndef LDAP_LIBEXECDIR
|
||||
#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
|
||||
#endif
|
||||
#ifndef LDAP_MODULEDIR
|
||||
#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
|
||||
#endif
|
||||
#ifndef LDAP_RUNDIR
|
||||
#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
|
||||
#endif
|
||||
#ifndef LDAP_LOCALEDIR
|
||||
#define LDAP_LOCALEDIR ""
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* _LDAP_CONFIG_H */
|
contrib/openldap-cmake/freebsd_aarch64/include/ldap_features.h (new file, 61 lines; the path is inferred from the sibling headers and the file's own first line)

@@ -0,0 +1,61 @@
/* include/ldap_features.h. Generated from ldap_features.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 1998-2020 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

/*
 * LDAP Features
 */

#ifndef _LDAP_FEATURES_H
#define _LDAP_FEATURES_H 1

/* OpenLDAP API version macros */
#define LDAP_VENDOR_VERSION 20501
#define LDAP_VENDOR_VERSION_MAJOR 2
#define LDAP_VENDOR_VERSION_MINOR 5
#define LDAP_VENDOR_VERSION_PATCH X

/*
** WORK IN PROGRESS!
**
** OpenLDAP reentrancy/thread-safeness should be dynamically
** checked using ldap_get_option().
**
** The -lldap implementation is not thread-safe.
**
** The -lldap_r implementation is:
**   LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
** but also be:
**   LDAP_API_FEATURE_SESSION_THREAD_SAFE
**   LDAP_API_FEATURE_OPERATION_THREAD_SAFE
**
** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
** can be used to determine if -lldap_r is available at compile
** time. You must define LDAP_THREAD_SAFE if and only if you
** link with -lldap_r.
**
** If you fail to define LDAP_THREAD_SAFE when linking with
** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
** provided header definitions and declarations may be incorrect.
**
*/

/* is -lldap_r available or not */
#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1

/* LDAP v2 Referrals */
/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */

#endif /* LDAP_FEATURES */
contrib/openldap-cmake/freebsd_aarch64/include/portable.h (new file, 1169 lines): diff suppressed because it is too large
contrib/protobuf (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit 75601841d172c73ae6bf4ce8121f42b875cdbabd
+Subproject commit c1c5d02026059f4c3cb51aaa08e82288d3e08b89
@@ -10,8 +10,82 @@ else ()
     set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
 endif ()

+if (CMAKE_CROSSCOMPILING)
+    # Will build 'protoc' for host arch instead of cross-compiling
+    set(protobuf_BUILD_PROTOC_BINARIES OFF CACHE INTERNAL "" FORCE)
+endif ()
+
 add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}")

 # We don't want to stop compilation on warnings in protobuf's headers.
 # The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
 set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${protobuf_SOURCE_DIR}/src")
+
+if (CMAKE_CROSSCOMPILING)
+    # Build 'protoc' for host arch
+    set (PROTOC_BUILD_DIR "${protobuf_BINARY_DIR}/build")
+
+    if (NOT EXISTS "${PROTOC_BUILD_DIR}/protoc")
+
+        # This is quite ugly but I cannot make dependencies work propery.
+
+        execute_process(
+            COMMAND mkdir -p ${PROTOC_BUILD_DIR}
+            COMMAND_ECHO STDOUT)
+
+        execute_process(
+            COMMAND ${CMAKE_COMMAND}
+                "-G${CMAKE_GENERATOR}"
+                "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
+                "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
+                "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
+                "-Dprotobuf_BUILD_TESTS=0"
+                "-Dprotobuf_BUILD_CONFORMANCE=0"
+                "-Dprotobuf_BUILD_EXAMPLES=0"
+                "-Dprotobuf_BUILD_PROTOC_BINARIES=1"
+                "${protobuf_SOURCE_DIR}/cmake"
+            WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
+            COMMAND_ECHO STDOUT)
+
+        execute_process(
+            COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
+            COMMAND_ECHO STDOUT)
+    endif ()
+
+#    add_custom_command (
+#        OUTPUT ${PROTOC_BUILD_DIR}
+#        COMMAND mkdir -p ${PROTOC_BUILD_DIR})
+#
+#    add_custom_command (
+#        OUTPUT "${PROTOC_BUILD_DIR}/CMakeCache.txt"
+#
+#        COMMAND ${CMAKE_COMMAND}
+#             -G"${CMAKE_GENERATOR}"
+#             -DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}"
+#             -DCMAKE_C_COMPILER="${CMAKE_C_COMPILER}"
+#             -DCMAKE_CXX_COMPILER="${CMAKE_CXX_COMPILER}"
+#             -Dprotobuf_BUILD_TESTS=0
+#             -Dprotobuf_BUILD_CONFORMANCE=0
+#             -Dprotobuf_BUILD_EXAMPLES=0
+#             -Dprotobuf_BUILD_PROTOC_BINARIES=1
+#             "${protobuf_SOURCE_DIR}/cmake"
+#
+#        DEPENDS "${PROTOC_BUILD_DIR}"
+#        WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
+#        COMMENT "Configuring 'protoc' for host architecture."
+#        USES_TERMINAL)
+#
+#    add_custom_command (
+#        OUTPUT "${PROTOC_BUILD_DIR}/protoc"
+#        COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
+#        DEPENDS "${PROTOC_BUILD_DIR}/CMakeCache.txt"
+#        COMMENT "Building 'protoc' for host architecture."
+#        USES_TERMINAL)
+#
+#    add_custom_target (protoc-host DEPENDS "${PROTOC_BUILD_DIR}/protoc")
+
+    add_executable(protoc IMPORTED GLOBAL)
+    set_target_properties (protoc PROPERTIES IMPORTED_LOCATION "${PROTOC_BUILD_DIR}/protoc")
+    add_dependencies(protoc "${PROTOC_BUILD_DIR}/protoc")
+
+endif ()
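A usage sketch of the imported host protoc (example.proto is hypothetical); it mirrors the $<TARGET_FILE:protoc> pattern that the next hunk applies inside protobuf_generate:

    add_custom_command(
        OUTPUT example.pb.cc example.pb.h
        COMMAND $<TARGET_FILE:protoc> --cpp_out "${CMAKE_CURRENT_BINARY_DIR}" example.proto
        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
        DEPENDS example.proto protoc
        VERBATIM)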
@@ -181,11 +181,11 @@ function(protobuf_generate)

     add_custom_command(
         OUTPUT ${_generated_srcs}
-        COMMAND protobuf::protoc
+        COMMAND $<TARGET_FILE:protoc>
         ARGS --${protobuf_generate_LANGUAGE}_out ${_dll_export_decl}${protobuf_generate_PROTOC_OUT_DIR} ${_dll_desc_out} ${_protobuf_include_path} ${_abs_file}
-        DEPENDS ${_abs_file} protobuf::protoc
+        DEPENDS ${_abs_file} protoc
         COMMENT "Running ${protobuf_generate_LANGUAGE} protocol buffer compiler on ${_proto}"
-        VERBATIM )
+        VERBATIM)
 endforeach()

 set_source_files_properties(${_generated_srcs_all} PROPERTIES GENERATED TRUE)
@@ -106,18 +106,6 @@ if(NOT MSVC)
     set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul")
 endif()

-CHECK_CXX_SOURCE_COMPILES("
-#include <cstdint>
-#include <nmmintrin.h>
-#include <wmmintrin.h>
-int main() {
-  volatile uint32_t x = _mm_crc32_u32(0, 0);
-  const auto a = _mm_set_epi64x(0, 0);
-  const auto b = _mm_set_epi64x(0, 0);
-  const auto c = _mm_clmulepi64_si128(a, b, 0x00);
-  auto d = _mm_cvtsi128_si64(c);
-}
-" HAVE_SSE42)
 unset(CMAKE_REQUIRED_FLAGS)
 if(HAVE_SSE42)
     add_definitions(-DHAVE_SSE42)
@@ -126,14 +114,7 @@ elseif(FORCE_SSE42)
     message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
 endif()

-CHECK_CXX_SOURCE_COMPILES("
-#if defined(_MSC_VER) && !defined(__thread)
-#define __thread __declspec(thread)
-#endif
-int main() {
-    static __thread int tls;
-}
-" HAVE_THREAD_LOCAL)
+set (HAVE_THREAD_LOCAL 1)
 if(HAVE_THREAD_LOCAL)
     add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
 endif()
@@ -174,7 +155,7 @@ endif()

 option(WITH_FALLOCATE "build with fallocate" ON)
 if(WITH_FALLOCATE)
-    CHECK_CXX_SOURCE_COMPILES("
+    CHECK_C_SOURCE_COMPILES("
 #include <fcntl.h>
 #include <linux/falloc.h>
 int main() {
@@ -187,7 +168,7 @@ int main() {
     endif()
 endif()

-CHECK_CXX_SOURCE_COMPILES("
+CHECK_C_SOURCE_COMPILES("
 #include <fcntl.h>
 int main() {
     int fd = open(\"/dev/null\", 0);
@@ -198,7 +179,7 @@ if(HAVE_SYNC_FILE_RANGE_WRITE)
     add_definitions(-DROCKSDB_RANGESYNC_PRESENT)
 endif()

-CHECK_CXX_SOURCE_COMPILES("
+CHECK_C_SOURCE_COMPILES("
 #include <pthread.h>
 int main() {
     (void) PTHREAD_MUTEX_ADAPTIVE_NP;
@@ -228,6 +209,11 @@ if(HAVE_AUXV_GETAUXVAL)
     add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
 endif()

+check_cxx_symbol_exists(elf_aux_info sys/auxv.h HAVE_ELF_AUX_INFO)
+if(HAVE_ELF_AUX_INFO)
+    add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
+endif()
+
 include_directories(${ROCKSDB_SOURCE_DIR})
 include_directories("${ROCKSDB_SOURCE_DIR}/include")
 if(WITH_FOLLY_DISTRIBUTED_MUTEX)
contrib/s2geometry (vendored): submodule updated

@@ -1 +1 @@
-Subproject commit 20ea540d81f4575a3fc0aea585aac611bcd03ede
+Subproject commit 38b7a290f927cc372218c2094602b83e35b18c05
contrib/snappy-cmake/CMakeLists.txt (new file, 47 lines)

@@ -0,0 +1,47 @@
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy")

set(SNAPPY_IS_BIG_ENDIAN 0)

include(CheckIncludeFile)
check_include_file("byteswap.h" HAVE_BYTESWAP_H)
check_include_file("sys/endian.h" HAVE_SYS_ENDIAN_H)
check_include_file("sys/mman.h" HAVE_SYS_MMAN_H)
check_include_file("sys/resource.h" HAVE_SYS_RESOURCE_H)
check_include_file("sys/time.h" HAVE_SYS_TIME_H)
check_include_file("sys/uio.h" HAVE_SYS_UIO_H)
check_include_file("unistd.h" HAVE_UNISTD_H)
check_include_file("windows.h" HAVE_WINDOWS_H)

set (HAVE_BUILTIN_EXPECT 1)
set (HAVE_BUILTIN_CTZ 1)
set (HAVE_FUNC_MMAP 1)
set (HAVE_FUNC_SYSCONF 1)

if (ARCH_AMD64 AND ENABLE_SSSE3)
    set (SNAPPY_HAVE_SSSE3 1)
else ()
    set (SNAPPY_HAVE_SSSE3 0)
endif ()

configure_file(
    "${SOURCE_DIR}/cmake/config.h.in"
    "${CMAKE_CURRENT_BINARY_DIR}/config.h")

set(HAVE_SYS_UIO_H_01 1)

configure_file(
    "${SOURCE_DIR}/snappy-stubs-public.h.in"
    "${CMAKE_CURRENT_BINARY_DIR}/snappy-stubs-public.h")

add_library(snappy "")
target_sources(snappy
    PRIVATE
        "${SOURCE_DIR}/snappy-internal.h"
        "${SOURCE_DIR}/snappy-stubs-internal.h"
        "${SOURCE_DIR}/snappy-c.cc"
        "${SOURCE_DIR}/snappy-sinksource.cc"
        "${SOURCE_DIR}/snappy-stubs-internal.cc"
        "${SOURCE_DIR}/snappy.cc")

target_include_directories(snappy PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_compile_definitions(snappy PRIVATE -DHAVE_CONFIG_H)
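A hypothetical consumer sketch (the demo target is illustrative): the bundled target publishes its include directories as PUBLIC, so linking is all a consumer needs.

    add_executable (snappy_demo demo.cc)
    target_link_libraries (snappy_demo PRIVATE snappy)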
contrib/sysroot (vendored): new submodule

@@ -0,0 +1 @@
+Subproject commit 002415524b5d14124bb8a61a3ce7ac65774f5479
@@ -93,10 +93,6 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
 # Download toolchain and SDK for Darwin
 RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz

-# Download toolchain for ARM
-# It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.
-RUN wget -nv "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en" -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
-
 # Download toolchain for FreeBSD 11.3
 RUN wget -nv https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-11.3-toolchain.tar.xz
@@ -6,9 +6,6 @@ mkdir -p build/cmake/toolchain/darwin-x86_64
 tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
 ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64

-mkdir -p build/cmake/toolchain/linux-aarch64
-tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1
-
 mkdir -p build/cmake/toolchain/freebsd-x86_64
 tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1
@@ -61,6 +61,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     DARWIN_ARM_SUFFIX = "-darwin-aarch64"
     ARM_SUFFIX = "-aarch64"
     FREEBSD_SUFFIX = "-freebsd"
+    PPC_SUFFIX = '-ppc64le'

     result = []
     cmake_flags = ['$CMAKE_FLAGS', '-DADD_GDB_INDEX_FOR_GOLD=1']
@@ -69,8 +70,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
     is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
     is_cross_arm = compiler.endswith(ARM_SUFFIX)
+    is_cross_ppc = compiler.endswith(PPC_SUFFIX)
     is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
-    is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd
+    is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd or is_cross_ppc

     # Explicitly use LLD with Clang by default.
     # Don't force linker for cross-compilation.
@@ -97,6 +99,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     elif is_cross_freebsd:
         cc = compiler[:-len(FREEBSD_SUFFIX)]
         cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
+    elif is_cross_ppc:
+        cc = compiler[:-len(PPC_SUFFIX)]
+        cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake")
     else:
         cc = compiler

@@ -205,7 +210,7 @@ if __name__ == "__main__":
     parser.add_argument("--build-type", choices=("debug", ""), default="")
     parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
                         "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
-                        "clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64",
+                        "clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64", "clang-13-ppc64le",
                         "clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13")
     parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
     parser.add_argument("--unbundled", action="store_true")
@@ -67,7 +67,7 @@ RUN apt-get update \
     unixodbc \
     --yes --no-install-recommends

-RUN pip3 install numpy scipy pandas Jinja2
+RUN pip3 install numpy scipy pandas Jinja2 pandas clickhouse_driver

 # This symlink required by gcc to find lld compiler
 RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -262,11 +262,13 @@ function run_tests

     start_server

+    set +e
     time clickhouse-test --hung-check -j 8 --order=random \
         --fast-tests-only --no-long --testname --shard --zookeeper \
         -- "$FASTTEST_FOCUS" 2>&1 \
     | ts '%Y-%m-%d %H:%M:%S' \
     | tee "$FASTTEST_OUTPUT/test_result.txt"
+    set -e
 }

 case "$stage" in
@ -27,7 +27,7 @@ RUN apt-get update \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN pip3 install Jinja2
|
||||
RUN pip3 install Jinja2 pandas clickhouse_driver
|
||||
|
||||
COPY * /
|
||||
|
||||
|
@ -108,8 +108,10 @@ function run_tests()
|
||||
ADDITIONAL_OPTIONS+=('--replicated-database')
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
|
||||
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||
set -e
|
||||
}
|
||||
|
||||
export -f run_tests
|
||||
|
@ -34,7 +34,7 @@ RUN apt-get update -y \
|
||||
postgresql-client \
|
||||
sqlite3
|
||||
|
||||
RUN pip3 install numpy scipy pandas Jinja2
|
||||
RUN pip3 install numpy scipy pandas Jinja2 clickhouse_driver
|
||||
|
||||
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
|
||||
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
|
||||
|
@ -96,10 +96,12 @@ function run_tests()
|
||||
ADDITIONAL_OPTIONS+=('8')
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
|
||||
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee -a test_output/test_result.txt
|
||||
set -e
|
||||
}
|
||||
|
||||
export -f run_tests
|
||||
@ -114,12 +116,6 @@ grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
|
||||
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
|
||||
clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
|
||||
clickhouse-client --allow_introspection_functions=1 -q "
|
||||
WITH
|
||||
arrayMap(x -> concat(demangle(addressToSymbol(x)), ':', addressToLine(x)), trace) AS trace_array,
|
||||
arrayStringConcat(trace_array, '\n') AS trace_string
|
||||
SELECT * EXCEPT(trace), trace_string FROM system.trace_log FORMAT TSVWithNamesAndTypes
|
||||
" | pigz > /test_output/trace-log.tsv.gz &
|
||||
|
||||
# Also export trace log in flamegraph-friendly format.
|
||||
for trace_type in CPU Memory Real
|
||||
@ -146,6 +142,7 @@ fi
|
||||
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
|
||||
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
|
||||
tar -chf /test_output/zookeeper_log_dump.tar /var/lib/clickhouse/data/system/zookeeper_log ||:
|
||||
tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||:
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
|
@ -71,42 +71,42 @@ def prepare_for_hung_check(drop_databases):
|
||||
# FIXME this function should not exist, but...
|
||||
|
||||
# ThreadFuzzer significantly slows down the server and causes false-positive hung check failures
|
||||
call("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'", shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
# We attach gdb to clickhouse-server before running tests
|
||||
# to print stacktraces of all crashes even if clickhouse cannot print it for some reason.
|
||||
# However, it obstructs checking for hung queries.
|
||||
logging.info("Will terminate gdb (if any)")
|
||||
call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT)
|
||||
call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
# Some tests set too low a memory limit for the default user and forget to reset it back.
|
||||
# It may cause SYSTEM queries to fail, let's disable memory limit.
|
||||
call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
# Some tests execute SYSTEM STOP MERGES or similar queries.
|
||||
# It may cause some ALTERs to hang.
|
||||
# Possibly we should fix tests and forbid using such queries without specifying a table.
|
||||
call("clickhouse client -q 'SYSTEM START MERGES'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START TTL MERGES'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START MOVES'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START FETCHES'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT)
|
||||
call("clickhouse client -q 'SYSTEM START MERGES'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START TTL MERGES'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START MOVES'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START FETCHES'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT, timeout=30)
|
||||
call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
# Issue #21004, live views are experimental, so let's just suppress it
|
||||
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT)
|
||||
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
# Kill other queries which known to be slow
|
||||
# It's query from 01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds
|
||||
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT)
|
||||
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT, timeout=30)
|
||||
# Long query from 00084_external_agregation
|
||||
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT)
|
||||
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT, timeout=30)
|
||||
|
||||
if drop_databases:
|
||||
# Here we try to drop all databases in async mode. If some queries are really hung, then the drop will hang too.
|
||||
# Otherwise we will get rid of queries which wait for background pool. It can take a long time on slow builds (more than 900 seconds).
|
||||
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True).decode('utf-8').strip().split()
|
||||
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True, timeout=30).decode('utf-8').strip().split()
|
||||
for db in databases:
|
||||
if db == "system":
|
||||
continue
|
||||
@ -117,13 +117,13 @@ def prepare_for_hung_check(drop_databases):
|
||||
# Wait for the last queries to finish, if any, but not longer than 300 seconds
|
||||
call("""clickhouse client -q "select sleepEachRow((
|
||||
select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300
|
||||
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT)
|
||||
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT, timeout=330)
|
||||
|
||||
# Even if all clickhouse-test processes are finished, there are probably some sh scripts,
|
||||
# which still run some new queries. Let's ignore them.
|
||||
try:
|
||||
query = """clickhouse client -q "SELECT count() FROM system.processes where where elapsed > 300" """
|
||||
output = check_output(query, shell=True, stderr=STDOUT).decode('utf-8').strip()
|
||||
output = check_output(query, shell=True, stderr=STDOUT, timeout=30).decode('utf-8').strip()
|
||||
if int(output) == 0:
|
||||
return False
|
||||
except:
|
||||
@ -176,6 +176,7 @@ if __name__ == "__main__":
|
||||
if res != 0 and have_long_running_queries:
|
||||
logging.info("Hung check failed with exit code {}".format(res))
|
||||
hung_check_status = "Hung check failed\tFAIL\n"
|
||||
open(os.path.join(args.output_folder, "test_results.tsv"), 'w+').write(hung_check_status)
|
||||
with open(os.path.join(args.output_folder, "test_results.tsv"), 'w+') as results:
|
||||
results.write(hung_check_status)
|
||||
|
||||
logging.info("Stress test finished")
|
||||
|
@ -10,7 +10,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
python3-pip \
|
||||
pylint \
|
||||
yamllint \
|
||||
&& pip3 install codespell
|
||||
&& pip3 install codespell pandas clickhouse_driver
|
||||
|
||||
COPY run.sh /
|
||||
COPY process_style_check_result.py /
|
||||
|
@ -28,6 +28,7 @@ def process_test_log(log_path):
|
||||
test_results = []
|
||||
with open(log_path, 'r') as test_file:
|
||||
for line in test_file:
|
||||
original_line = line
|
||||
line = line.strip()
|
||||
if any(s in line for s in NO_TASK_TIMEOUT_SIGNS):
|
||||
task_timeout = False
|
||||
@ -49,19 +50,24 @@ def process_test_log(log_path):
|
||||
total += 1
|
||||
if TIMEOUT_SIGN in line:
|
||||
failed += 1
|
||||
test_results.append((test_name, "Timeout", test_time))
|
||||
test_results.append((test_name, "Timeout", test_time, []))
|
||||
elif FAIL_SIGN in line:
|
||||
failed += 1
|
||||
test_results.append((test_name, "FAIL", test_time))
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif UNKNOWN_SIGN in line:
|
||||
unknown += 1
|
||||
test_results.append((test_name, "FAIL", test_time))
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif SKIPPED_SIGN in line:
|
||||
skipped += 1
|
||||
test_results.append((test_name, "SKIPPED", test_time))
|
||||
test_results.append((test_name, "SKIPPED", test_time, []))
|
||||
else:
|
||||
success += int(OK_SIGN in line)
|
||||
test_results.append((test_name, "OK", test_time))
|
||||
test_results.append((test_name, "OK", test_time, []))
|
||||
elif len(test_results) > 0 and test_results[-1][1] == "FAIL":
|
||||
test_results[-1][3].append(original_line)
|
||||
|
||||
test_results = [(test[0], test[1], test[2], ''.join(test[3])) for test in test_results]
|
||||
|
||||
return total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results
|
||||
|
||||
def process_result(result_path):
|
||||
@ -89,14 +95,14 @@ def process_result(result_path):
|
||||
if hung:
|
||||
description = "Some queries hung, "
|
||||
state = "failure"
|
||||
test_results.append(("Some queries hung", "FAIL", "0"))
|
||||
test_results.append(("Some queries hung", "FAIL", "0", ""))
|
||||
elif task_timeout:
|
||||
description = "Timeout, "
|
||||
state = "failure"
|
||||
test_results.append(("Timeout", "FAIL", "0"))
|
||||
test_results.append(("Timeout", "FAIL", "0", ""))
|
||||
elif retries:
|
||||
description = "Some tests restarted, "
|
||||
test_results.append(("Some tests restarted", "SKIPPED", "0"))
|
||||
test_results.append(("Some tests restarted", "SKIPPED", "0", ""))
|
||||
else:
|
||||
description = ""
|
||||
|
||||
|
59
docs/_includes/install/universal.sh
Executable file
59
docs/_includes/install/universal.sh
Executable file
@ -0,0 +1,59 @@
|
||||
#!/bin/sh -e
|
||||
|
||||
OS=$(uname -s)
|
||||
ARCH=$(uname -m)
|
||||
|
||||
DIR=
|
||||
|
||||
if [ "${OS}" = "Linux" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="amd64"
|
||||
elif [ "${ARCH}" = "aarch64" ]
|
||||
then
|
||||
DIR="aarch64"
|
||||
elif [ "${ARCH}" = "powerpc64le" ]
|
||||
then
|
||||
DIR="powerpc64le"
|
||||
fi
|
||||
elif [ "${OS}" = "FreeBSD" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="freebsd"
|
||||
elif [ "${ARCH}" = "aarch64" ]
|
||||
then
|
||||
DIR="freebsd-aarch64"
|
||||
elif [ "${ARCH}" = "powerpc64le" ]
|
||||
then
|
||||
DIR="freebsd-powerpc64le"
|
||||
fi
|
||||
elif [ "${OS}" = "Darwin" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="macos"
|
||||
elif [ "${ARCH}" = "aarch64" ]
|
||||
then
|
||||
DIR="macos-aarch64"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "${DIR}" ]
|
||||
then
|
||||
echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
|
||||
echo "Will download ${URL}"
|
||||
curl -O "${URL}" && chmod a+x clickhouse &&
|
||||
echo "Successfully downloaded the ClickHouse binary, you can run it as:
|
||||
./clickhouse"
|
||||
|
||||
if [ "${OS}" = "Linux" ]
|
||||
then
|
||||
echo "You can also install it:
|
||||
sudo ./clickhouse install"
|
||||
fi
|
@ -9,15 +9,11 @@ This is for the case when you have Linux machine and want to use it to build `cl
|
||||
|
||||
The cross-build for AARCH64 is based on the [Build instructions](../development/build.md); follow them first.
|
||||
|
||||
## Install Clang-8 {#install-clang-8}
|
||||
## Install Clang-13
|
||||
|
||||
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
|
||||
For example, in Ubuntu Bionic you can use the following commands:
|
||||
|
||||
``` bash
|
||||
echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install clang-8
|
||||
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup, or run:
|
||||
```
|
||||
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
```
|
||||
|
||||
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
|
||||
@ -34,7 +30,7 @@ tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cma
|
||||
``` bash
|
||||
cd ClickHouse
|
||||
mkdir build-arm64
|
||||
CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
|
||||
CC=clang-13 CXX=clang++-13 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
|
||||
ninja -C build-arm64
|
||||
```
|
||||
|
||||
|
@ -688,7 +688,7 @@ Tags:
|
||||
- `policy_name_N` — Policy name. Policy names must be unique.
|
||||
- `volume_name_N` — Volume name. Volume names must be unique.
|
||||
- `disk` — a disk within a volume.
|
||||
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks.
|
||||
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the size of a merged part is estimated to be bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume. (A query sketch for inspecting these settings follows this list.)
|
||||
- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if any (by default, 0.1).
|
||||
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
|
||||
|
||||
|
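The effective values of these settings can be checked at runtime. A minimal sketch, assuming the `system.storage_policies` system table exposes `max_data_part_size` and `move_factor` columns (as in recent ClickHouse versions):

``` sql
-- Inspect configured storage policies and their per-volume limits.
SELECT policy_name, volume_name, max_data_part_size, move_factor
FROM system.storage_policies
ORDER BY policy_name, volume_name;
```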
@ -188,14 +188,15 @@ When the `max_parallel_replicas` option is enabled, query processing is parallel
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_shard_num` — Contains the `shard_num` (from `system.clusters`). Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
|
||||
- `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
!!! note "Note"
|
||||
Since [`remote`](../../../sql-reference/table-functions/remote.md)/`cluster` table functions internally create temporary instance of the same Distributed engine, `_shard_num` is available there too.
|
||||
Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create a temporary Distributed table, `_shard_num` is available there too.
|
||||
|
||||
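A small usage sketch (assuming a single local server listening on the default port, so that `remote` over two loopback addresses emulates two shards):

``` sql
-- Each emulated shard reports its own _shard_num value.
SELECT _shard_num, hostName()
FROM remote('127.0.0.{1,2}', system, one);
```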
**See Also**
|
||||
|
||||
- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)
|
||||
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size)
|
||||
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) description
|
||||
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting
|
||||
- [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) and [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) functions
|
||||
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/operations/table_engines/distributed/) <!--hide-->
|
||||
|
@ -115,6 +115,7 @@ toc_title: Adopters
|
||||
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
|
||||
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
|
||||
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
|
||||
| <a href="https://www.sipfront.com/" class="favicon">Sipfront</a> | Analytics | — | — | — | [Tweet, October 2021](https://twitter.com/andreasgranig/status/1446404332337913895?s=20) |
|
||||
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
|
||||
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
|
||||
| <a href="https://splitbee.io" class="favicon">Splitbee</a> | Analytics | Main Product | — | — | [Blog Post, Mai 2021](https://splitbee.io/blog/new-pricing) |
|
||||
@ -162,5 +163,6 @@ toc_title: Adopters
|
||||
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
|
||||
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
|
||||
| <a href="https://ecommpay.com/" class="favicon">Ecommpay</a> | Payment Processing | Logs | — | — | [Video, Nov 2019](https://www.youtube.com/watch?v=d3GdZTOWGLk) |
|
||||
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
|
||||
|
@ -39,9 +39,9 @@ In ClickHouse, data can reside on different shards. Each shard can be a group of
|
||||
|
||||
ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that is identical to the ANSI SQL standard in [many cases](../sql-reference/ansi.md).
|
||||
|
||||
Supported queries include [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md) clause, [IN](../sql-reference/operators/in.md) operator, and scalar subqueries.
|
||||
Supported queries include [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md) clause, [IN](../sql-reference/operators/in.md) operator, [window functions](../sql-reference/window-functions/index.md) and scalar subqueries.
|
||||
|
||||
Correlated (dependent) subqueries and window functions are not supported at the time of writing but might become available in the future.
|
||||
Correlated (dependent) subqueries are not supported at the time of writing but might become available in the future.
|
||||
|
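As a small illustration of the window-function support mentioned above (a sketch; on versions where window functions are still experimental, the setting `allow_experimental_window_functions = 1` may need to be enabled first):

``` sql
-- Running total over a generated sequence of numbers.
SELECT
    number,
    sum(number) OVER (ORDER BY number) AS running_total
FROM numbers(5);
```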
||||
## Vector Computation Engine {#vector-engine}
|
||||
|
||||
|
@ -3784,3 +3784,33 @@ Result:
|
||||
│ 10 │ 20 │ 30 │
|
||||
└─────┴─────┴───────┘
|
||||
```
|
||||
|
||||
## optimize_move_to_prewhere {#optimize_move_to_prewhere}
|
||||
|
||||
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries.
|
||||
|
||||
Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Automatic `PREWHERE` optimization is disabled.
|
||||
- 1 — Automatic `PREWHERE` optimization is enabled.
|
||||
|
||||
Default value: `1`.
|
||||
|
||||
## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final}
|
||||
|
||||
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
|
||||
|
||||
Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Automatic `PREWHERE` optimization in `SELECT` queries with `FINAL` modifier is disabled.
|
||||
- 1 — Automatic `PREWHERE` optimization in `SELECT` queries with `FINAL` modifier is enabled.
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
**See Also**
|
||||
|
||||
- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting
|
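A minimal sketch of toggling these settings per query; the `events` table name is hypothetical, and `FINAL` assumes an engine that supports it (for example, `ReplacingMergeTree`):

``` sql
SET optimize_move_to_prewhere = 1;
SET optimize_move_to_prewhere_if_final = 1;

-- With both settings enabled, parts of the WHERE condition below
-- may be moved to PREWHERE automatically even though FINAL is used.
SELECT count() FROM events FINAL WHERE user_id = 42;
```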
@ -10,6 +10,9 @@ Columns:
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
|
||||
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression for the index calculation.
|
||||
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the block.
|
||||
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of compressed data, in bytes.
|
||||
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of decompressed data, in bytes.
|
||||
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of marks, in bytes.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -26,6 +29,9 @@ name: clicks_idx
|
||||
type: minmax
|
||||
expr: clicks
|
||||
granularity: 1
|
||||
data_compressed_bytes: 58
|
||||
data_uncompressed_bytes: 6
|
||||
marks: 48
|
||||
|
||||
Row 2:
|
||||
──────
|
||||
@ -35,4 +41,7 @@ name: contacts_null_idx
|
||||
type: minmax
|
||||
expr: assumeNotNull(contacts_null)
|
||||
granularity: 1
|
||||
data_compressed_bytes: 58
|
||||
data_uncompressed_bytes: 6
|
||||
marks: 48
|
||||
```
|
||||
|
@ -38,6 +38,12 @@ Columns:
|
||||
|
||||
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.
|
||||
|
||||
- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
|
||||
- `secondary_indices_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||
|
||||
- `secondary_indices_marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks for secondary indices.
|
||||
|
||||
- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
|
||||
|
||||
- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – The time when the data part became inactive.
|
||||
@ -119,6 +125,9 @@ rows: 6
|
||||
bytes_on_disk: 310
|
||||
data_compressed_bytes: 157
|
||||
data_uncompressed_bytes: 91
|
||||
secondary_indices_compressed_bytes: 58
|
||||
secondary_indices_uncompressed_bytes: 6
|
||||
secondary_indices_marks_bytes: 48
|
||||
marks_bytes: 144
|
||||
modification_time: 2020-06-18 13:01:49
|
||||
remove_time: 1970-01-01 00:00:00
|
||||
|
@ -1,4 +1,3 @@
|
||||
---
|
||||
toc_priority: 58
|
||||
toc_title: Usage Recommendations
|
||||
---
|
||||
@ -60,7 +59,7 @@ $ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
|
||||
|
||||
Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`.
|
||||
|
||||
A block size of 1024 KB is sufficient for all RAID configurations.
|
||||
A block size of 64 KB is sufficient for most RAID configurations. The average clickhouse-server write size is approximately 1 MB (1024 KB), and thus the recommended stripe size is also 1 MB. The block size can be optimized if needed by setting it to 1 MB divided by the number of non-parity disks in the RAID array, so that each write is parallelized across all available non-parity disks. For example, with 8 non-parity disks the optimal block size would be 1024 KB / 8 = 128 KB.
|
||||
Never set the block size too small or too large.
|
||||
|
||||
You can use RAID-0 on SSD.
|
||||
|
@ -26,6 +26,7 @@ SELECT
|
||||
## timeZone {#timezone}
|
||||
|
||||
Returns the timezone of the server.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
|
||||
|
||||
**Syntax**
|
||||
|
||||
|
@ -40,7 +40,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3IsValid(630814730351855103) as h3IsValid;
|
||||
SELECT h3IsValid(630814730351855103) AS h3IsValid;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -77,7 +77,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetResolution(639821929606596015) as resolution;
|
||||
SELECT h3GetResolution(639821929606596015) AS resolution;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -111,7 +111,7 @@ h3EdgeAngle(resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3EdgeAngle(10) as edgeAngle;
|
||||
SELECT h3EdgeAngle(10) AS edgeAngle;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -145,7 +145,7 @@ h3EdgeLengthM(resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3EdgeLengthM(15) as edgeLengthM;
|
||||
SELECT h3EdgeLengthM(15) AS edgeLengthM;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -184,7 +184,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index;
|
||||
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -333,7 +333,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetBaseCell(612916788725809151) as basecell;
|
||||
SELECT h3GetBaseCell(612916788725809151) AS basecell;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -369,7 +369,7 @@ Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3HexAreaM2(13) as area;
|
||||
SELECT h3HexAreaM2(13) AS area;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -481,7 +481,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3ToParent(599405990164561919, 3) as parent;
|
||||
SELECT h3ToParent(599405990164561919, 3) AS parent;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -515,7 +515,7 @@ Type: [String](../../../sql-reference/data-types/string.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3ToString(617420388352917503) as h3_string;
|
||||
SELECT h3ToString(617420388352917503) AS h3_string;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -549,7 +549,7 @@ stringToH3(index_str)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT stringToH3('89184926cc3ffff') as index;
|
||||
SELECT stringToH3('89184926cc3ffff') AS index;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -583,7 +583,7 @@ h3GetResolution(index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetResolution(617420388352917503) as res;
|
||||
SELECT h3GetResolution(617420388352917503) AS res;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -620,7 +620,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3IsResClassIII(617420388352917503) as res;
|
||||
SELECT h3IsResClassIII(617420388352917503) AS res;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -631,7 +631,7 @@ Result:
|
||||
└─────┘
|
||||
```
|
||||
|
||||
## h3IsPentagon {#h3ispentagon }
|
||||
## h3IsPentagon {#h3ispentagon}
|
||||
|
||||
Returns whether this [H3](#h3index) index represents a pentagonal cell.
|
||||
|
||||
@ -657,7 +657,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT SELECT h3IsPentagon(644721767722457330) as pentagon;
|
||||
SELECT h3IsPentagon(644721767722457330) AS pentagon;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -693,7 +693,7 @@ Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT SELECT h3GetFaces(599686042433355775) as faces;
|
||||
SELECT h3GetFaces(599686042433355775) AS faces;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
@ -141,20 +141,49 @@ This is a relatively fast non-cryptographic hash function of average quality for
|
||||
Calculates a 64-bit hash code from any type of integer.
|
||||
It works faster than intHash32. Average quality.
|
||||
|
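A trivial usage sketch (the result is deterministic for a given input, but the concrete value is not documented here):

``` sql
SELECT intHash64(42) AS h;
```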
||||
## SHA1 {#sha1}
|
||||
## SHA1, SHA224, SHA256, SHA512 {#sha}
|
||||
|
||||
## SHA224 {#sha224}
|
||||
Calculates the SHA-1, SHA-224, SHA-256, or SHA-512 hash from a string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
## SHA256 {#sha256}
|
||||
**Syntax**
|
||||
|
||||
## SHA384 {#sha384}
|
||||
``` sql
|
||||
SHA1('s')
|
||||
...
|
||||
SHA512('s')
|
||||
```
|
||||
|
||||
## SHA512 {#sha512}
|
||||
|
||||
Calculates SHA-1, SHA-224, SHA-256, SHA-384 or SHA-512 from a string and returns the resulting set of bytes as FixedString(20), FixedString(28), FixedString(32), FixedString(48) or FixedString(64).
|
||||
The function works fairly slowly (SHA-1 processes about 5 million short strings per second per processor core, while SHA-224 and SHA-256 process about 2.2 million).
|
||||
We recommend using this function only in cases when you need a specific hash function and you can’t select it.
|
||||
Even in these cases, we recommend applying the function offline and pre-calculating values when inserting them into the table, instead of applying it in SELECTS.
|
||||
Even in these cases, we recommend applying the function offline and pre-calculating values when inserting them into the table, instead of applying it in `SELECT` queries.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s` — Input string for SHA hash calculation. [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The SHA hash as a non-encoded FixedString: SHA-1 returns FixedString(20), SHA-224 returns FixedString(28), SHA-256 returns FixedString(32), and SHA-512 returns FixedString(64).
|
||||
|
||||
Type: [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT hex(SHA1('abc'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─hex(SHA1('abc'))─────────────────────────┐
|
||||
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## URLHash(url\[, N\]) {#urlhashurl-n}
|
||||
|
||||
|
@ -8,6 +8,7 @@ toc_title: Other
|
||||
## hostName() {#hostname}
|
||||
|
||||
Returns a string with the name of the host that this function was performed on. For distributed processing, this is the name of the remote server host, if the function is performed on a remote server.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
|
||||
|
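A trivial usage sketch; when run through a Distributed table, the same call yields one value per shard:

``` sql
SELECT hostName();
```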
||||
## getMacro {#getmacro}
|
||||
|
||||
@ -691,10 +692,18 @@ Returns the largest value of a and b.
|
||||
## uptime() {#uptime}
|
||||
|
||||
Returns the server’s uptime in seconds.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
|
||||
|
||||
## version() {#version}
|
||||
|
||||
Returns the version of the server as a string.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
|
||||
|
||||
## buildId() {#buildid}
|
||||
|
||||
Returns the build ID generated by a compiler for the running ClickHouse server binary.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
|
||||
|
||||
|
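A combined usage sketch for these server-introspection functions (the output depends on the running server; `buildId()` is assumed to be available in the server version at hand):

``` sql
SELECT version(), uptime(), buildId();
```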
||||
## blockNumber {#blocknumber}
|
||||
|
||||
@ -2101,6 +2110,7 @@ UNSUPPORTED_METHOD
|
||||
## tcpPort {#tcpPort}
|
||||
|
||||
Returns the [native interface](../../interfaces/tcp.md) TCP port number the server listens on.
|
||||
If it is executed in the context of a distributed table, then it generates a normal column, otherwise it produces a constant value.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -2354,3 +2364,66 @@ Result:
|
||||
│ 1 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
## shardNum {#shard-num}
|
||||
|
||||
Returns the index of a shard which processes a part of the data for a distributed query. Indices start from `1`.
|
||||
If a query is not distributed, then the constant value `0` is returned.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
shardNum()
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Shard index or constant `0`.
|
||||
|
||||
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
In the following example, a configuration with two shards is used. The query is executed on the [system.one](../../operations/system-tables/one.md) table on every shard.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE shard_num_example (dummy UInt8)
|
||||
ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy);
|
||||
SELECT dummy, shardNum(), shardCount() FROM shard_num_example;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dummy─┬─shardNum()─┬─shardCount()─┐
|
||||
│ 0 │ 2 │ 2 │
|
||||
│ 0 │ 1 │ 2 │
|
||||
└───────┴────────────┴──────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md)
|
||||
|
||||
## shardCount {#shard-count}
|
||||
|
||||
Returns the total number of shards for a distributed query.
|
||||
If a query is not distributed, then the constant value `0` is returned.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
shardCount()
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Total number of shards or `0`.
|
||||
|
||||
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**See Also**
|
||||
|
||||
- The [shardNum()](#shard-num) function example also contains a `shardCount()` function call.
|
||||
|
@ -10,7 +10,7 @@ A set of queries that allow changing the table structure.
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COMMENT|MODIFY|MATERIALIZE COLUMN ...
|
||||
```
|
||||
|
||||
In the query, specify a list of one or more comma-separated actions.
|
||||
@ -25,6 +25,7 @@ The following actions are supported:
|
||||
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
|
||||
- [MODIFY COLUMN](#alter_modify-column) — Changes column’s type, default expression and TTL.
|
||||
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.
|
||||
- [MATERIALIZE COLUMN](#materialize-column) — Materializes the column in the parts where the column is missing.
|
||||
|
||||
These actions are described in detail below.
|
||||
|
||||
@ -193,6 +194,39 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
|
||||
|
||||
- [REMOVE TTL](ttl.md).
|
||||
|
||||
## MATERIALIZE COLUMN {#materialize-column}
|
||||
|
||||
Materializes the column in the parts where the column is missing. This is useful when creating a new column with a complicated `DEFAULT` or `MATERIALIZED` expression. Calculating the column directly in a `SELECT` query can increase query execution time, so it is reasonable to use `MATERIALIZE COLUMN` for such columns. To perform the same manipulation for an existing column, use the `FINAL` modifier.
|
||||
|
||||
Syntax:
|
||||
|
||||
```sql
|
||||
ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
|
||||
```
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS tmp;
|
||||
SET mutations_sync = 2;
|
||||
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
|
||||
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
|
||||
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);
|
||||
SELECT groupArray(x), groupArray(s) FROM tmp;
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```sql
|
||||
┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────┐
|
||||
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','5','6','7','8','9'] │
|
||||
└───────────────────────┴───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [MATERIALIZED](../../statements/create/table.md#materialized).
|
||||
|
||||
## Limitations {#alter-query-limitations}
|
||||
|
||||
The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
|
||||
|
@ -50,14 +50,13 @@ When creating a materialized view with `TO [db].[table]`, you must not use `POPU
|
||||
A materialized view is implemented as follows: when inserting data to the table specified in `SELECT`, part of the inserted data is converted by this `SELECT` query, and the result is inserted in the view.
|
||||
|
||||
!!! important "Important"
|
||||
Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in `SELECT`'s result ClickHouse will use a default value, even if column is not `Nullable`. A safe practice would be to add aliases for every column when using Materialized views.
|
||||
Materialized views in ClickHouse use **column names** instead of column order during insertion into the destination table. If some column names are not present in the `SELECT` query result, ClickHouse uses a default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice is to add aliases for every column when using materialized views, as in the sketch after this note.
|
||||
|
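A minimal sketch of that practice, with hypothetical `src` and `dst` tables and an explicit alias for every selected column:

``` sql
CREATE TABLE src (id UInt32, v UInt64) ENGINE = MergeTree ORDER BY id;
CREATE TABLE dst (key UInt32, value String) ENGINE = MergeTree ORDER BY key;

-- Every column is aliased to the exact destination column name,
-- so the insert matches by name regardless of column order.
CREATE MATERIALIZED VIEW mv TO dst AS
SELECT id AS key, toString(v) AS value FROM src;
```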
||||
!!! important "Important"
|
||||
Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of the source table (like update, delete, drop partition, etc.) do not change the materialized view.
|
||||
|
||||
If you specify `POPULATE`, the existing table data is inserted in the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` . Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using POPULATE, since data inserted in the table during the view creation will not be inserted in it.
|
||||
If you specify `POPULATE`, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...`. Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using `POPULATE`, since data inserted in the table during the view creation will not be inserted in it.
|
||||
|
||||
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
|
||||
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
|
||||
|
||||
The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
|
||||
|
||||
|
@ -6,7 +6,7 @@ toc_title: PREWHERE
|
||||
|
||||
Prewhere is an optimization to apply filtering more efficiently. It is enabled by default even if the `PREWHERE` clause is not specified explicitly. It works by automatically moving part of the [WHERE](../../../sql-reference/statements/select/where.md) condition to the prewhere stage. The role of the `PREWHERE` clause is only to control this optimization if you think that you know how to do it better than it happens by default.
|
||||
|
||||
With prewhere optimization, at first only the columns necessary for executing prewhere expression are read. Then the other columns are read that are needed for running the rest of the query, but only those blocks where the prewhere expression is “true” at least for some rows. If there are a lot of blocks where prewhere expression is “false” for all rows and prewhere needs less columns than other parts of query, this often allows to read a lot less data from disk for query execution.
|
||||
With prewhere optimization, at first only the columns necessary for executing the prewhere expression are read. Then the other columns that are needed for running the rest of the query are read, but only for those blocks where the prewhere expression is `true` for at least some rows. If there are many blocks where the prewhere expression is `false` for all rows, and prewhere needs fewer columns than other parts of the query, this often allows reading much less data from disk for query execution.
|
||||
|
||||
## Controlling Prewhere Manually {#controlling-prewhere-manually}
|
||||
|
||||
@ -14,11 +14,13 @@ The clause has the same meaning as the `WHERE` clause. The difference is in whic
|
||||
|
||||
A query may simultaneously specify `PREWHERE` and `WHERE`. In this case, `PREWHERE` precedes `WHERE`.
|
||||
|
||||
If the `optimize_move_to_prewhere` setting is set to 0, heuristics to automatically move parts of expressions from `WHERE` to `PREWHERE` are disabled.
|
||||
If the [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) setting is set to 0, heuristics to automatically move parts of expressions from `WHERE` to `PREWHERE` are disabled.
|
||||
|
||||
If a query has the [FINAL](from.md#select-from-final) modifier, the `PREWHERE` optimization is not always correct. It is enabled only if both settings [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) and [optimize_move_to_prewhere_if_final](../../../operations/settings/settings.md#optimize_move_to_prewhere_if_final) are turned on.
|
||||
|
||||
!!! note "Attention"
|
||||
The `PREWHERE` section is executed before` FINAL`, so the results of `FROM FINAL` queries may be skewed when using` PREWHERE` with fields not in the `ORDER BY` section of a table.
|
||||
The `PREWHERE` section is executed before `FINAL`, so the results of `FROM ... FINAL` queries may be skewed when using `PREWHERE` with fields not in the `ORDER BY` section of a table.
|
||||
|
||||
## Limitations {#limitations}
|
||||
|
||||
`PREWHERE` is only supported by tables from the `*MergeTree` family.
|
||||
`PREWHERE` is only supported by tables from the [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) family.
|
||||
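A small sketch of controlling the optimization manually; the `hits` table and its columns are hypothetical, and the `PREWHERE` condition should be the cheap, highly selective part of the filter:

``` sql
SELECT count()
FROM hits
PREWHERE CounterID = 101500
WHERE EventDate >= '2021-01-01';
```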
|
@ -668,7 +668,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
|
||||
- `policy_name_N` — Policy name. Policy names must be unique.
|
||||
- `volume_name_N` — Volume name. Volume names must be unique.
|
||||
- `disk` — a disk within a volume.
|
||||
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks.
|
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the size of a merged part is estimated to be bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume.
|
||||
- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if any (by default, 0.1).
|
||||
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
|
||||
|
||||
|
@ -136,3 +136,15 @@ logs - имя кластера в конфигурационном файле с
|
||||
|
||||
When the max_parallel_replicas option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
!!! note "Note"
|
||||
Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create a temporary table on the `Distributed` engine, the `_shard_num` column is available there too.
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) description
|
||||
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting
|
||||
- [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) and [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) functions
|
||||
|
@ -12,21 +12,21 @@ toc_title: "Анонимизированные данные Яндекс.Мет
|
||||
**Downloading and importing hits partitions:**
|
||||
|
||||
``` bash
|
||||
$ curl -O https://datasets.clickhouse.com/hits/partitions/hits_v1.tar
|
||||
$ tar xvf hits_v1.tar -C /var/lib/clickhouse # path to the folder with ClickHouse data
|
||||
$ # make sure that the file access permissions are set correctly
|
||||
$ sudo service clickhouse-server restart
|
||||
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
|
||||
curl -O https://datasets.clickhouse.com/hits/partitions/hits_v1.tar
|
||||
tar xvf hits_v1.tar -C /var/lib/clickhouse # path to the folder with ClickHouse data
|
||||
# make sure that the file access permissions are set correctly
|
||||
sudo service clickhouse-server restart
|
||||
clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
|
||||
```
|
||||
|
||||
**Downloading and importing visits partitions:**
|
||||
|
||||
``` bash
|
||||
$ curl -O https://datasets.clickhouse.com/visits/partitions/visits_v1.tar
|
||||
$ tar xvf visits_v1.tar -C /var/lib/clickhouse # path to the folder with ClickHouse data
|
||||
$ # make sure that the file access permissions are set correctly
|
||||
$ sudo service clickhouse-server restart
|
||||
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
|
||||
curl -O https://datasets.clickhouse.com/visits/partitions/visits_v1.tar
|
||||
tar xvf visits_v1.tar -C /var/lib/clickhouse # path to the folder with ClickHouse data
|
||||
# make sure that the file access permissions are set correctly
|
||||
sudo service clickhouse-server restart
|
||||
clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
|
||||
```
|
||||
|
||||
## Obtaining Tables from Compressed TSV Files {#poluchenie-tablits-iz-szhatykh-tsv-failov}
|
||||
@ -34,29 +34,32 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
|
||||
**Downloading and importing hits from a compressed TSV file**
|
||||
|
||||
``` bash
|
||||
$ curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
|
||||
$ # now create the table
|
||||
$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
|
||||
$ clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
|
||||
$ # import the data
|
||||
$ cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000
|
||||
$ # optionally, you can optimize the table
|
||||
$ clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL"
|
||||
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
|
||||
curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
|
||||
# create the hits_v1 table
|
||||
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
|
||||
clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
|
||||
# create the hits_100m_obfuscated table
|
||||
clickhouse-client --query="CREATE TABLE hits_100m_obfuscated (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, Refresh UInt8, RefererCategoryID UInt16, RefererRegionID UInt32, URLCategoryID UInt16, URLRegionID UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, OriginalURL String, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), LocalEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, RemoteIP UInt32, WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming UInt32, DNSTiming UInt32, ConnectTiming UInt32, ResponseStartTiming UInt32, ResponseEndTiming UInt32, FetchTiming UInt32, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
|
||||
|
||||
# import the data
|
||||
cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000
|
||||
# optionally, you can optimize the table
|
||||
clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL"
|
||||
clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
|
||||
```
|
||||
|
||||
**Downloading and importing visits from a compressed TSV file**
|
||||
|
||||
``` bash
|
||||
$ curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
|
||||
$ # now create the table
|
||||
$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
|
||||
$ clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, 
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
|
||||
$ # import the data
|
||||
$ cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
|
||||
$ # optionally, you can optimize the table
|
||||
$ clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
|
||||
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
|
||||
curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
|
||||
# now create the table
|
||||
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
|
||||
clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, 
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
|
||||
# import the data
|
||||
cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
|
||||
# optionally, you can optimize the table
|
||||
clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
|
||||
clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
|
||||
```
|
||||
|
||||
## Queries {#zaprosy}
|
||||
|
@ -3573,3 +3573,33 @@ SELECT * FROM positional_arguments ORDER BY 2,3;
|
||||
│ 10 │ 20 │ 30 │
|
||||
└─────┴─────┴───────┘
|
||||
```
|
||||
|
||||
## optimize_move_to_prewhere {#optimize_move_to_prewhere}
|
||||
|
||||
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries.
|
||||
|
||||
Works only with tables of the [*MergeTree](../../engines/table-engines/mergetree-family/index.md) family.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — automatic `PREWHERE` optimization is disabled.
|
||||
- 1 — automatic `PREWHERE` optimization is enabled.
|
||||
|
||||
Default value: `1`.
|
||||
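Where it helps, a minimal sketch of toggling this setting per session (the `hits` table name is hypothetical):

``` sql
-- Disable the automatic movement of WHERE conditions to PREWHERE
-- for the current session only.
SET optimize_move_to_prewhere = 0;

-- With the optimization off, this filter stays in WHERE as written.
SELECT count() FROM hits WHERE CounterID = 62;
```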
|
||||
## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final}
|
||||
|
||||
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
|
||||
|
||||
Works only with tables of the [*MergeTree](../../engines/table-engines/mergetree-family/index.md) family.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — automatic `PREWHERE` optimization in `SELECT` queries with the `FINAL` modifier is disabled.
|
||||
- 1 — automatic `PREWHERE` optimization in `SELECT` queries with the `FINAL` modifier is enabled.
|
||||
|
||||
Default value: `0`.
|
||||
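A minimal sketch of how this setting combines with the previous one for `FINAL` queries (the `visits` table name is hypothetical); automatic `PREWHERE` takes effect for `FINAL` only when both settings are enabled:

``` sql
SET optimize_move_to_prewhere = 1;
SET optimize_move_to_prewhere_if_final = 1;

-- With both settings enabled, the filter below may be moved
-- to PREWHERE even though the query uses the FINAL modifier.
SELECT count() FROM visits FINAL WHERE CounterID = 62;
```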
|
||||
**See also**
|
||||
|
||||
- The [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting
|
@ -26,6 +26,7 @@ SELECT
|
||||
## timeZone {#timezone}
|
||||
|
||||
Returns the server time zone.
|
||||
If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, it returns a constant.
|
||||
|
||||
**Syntax**
|
||||
|
||||
|
@ -6,7 +6,7 @@ toc_title: "Functions for Working with H3 Indexes"
|
||||
|
||||
[H3](https://eng.uber.com/h3/) is a geocoding system that divides the Earth's surface into equal hexagonal cells. The system supports a hierarchy (nesting) of cells, i.e. each "parent" hexagon can be split into seven equal nested "child" hexagons, and so on.
|
||||
|
||||
The nesting level is called the `resolution` and can take values from `0` to `15`, where `0` corresponds to the `base` cells of the topmost level (the largest ones).
|
||||
The nesting level is called the "resolution" and can take values from `0` to `15`, where `0` corresponds to the "base" cells of the topmost level (the largest ones).
|
||||
|
||||
For every point with a latitude and longitude, you can get the 64-bit H3 index corresponding to the number of the hexagonal cell that contains the point.
|
||||
|
||||
@ -38,7 +38,7 @@ h3IsValid(h3index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3IsValid(630814730351855103) as h3IsValid;
|
||||
SELECT h3IsValid(630814730351855103) AS h3IsValid;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -75,7 +75,7 @@ h3GetResolution(h3index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetResolution(639821929606596015) as resolution;
|
||||
SELECT h3GetResolution(639821929606596015) AS resolution;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -109,7 +109,7 @@ h3EdgeAngle(resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3EdgeAngle(10) as edgeAngle;
|
||||
SELECT h3EdgeAngle(10) AS edgeAngle;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -143,7 +143,7 @@ h3EdgeLengthM(resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3EdgeLengthM(15) as edgeLengthM;
|
||||
SELECT h3EdgeLengthM(15) AS edgeLengthM;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -182,7 +182,7 @@ geoToH3(lon, lat, resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index;
|
||||
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -295,7 +295,7 @@ h3GetBaseCell(index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetBaseCell(612916788725809151) as basecell;
|
||||
SELECT h3GetBaseCell(612916788725809151) AS basecell;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -329,7 +329,7 @@ h3HexAreaM2(resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3HexAreaM2(13) as area;
|
||||
SELECT h3HexAreaM2(13) AS area;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -441,7 +441,7 @@ h3ToParent(index, resolution)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3ToParent(599405990164561919, 3) as parent;
|
||||
SELECT h3ToParent(599405990164561919, 3) AS parent;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -475,7 +475,7 @@ h3ToString(index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3ToString(617420388352917503) as h3_string;
|
||||
SELECT h3ToString(617420388352917503) AS h3_string;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -512,7 +512,7 @@ stringToH3(index_str)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT stringToH3('89184926cc3ffff') as index;
|
||||
SELECT stringToH3('89184926cc3ffff') AS index;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -548,7 +548,7 @@ h3GetResolution(index)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetResolution(617420388352917503) as res;
|
||||
SELECT h3GetResolution(617420388352917503) AS res;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -559,3 +559,114 @@ SELECT h3GetResolution(617420388352917503) as res;
|
||||
└─────┘
|
||||
```
|
||||
|
||||
## h3IsResClassIII {#h3isresclassIII}
|
||||
|
||||
Checks whether the [H3](#h3index) index has a resolution with Class III orientation.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3IsResClassIII(index)
|
||||
```
|
||||
|
||||
**Parameter**
|
||||
|
||||
- `index` — hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `1` — the index has a resolution with Class III orientation.
|
||||
- `0` — the index does not have a resolution with Class III orientation.
|
||||
|
||||
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3IsResClassIII(617420388352917503) AS res;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─res─┐
|
||||
│ 1 │
|
||||
└─────┘
|
||||
```
|
||||
|
||||
## h3IsPentagon {#h3ispentagon}
|
||||
|
||||
Checks whether the specified [H3](#h3index) index is a pentagonal cell.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3IsPentagon(index)
|
||||
```
|
||||
|
||||
**Parameter**
|
||||
|
||||
- `index` — hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `1` — the index is a pentagonal cell.
|
||||
- `0` — the index is not a pentagonal cell.
|
||||
|
||||
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3IsPentagon(644721767722457330) AS pentagon;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─pentagon─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## h3GetFaces {#h3getfaces}
|
||||
|
||||
Returns all icosahedron faces intersected by the given [H3](#h3index) index.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3GetFaces(index)
|
||||
```
|
||||
|
||||
**Parameter**
|
||||
|
||||
- `index` — hexagonal cell index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- An array containing the icosahedron faces intersected by the given H3 index.
|
||||
|
||||
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetFaces(599686042433355775) AS faces;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─faces─┐
|
||||
│ [7] │
|
||||
└───────┘
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.com/docs/ru/sql-reference/functions/geo/h3) <!--hide-->
|
||||
|
@ -141,20 +141,49 @@ SELECT groupBitXor(cityHash64(*)) FROM table
|
||||
Calculates a 64-bit hash code from an integer of any type.
|
||||
Works faster than intHash32. Average quality.
|
||||
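The heading of this entry falls outside the hunk; judging by the description, it refers to `intHash64`. A minimal usage sketch:

``` sql
-- Hash an integer literal; any integer type works as input.
SELECT intHash64(42) AS h;
```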
|
||||
## SHA1 {#sha1}
|
||||
## SHA1, SHA224, SHA256, SHA512 {#sha}
|
||||
|
||||
## SHA224 {#sha224}
|
||||
Calculates the SHA-1, SHA-224, SHA-256, or SHA-512 hash of a string and returns the resulting set of bytes as a [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
## SHA256 {#sha256}
|
||||
**Syntax**
|
||||
|
||||
## SHA384 {#sha384}
|
||||
``` sql
|
||||
SHA1('s')
|
||||
...
|
||||
SHA512('s')
|
||||
```
|
||||
|
||||
## SHA512 {#sha512}
|
||||
The function works fairly slowly (SHA-1 processes about 5 million short strings per second per CPU core; SHA-224 and SHA-256 process about 2.2 million).
|
||||
We recommend using these functions only when you need a specific hash function and cannot choose it.
|
||||
Even in these cases, we recommend applying the function offline, precomputing values when inserting them into the table, instead of applying it when running `SELECT` queries.
|
||||
|
||||
Calculates SHA-1, SHA-224, or SHA-256 from a string and returns the resulting set of bytes as FixedString(20), FixedString(28), FixedString(32), FixedString(48), or FixedString(64).
|
||||
The function works fairly slowly (SHA-1 processes about 5 million short strings per second per CPU core; SHA-224 and SHA-256 process about 2.2 million).
|
||||
We recommend using these functions only when you need a specific hash function and cannot choose it.
|
||||
Even in these cases, we recommend applying the function offline, precomputing values when inserting them into the table, instead of applying it in SELECTs.
|
||||
**Parameters**
|
||||
|
||||
- `s` — input string for SHA hash calculation. [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- SHA hash as a hex-unencoded FixedString. SHA-1 as FixedString(20), SHA-224 as FixedString(28), SHA-256 as FixedString(32), SHA-512 as FixedString(64).
|
||||
|
||||
Type: [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT hex(SHA1('abc'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─hex(SHA1('abc'))─────────────────────────┐
|
||||
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
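As a sketch of the "apply the function offline" recommendation above, the digest can be stored in a `MATERIALIZED` column so it is computed once at insert time rather than in every `SELECT` (the table and column names here are hypothetical):

``` sql
CREATE TABLE users
(
    login String,
    -- Computed once per inserted row; SELECT queries read the stored bytes.
    login_sha256 FixedString(32) MATERIALIZED SHA256(login)
)
ENGINE = MergeTree()
ORDER BY login;

INSERT INTO users (login) VALUES ('alice');

SELECT hex(login_sha256) FROM users;
```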
|
||||
## URLHash(url\[, N\]) {#urlhashurl-n}
|
||||
|
||||
|
@ -8,6 +8,7 @@ toc_title: "Other Functions"
|
||||
## hostName() {#hostname}
|
||||
|
||||
Returns a string with the name of the host on which this function was executed. In distributed query processing, this is the name of the remote server host if the function is executed on a remote server.
|
||||
If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, it returns a constant.
|
||||
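A minimal illustration (the returned value depends on the server, so no output is shown):

``` sql
SELECT hostName();
```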
|
||||
## getMacro {#getmacro}
|
||||
|
||||
@ -643,10 +644,17 @@ SELECT
|
||||
## uptime() {#uptime}
|
||||
|
||||
Returns the server uptime in seconds.
|
||||
If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, it returns a constant.
|
||||
|
||||
## version() {#version}
|
||||
|
||||
Returns the server version as a string.
|
||||
If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, it returns a constant.
|
||||
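A minimal illustration combining the functions above (the output depends on the server):

``` sql
SELECT version(), uptime();
```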
|
||||
## buildId() {#buildid}
|
||||
|
||||
Returns the build ID generated by the compiler for the given ClickHouse server.
|
||||
If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, it returns a constant.
|
||||
|
||||
## rowNumberInBlock {#function-rownumberinblock}
|
||||
|
||||
@ -2304,3 +2312,66 @@ SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0
|
||||
│ 1 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
## shardNum {#shard-num}
|
||||
|
||||
Returns the index of the shard that processes part of the data for a distributed query. Indexes start from `1`.
|
||||
If the query is not distributed, the value `0` is returned.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
shardNum()
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Shard index or the constant `0`.
|
||||
|
||||
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
The example below uses a configuration with two shards. A query against the [system.one](../../operations/system-tables/one.md) table is executed on each shard.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE shard_num_example (dummy UInt8)
|
||||
ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy);
|
||||
SELECT dummy, shardNum(), shardCount() FROM shard_num_example;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dummy─┬─shardNum()─┬─shardCount()─┐
|
||||
│ 0 │ 2 │ 2 │
|
||||
│ 0 │ 1 │ 2 │
|
||||
└───────┴────────────┴──────────────┘
|
||||
```
|
||||
|
||||
**See also**
|
||||
|
||||
- The [Distributed](../../engines/table-engines/special/distributed.md) table engine
|
||||
|
||||
## shardCount {#shard-count}
|
||||
|
||||
Returns the total number of shards in a distributed query.
|
||||
If the query is not distributed, the value `0` is returned.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
shardCount()
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Total number of shards or `0`.
|
||||
|
||||
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**See also**
|
||||
|
||||
- The usage example for the [shardNum()](#shard-num) function also contains a `shardCount()` call.
|
||||
|
@ -10,7 +10,7 @@ toc_title: "Column Manipulations"
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COMMENT|MODIFY|MATERIALIZE COLUMN ...
|
||||
```
|
||||
|
||||
A single query can specify several comma-separated actions on one table, as shown in the sketch after the action list below.
|
||||
@ -20,11 +20,12 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN
|
||||
|
||||
- [ADD COLUMN](#alter_add-column) — adds a column to the table;
|
||||
- [DROP COLUMN](#alter_drop-column) — drops the column;
|
||||
- [RENAME COLUMN](#alter_rename-column) — renames an existing column.
|
||||
- [RENAME COLUMN](#alter_rename-column) — renames an existing column;
|
||||
- [CLEAR COLUMN](#alter_clear-column) — resets all column values for the specified partition;
|
||||
- [COMMENT COLUMN](#alter_comment-column) — adds a comment to the column;
|
||||
- [MODIFY COLUMN](#alter_modify-column) — changes the column type, default value expression, and TTL.
|
||||
- [MODIFY COLUMN REMOVE](#modify-remove) — removes one of the column properties.
|
||||
- [MODIFY COLUMN](#alter_modify-column) — changes the column type, default value expression, and TTL;
|
||||
- [MODIFY COLUMN REMOVE](#modify-remove) — removes one of the column properties;
|
||||
- [MATERIALIZE COLUMN](#materialize-column) — materializes (`MATERIALIZED`) the column in the parts where values are missing.
|
||||
|
||||
A detailed description of each action is given below.
|
||||
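As a sketch of combining several actions in one query (the table and column names are hypothetical):

``` sql
ALTER TABLE visits
    ADD COLUMN browser String,
    COMMENT COLUMN browser 'Browser name',
    DROP COLUMN obsolete_field;
```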
|
||||
@ -193,6 +194,35 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
|
||||
|
||||
- [REMOVE TTL](ttl.md).
|
||||
|
||||
## MATERIALIZE COLUMN {#materialize-column}
|
||||
|
||||
Materializes a table column in the parts where values are missing. This is useful when you need to create a new column with a complex materialized or `DEFAULT` expression, because evaluating such a column directly during a `SELECT` query can be noticeably expensive. To perform the same operation for an existing column, use the `FINAL` modifier.
|
||||
|
||||
Syntax:
|
||||
|
||||
```sql
|
||||
ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
|
||||
```
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS tmp;
|
||||
SET mutations_sync = 2;
|
||||
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
|
||||
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
|
||||
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);
|
||||
SELECT groupArray(x), groupArray(s) FROM tmp;
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```sql
|
||||
┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────┐
|
||||
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','5','6','7','8','9'] │
|
||||
└───────────────────────┴───────────────────────────────────────────┘
|
||||
```
|
||||
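Continuing the example, a sketch of the statement itself: without `FINAL` it fills `s` only in the parts where values are missing; per the description above, `FINAL` is the form to use for an existing column:

``` sql
ALTER TABLE tmp MATERIALIZE COLUMN s;

-- For an existing column, force materialization in all parts:
ALTER TABLE tmp MATERIALIZE COLUMN s FINAL;
```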
|
||||
## ALTER Query Limitations {#ogranicheniia-zaprosa-alter}
|
||||
|
||||
The `ALTER` query lets you create and delete separate elements (columns) of nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and type `Array(T)`: a nested data structure is fully equivalent to multiple array columns whose names share the same prefix before the dot.
|
||||
|
@ -48,9 +48,12 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na
|
||||
A materialized view works as follows: when data is inserted into the table specified in the SELECT, a chunk of the inserted data is transformed by this SELECT query, and the result is inserted into the view.
|
||||
|
||||
!!! important "Important"
|
||||
|
||||
Materialized views in ClickHouse use **column names** rather than column order when inserting into the target table. If some column names are missing from the `SELECT` query result, ClickHouse uses the default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice when working with materialized views is to add aliases for every column, as illustrated in the sketch below.
|
||||
|
||||
Materialized views in ClickHouse are more like `after insert` triggers. If the materialized view query contains aggregation, it is applied only to the batch of inserted records. Any changes to the existing data of the source table (such as updates, deletes, or dropping a partition) do not change the materialized view.
|
||||
|
||||
If `POPULATE` is specified, the existing table data is inserted into the view when it is created, as if a `CREATE TABLE ... AS SELECT ...` query had been run. Otherwise, the view contains only the data inserted into the table after the view is created. Using POPULATE is not recommended, because data inserted into the table while the view is being created will not end up in it.
|
||||
If `POPULATE` is specified, the data already contained in the source table is added to the view when it is created, as if a `CREATE TABLE ... AS SELECT ...` query had been run. If `POPULATE` is not specified, the view contains only the data added to the table after the view is created. Using `POPULATE` is not recommended, because data added to the table while the view is being created will not end up in it.
|
||||
|
||||
The `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`, and so on. Keep in mind that the corresponding transformations are performed independently on each block of inserted data. For example, with `GROUP BY`, data is aggregated at insert time, but only within a single batch of inserted data; it is not aggregated any further. The exception is using an ENGINE that aggregates data on its own, such as `SummingMergeTree`.
|
||||
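As a sketch of the alias recommendation and the per-block aggregation described above (all table names here are hypothetical), every column of the `SELECT` gets an explicit alias matching the target table:

``` sql
CREATE TABLE dst (d Date, cnt UInt64) ENGINE = SummingMergeTree() ORDER BY d;

CREATE MATERIALIZED VIEW mv TO dst AS
SELECT
    toDate(ts) AS d,   -- alias matches the column name in dst
    count() AS cnt     -- partial counts per inserted block; dst sums them up
FROM src
GROUP BY d;
```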
|
||||
|
@ -8,17 +8,19 @@ Prewhere is an optimization for more efficient
|
||||
|
||||
With the prewhere optimization, only the columns needed to evaluate the prewhere expression are read first. Then the remaining columns needed to run the rest of the query are read, but only in the blocks where the prewhere expression is "true" for at least some rows. If there are many blocks where the prewhere expression is "false" for all rows, and the prewhere expression needs fewer columns than other parts of the query, this often allows reading much less data from disk to execute the query.
|
||||
|
||||
## Controlling prewhere Manually {#controlling-prewhere-manually}
|
||||
## Controlling PREWHERE Manually {#controlling-prewhere-manually}
|
||||
|
||||
It makes sense to use `PREWHERE` when there are filtering conditions that use a minority of the columns in the query but filter the data strongly. This reduces the amount of data to read.
|
||||
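A sketch with a hypothetical table: the cheap, strongly filtering condition goes to `PREWHERE`, while the rest stays in `WHERE`:

``` sql
SELECT count()
FROM hits
PREWHERE CounterID = 62
WHERE EventDate >= '2013-07-01';
```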
|
||||
A query can specify both `PREWHERE` and `WHERE` at the same time. In this case, `PREWHERE` precedes `WHERE`.
|
||||
A query can specify both `PREWHERE` and `WHERE` at the same time. In this case, `PREWHERE` precedes `WHERE`.
|
||||
|
||||
If the `optimize_move_to_prewhere` setting is 0, the heuristic for automatically moving parts of expressions from `WHERE` to `PREWHERE` is disabled.
|
||||
If the [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) setting is 0, the heuristic for automatically moving parts of expressions from `WHERE` to `PREWHERE` is disabled.
|
||||
|
||||
If a query contains the [FINAL](from.md#select-from-final) modifier, the `PREWHERE` optimization is not always correct. It takes effect only if both the [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) and [optimize_move_to_prewhere_if_final](../../../operations/settings/settings.md#optimize_move_to_prewhere_if_final) settings are enabled.
|
||||
|
||||
!!! note "Attention"
|
||||
The `PREWHERE` section is executed before `FINAL`, so the results of `FROM FINAL` queries may be distorted when `PREWHERE` is used with fields that are not part of the table's `ORDER BY`.
|
||||
The `PREWHERE` section is executed before `FINAL`, so the results of `FROM ... FINAL` queries may be distorted when `PREWHERE` is used with fields that are not part of the table's `ORDER BY`.
|
||||
|
||||
## Limitations {#limitations}
|
||||
|
||||
`PREWHERE` is supported only by table engines from the `*MergeTree` family.
|
||||
`PREWHERE` is supported only by table engines from the [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) family.
|
||||
|
@ -14,6 +14,7 @@ module.exports = {
|
||||
|
||||
entry: [
|
||||
path.resolve(scssPath, 'bootstrap.scss'),
|
||||
path.resolve(scssPath, 'greenhouse.scss'),
|
||||
path.resolve(scssPath, 'main.scss'),
|
||||
path.resolve(jsPath, 'main.js'),
|
||||
],
|
||||
|
@ -156,6 +156,11 @@ def build_website(args):
|
||||
os.path.join(args.src_dir, 'utils', 'list-versions', 'version_date.tsv'),
|
||||
os.path.join(args.output_dir, 'data', 'version_date.tsv'))
|
||||
|
||||
# This file can be requested to install ClickHouse.
|
||||
shutil.copy2(
|
||||
os.path.join(args.src_dir, 'docs', '_includes', 'install', 'universal.sh'),
|
||||
os.path.join(args.output_dir, 'data', 'install.sh'))
|
||||
|
||||
for root, _, filenames in os.walk(args.output_dir):
|
||||
for filename in filenames:
|
||||
if filename == 'main.html':
|
||||
@ -226,6 +231,12 @@ def minify_file(path, css_digest, js_digest):
|
||||
|
||||
|
||||
def minify_website(args):
|
||||
# Output greenhouse css separately from main bundle to be included via the greenhouse iframe
|
||||
command = f"cat '{args.website_dir}/css/greenhouse.css' > '{args.output_dir}/css/greenhouse.css'"
|
||||
logging.info(command)
|
||||
output = subprocess.check_output(command, shell=True)
|
||||
logging.debug(output)
|
||||
|
||||
css_in = ' '.join(get_css_in(args))
|
||||
css_out = f'{args.output_dir}/css/base.css'
|
||||
if args.minify:
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include <IO/ConnectionTimeouts.h>
|
||||
#include <IO/ConnectionTimeoutsContext.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <DataStreams/RemoteBlockInputStream.h>
|
||||
#include <DataStreams/RemoteQueryExecutor.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Client/Connection.h>
|
||||
#include <Common/InterruptListener.h>
|
||||
@ -424,20 +424,19 @@ private:
|
||||
if (reconnect)
|
||||
connection.disconnect();
|
||||
|
||||
RemoteBlockInputStream stream(
|
||||
RemoteQueryExecutor executor(
|
||||
connection, query, {}, global_context, nullptr, Scalars(), Tables(), query_processing_stage);
|
||||
if (!query_id.empty())
|
||||
stream.setQueryId(query_id);
|
||||
executor.setQueryId(query_id);
|
||||
|
||||
Progress progress;
|
||||
stream.setProgressCallback([&progress](const Progress & value) { progress.incrementPiecewiseAtomically(value); });
|
||||
executor.setProgressCallback([&progress](const Progress & value) { progress.incrementPiecewiseAtomically(value); });
|
||||
|
||||
stream.readPrefix();
|
||||
while (Block block = stream.read());
|
||||
BlockStreamProfileInfo info;
|
||||
while (Block block = executor.read())
|
||||
info.update(block);
|
||||
|
||||
stream.readSuffix();
|
||||
|
||||
const BlockStreamProfileInfo & info = stream.getProfileInfo();
|
||||
executor.finish();
|
||||
|
||||
double seconds = watch.elapsedSeconds();
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <filesystem>
|
||||
#include <string>
|
||||
#include "Client.h"
|
||||
#include "Core/Protocol.h"
|
||||
|
||||
#include <base/argsToConfig.h>
|
||||
#include <base/find_symbols.h>
|
||||
@ -377,6 +378,9 @@ std::vector<String> Client::loadWarningMessages()
|
||||
case Protocol::Server::EndOfStream:
|
||||
return messages;
|
||||
|
||||
case Protocol::Server::ProfileEvents:
|
||||
continue;
|
||||
|
||||
default:
|
||||
throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}",
|
||||
packet.type, connection->getDescription());
|
||||
|
@ -14,7 +14,6 @@
|
||||
#include <Processors/Executors/PullingPipelineExecutor.h>
|
||||
#include <Processors/Executors/PushingPipelineExecutor.h>
|
||||
#include <Processors/Sources/RemoteSource.h>
|
||||
#include <DataStreams/ExpressionBlockInputStream.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -87,7 +86,7 @@ decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries)
|
||||
if (try_number < max_tries)
|
||||
{
|
||||
tryLogCurrentException(log, "Will retry");
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -310,7 +309,7 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)
|
||||
|
||||
/// Retry table processing
|
||||
bool table_is_done = false;
|
||||
for (UInt64 num_table_tries = 0; num_table_tries < max_table_tries; ++num_table_tries)
|
||||
for (UInt64 num_table_tries = 1; num_table_tries <= max_table_tries; ++num_table_tries)
|
||||
{
|
||||
if (tryProcessTable(timeouts, task_table))
|
||||
{
|
||||
@ -341,7 +340,7 @@ zkutil::EphemeralNodeHolder::Ptr ClusterCopier::createTaskWorkerNodeAndWaitIfNee
|
||||
const String & description,
|
||||
bool unprioritized)
|
||||
{
|
||||
std::chrono::milliseconds current_sleep_time = default_sleep_time;
|
||||
std::chrono::milliseconds current_sleep_time = retry_delay_ms;
|
||||
static constexpr std::chrono::milliseconds max_sleep_time(30000); // 30 sec
|
||||
|
||||
if (unprioritized)
|
||||
@ -367,7 +366,7 @@ zkutil::EphemeralNodeHolder::Ptr ClusterCopier::createTaskWorkerNodeAndWaitIfNee
|
||||
LOG_INFO(log, "Too many workers ({}, maximum {}). Postpone processing {}", stat.numChildren, task_cluster->max_workers, description);
|
||||
|
||||
if (unprioritized)
|
||||
current_sleep_time = std::min(max_sleep_time, current_sleep_time + default_sleep_time);
|
||||
current_sleep_time = std::min(max_sleep_time, current_sleep_time + retry_delay_ms);
|
||||
|
||||
std::this_thread::sleep_for(current_sleep_time);
|
||||
num_bad_version_errors = 0;
|
||||
@ -786,7 +785,7 @@ bool ClusterCopier::tryDropPartitionPiece(
|
||||
if (e.code == Coordination::Error::ZNODEEXISTS)
|
||||
{
|
||||
LOG_INFO(log, "Partition {} piece {} is cleaning now by somebody, sleep", task_partition.name, toString(current_piece_number));
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -799,7 +798,7 @@ bool ClusterCopier::tryDropPartitionPiece(
|
||||
if (stat.numChildren != 0)
|
||||
{
|
||||
LOG_INFO(log, "Partition {} contains {} active workers while trying to drop it. Going to sleep.", task_partition.name, stat.numChildren);
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
return false;
|
||||
}
|
||||
else
|
||||
@ -1006,7 +1005,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
task_status = TaskStatus::Error;
|
||||
bool was_error = false;
|
||||
has_shard_to_process = true;
|
||||
for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num)
|
||||
for (UInt64 try_num = 1; try_num <= max_shard_partition_tries; ++try_num)
|
||||
{
|
||||
task_status = tryProcessPartitionTask(timeouts, partition, is_unprioritized_task);
|
||||
|
||||
@ -1021,7 +1020,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
break;
|
||||
|
||||
/// Repeat on errors
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
}
|
||||
|
||||
if (task_status == TaskStatus::Error)
|
||||
@ -1069,7 +1068,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
break;
|
||||
|
||||
/// Repeat on errors.
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -1110,7 +1109,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
|
||||
if (!table_is_done)
|
||||
{
|
||||
LOG_INFO(log, "Table {} is not processed yet.Copied {} of {}, will retry", task_table.table_id, finished_partitions, required_partitions);
|
||||
LOG_INFO(log, "Table {} is not processed yet. Copied {} of {}, will retry", task_table.table_id, finished_partitions, required_partitions);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -1213,7 +1212,7 @@ TaskStatus ClusterCopier::iterateThroughAllPiecesInPartition(const ConnectionTim
|
||||
break;
|
||||
|
||||
/// Repeat on errors
|
||||
std::this_thread::sleep_for(default_sleep_time);
|
||||
std::this_thread::sleep_for(retry_delay_ms);
|
||||
}
|
||||
|
||||
was_active_pieces = (res == TaskStatus::Active);
|
||||
|
@ -65,6 +65,23 @@ public:
|
||||
experimental_use_sample_offset = value;
|
||||
}
|
||||
|
||||
void setMaxTableTries(UInt64 tries)
|
||||
{
|
||||
max_table_tries = tries;
|
||||
}
|
||||
void setMaxShardPartitionTries(UInt64 tries)
|
||||
{
|
||||
max_shard_partition_tries = tries;
|
||||
}
|
||||
void setMaxShardPartitionPieceTriesForAlter(UInt64 tries)
|
||||
{
|
||||
max_shard_partition_piece_tries_for_alter = tries;
|
||||
}
|
||||
void setRetryDelayMs(std::chrono::milliseconds ms)
|
||||
{
|
||||
retry_delay_ms = ms;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
String getWorkersPath() const
|
||||
@ -123,10 +140,6 @@ protected:
|
||||
bool tryDropPartitionPiece(ShardPartition & task_partition, size_t current_piece_number,
|
||||
const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock);
|
||||
|
||||
static constexpr UInt64 max_table_tries = 3;
|
||||
static constexpr UInt64 max_shard_partition_tries = 3;
|
||||
static constexpr UInt64 max_shard_partition_piece_tries_for_alter = 10;
|
||||
|
||||
bool tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table);
|
||||
|
||||
TaskStatus tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table);
|
||||
@ -218,6 +231,9 @@ private:
|
||||
|
||||
Poco::Logger * log;
|
||||
|
||||
std::chrono::milliseconds default_sleep_time{1000};
|
||||
UInt64 max_table_tries = 3;
|
||||
UInt64 max_shard_partition_tries = 3;
|
||||
UInt64 max_shard_partition_piece_tries_for_alter = 10;
|
||||
std::chrono::milliseconds retry_delay_ms{1000};
|
||||
};
|
||||
}
|
||||
|
@ -31,6 +31,10 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)
|
||||
move_fault_probability = std::max(std::min(config().getDouble("move-fault-probability"), 1.0), 0.0);
|
||||
base_dir = (config().has("base-dir")) ? config().getString("base-dir") : fs::current_path().string();
|
||||
|
||||
max_table_tries = std::max<size_t>(config().getUInt("max-table-tries", 3), 1);
|
||||
max_shard_partition_tries = std::max<size_t>(config().getUInt("max-shard-partition-tries", 3), 1);
|
||||
max_shard_partition_piece_tries_for_alter = std::max<size_t>(config().getUInt("max-shard-partition-piece-tries-for-alter", 10), 1);
|
||||
retry_delay_ms = std::chrono::milliseconds(std::max<size_t>(config().getUInt("retry-delay-ms", 1000), 100));
|
||||
|
||||
if (config().has("experimental-use-sample-offset"))
|
||||
experimental_use_sample_offset = config().getBool("experimental-use-sample-offset");
|
||||
@ -100,6 +104,15 @@ void ClusterCopierApp::defineOptions(Poco::Util::OptionSet & options)
|
||||
.argument("experimental-use-sample-offset").binding("experimental-use-sample-offset"));
|
||||
options.addOption(Poco::Util::Option("status", "", "Get for status for current execution").binding("status"));
|
||||
|
||||
options.addOption(Poco::Util::Option("max-table-tries", "", "Number of tries for the copy table task")
|
||||
.argument("max-table-tries").binding("max-table-tries"));
|
||||
options.addOption(Poco::Util::Option("max-shard-partition-tries", "", "Number of tries for the copy one partition task")
|
||||
.argument("max-shard-partition-tries").binding("max-shard-partition-tries"));
|
||||
options.addOption(Poco::Util::Option("max-shard-partition-piece-tries-for-alter", "", "Number of tries for final ALTER ATTACH to destination table")
|
||||
.argument("max-shard-partition-piece-tries-for-alter").binding("max-shard-partition-piece-tries-for-alter"));
|
||||
options.addOption(Poco::Util::Option("retry-delay-ms", "", "Delay between task retries")
|
||||
.argument("retry-delay-ms").binding("retry-delay-ms"));
|
||||
|
||||
using Me = std::decay_t<decltype(*this)>;
|
||||
options.addOption(Poco::Util::Option("help", "", "produce this help message").binding("help")
|
||||
.callback(Poco::Util::OptionCallback<Me>(this, &Me::handleHelp)));
|
||||
@ -161,7 +174,10 @@ void ClusterCopierApp::mainImpl()
|
||||
copier->setSafeMode(is_safe_mode);
|
||||
copier->setCopyFaultProbability(copy_fault_probability);
|
||||
copier->setMoveFaultProbability(move_fault_probability);
|
||||
|
||||
copier->setMaxTableTries(max_table_tries);
|
||||
copier->setMaxShardPartitionTries(max_shard_partition_tries);
|
||||
copier->setMaxShardPartitionPieceTriesForAlter(max_shard_partition_piece_tries_for_alter);
|
||||
copier->setRetryDelayMs(retry_delay_ms);
|
||||
copier->setExperimentalUseSampleOffset(experimental_use_sample_offset);
|
||||
|
||||
auto task_file = config().getString("task-file", "");
|
||||
|
@ -83,6 +83,11 @@ private:
|
||||
double move_fault_probability = 0.0;
|
||||
bool is_help = false;
|
||||
|
||||
UInt64 max_table_tries = 3;
|
||||
UInt64 max_shard_partition_tries = 3;
|
||||
UInt64 max_shard_partition_piece_tries_for_alter = 10;
|
||||
std::chrono::milliseconds retry_delay_ms{1000};
|
||||
|
||||
bool experimental_use_sample_offset{false};
|
||||
|
||||
std::string base_dir;
|
||||
|
@ -49,7 +49,7 @@
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Formats/FormatSettings.h>
|
||||
#include <DataStreams/RemoteBlockInputStream.h>
|
||||
#include <DataStreams/RemoteQueryExecutor.h>
|
||||
#include <DataStreams/SquashingBlockInputStream.h>
|
||||
#include <DataStreams/copyData.h>
|
||||
#include <DataStreams/NullBlockOutputStream.h>
|
||||
|
@ -1036,6 +1036,10 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
server.start();
|
||||
|
||||
SCOPE_EXIT({
|
||||
/// Stop reloading of the main config. This must be done before `global_context->shutdown()` because
|
||||
/// otherwise the reloading may pass a changed config to some destroyed parts of ContextSharedPart.
|
||||
main_config_reloader.reset();
|
||||
|
||||
/** Ask to cancel background jobs all table engines,
|
||||
* and also query_log.
|
||||
* It is important to do early, not in destructor of Context, because
|
||||
@ -1076,9 +1080,6 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
|
||||
server_pool.joinAll();
|
||||
|
||||
// Uses a raw pointer to global context for getting ZooKeeper.
|
||||
main_config_reloader.reset();
|
||||
|
||||
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
|
||||
* At this moment, no one could own shared part of Context.
|
||||
*/
|
||||
@ -1138,18 +1139,7 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
|
||||
/// Init trace collector only after trace_log system table was created
|
||||
/// Disable it if we collect test coverage information, because it will work extremely slow.
|
||||
///
|
||||
/// It also cannot work with sanitizers.
|
||||
/// Sanitizers are using quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer)
|
||||
/// And they do unwinding frequently (on every malloc/free, thread/mutex operations, etc).
|
||||
/// They change %rbp during unwinding and it confuses libunwind if signal comes during sanitizer unwinding
|
||||
/// and query profiler decide to unwind stack with libunwind at this moment.
|
||||
///
|
||||
/// Symptoms: you'll get silent Segmentation Fault - without sanitizer message and without usual ClickHouse diagnostics.
|
||||
///
|
||||
/// Look at compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
|
||||
///
|
||||
#if USE_UNWIND && !WITH_COVERAGE && !defined(SANITIZER) && defined(__x86_64__)
|
||||
#if USE_UNWIND && !WITH_COVERAGE && defined(__x86_64__)
|
||||
/// Profilers cannot work reliably with any other libunwind or without PHDR cache.
|
||||
if (hasPHDRCache())
|
||||
{
|
||||
@ -1181,7 +1171,7 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
#endif
|
||||
|
||||
#if defined(SANITIZER)
|
||||
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they cannot work under sanitizers"
|
||||
LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
|
||||
" when two different stack unwinding methods will interfere with each other.");
|
||||
#endif
|
||||
|
||||
@ -1549,6 +1539,7 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
LOG_INFO(log, "Closed all listening sockets.");
|
||||
|
||||
/// Killing remaining queries.
|
||||
if (!config().getBool("shutdown_wait_unfinished_queries", false))
|
||||
global_context->getProcessList().killAllQueries();
|
||||
|
||||
if (current_connections)
|
||||
|
@ -62,6 +62,27 @@
|
||||
-->
|
||||
</logger>
|
||||
|
||||
<!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->
|
||||
<!-- It is off by default. Next headers are obligate for CORS.-->
|
||||
<!-- http_options_response>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Origin</name>
|
||||
<value>*</value>
|
||||
</header>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Headers</name>
|
||||
<value>origin, x-requested-with</value>
|
||||
</header>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Methods</name>
|
||||
<value>POST, GET, OPTIONS</value>
|
||||
</header>
|
||||
<header>
|
||||
<name>Access-Control-Max-Age</name>
|
||||
<value>86400</value>
|
||||
</header>
|
||||
</http_options_response -->
|
||||
|
||||
<!-- It is the name that will be shown in the clickhouse-client.
|
||||
By default, anything with "production" will be highlighted in red in query prompt.
|
||||
-->
|
||||
|
@ -9,6 +9,10 @@
|
||||
#include <base/LocalDate.h>
|
||||
#include <base/LineReader.h>
|
||||
#include <base/scope_guard_safe.h>
|
||||
#include "Columns/ColumnString.h"
|
||||
#include "Columns/ColumnsNumber.h"
|
||||
#include "Core/Block.h"
|
||||
#include "Core/Protocol.h"
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config_version.h>
|
||||
@ -48,7 +52,7 @@
|
||||
#include <IO/CompressionMethod.h>
|
||||
|
||||
#include <DataStreams/NullBlockOutputStream.h>
|
||||
#include <DataStreams/InternalTextLogsRowOutputStream.h>
|
||||
#include <DataStreams/InternalTextLogs.h>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
@ -72,6 +76,12 @@ namespace ErrorCodes
|
||||
|
||||
}
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event UserTimeMicroseconds;
|
||||
extern const Event SystemTimeMicroseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -95,6 +105,9 @@ void interruptSignalHandler(int signum)
|
||||
_exit(signum);
|
||||
}
|
||||
|
||||
ClientBase::~ClientBase() = default;
|
||||
ClientBase::ClientBase() = default;
|
||||
|
||||
void ClientBase::setupSignalHandler()
|
||||
{
|
||||
exit_on_signal.test_and_set();
|
||||
@ -393,8 +406,7 @@ void ClientBase::initLogsOutputStream()
|
||||
}
|
||||
}
|
||||
|
||||
logs_out_stream = std::make_shared<InternalTextLogsRowOutputStream>(*wb, stdout_is_a_tty);
|
||||
logs_out_stream->writePrefix();
|
||||
logs_out_stream = std::make_unique<InternalTextLogs>(*wb, stdout_is_a_tty);
|
||||
}
|
||||
}
|
||||
|
||||
@ -426,11 +438,9 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
|
||||
catch (Exception & e)
|
||||
{
|
||||
if (!is_interactive)
|
||||
{
|
||||
e.addMessage("(in query: {})", full_query);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
if (have_error)
|
||||
processError(full_query);
|
||||
@ -611,6 +621,10 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled)
|
||||
onEndOfStream();
|
||||
return false;
|
||||
|
||||
case Protocol::Server::ProfileEvents:
|
||||
onProfileEvents(packet.block);
|
||||
return true;
|
||||
|
||||
default:
|
||||
throw Exception(
|
||||
ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription());
|
||||
@ -641,9 +655,6 @@ void ClientBase::onEndOfStream()
|
||||
if (block_out_stream)
|
||||
block_out_stream->writeSuffix();
|
||||
|
||||
if (logs_out_stream)
|
||||
logs_out_stream->writeSuffix();
|
||||
|
||||
resetOutput();
|
||||
|
||||
if (is_interactive && !written_first_block)
|
||||
@ -654,6 +665,45 @@ void ClientBase::onEndOfStream()
|
||||
}
|
||||
|
||||
|
||||
void ClientBase::onProfileEvents(Block & block)
|
||||
{
|
||||
const auto rows = block.rows();
|
||||
if (rows == 0)
|
||||
return;
|
||||
const auto & array_thread_id = typeid_cast<const ColumnUInt64 &>(*block.getByName("thread_id").column).getData();
|
||||
const auto & names = typeid_cast<const ColumnString &>(*block.getByName("name").column);
|
||||
const auto & host_names = typeid_cast<const ColumnString &>(*block.getByName("host_name").column);
|
||||
const auto & array_values = typeid_cast<const ColumnUInt64 &>(*block.getByName("value").column).getData();
|
||||
|
||||
const auto * user_time_name = ProfileEvents::getName(ProfileEvents::UserTimeMicroseconds);
|
||||
const auto * system_time_name = ProfileEvents::getName(ProfileEvents::SystemTimeMicroseconds);
|
||||
|
||||
HostToThreadTimesMap thread_times;
|
||||
for (size_t i = 0; i < rows; ++i)
|
||||
{
|
||||
auto thread_id = array_thread_id[i];
|
||||
auto host_name = host_names.getDataAt(i).toString();
|
||||
if (thread_id != 0)
|
||||
progress_indication.addThreadIdToList(host_name, thread_id);
|
||||
auto event_name = names.getDataAt(i);
|
||||
auto value = array_values[i];
|
||||
if (event_name == user_time_name)
|
||||
{
|
||||
thread_times[host_name][thread_id].user_ms = value;
|
||||
}
|
||||
else if (event_name == system_time_name)
|
||||
{
|
||||
thread_times[host_name][thread_id].system_ms = value;
|
||||
}
|
||||
else if (event_name == MemoryTracker::USAGE_EVENT_NAME)
|
||||
{
|
||||
thread_times[host_name][thread_id].memory_usage = value;
|
||||
}
|
||||
}
|
||||
progress_indication.updateThreadEventData(thread_times);
|
||||
}
|
||||
|
||||
|
||||
/// Flush all buffers.
|
||||
void ClientBase::resetOutput()
|
||||
{
|
||||
|
@ -32,12 +32,17 @@ enum MultiQueryProcessingStage
|
||||
|
||||
void interruptSignalHandler(int signum);
|
||||
|
||||
class InternalTextLogs;
|
||||
|
||||
class ClientBase : public Poco::Util::Application
|
||||
{
|
||||
|
||||
public:
|
||||
using Arguments = std::vector<String>;
|
||||
|
||||
ClientBase();
|
||||
~ClientBase() override;
|
||||
|
||||
void init(int argc, char ** argv);
|
||||
|
||||
protected:
|
||||
@ -109,6 +114,7 @@ private:
|
||||
void onReceiveExceptionFromServer(std::unique_ptr<Exception> && e);
|
||||
void onProfileInfo(const BlockStreamProfileInfo & profile_info);
|
||||
void onEndOfStream();
|
||||
void onProfileEvents(Block & block);
|
||||
|
||||
void sendData(Block & sample, const ColumnsDescription & columns_description, ASTPtr parsed_query);
|
||||
void sendDataFrom(ReadBuffer & buf, Block & sample,
|
||||
@ -177,7 +183,7 @@ protected:
|
||||
/// The user could specify special file for server logs (stderr by default)
|
||||
std::unique_ptr<WriteBuffer> out_logs_buf;
|
||||
String server_logs_file;
|
||||
BlockOutputStreamPtr logs_out_stream;
|
||||
std::unique_ptr<InternalTextLogs> logs_out_stream;
|
||||
|
||||
String home_path;
|
||||
String history_file; /// Path to a file containing command history.
|
||||
|
Some files were not shown because too many files have changed in this diff