Merge branch 'master' into removing-data-streams-folder

Nikolai Kochetov 2021-10-16 09:46:05 +03:00
commit 067eaadadd
407 changed files with 8190 additions and 21046 deletions

.gitmodules vendored

@@ -213,6 +213,7 @@
[submodule "contrib/boringssl"]
path = contrib/boringssl
url = https://github.com/ClickHouse-Extras/boringssl.git
branch = MergeWithUpstream
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse-Extras/NuRaft.git
@@ -249,3 +250,6 @@
[submodule "contrib/magic_enum"]
path = contrib/magic_enum
url = https://github.com/Neargye/magic_enum
[submodule "contrib/sysroot"]
path = contrib/sysroot
url = https://github.com/ClickHouse-Extras/sysroot.git


@@ -1,4 +1,4 @@
### ClickHouse release v21.10, 2021-10-14
### ClickHouse release v21.10, 2021-10-16
#### Backward Incompatible Change


@@ -1,14 +1,14 @@
[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/ClickHouse/raw/master/website/images/logo-400x240.png)](https://clickhouse.com)
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real time.
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
## Useful Links
* [Official website](https://clickhouse.com/) has quick high-level overview of ClickHouse on main page.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
* [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.com/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.


@@ -13,6 +13,7 @@
#include <Common/StackTrace.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <Core/ServerUUID.h>
#include <Common/hex.h>
#if !defined(ARCADIA_BUILD)
# include "Common/config_version.h"
@@ -64,41 +65,6 @@ void setExtras()
sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str()));
}
void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
{
auto * logger = &Poco::Logger::get("SentryWriter");
size_t size = 1024;
char buffer[size];
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#endif
if (vsnprintf(buffer, size, message, args) >= 0)
{
#ifdef __clang__
#pragma clang diagnostic pop
#endif
switch (level)
{
case SENTRY_LEVEL_DEBUG:
logger->debug(buffer);
break;
case SENTRY_LEVEL_INFO:
logger->information(buffer);
break;
case SENTRY_LEVEL_WARNING:
logger->warning(buffer);
break;
case SENTRY_LEVEL_ERROR:
logger->error(buffer);
break;
case SENTRY_LEVEL_FATAL:
logger->fatal(buffer);
break;
}
}
}
}
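
For context, the removed sentry_logger bridged Sentry's printf-style logging callback into Poco's logger. A minimal standalone sketch of that pattern follows, with illustrative names rather than the ClickHouse code (stderr stands in for Poco::Logger); it also shows why the pragmas were there: passing a non-literal format string to vsnprintf trips clang's -Wformat-nonliteral.

#include <cstdarg>
#include <cstdio>

// Sketch of the removed pattern: format a (fmt, va_list) pair into a fixed
// buffer and hand the result to a logger (stderr is a stand-in here).
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#endif
static void log_formatted(const char * fmt, va_list args)
{
    char buffer[1024];
    if (vsnprintf(buffer, sizeof(buffer), fmt, args) >= 0)
        std::fputs(buffer, stderr);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif

static void log_variadic(const char * fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    log_formatted(fmt, args);
    va_end(args);
}

int main()
{
    log_variadic("%s: %d\n", "answer", 42);
}

Dropping the custom logger (together with the sentry_options_set_logger call removed below) presumably leaves sentry-native's default logging in place.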
@@ -107,13 +73,13 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
bool enabled = false;
bool debug = config.getBool("send_crash_reports.debug", false);
auto * logger = &Poco::Logger::get("SentryWriter");
if (config.getBool("send_crash_reports.enabled", false))
{
if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
{
enabled = true;
}
}
if (enabled)
{
server_data_path = config.getString("path", "");
@@ -126,7 +92,6 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
sentry_options_set_release(options, VERSION_STRING_SHORT);
sentry_options_set_logger(options, &sentry_logger, nullptr);
if (debug)
{
sentry_options_set_debug(options, 1);
@@ -199,34 +164,34 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta
if (stack_size > 0)
{
ssize_t offset = stack_trace.getOffset();
char instruction_addr[100];
char instruction_addr[19]
{
'0', 'x',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
'\0'
};
StackTrace::Frames frames;
StackTrace::symbolize(stack_trace.getFramePointers(), offset, stack_size, frames);
for (ssize_t i = stack_size - 1; i >= offset; --i)
{
const StackTrace::Frame & current_frame = frames[i];
sentry_value_t sentry_frame = sentry_value_new_object();
UInt64 frame_ptr = reinterpret_cast<UInt64>(current_frame.virtual_addr);
if (std::snprintf(instruction_addr, sizeof(instruction_addr), "0x%" PRIx64, frame_ptr) >= 0)
{
sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));
}
writeHexUIntLowercase(frame_ptr, instruction_addr + 2);
sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));
if (current_frame.symbol.has_value())
{
sentry_value_set_by_key(sentry_frame, "function", sentry_value_new_string(current_frame.symbol.value().c_str()));
}
if (current_frame.file.has_value())
{
sentry_value_set_by_key(sentry_frame, "filename", sentry_value_new_string(current_frame.file.value().c_str()));
}
if (current_frame.line.has_value())
{
sentry_value_set_by_key(sentry_frame, "lineno", sentry_value_new_int32(current_frame.line.value()));
}
sentry_value_append(sentry_frames, sentry_frame);
}
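
The replacement above swaps snprintf for writeHexUIntLowercase into a buffer sized exactly for "0x", 16 hex digits, and the terminator (19 bytes), so the return-value check becomes unnecessary. A minimal sketch of that fixed-width formatting, assuming writeHexUIntLowercase emits exactly 16 lowercase hex digits for a 64-bit value (the helper below is an illustrative stand-in, not the implementation from Common/hex.h):

#include <cstdint>
#include <cstdio>

// Writes `value` as exactly 16 lowercase hex digits at `out` (no terminator),
// mirroring what writeHexUIntLowercase is assumed to do for a UInt64.
static void write_hex_u64_lowercase(uint64_t value, char * out)
{
    static const char digits[] = "0123456789abcdef";
    for (int i = 15; i >= 0; --i)
    {
        out[i] = digits[value & 0xF];
        value >>= 4;
    }
}

int main()
{
    char instruction_addr[19] = "0x0000000000000000"; // 2 + 16 + '\0'
    write_hex_u64_lowercase(0x00007f1234abcd00ULL, instruction_addr + 2);
    std::puts(instruction_addr); // prints 0x00007f1234abcd00
}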


@@ -1,4 +1,4 @@
#include <cstddef>
#include <stddef.h>
#include <emmintrin.h>


@@ -100,7 +100,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
columns[i++]->insert(msg.getSource());
columns[i++]->insert(msg.getText());
logs_queue->emplace(std::move(columns));
[[maybe_unused]] bool push_result = logs_queue->emplace(std::move(columns));
}
/// Also log to system.text_log table, if message is not too noisy
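
The change above suggests logs_queue->emplace() now reports whether the element was actually queued (likely a bounded queue with a [[nodiscard]] result), and [[maybe_unused]] documents that logSplit deliberately ignores it. A hedged sketch of the idiom with a hypothetical bounded queue:

#include <cstddef>
#include <queue>
#include <utility>

// Hypothetical bounded queue whose emplace() reports acceptance; when the
// result is [[nodiscard]], a caller that intentionally drops it can bind it
// to a [[maybe_unused]] variable instead of silencing the warning globally.
template <typename T>
class BoundedQueue
{
public:
    explicit BoundedQueue(size_t capacity_) : capacity(capacity_) {}

    template <typename... Args>
    [[nodiscard]] bool emplace(Args &&... args)
    {
        if (impl.size() >= capacity)
            return false; // element dropped, queue is full
        impl.emplace(std::forward<Args>(args)...);
        return true;
    }

private:
    std::queue<T> impl;
    size_t capacity;
};

int main()
{
    BoundedQueue<int> logs_queue(1024);
    [[maybe_unused]] bool push_result = logs_queue.emplace(42);
}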


@@ -13,7 +13,6 @@ endif ()
if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386)
message (FATAL_ERROR "32bit platforms are not supported")
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
endif ()


@@ -1,14 +1,7 @@
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.12")
macro(add_glob cur_list)
file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS ${ARGN})
list(APPEND ${cur_list} ${__tmp})
endmacro()
else ()
macro(add_glob cur_list)
file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${ARGN})
list(APPEND ${cur_list} ${__tmp})
endmacro()
endif ()
macro(add_glob cur_list)
file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${ARGN})
list(APPEND ${cur_list} ${__tmp})
endmacro()
macro(add_headers_and_sources prefix common_path)
add_glob(${prefix}_headers ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)


@@ -64,6 +64,7 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "aarch64" ) OR
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" )
)


@@ -53,12 +53,7 @@ endif ()
if (NOT OPENSSL_FOUND AND NOT MISSING_INTERNAL_SSL_LIBRARY)
set (USE_INTERNAL_SSL_LIBRARY 1)
set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/boringssl")
if (ARCH_AMD64)
set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
elseif (ARCH_AARCH64)
set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
endif ()
set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
set (OPENSSL_CRYPTO_LIBRARY crypto)
set (OPENSSL_SSL_LIBRARY ssl)
set (OPENSSL_FOUND 1)


@@ -0,0 +1,22 @@
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-aarch64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE)
set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@@ -5,7 +5,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
if (COMPILER_CLANG AND NOT CMAKE_CROSSCOMPILING)
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
else ()
set (BUILTINS_LIBRARY "-lgcc")


@@ -1,17 +1,24 @@
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc")
# We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9.
set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ranlib" CACHE FILEPATH "" FORCE)
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64")
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)


@@ -0,0 +1,32 @@
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "ppc64le")
set (CMAKE_C_COMPILER_TARGET "ppc64le-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "ppc64le-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "ppc64le-linux-gnu")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@@ -0,0 +1,32 @@
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "x86_64")
set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@@ -20,40 +20,30 @@ endif ()
if (CMAKE_CROSSCOMPILING)
if (OS_DARWIN)
# FIXME: broken dependencies
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
set (ENABLE_GRPC OFF CACHE INTERNAL "") # no protobuf -> no grpc
set (USE_SNAPPY OFF CACHE INTERNAL "")
set (ENABLE_PARQUET OFF CACHE INTERNAL "") # no snappy and protobuf -> no parquet
set (ENABLE_ORC OFF CACHE INTERNAL "") # no arrow (parquet) -> no orc
set (ENABLE_ICU OFF CACHE INTERNAL "")
set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
elseif (OS_LINUX OR OS_ANDROID)
if (ARCH_AARCH64)
# FIXME: broken dependencies
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
set (ENABLE_ORC OFF CACHE INTERNAL "")
set (ENABLE_MYSQL OFF CACHE INTERNAL "")
set (USE_SENTRY OFF CACHE INTERNAL "")
elseif (ARCH_PPC64LE)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (USE_SENTRY OFF CACHE INTERNAL "")
endif ()
elseif (OS_FREEBSD)
# FIXME: broken dependencies
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
set (ENABLE_ORC OFF CACHE INTERNAL "")
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_ORC OFF CACHE INTERNAL "") # no protobuf -> no parquet -> no orc
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
else ()
message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
endif ()
# Don't know why but CXX_STANDARD doesn't work for cross-compilation
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20")
message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
endif ()


@@ -1,2 +0,0 @@
wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz --strip-components=1


@@ -89,4 +89,3 @@ if (LINKER_NAME)
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()


@@ -163,7 +163,7 @@ endif ()
if(USE_INTERNAL_SNAPPY_LIBRARY)
set(SNAPPY_BUILD_TESTS 0 CACHE INTERNAL "")
add_subdirectory(snappy)
add_subdirectory(snappy-cmake)
set (SNAPPY_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/snappy")
endif()
@@ -215,6 +215,7 @@ function(add_llvm)
# Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (CMAKE_INSTALL_RPATH "ON")
set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "")
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
@@ -277,7 +278,7 @@ if (USE_FASTOPS)
endif()
if (USE_AMQPCPP OR USE_CASSANDRA)
add_subdirectory (libuv)
add_subdirectory (libuv-cmake)
endif()
if (USE_AMQPCPP)
add_subdirectory (amqpcpp-cmake)


@@ -54,7 +54,7 @@ target_link_libraries (${THRIFT_LIBRARY} PRIVATE boost::headers_only)
set(ORC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++")
set(ORC_INCLUDE_DIR "${ORC_SOURCE_DIR}/include")
set(ORC_SOURCE_SRC_DIR "${ORC_SOURCE_DIR}/src")
set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap")
# set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap")
set(ORC_BUILD_SRC_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src")
set(ORC_BUILD_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include")
@@ -101,7 +101,14 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
set(CXX11_FLAGS "-std=c++0x")
endif ()
include("${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake")
set (ORC_CXX_HAS_INITIALIZER_LIST 1)
set (ORC_CXX_HAS_NOEXCEPT 1)
set (ORC_CXX_HAS_NULLPTR 1)
set (ORC_CXX_HAS_OVERRIDE 1)
set (ORC_CXX_HAS_UNIQUE_PTR 1)
set (ORC_CXX_HAS_CSTDINT 1)
set (ORC_CXX_HAS_THREAD_LOCAL 1)
include(orc_check.cmake)
configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh")
configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh")


@@ -1,130 +1,14 @@
# Not changed part of contrib/orc/c++/src/CMakeLists.txt
set (HAS_PREAD 1)
set (HAS_STRPTIME 1)
set (HAS_STOLL 1)
set (INT64_IS_LL 1)
set (HAS_DIAGNOSTIC_PUSH 1)
set (HAS_STD_ISNAN 1)
set (HAS_STD_MUTEX 1)
set (NEEDS_REDUNDANT_MOVE 1)
set (HAS_PRE_1970 1)
set (HAS_POST_2038 1)
set (NEEDS_Z_PREFIX 0)
INCLUDE(CheckCXXSourceCompiles)
CHECK_CXX_SOURCE_COMPILES("
#include<fcntl.h>
#include<unistd.h>
int main(int,char*[]){
int f = open(\"/x/y\", O_RDONLY);
char buf[100];
return pread(f, buf, 100, 1000) == 0;
}"
HAS_PREAD
)
CHECK_CXX_SOURCE_COMPILES("
#include<time.h>
int main(int,char*[]){
struct tm time2020;
return !strptime(\"2020-02-02 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2020);
}"
HAS_STRPTIME
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
int main(int,char* argv[]){
return static_cast<int>(std::stoll(argv[0]));
}"
HAS_STOLL
)
CHECK_CXX_SOURCE_COMPILES("
#include<stdint.h>
#include<stdio.h>
int main(int,char*[]){
int64_t x = 1; printf(\"%lld\",x);
}"
INT64_IS_LL
)
CHECK_CXX_SOURCE_COMPILES("
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored \"-Wdeprecated\"
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored \"-Wdeprecated\"
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4996 )
#pragma warning( pop )
#else
unknownCompiler!
#endif
int main(int, char *[]) {}"
HAS_DIAGNOSTIC_PUSH
)
CHECK_CXX_SOURCE_COMPILES("
#include<cmath>
int main(int, char *[]) {
return std::isnan(1.0f);
}"
HAS_STD_ISNAN
)
CHECK_CXX_SOURCE_COMPILES("
#include<mutex>
int main(int, char *[]) {
std::mutex test_mutex;
std::lock_guard<std::mutex> lock_mutex(test_mutex);
}"
HAS_STD_MUTEX
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
std::string func() {
std::string var = \"test\";
return std::move(var);
}
int main(int, char *[]) {}"
NEEDS_REDUNDANT_MOVE
)
INCLUDE(CheckCXXSourceRuns)
CHECK_CXX_SOURCE_RUNS("
#include<time.h>
int main(int, char *[]) {
time_t t = -14210715; // 1969-07-20 12:34:45
struct tm *ptm = gmtime(&t);
return !(ptm && ptm->tm_year == 69);
}"
HAS_PRE_1970
)
CHECK_CXX_SOURCE_RUNS("
#include<stdlib.h>
#include<time.h>
int main(int, char *[]) {
setenv(\"TZ\", \"America/Los_Angeles\", 1);
tzset();
struct tm time2037;
struct tm time2038;
strptime(\"2037-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2037);
strptime(\"2038-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2038);
return mktime(&time2038) - mktime(&time2037) != 31536000;
}"
HAS_POST_2038
)
set(CMAKE_REQUIRED_INCLUDES ${ZLIB_INCLUDE_DIR})
set(CMAKE_REQUIRED_LIBRARIES zlib)
CHECK_CXX_SOURCE_COMPILES("
#define Z_PREFIX
#include<zlib.h>
z_stream strm;
int main(int, char *[]) {
deflateReset(&strm);
}"
NEEDS_Z_PREFIX
)
# See https://cmake.org/cmake/help/v3.14/policy/CMP0075.html. Without unsetting it breaks thrift.
set(CMAKE_REQUIRED_INCLUDES)
set(CMAKE_REQUIRED_LIBRARIES)

contrib/boost vendored

@@ -1 +1 @@
Subproject commit 66d17f060c4867aeea99fa2a20cfdae89ae2a2ec
Subproject commit 79358a3106aab6af464430ed67c7efafebf5cd6f

contrib/boringssl vendored

@@ -1 +1 @@
Subproject commit a6a2e2ab3e44d97ce98e51c558e989f211de7eb3
Subproject commit c1e01a441d6db234f4f12e63a7657d1f9e6db9c1


@@ -4,7 +4,7 @@
# This file is created by generate_build_files.py and edited accordingly.
cmake_minimum_required(VERSION 3.0)
cmake_minimum_required(VERSION 3.5)
project(BoringSSL LANGUAGES C CXX)
@@ -20,12 +20,7 @@ if(CMAKE_COMPILER_IS_GNUCXX OR CLANG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-common")
if((CMAKE_C_COMPILER_VERSION VERSION_GREATER "4.8.99") OR CLANG)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11")
else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-common -std=c11")
endif()
# pthread_rwlock_t requires a feature flag.
@@ -55,7 +50,7 @@ add_definitions(-DBORINGSSL_IMPLEMENTATION)
# builds.
if(NOT OPENSSL_NO_ASM AND CMAKE_OSX_ARCHITECTURES)
list(LENGTH CMAKE_OSX_ARCHITECTURES NUM_ARCHES)
if(NOT ${NUM_ARCHES} EQUAL 1)
if(NOT NUM_ARCHES EQUAL 1)
message(FATAL_ERROR "Universal binaries not supported.")
endif()
list(GET CMAKE_OSX_ARCHITECTURES 0 CMAKE_SYSTEM_PROCESSOR)
@@ -78,7 +73,13 @@ elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86")
set(ARCH "x86")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "i386")
set(ARCH "x86")
# cmake uses `uname -p` to set the system processor, but Solaris
# systems support multiple architectures.
if((${CMAKE_SYSTEM_NAME} STREQUAL "SunOS") AND CMAKE_SIZEOF_VOID_P EQUAL 8)
set(ARCH "x86_64")
else()
set(ARCH "x86")
endif()
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "i686")
set(ARCH "x86")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
@@ -289,6 +290,21 @@ set(
mac-x86_64/crypto/test/trampoline-x86_64.S
)
set(
CRYPTO_win_aarch64_SOURCES
win-aarch64/crypto/chacha/chacha-armv8.S
win-aarch64/crypto/fipsmodule/aesv8-armx64.S
win-aarch64/crypto/fipsmodule/armv8-mont.S
win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S
win-aarch64/crypto/fipsmodule/ghashv8-armx64.S
win-aarch64/crypto/fipsmodule/sha1-armv8.S
win-aarch64/crypto/fipsmodule/sha256-armv8.S
win-aarch64/crypto/fipsmodule/sha512-armv8.S
win-aarch64/crypto/fipsmodule/vpaes-armv8.S
win-aarch64/crypto/test/trampoline-armv8.S
)
set(
CRYPTO_win_x86_SOURCES
@@ -331,9 +347,9 @@ set(
win-x86_64/crypto/test/trampoline-x86_64.asm
)
if(APPLE AND ${ARCH} STREQUAL "aarch64")
if(APPLE AND ARCH STREQUAL "aarch64")
set(CRYPTO_ARCH_SOURCES ${CRYPTO_ios_aarch64_SOURCES})
elseif(APPLE AND ${ARCH} STREQUAL "arm")
elseif(APPLE AND ARCH STREQUAL "arm")
set(CRYPTO_ARCH_SOURCES ${CRYPTO_ios_arm_SOURCES})
elseif(APPLE)
set(CRYPTO_ARCH_SOURCES ${CRYPTO_mac_${ARCH}_SOURCES})
@@ -360,6 +376,7 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_object.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_octet.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_print.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strex.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strnid.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_time.c"
"${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_type.c"
@@ -389,6 +406,7 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/bio/printf.c"
"${BORINGSSL_SOURCE_DIR}/crypto/bio/socket.c"
"${BORINGSSL_SOURCE_DIR}/crypto/bio/socket_helper.c"
"${BORINGSSL_SOURCE_DIR}/crypto/blake2/blake2.c"
"${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/bn_asn1.c"
"${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/convert.c"
"${BORINGSSL_SOURCE_DIR}/crypto/buf/buf.c"
@@ -413,6 +431,7 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/conf/conf.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-fuchsia.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-linux.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-win.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm-linux.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm.c"
"${BORINGSSL_SOURCE_DIR}/crypto/cpu-intel.c"
@@ -452,7 +471,6 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/ex_data.c"
"${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/bcm.c"
"${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/fips_shared_support.c"
"${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/is_fips.c"
"${BORINGSSL_SOURCE_DIR}/crypto/hkdf/hkdf.c"
"${BORINGSSL_SOURCE_DIR}/crypto/hpke/hpke.c"
"${BORINGSSL_SOURCE_DIR}/crypto/hrss/hrss.c"
@@ -499,13 +517,13 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/trust_token/voprf.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/a_digest.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/a_sign.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/a_strex.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/a_verify.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/algorithm.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/asn1_gen.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/by_dir.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/by_file.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/i2d_pr.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/name_print.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/rsa_pss.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/t_crl.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/t_req.c"
@@ -519,7 +537,6 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_ext.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_lu.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_obj.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_r2x.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_req.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_set.c"
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_trs.c"
@@ -589,6 +606,8 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/ssl/d1_srtp.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/dtls_method.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/dtls_record.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/encrypted_client_hello.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/extensions.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/handoff.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/handshake.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/handshake_client.cc"
@@ -611,7 +630,6 @@ add_library(
"${BORINGSSL_SOURCE_DIR}/ssl/ssl_versions.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/ssl_x509.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/t1_enc.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/t1_lib.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/tls13_both.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/tls13_client.cc"
"${BORINGSSL_SOURCE_DIR}/ssl/tls13_enc.cc"
@@ -633,6 +651,7 @@ add_executable(
"${BORINGSSL_SOURCE_DIR}/tool/digest.cc"
"${BORINGSSL_SOURCE_DIR}/tool/fd.cc"
"${BORINGSSL_SOURCE_DIR}/tool/file.cc"
"${BORINGSSL_SOURCE_DIR}/tool/generate_ech.cc"
"${BORINGSSL_SOURCE_DIR}/tool/generate_ed25519.cc"
"${BORINGSSL_SOURCE_DIR}/tool/genrsa.cc"
"${BORINGSSL_SOURCE_DIR}/tool/pkcs12.cc"

contrib/grpc vendored

@@ -1 +1 @@
Subproject commit 60c986e15cae70aade721d26badabab1f822fdd6
Subproject commit 7eac189a6badddac593580ec2ad1478bd2656fc7


@@ -187,12 +187,12 @@ function(protobuf_generate_grpc)
add_custom_command(
OUTPUT ${_generated_srcs}
COMMAND protobuf::protoc
COMMAND $<TARGET_FILE:protoc>
ARGS --${protobuf_generate_grpc_LANGUAGE}_out ${_dll_export_decl}${protobuf_generate_grpc_PROTOC_OUT_DIR}
--grpc_out ${_dll_export_decl}${protobuf_generate_grpc_PROTOC_OUT_DIR}
--plugin=protoc-gen-grpc=$<TARGET_FILE:${protobuf_generate_grpc_PLUGIN}>
${_dll_desc_out} ${_protobuf_include_path} ${_abs_file}
DEPENDS ${_abs_file} protobuf::protoc ${protobuf_generate_grpc_PLUGIN}
DEPENDS ${_abs_file} protoc ${protobuf_generate_grpc_PLUGIN}
COMMENT "Running ${protobuf_generate_grpc_LANGUAGE} protocol buffer compiler on ${_proto}"
VERBATIM)
endforeach()
@@ -204,4 +204,4 @@ function(protobuf_generate_grpc)
if(protobuf_generate_grpc_TARGET)
target_sources(${protobuf_generate_grpc_TARGET} PRIVATE ${_generated_srcs_all})
endif()
endfunction()
endfunction()


@@ -161,7 +161,7 @@
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* #undef JEMALLOC_DSS */
/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL


@@ -81,7 +81,7 @@
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
@@ -284,7 +284,7 @@
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
#define JEMALLOC_DEFINE_MADVISE_FREE
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.


@@ -1,6 +1,6 @@
find_program(AWK_PROGRAM awk)
if(NOT AWK_PROGRAM)
message(FATAL_ERROR "You need the awk program to build ClickHouse with krb5 enabled.")
message(FATAL_ERROR "You need the awk program to build ClickHouse with krb5 enabled.")
endif()
set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")

@@ -1 +1 @@
Subproject commit a720b7105a610acbd7427eea475a5b6810c151eb
Subproject commit aa5429bf67a346e48ad60efd88bcefc286644bf3


@@ -1,10 +0,0 @@
#include <exception>
#include <stdexcept>
int main() {
try {
throw 2;
} catch (int) {
std::throw_with_nested(std::runtime_error("test"));
}
}


@@ -1,7 +0,0 @@
#include <chrono>
using std::chrono::steady_clock;
void foo(const steady_clock &clock) {
return;
}


@@ -1,4 +1,4 @@
OPTION(ENABLE_SSE "enable SSE4.2 buildin function" ON)
OPTION(ENABLE_SSE "enable SSE4.2 builtin function" ON)
INCLUDE (CheckFunctionExists)
CHECK_FUNCTION_EXISTS(dladdr HAVE_DLADDR)
@@ -21,30 +21,21 @@ ADD_DEFINITIONS(-D_GNU_SOURCE)
ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP)
TRY_COMPILE(STRERROR_R_RETURN_INT
${CMAKE_CURRENT_BINARY_DIR}
"${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp"
${CMAKE_CURRENT_BINARY_DIR}
"${CMAKE_CURRENT_SOURCE_DIR}/CMake/CMakeTestCompileStrerror.c"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)
OUTPUT_VARIABLE OUTPUT)
MESSAGE(STATUS "Checking whether strerror_r returns an int")
IF(STRERROR_R_RETURN_INT)
MESSAGE(STATUS "Checking whether strerror_r returns an int -- yes")
MESSAGE(STATUS "Checking whether strerror_r returns an int -- yes")
ELSE(STRERROR_R_RETURN_INT)
MESSAGE(STATUS "Checking whether strerror_r returns an int -- no")
MESSAGE(STATUS "Checking whether strerror_r returns an int -- no")
ENDIF(STRERROR_R_RETURN_INT)
TRY_COMPILE(HAVE_STEADY_CLOCK
${CMAKE_CURRENT_BINARY_DIR}
"${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)
TRY_COMPILE(HAVE_NESTED_EXCEPTION
${CMAKE_CURRENT_BINARY_DIR}
"${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp"
CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
OUTPUT_VARIABLE OUTPUT)
set(HAVE_STEADY_CLOCK 1)
set(HAVE_NESTED_EXCEPTION 1)
SET(HAVE_BOOST_CHRONO 0)
SET(HAVE_BOOST_ATOMIC 0)

contrib/libuv vendored

@@ -1 +1 @@
Subproject commit e2e9b7e9f978ce8a1367b5fe781d97d1ce9f94ab
Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae


@@ -0,0 +1,160 @@
# This file is a modified version of contrib/libuv/CMakeLists.txt
include(CMakeDependentOption)
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/libuv")
set (BINARY_DIR "${CMAKE_BINARY_DIR}/contrib/libuv")
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
list(APPEND uv_cflags -fvisibility=hidden --std=gnu89)
list(APPEND uv_cflags -Wall -Wextra -Wstrict-prototypes)
list(APPEND uv_cflags -Wno-unused-parameter)
endif()
set(uv_sources
src/fs-poll.c
src/idna.c
src/inet.c
src/random.c
src/strscpy.c
src/threadpool.c
src/timer.c
src/uv-common.c
src/uv-data-getter-setters.c
src/version.c
src/unix/async.c
src/unix/core.c
src/unix/dl.c
src/unix/fs.c
src/unix/getaddrinfo.c
src/unix/getnameinfo.c
src/unix/loop-watcher.c
src/unix/loop.c
src/unix/pipe.c
src/unix/poll.c
src/unix/process.c
src/unix/random-devurandom.c
src/unix/signal.c
src/unix/stream.c
src/unix/tcp.c
src/unix/thread.c
src/unix/tty.c
src/unix/udp.c)
if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "Android|Linux|OS/390")
list(APPEND uv_sources src/unix/proctitle.c)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD")
list(APPEND uv_sources src/unix/freebsd.c)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|NetBSD|OpenBSD")
list(APPEND uv_sources src/unix/posix-hrtime.c src/unix/bsd-proctitle.c)
endif()
if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|NetBSD|OpenBSD")
list(APPEND uv_sources src/unix/bsd-ifaddrs.c src/unix/kqueue.c)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
list(APPEND uv_sources src/unix/random-getrandom.c)
endif()
if(APPLE OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
list(APPEND uv_sources src/unix/random-getentropy.c)
endif()
if(APPLE)
list(APPEND uv_defines _DARWIN_UNLIMITED_SELECT=1 _DARWIN_USE_64_BIT_INODE=1)
list(APPEND uv_sources
src/unix/darwin-proctitle.c
src/unix/darwin.c
src/unix/fsevents.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries dl rt)
list(APPEND uv_sources
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c
src/unix/sysinfo-loadavg.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
list(APPEND uv_sources src/unix/netbsd.c)
list(APPEND uv_libraries kvm)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
list(APPEND uv_sources src/unix/openbsd.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
list(APPEND uv_defines PATH_MAX=255)
list(APPEND uv_defines _AE_BIMODAL)
list(APPEND uv_defines _ALL_SOURCE)
list(APPEND uv_defines _LARGE_TIME_API)
list(APPEND uv_defines _OPEN_MSGQ_EXT)
list(APPEND uv_defines _OPEN_SYS_FILE_EXT)
list(APPEND uv_defines _OPEN_SYS_IF_EXT)
list(APPEND uv_defines _OPEN_SYS_SOCK_EXT3)
list(APPEND uv_defines _OPEN_SYS_SOCK_IPV6)
list(APPEND uv_defines _UNIX03_SOURCE)
list(APPEND uv_defines _UNIX03_THREADS)
list(APPEND uv_defines _UNIX03_WITHDRAWN)
list(APPEND uv_defines _XOPEN_SOURCE_EXTENDED)
list(APPEND uv_sources
src/unix/pthread-fixes.c
src/unix/pthread-barrier.c
src/unix/os390.c
src/unix/os390-syscalls.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
list(APPEND uv_defines __EXTENSIONS__ _XOPEN_SOURCE=500)
list(APPEND uv_libraries kstat nsl sendfile socket)
list(APPEND uv_sources src/unix/no-proctitle.c src/unix/sunos.c)
endif()
set(uv_sources_tmp "")
foreach(file ${uv_sources})
list(APPEND uv_sources_tmp "${SOURCE_DIR}/${file}")
endforeach(file)
set(uv_sources "${uv_sources_tmp}")
list(APPEND uv_defines CLICKHOUSE_GLIBC_COMPATIBILITY)
add_library(uv ${uv_sources})
target_compile_definitions(uv
INTERFACE USING_UV_SHARED=1
PRIVATE ${uv_defines} BUILDING_UV_SHARED=1)
target_compile_options(uv PRIVATE ${uv_cflags})
target_include_directories(uv PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(uv ${uv_libraries})
add_library(uv_a STATIC ${uv_sources})
target_compile_definitions(uv_a PRIVATE ${uv_defines})
target_compile_options(uv_a PRIVATE ${uv_cflags})
target_include_directories(uv_a PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(uv_a ${uv_libraries})
if(UNIX)
# Now for some gibbering horrors from beyond the stars...
foreach(x ${uv_libraries})
set(LIBS "${LIBS} -l${x}")
endforeach(x)
file(STRINGS ${SOURCE_DIR}/configure.ac configure_ac REGEX ^AC_INIT)
string(REGEX MATCH [0-9]+[.][0-9]+[.][0-9]+ PACKAGE_VERSION "${configure_ac}")
string(REGEX MATCH ^[0-9]+ UV_VERSION_MAJOR "${PACKAGE_VERSION}")
# The version in the filename is mirroring the behaviour of autotools.
set_target_properties(uv PROPERTIES VERSION ${UV_VERSION_MAJOR}.0.0
SOVERSION ${UV_VERSION_MAJOR})
endif()

contrib/llvm vendored

@@ -1 +1 @@
Subproject commit f30bbecef78b75b527e257c1304d0be2f2f95975
Subproject commit 20607e61728e97c969e536644c3c0c1bb1a50672


@@ -0,0 +1,63 @@
/* include/lber_types.h. Generated from lber_types.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LBER types
*/
#ifndef _LBER_TYPES_H
#define _LBER_TYPES_H
#include <ldap_cdefs.h>
LDAP_BEGIN_DECL
/* LBER boolean, enum, integers (32 bits or larger) */
#define LBER_INT_T int
/* LBER tags (32 bits or larger) */
#define LBER_TAG_T long
/* LBER socket descriptor */
#define LBER_SOCKET_T int
/* LBER lengths (32 bits or larger) */
#define LBER_LEN_T long
/* ------------------------------------------------------------ */
/* booleans, enumerations, and integers */
typedef LBER_INT_T ber_int_t;
/* signed and unsigned versions */
typedef signed LBER_INT_T ber_sint_t;
typedef unsigned LBER_INT_T ber_uint_t;
/* tags */
typedef unsigned LBER_TAG_T ber_tag_t;
/* "socket" descriptors */
typedef LBER_SOCKET_T ber_socket_t;
/* lengths */
typedef unsigned LBER_LEN_T ber_len_t;
/* signed lengths */
typedef signed LBER_LEN_T ber_slen_t;
LDAP_END_DECL
#endif /* _LBER_TYPES_H */


@@ -0,0 +1,74 @@
/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* This file works in conjunction with the OpenLDAP configure system.
* If you do not like the values below, adjust your configure options.
*/
#ifndef _LDAP_CONFIG_H
#define _LDAP_CONFIG_H
/* directory separator */
#ifndef LDAP_DIRSEP
#ifndef _WIN32
#define LDAP_DIRSEP "/"
#else
#define LDAP_DIRSEP "\\"
#endif
#endif
/* directory for temporary files */
#if defined(_WIN32)
# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */
#elif defined( _P_tmpdir )
# define LDAP_TMPDIR _P_tmpdir
#elif defined( P_tmpdir )
# define LDAP_TMPDIR P_tmpdir
#elif defined( _PATH_TMPDIR )
# define LDAP_TMPDIR _PATH_TMPDIR
#else
# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
#endif
/* directories */
#ifndef LDAP_BINDIR
#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
#endif
#ifndef LDAP_SBINDIR
#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
#endif
#ifndef LDAP_DATADIR
#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
#endif
#ifndef LDAP_SYSCONFDIR
#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
#endif
#ifndef LDAP_LIBEXECDIR
#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
#endif
#ifndef LDAP_MODULEDIR
#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
#endif
#ifndef LDAP_RUNDIR
#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
#endif
#ifndef LDAP_LOCALEDIR
#define LDAP_LOCALEDIR ""
#endif
#endif /* _LDAP_CONFIG_H */


@@ -0,0 +1,61 @@
/* include/ldap_features.h. Generated from ldap_features.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LDAP Features
*/
#ifndef _LDAP_FEATURES_H
#define _LDAP_FEATURES_H 1
/* OpenLDAP API version macros */
#define LDAP_VENDOR_VERSION 20501
#define LDAP_VENDOR_VERSION_MAJOR 2
#define LDAP_VENDOR_VERSION_MINOR 5
#define LDAP_VENDOR_VERSION_PATCH X
/*
** WORK IN PROGRESS!
**
** OpenLDAP reentrancy/thread-safeness should be dynamically
** checked using ldap_get_option().
**
** The -lldap implementation is not thread-safe.
**
** The -lldap_r implementation is:
** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
** but also be:
** LDAP_API_FEATURE_SESSION_THREAD_SAFE
** LDAP_API_FEATURE_OPERATION_THREAD_SAFE
**
** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
** can be used to determine if -lldap_r is available at compile
** time. You must define LDAP_THREAD_SAFE if and only if you
** link with -lldap_r.
**
** If you fail to define LDAP_THREAD_SAFE when linking with
** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
** provided header definitions and declarations may be incorrect.
**
*/
/* is -lldap_r available or not */
#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1
/* LDAP v2 Referrals */
/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */
#endif /* LDAP_FEATURES */

File diff suppressed because it is too large

contrib/protobuf vendored

@@ -1 +1 @@
Subproject commit 75601841d172c73ae6bf4ce8121f42b875cdbabd
Subproject commit c1c5d02026059f4c3cb51aaa08e82288d3e08b89


@@ -10,8 +10,82 @@ else ()
set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
endif ()
if (CMAKE_CROSSCOMPILING)
# Will build 'protoc' for host arch instead of cross-compiling
set(protobuf_BUILD_PROTOC_BINARIES OFF CACHE INTERNAL "" FORCE)
endif ()
add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}")
# We don't want to stop compilation on warnings in protobuf's headers.
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${protobuf_SOURCE_DIR}/src")
if (CMAKE_CROSSCOMPILING)
# Build 'protoc' for host arch
set (PROTOC_BUILD_DIR "${protobuf_BINARY_DIR}/build")
if (NOT EXISTS "${PROTOC_BUILD_DIR}/protoc")
# This is quite ugly but I cannot make dependencies work properly.
execute_process(
COMMAND mkdir -p ${PROTOC_BUILD_DIR}
COMMAND_ECHO STDOUT)
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
"-Dprotobuf_BUILD_TESTS=0"
"-Dprotobuf_BUILD_CONFORMANCE=0"
"-Dprotobuf_BUILD_EXAMPLES=0"
"-Dprotobuf_BUILD_PROTOC_BINARIES=1"
"${protobuf_SOURCE_DIR}/cmake"
WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT)
execute_process(
COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT)
endif ()
# add_custom_command (
# OUTPUT ${PROTOC_BUILD_DIR}
# COMMAND mkdir -p ${PROTOC_BUILD_DIR})
#
# add_custom_command (
# OUTPUT "${PROTOC_BUILD_DIR}/CMakeCache.txt"
#
# COMMAND ${CMAKE_COMMAND}
# -G"${CMAKE_GENERATOR}"
# -DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}"
# -DCMAKE_C_COMPILER="${CMAKE_C_COMPILER}"
# -DCMAKE_CXX_COMPILER="${CMAKE_CXX_COMPILER}"
# -Dprotobuf_BUILD_TESTS=0
# -Dprotobuf_BUILD_CONFORMANCE=0
# -Dprotobuf_BUILD_EXAMPLES=0
# -Dprotobuf_BUILD_PROTOC_BINARIES=1
# "${protobuf_SOURCE_DIR}/cmake"
#
# DEPENDS "${PROTOC_BUILD_DIR}"
# WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
# COMMENT "Configuring 'protoc' for host architecture."
# USES_TERMINAL)
#
# add_custom_command (
# OUTPUT "${PROTOC_BUILD_DIR}/protoc"
# COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
# DEPENDS "${PROTOC_BUILD_DIR}/CMakeCache.txt"
# COMMENT "Building 'protoc' for host architecture."
# USES_TERMINAL)
#
# add_custom_target (protoc-host DEPENDS "${PROTOC_BUILD_DIR}/protoc")
add_executable(protoc IMPORTED GLOBAL)
set_target_properties (protoc PROPERTIES IMPORTED_LOCATION "${PROTOC_BUILD_DIR}/protoc")
add_dependencies(protoc "${PROTOC_BUILD_DIR}/protoc")
endif ()


@@ -181,11 +181,11 @@ function(protobuf_generate)
add_custom_command(
OUTPUT ${_generated_srcs}
COMMAND protobuf::protoc
COMMAND $<TARGET_FILE:protoc>
ARGS --${protobuf_generate_LANGUAGE}_out ${_dll_export_decl}${protobuf_generate_PROTOC_OUT_DIR} ${_dll_desc_out} ${_protobuf_include_path} ${_abs_file}
DEPENDS ${_abs_file} protobuf::protoc
DEPENDS ${_abs_file} protoc
COMMENT "Running ${protobuf_generate_LANGUAGE} protocol buffer compiler on ${_proto}"
VERBATIM )
VERBATIM)
endforeach()
set_source_files_properties(${_generated_srcs_all} PROPERTIES GENERATED TRUE)


@@ -106,18 +106,6 @@ if(NOT MSVC)
set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul")
endif()
CHECK_CXX_SOURCE_COMPILES("
#include <cstdint>
#include <nmmintrin.h>
#include <wmmintrin.h>
int main() {
volatile uint32_t x = _mm_crc32_u32(0, 0);
const auto a = _mm_set_epi64x(0, 0);
const auto b = _mm_set_epi64x(0, 0);
const auto c = _mm_clmulepi64_si128(a, b, 0x00);
auto d = _mm_cvtsi128_si64(c);
}
" HAVE_SSE42)
unset(CMAKE_REQUIRED_FLAGS)
if(HAVE_SSE42)
add_definitions(-DHAVE_SSE42)
@@ -126,14 +114,7 @@ elseif(FORCE_SSE42)
message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
endif()
CHECK_CXX_SOURCE_COMPILES("
#if defined(_MSC_VER) && !defined(__thread)
#define __thread __declspec(thread)
#endif
int main() {
static __thread int tls;
}
" HAVE_THREAD_LOCAL)
set (HAVE_THREAD_LOCAL 1)
if(HAVE_THREAD_LOCAL)
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
endif()
@@ -174,7 +155,7 @@ endif()
option(WITH_FALLOCATE "build with fallocate" ON)
if(WITH_FALLOCATE)
CHECK_CXX_SOURCE_COMPILES("
CHECK_C_SOURCE_COMPILES("
#include <fcntl.h>
#include <linux/falloc.h>
int main() {
@@ -187,7 +168,7 @@ int main() {
endif()
endif()
CHECK_CXX_SOURCE_COMPILES("
CHECK_C_SOURCE_COMPILES("
#include <fcntl.h>
int main() {
int fd = open(\"/dev/null\", 0);
@@ -198,7 +179,7 @@ if(HAVE_SYNC_FILE_RANGE_WRITE)
add_definitions(-DROCKSDB_RANGESYNC_PRESENT)
endif()
CHECK_CXX_SOURCE_COMPILES("
CHECK_C_SOURCE_COMPILES("
#include <pthread.h>
int main() {
(void) PTHREAD_MUTEX_ADAPTIVE_NP;
@@ -228,6 +209,11 @@ if(HAVE_AUXV_GETAUXVAL)
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
endif()
check_cxx_symbol_exists(elf_aux_info sys/auxv.h HAVE_ELF_AUX_INFO)
if(HAVE_ELF_AUX_INFO)
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
endif()
include_directories(${ROCKSDB_SOURCE_DIR})
include_directories("${ROCKSDB_SOURCE_DIR}/include")
if(WITH_FOLLY_DISTRIBUTED_MUTEX)

contrib/s2geometry vendored

@@ -1 +1 @@
Subproject commit 20ea540d81f4575a3fc0aea585aac611bcd03ede
Subproject commit 38b7a290f927cc372218c2094602b83e35b18c05


@@ -0,0 +1,47 @@
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy")
set(SNAPPY_IS_BIG_ENDIAN 0)
include(CheckIncludeFile)
check_include_file("byteswap.h" HAVE_BYTESWAP_H)
check_include_file("sys/endian.h" HAVE_SYS_ENDIAN_H)
check_include_file("sys/mman.h" HAVE_SYS_MMAN_H)
check_include_file("sys/resource.h" HAVE_SYS_RESOURCE_H)
check_include_file("sys/time.h" HAVE_SYS_TIME_H)
check_include_file("sys/uio.h" HAVE_SYS_UIO_H)
check_include_file("unistd.h" HAVE_UNISTD_H)
check_include_file("windows.h" HAVE_WINDOWS_H)
set (HAVE_BUILTIN_EXPECT 1)
set (HAVE_BUILTIN_CTZ 1)
set (HAVE_FUNC_MMAP 1)
set (HAVE_FUNC_SYSCONF 1)
if (ARCH_AMD64 AND ENABLE_SSSE3)
set (SNAPPY_HAVE_SSSE3 1)
else ()
set (SNAPPY_HAVE_SSSE3 0)
endif ()
configure_file(
"${SOURCE_DIR}/cmake/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/config.h")
set(HAVE_SYS_UIO_H_01 1)
configure_file(
"${SOURCE_DIR}/snappy-stubs-public.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/snappy-stubs-public.h")
add_library(snappy "")
target_sources(snappy
PRIVATE
"${SOURCE_DIR}/snappy-internal.h"
"${SOURCE_DIR}/snappy-stubs-internal.h"
"${SOURCE_DIR}/snappy-c.cc"
"${SOURCE_DIR}/snappy-sinksource.cc"
"${SOURCE_DIR}/snappy-stubs-internal.cc"
"${SOURCE_DIR}/snappy.cc")
target_include_directories(snappy PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_compile_definitions(snappy PRIVATE -DHAVE_CONFIG_H)

contrib/sysroot vendored Submodule

@@ -0,0 +1 @@
Subproject commit 002415524b5d14124bb8a61a3ce7ac65774f5479


@@ -93,10 +93,6 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
# Download toolchain and SDK for Darwin
RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz
# Download toolchain for ARM
# It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.
RUN wget -nv "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en" -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
# Download toolchain for FreeBSD 11.3
RUN wget -nv https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-11.3-toolchain.tar.xz


@@ -6,9 +6,6 @@ mkdir -p build/cmake/toolchain/darwin-x86_64
tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
mkdir -p build/cmake/toolchain/linux-aarch64
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1
mkdir -p build/cmake/toolchain/freebsd-x86_64
tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1


@@ -61,6 +61,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
ARM_SUFFIX = "-aarch64"
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = '-ppc64le'
result = []
cmake_flags = ['$CMAKE_FLAGS', '-DADD_GDB_INDEX_FOR_GOLD=1']
@@ -69,8 +70,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
is_cross_arm = compiler.endswith(ARM_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd
is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd or is_cross_ppc
# Explicitly use LLD with Clang by default.
# Don't force linker for cross-compilation.
@@ -97,6 +99,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
elif is_cross_freebsd:
cc = compiler[:-len(FREEBSD_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
elif is_cross_ppc:
cc = compiler[:-len(PPC_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake")
else:
cc = compiler
@@ -205,7 +210,7 @@ if __name__ == "__main__":
parser.add_argument("--build-type", choices=("debug", ""), default="")
parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
"clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
"clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64",
"clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64", "clang-13-ppc64le",
"clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13")
parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
parser.add_argument("--unbundled", action="store_true")


@@ -67,7 +67,7 @@ RUN apt-get update \
unixodbc \
--yes --no-install-recommends
RUN pip3 install numpy scipy pandas Jinja2 pandas clickhouse_driver
RUN pip3 install numpy scipy pandas Jinja2
# This symlink is required by gcc to find the lld compiler
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld


@@ -262,11 +262,13 @@ function run_tests
start_server
set +e
time clickhouse-test --hung-check -j 8 --order=random \
--fast-tests-only --no-long --testname --shard --zookeeper \
-- "$FASTTEST_FOCUS" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee "$FASTTEST_OUTPUT/test_result.txt"
set -e
}
case "$stage" in


@@ -27,7 +27,7 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install Jinja2 pandas clickhouse_driver
RUN pip3 install Jinja2
COPY * /


@@ -108,8 +108,10 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--replicated-database')
fi
set +e
clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
set -e
}
export -f run_tests


@@ -34,7 +34,7 @@ RUN apt-get update -y \
postgresql-client \
sqlite3
RUN pip3 install numpy scipy pandas Jinja2 clickhouse_driver
RUN pip3 install numpy scipy pandas Jinja2
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \


@@ -96,10 +96,12 @@ function run_tests()
ADDITIONAL_OPTIONS+=('8')
fi
set +e
clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
set -e
}
export -f run_tests
@@ -114,12 +116,6 @@ grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
clickhouse-client --allow_introspection_functions=1 -q "
WITH
arrayMap(x -> concat(demangle(addressToSymbol(x)), ':', addressToLine(x)), trace) AS trace_array,
arrayStringConcat(trace_array, '\n') AS trace_string
SELECT * EXCEPT(trace), trace_string FROM system.trace_log FORMAT TSVWithNamesAndTypes
" | pigz > /test_output/trace-log.tsv.gz &
# Also export trace log in flamegraph-friendly format.
for trace_type in CPU Memory Real
@@ -146,6 +142,7 @@ fi
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
tar -chf /test_output/zookeeper_log_dump.tar /var/lib/clickhouse/data/system/zookeeper_log ||:
tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||:
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then


@@ -71,42 +71,42 @@ def prepare_for_hung_check(drop_databases):
# FIXME this function should not exist, but...
# ThreadFuzzer significantly slows down the server and causes false-positive hung check failures
call("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'", shell=True, stderr=STDOUT, timeout=30)
# We attach gdb to clickhouse-server before running tests
# to print stack traces of all crashes even if clickhouse cannot print them for some reason.
# However, it obstructs checking for hung queries.
logging.info("Will terminate gdb (if any)")
call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT)
call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT, timeout=30)
# Some tests set a too low memory limit for the default user and forget to reset it back.
# It may cause SYSTEM queries to fail, so let's disable the memory limit.
call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT)
call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT, timeout=30)
# Some tests execute SYSTEM STOP MERGES or similar queries.
# It may cause some ALTERs to hang.
# Possibly we should fix tests and forbid using such queries without specifying a table.
call("clickhouse client -q 'SYSTEM START MERGES'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START TTL MERGES'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START MOVES'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START FETCHES'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START MERGES'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START TTL MERGES'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START MOVES'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START FETCHES'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT, timeout=30)
call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT, timeout=30)
# Issue #21004, live views are experimental, so let's just suppress it
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT)
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT, timeout=30)
# Kill other queries which are known to be slow
# It's a query from 01232_preparing_sets_race_condition_long; it may take up to 1000 seconds in slow builds
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT)
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT, timeout=30)
# Long query from 00084_external_agregation
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT)
call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT, timeout=30)
if drop_databases:
# Here we try to drop all databases in async mode. If some queries really hung, then the drop will hang too.
# Otherwise we get rid of queries which wait for the background pool. It can take a long time on slow builds (more than 900 seconds).
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True).decode('utf-8').strip().split()
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True, timeout=30).decode('utf-8').strip().split()
for db in databases:
if db == "system":
continue
@ -117,13 +117,13 @@ def prepare_for_hung_check(drop_databases):
# Wait for last queries to finish if any, not longer than 300 seconds
call("""clickhouse client -q "select sleepEachRow((
select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT)
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT, timeout=330)
# Even if all clickhouse-test processes are finished, there are probably some sh scripts,
# which still run some new queries. Let's ignore them.
try:
query = """clickhouse client -q "SELECT count() FROM system.processes where where elapsed > 300" """
output = check_output(query, shell=True, stderr=STDOUT).decode('utf-8').strip()
output = check_output(query, shell=True, stderr=STDOUT, timeout=30).decode('utf-8').strip()
if int(output) == 0:
return False
except:
@ -176,6 +176,7 @@ if __name__ == "__main__":
if res != 0 and have_long_running_queries:
logging.info("Hung check failed with exit code {}".format(res))
hung_check_status = "Hung check failed\tFAIL\n"
open(os.path.join(args.output_folder, "test_results.tsv"), 'w+').write(hung_check_status)
with open(os.path.join(args.output_folder, "test_results.tsv"), 'w+') as results:
results.write(hung_check_status)
logging.info("Stress test finished")

View File

@ -10,7 +10,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
python3-pip \
pylint \
yamllint \
&& pip3 install codespell pandas clickhouse_driver
&& pip3 install codespell
COPY run.sh /
COPY process_style_check_result.py /

View File

@ -28,6 +28,7 @@ def process_test_log(log_path):
test_results = []
with open(log_path, 'r') as test_file:
for line in test_file:
original_line = line
line = line.strip()
if any(s in line for s in NO_TASK_TIMEOUT_SIGNS):
task_timeout = False
@ -49,19 +50,24 @@ def process_test_log(log_path):
total += 1
if TIMEOUT_SIGN in line:
failed += 1
test_results.append((test_name, "Timeout", test_time))
test_results.append((test_name, "Timeout", test_time, []))
elif FAIL_SIGN in line:
failed += 1
test_results.append((test_name, "FAIL", test_time))
test_results.append((test_name, "FAIL", test_time, []))
elif UNKNOWN_SIGN in line:
unknown += 1
test_results.append((test_name, "FAIL", test_time))
test_results.append((test_name, "FAIL", test_time, []))
elif SKIPPED_SIGN in line:
skipped += 1
test_results.append((test_name, "SKIPPED", test_time))
test_results.append((test_name, "SKIPPED", test_time, []))
else:
success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time))
test_results.append((test_name, "OK", test_time, []))
elif len(test_results) > 0 and test_results[-1][1] == "FAIL":
test_results[-1][3].append(original_line)
test_results = [(test[0], test[1], test[2], ''.join(test[3])) for test in test_results]
return total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results
def process_result(result_path):
@ -89,14 +95,14 @@ def process_result(result_path):
if hung:
description = "Some queries hung, "
state = "failure"
test_results.append(("Some queries hung", "FAIL", "0"))
test_results.append(("Some queries hung", "FAIL", "0", ""))
elif task_timeout:
description = "Timeout, "
state = "failure"
test_results.append(("Timeout", "FAIL", "0"))
test_results.append(("Timeout", "FAIL", "0", ""))
elif retries:
description = "Some tests restarted, "
test_results.append(("Some tests restarted", "SKIPPED", "0"))
test_results.append(("Some tests restarted", "SKIPPED", "0", ""))
else:
description = ""

View File

@ -0,0 +1,59 @@
#!/bin/sh -e
OS=$(uname -s)
ARCH=$(uname -m)
DIR=
if [ "${OS}" = "Linux" ]
then
if [ "${ARCH}" = "x86_64" ]
then
DIR="amd64"
elif [ "${ARCH}" = "aarch64" ]
then
DIR="aarch64"
elif [ "${ARCH}" = "powerpc64le" ]
then
DIR="powerpc64le"
fi
elif [ "${OS}" = "FreeBSD" ]
then
if [ "${ARCH}" = "x86_64" ]
then
DIR="freebsd"
elif [ "${ARCH}" = "aarch64" ]
then
DIR="freebsd-aarch64"
elif [ "${ARCH}" = "powerpc64le" ]
then
DIR="freebsd-powerpc64le"
fi
elif [ "${OS}" = "Darwin" ]
then
if [ "${ARCH}" = "x86_64" ]
then
DIR="macos"
elif [ "${ARCH}" = "aarch64" ]
then
DIR="macos-aarch64"
fi
fi
if [ -z "${DIR}" ]
then
echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
exit 1
fi
URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
echo "Will download ${URL}"
curl -O "${URL}" && chmod a+x clickhouse &&
echo "Successfully downloaded the ClickHouse binary, you can run it as:
./clickhouse"
if [ "${OS}" = "Linux" ]
then
echo "You can also install it:
sudo ./clickhouse install"
fi

View File

@ -9,15 +9,11 @@ This is for the case when you have a Linux machine and want to use it to build `cl
The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.
## Install Clang-8 {#install-clang-8}
## Install Clang-13
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, in Ubuntu Bionic you can use the following commands:
``` bash
echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list
sudo apt-get update
sudo apt-get install clang-8
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@ -34,7 +30,7 @@ tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cma
``` bash
cd ClickHouse
mkdir build-arm64
CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
CC=clang-13 CXX=clang++-13 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
ninja -C build-arm64
```

View File

@ -688,7 +688,7 @@ Tags:
- `policy_name_N` — Policy name. Policy names must be unique.
- `volume_name_N` — Volume name. Volume names must be unique.
- `disk` — a disk within a volume.
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks.
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. If the estimated size of a merged part is bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume once they reach a large size. Do not use this setting if your policy has only one volume (a query to inspect the resulting policies is sketched after this list).
- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1).
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
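
Once configured, the policies can be inspected by querying `system.storage_policies`; a small sketch (column names as in recent ClickHouse versions):

``` sql
-- inspect configured policies, their volumes, disks and limits
SELECT policy_name, volume_name, disks, max_data_part_size, move_factor
FROM system.storage_policies;
```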

View File

@ -188,14 +188,15 @@ When the `max_parallel_replicas` option is enabled, query processing is parallel
## Virtual Columns {#virtual-columns}
- `_shard_num` — Contains the `shard_num` (from `system.clusters`). Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
- `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
!!! note "Note"
Since [`remote`](../../../sql-reference/table-functions/remote.md)/`cluster` table functions internally create temporary instance of the same Distributed engine, `_shard_num` is available there too.
Since [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create temporary Distributed table, `_shard_num` is available there too.
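
For illustration, a hedged sketch with a hypothetical Distributed table `dist_table`, counting rows per shard via the virtual column:

``` sql
-- `dist_table` is a hypothetical Distributed table
SELECT _shard_num, count() FROM dist_table GROUP BY _shard_num;
```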
**See Also**
- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size)
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) description
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting
- [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) and [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) functions
[Original article](https://clickhouse.com/docs/en/operations/table_engines/distributed/) <!--hide-->

View File

@ -127,6 +127,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--secure` — If specified, will connect to server over secure connection.
- `--history_file` — Path to a file containing command history.
- `--param_<name>` — Value for a [query with parameters](#cli-queries-with-parameters).
- `--hardware-utilization` — Print hardware utilization information in progress bar.
Since version 20.5, `clickhouse-client` has automatic syntax highlighting (always enabled).

View File

@ -15,6 +15,7 @@ toc_title: Adopters
| <a href="http://www.adscribe.tv/" class="favicon">AdScribe</a> | Ads | TV Analytics | — | — | [A quote from CTO](https://altinity.com/24x7-support/) |
| <a href="https://ahrefs.com/" class="favicon">Ahrefs</a> | SEO | Analytics | — | — | [Job listing](https://ahrefs.com/jobs/data-scientist-search) |
| <a href="https://cn.aliyun.com/" class="favicon">Alibaba Cloud</a> | Cloud | Managed Service | — | — | [Official Website](https://help.aliyun.com/product/144466.html) |
| <a href="https://cn.aliyun.com/" class="favicon">Alibaba Cloud</a> | Cloud | E-MapReduce | — | — | [Official Website](https://help.aliyun.com/document_detail/212195.html) |
| <a href="https://alohabrowser.com/" class="favicon">Aloha Browser</a> | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://presentations.clickhouse.com/meetup22/aloha.pdf) |
| <a href="https://altinity.com/" class="favicon">Altinity</a> | Cloud, SaaS | Main product | — | — | [Official Website](https://altinity.com/) |
| <a href="https://amadeus.com/" class="favicon">Amadeus</a> | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
@ -31,6 +32,7 @@ toc_title: Adopters
| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |
| <a href="https://carto.com/" class="favicon">CARTO</a> | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
| <a href="http://public.web.cern.ch/public/" class="favicon">CERN</a> | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
| <a href="https://www.checklyhq.com/" class="favicon">Checkly</a> | Software Development | Analytics | — | — | [Tweet, October 2021](https://twitter.com/tim_nolet/status/1445810665743081474?s=20) |
| <a href="http://cisco.com/" class="favicon">Cisco</a> | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
| <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
| <a href="https://city-mobil.ru" class="favicon">Citymobil</a> | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
@ -115,6 +117,7 @@ toc_title: Adopters
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
| <a href="https://www.sipfront.com/" class="favicon">Sipfront</a> | Software Development | Analytics | — | — | [Tweet, October 2021](https://twitter.com/andreasgranig/status/1446404332337913895?s=20) |
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
| <a href="https://splitbee.io" class="favicon">Splitbee</a> | Analytics | Main Product | — | — | [Blog Post, Mai 2021](https://splitbee.io/blog/new-pricing) |
@ -162,5 +165,6 @@ toc_title: Adopters
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
| <a href="https://ecommpay.com/" class="favicon">Ecommpay</a> | Payment Processing | Logs | — | — | [Video, Nov 2019](https://www.youtube.com/watch?v=d3GdZTOWGLk) |
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->

View File

@ -39,9 +39,9 @@ In ClickHouse, data can reside on different shards. Each shard can be a group of
ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that is identical to the ANSI SQL standard in [many cases](../sql-reference/ansi.md).
Supported queries include [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md) clause, [IN](../sql-reference/operators/in.md) operator, and scalar subqueries.
Supported queries include [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md) clause, [IN](../sql-reference/operators/in.md) operator, [window functions](../sql-reference/window-functions/index.md) and scalar subqueries.
Correlated (dependent) subqueries and window functions are not supported at the time of writing but might become available in the future.
Correlated (dependent) subqueries are not supported at the time of writing but might become available in the future.
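
For illustration, a minimal window-function sketch (runnable as-is on a recent server):

``` sql
-- running total over a trivial generated table
SELECT number, sum(number) OVER (ORDER BY number) AS running_total
FROM numbers(5);
```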
## Vector Computation Engine {#vector-engine}

View File

@ -71,13 +71,13 @@ If no conditions are met for a data part, ClickHouse uses the `lz4` compression.
## encryption {#server-settings-encryption}
Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). Key (or keys) should be written in enviroment variables or be set in configuration file.
Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). Key (or keys) should be written in environment variables or set in the configuration file.
Keys can be hex or string. Their length must be equal to 16.
Keys can be hex or string with a length equal to 16 bytes.
**Example**
Load from config:
Loading from config:
```xml
<encryption_codecs>
@ -88,9 +88,9 @@ Load from config:
```
!!! note "NOTE"
Storing keys in configuration file is not recommended. It isn't secure. You can move the keys into a separate config file on a secure disk and put a symlink to that config file to `config.d/` folder.
Storing keys in the configuration file is not recommended. It isn't secure. You can move the keys into a separate config file on a secure disk and put a symlink to that config file to `config.d/` folder.
Load from config, when key is in hex:
Loading from config, when the key is in hex:
```xml
<encryption_codecs>
@ -100,7 +100,7 @@ Load from config, when key is in hex:
</encryption_codecs>
```
Load key from environment variable:
Loading key from the environment variable:
```xml
<encryption_codecs>
@ -110,9 +110,9 @@ Load key from environment variable:
</encryption_codecs>
```
Where `current_key_id` sets the current key for encryption, and all specified keys can be used for decryption.
Here `current_key_id` sets the current key for encryption, and all specified keys can be used for decryption.
All this methods can be applied for multiple keys:
Each of these methods can be applied for multiple keys:
```xml
<encryption_codecs>
@ -124,9 +124,9 @@ All this methods can be applied for multiple keys:
</encryption_codecs>
```
Where `current_key_id` shows current key for encryption.
Here `current_key_id` shows current key for encryption.
Also user can add nonce that must be 12 bytes long (by default encryption and decryption will use nonce consisting of zero bytes):
Also, users can add a nonce that must be 12 bytes long (by default, encryption and decryption use a nonce consisting of zero bytes):
```xml
<encryption_codecs>
@ -146,7 +146,7 @@ Or it can be set in hex:
</encryption_codecs>
```
Everything above can be applied for `aes_256_gcm_siv` (but key must be 32 bytes length).
Everything mentioned above can be applied for `aes_256_gcm_siv` (but the key must be 32 bytes long).
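
A minimal sketch of using the configured key from SQL, assuming the `encryption_codecs` section above is in place:

``` sql
-- the codec encrypts this column on disk using the configured key
CREATE TABLE encrypted_example
(
    value String CODEC(AES_128_GCM_SIV)
)
ENGINE = MergeTree
ORDER BY tuple();
```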
## custom_settings_prefixes {#custom_settings_prefixes}

View File

@ -1808,7 +1808,7 @@ Default value: 1000
## optimize_skip_unused_shards {#optimize-skip-unused-shards}
Enables or disables skipping of unused shards for [SELECT](../../sql-reference/statements/select/index.md) queries that have sharding key condition in `WHERE/PREWHERE` (assuming that the data is distributed by sharding key, otherwise does nothing).
Enables or disables skipping of unused shards for [SELECT](../../sql-reference/statements/select/index.md) queries that have a sharding key condition in `WHERE/PREWHERE` (assuming that the data is distributed by sharding key, otherwise the query yields an incorrect result).
Possible values:
@ -3783,4 +3783,34 @@ Result:
│ 20 │ 20 │ 10 │
│ 10 │ 20 │ 30 │
└─────┴─────┴───────┘
```
```
## optimize_move_to_prewhere {#optimize_move_to_prewhere}
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries.
Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables.
Possible values:
- 0 — Automatic `PREWHERE` optimization is disabled.
- 1 — Automatic `PREWHERE` optimization is enabled.
Default value: `1`.
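
A quick way to observe the effect is `EXPLAIN SYNTAX`, which prints the rewritten query; a sketch with a hypothetical `hits` table:

``` sql
SET optimize_move_to_prewhere = 1;
-- with the setting enabled, the condition below is expected
-- to be moved into a PREWHERE clause in the rewritten query
EXPLAIN SYNTAX SELECT count() FROM hits WHERE URL LIKE '%metrica%';
```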
## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final}
Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables.
Possible values:
- 0 — Automatic `PREWHERE` optimization in `SELECT` queries with `FINAL` modifier is disabled.
- 1 — Automatic `PREWHERE` optimization in `SELECT` queries with `FINAL` modifier is enabled.
Default value: `0`.
**See Also**
- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting

View File

@ -10,6 +10,9 @@ Columns:
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression for the index calculation.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the block.
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of compressed data, in bytes.
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of decompressed data, in bytes.
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of marks, in bytes.
**Example**
@ -26,6 +29,9 @@ name: clicks_idx
type: minmax
expr: clicks
granularity: 1
data_compressed_bytes: 58
data_uncompressed_bytes: 6
marks_bytes: 48
Row 2:
──────
@ -35,4 +41,7 @@ name: contacts_null_idx
type: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
data_compressed_bytes: 58
data_uncompressed_bytes: 6
marks_bytes: 48
```

View File

@ -38,6 +38,12 @@ Columns:
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of the file with marks.
- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
- `secondary_indices_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of uncompressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
- `secondary_indices_marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of the file with marks for secondary indices.
- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The time the directory with the data part was modified. This usually corresponds to the time of data part creation.
- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The time when the data part became inactive.
@ -119,6 +125,9 @@ rows: 6
bytes_on_disk: 310
data_compressed_bytes: 157
data_uncompressed_bytes: 91
secondary_indices_compressed_bytes: 58
secondary_indices_uncompressed_bytes: 6
secondary_indices_marks_bytes: 48
marks_bytes: 144
modification_time: 2020-06-18 13:01:49
remove_time: 1970-01-01 00:00:00

View File

@ -1,4 +1,3 @@
---
toc_priority: 58
toc_title: Usage Recommendations
---
@ -60,7 +59,7 @@ $ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`.
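For example, for a RAID with 4 devices and a 64 KB (65536-byte) chunk size: `2 * 4 * 65536 / 4096 = 128`.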
A block size of 1024 KB is sufficient for all RAID configurations.
A block size of 64 KB is sufficient for most RAID configurations. The average clickhouse-server write size is approximately 1 MB (1024 KB), and thus the recommended stripe size is also 1 MB. The block size can be optimized if needed when set to 1 MB divided by the number of non-parity disks in the RAID array, such that each write is parallelized across all available non-parity disks.
Never set the block size too small or too large.
You can use RAID-0 on SSD.

View File

@ -26,6 +26,7 @@ SELECT
## timeZone {#timezone}
Returns the timezone of the server.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
**Syntax**

View File

@ -40,7 +40,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT h3IsValid(630814730351855103) as h3IsValid;
SELECT h3IsValid(630814730351855103) AS h3IsValid;
```
Result:
@ -77,7 +77,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT h3GetResolution(639821929606596015) as resolution;
SELECT h3GetResolution(639821929606596015) AS resolution;
```
Result:
@ -111,7 +111,7 @@ h3EdgeAngle(resolution)
Query:
``` sql
SELECT h3EdgeAngle(10) as edgeAngle;
SELECT h3EdgeAngle(10) AS edgeAngle;
```
Result:
@ -145,7 +145,7 @@ h3EdgeLengthM(resolution)
Query:
``` sql
SELECT h3EdgeLengthM(15) as edgeLengthM;
SELECT h3EdgeLengthM(15) AS edgeLengthM;
```
Result:
@ -184,7 +184,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index;
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```
Result:
@ -333,7 +333,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT h3GetBaseCell(612916788725809151) as basecell;
SELECT h3GetBaseCell(612916788725809151) AS basecell;
```
Result:
@ -369,7 +369,7 @@ Type: [Float64](../../../sql-reference/data-types/float.md).
Query:
``` sql
SELECT h3HexAreaM2(13) as area;
SELECT h3HexAreaM2(13) AS area;
```
Result:
@ -481,7 +481,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT h3ToParent(599405990164561919, 3) as parent;
SELECT h3ToParent(599405990164561919, 3) AS parent;
```
Result:
@ -515,7 +515,7 @@ Type: [String](../../../sql-reference/data-types/string.md).
Query:
``` sql
SELECT h3ToString(617420388352917503) as h3_string;
SELECT h3ToString(617420388352917503) AS h3_string;
```
Result:
@ -549,7 +549,7 @@ stringToH3(index_str)
Query:
``` sql
SELECT stringToH3('89184926cc3ffff') as index;
SELECT stringToH3('89184926cc3ffff') AS index;
```
Result:
@ -583,7 +583,7 @@ h3GetResolution(index)
Query:
``` sql
SELECT h3GetResolution(617420388352917503) as res;
SELECT h3GetResolution(617420388352917503) AS res;
```
Result:
@ -620,7 +620,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT h3IsResClassIII(617420388352917503) as res;
SELECT h3IsResClassIII(617420388352917503) AS res;
```
Result:
@ -631,7 +631,7 @@ Result:
└─────┘
```
## h3IsPentagon {#h3ispentagon }
## h3IsPentagon {#h3ispentagon}
Returns whether this [H3](#h3index) index represents a pentagonal cell.
@ -657,7 +657,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
Query:
``` sql
SELECT SELECT h3IsPentagon(644721767722457330) as pentagon;
SELECT h3IsPentagon(644721767722457330) AS pentagon;
```
Result:
@ -693,7 +693,7 @@ Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-
Query:
``` sql
SELECT SELECT h3GetFaces(599686042433355775) as faces;
SELECT h3GetFaces(599686042433355775) AS faces;
```
Result:

View File

@ -141,20 +141,49 @@ This is a relatively fast non-cryptographic hash function of average quality for
Calculates a 64-bit hash code from any type of integer.
It works faster than intHash32. Average quality.
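
Both functions accept any integer type; a minimal usage sketch (hash values are deterministic but omitted here):

``` sql
SELECT intHash32(42) AS h32, intHash64(42) AS h64;
```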
## SHA1 {#sha1}
## SHA1, SHA224, SHA256, SHA512 {#sha}
## SHA224 {#sha224}
Calculates SHA-1, SHA-224, SHA-256, SHA-512 hash from a string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md).
## SHA256 {#sha256}
**Syntax**
## SHA384 {#sha384}
``` sql
SHA1('s')
...
SHA512('s')
```
## SHA512 {#sha512}
Calculates SHA-1, SHA-224, SHA-256, SHA-384 or SHA-512 from a string and returns the resulting set of bytes as FixedString(20), FixedString(28), FixedString(32), FixedString(48) or FixedString(64).
The function works fairly slowly (SHA-1 processes about 5 million short strings per second per processor core, while SHA-224 and SHA-256 process about 2.2 million).
We recommend using this function only in cases when you need a specific hash function and you can't select it.
Even in these cases, we recommend applying the function offline and pre-calculating values when inserting them into the table, instead of applying it in SELECTS.
Even in these cases, we recommend applying the function offline and pre-calculating values when inserting them into the table, instead of applying it in `SELECT` queries.
**Arguments**
- `s` — Input string for SHA hash calculation. [String](../data-types/string.md).
**Returned value**
- SHA hash as a hex-unencoded FixedString. SHA-1 returns as FixedString(20), SHA-224 as FixedString(28), SHA-256 — FixedString(32), SHA-512 — FixedString(64).
Type: [FixedString](../data-types/fixedstring.md).
**Example**
Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
Query:
``` sql
SELECT hex(SHA1('abc'));
```
Result:
``` text
┌─hex(SHA1('abc'))─────────────────────────┐
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
└──────────────────────────────────────────┘
```
## URLHash(url\[, N\]) {#urlhashurl-n}

View File

@ -8,6 +8,7 @@ toc_title: Other
## hostName() {#hostname}
Returns a string with the name of the host that this function was performed on. For distributed processing, this is the name of the remote server host, if the function is performed on a remote server.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
## getMacro {#getmacro}
@ -691,10 +692,18 @@ Returns the largest value of a and b.
## uptime() {#uptime}
Returns the server's uptime in seconds.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
## version() {#version}
Returns the version of the server as a string.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
## buildId() {#buildid}
Returns the build ID generated by a compiler for the running ClickHouse server binary.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
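
To see the per-shard behavior of these functions, one can query them through a cluster; a sketch assuming the `test_cluster_two_shards_localhost` test cluster:

``` sql
-- executed on every replica, so hostName() and version() become ordinary columns
SELECT hostName(), version() FROM clusterAllReplicas('test_cluster_two_shards_localhost', system.one);
```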
## blockNumber {#blocknumber}
@ -2101,6 +2110,7 @@ UNSUPPORTED_METHOD
## tcpPort {#tcpPort}
Returns the [native interface](../../interfaces/tcp.md) TCP port number that the server listens on.
If it is executed in the context of a distributed table, then it generates a normal column, otherwise it produces a constant value.
**Syntax**
@ -2354,3 +2364,66 @@ Result:
│ 1 │
└─────────┘
```
## shardNum {#shard-num}
Returns the index of a shard which processes a part of the data for a distributed query. Indices start from `1`.
If a query is not distributed, then the constant value `0` is returned.
**Syntax**
``` sql
shardNum()
```
**Returned value**
- Shard index or constant `0`.
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**Example**
In the following example a configuration with two shards is used. The query is executed on the [system.one](../../operations/system-tables/one.md) table on every shard.
Query:
``` sql
CREATE TABLE shard_num_example (dummy UInt8)
ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy);
SELECT dummy, shardNum(), shardCount() FROM shard_num_example;
```
Result:
``` text
┌─dummy─┬─shardNum()─┬─shardCount()─┐
│ 0 │ 2 │ 2 │
│ 0 │ 1 │ 2 │
└───────┴────────────┴──────────────┘
```
**See Also**
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md)
## shardCount {#shard-count}
Returns the total number of shards for a distributed query.
If a query is not distributed, then the constant value `0` is returned.
**Syntax**
``` sql
shardCount()
```
**Returned value**
- Total number of shards or `0`.
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**See Also**
- [shardNum()](#shard-num) function example also contains `shardCount()` function call.

View File

@ -28,7 +28,7 @@ The function also works for [arrays](array-functions.md#function-empty) or [UUID
**Returned value**
- Returns `1` for an empty string or `0` for a non-empty string.
- Returns `1` for an empty string or `0` for a non-empty string.
Type: [UInt8](../data-types/int-uint.md).
@ -68,7 +68,7 @@ The function also works for [arrays](array-functions.md#function-notempty) or [U
**Returned value**
- Returns `1` for a non-empty string or `0` for an empty string string.
- Returns `1` for a non-empty string or `0` for an empty string.
Type: [UInt8](../data-types/int-uint.md).
@ -313,6 +313,32 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');
└───────────────────────┘
```
## tokens {#tokens}
Splits a string into tokens, using non-alphanumeric ASCII characters as separators.
**Arguments**
- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object.
**Returned value**
- The resulting array of tokens from the input string.
Type: [Array](../data-types/array.md).
**Example**
``` sql
SELECT tokens('test1,;\\ test2,;\\ test3,;\\ test4') AS tokens;
```
``` text
┌─tokens────────────────────────────┐
│ ['test1','test2','test3','test4'] │
└───────────────────────────────────┘
```
## repeat {#repeat}
Repeats a string as many times as specified and concatenates the replicated values as a single string.

View File

@ -279,24 +279,26 @@ Result:
└──────────────────────────────┴────────────────────┘
```
## toDate32OrDefault {#todate32-or-null}
## toDate32OrDefault {#todate32-or-default}
The same as [toDate32](#todate32) but returns default value if invalid argument is received.
Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range, the lower boundary value supported by `Date32` is returned. If the argument has the [Date](../../sql-reference/data-types/date.md) type, the boundaries of `Date` are taken into account. Returns the default value if an invalid argument is received.
**Example**
Query:
``` sql
SELECT toDate32OrDefault('1955-01-01'), toDate32OrDefault('');
SELECT
toDate32OrDefault('1930-01-01', toDate32('2020-01-01')),
toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'));
```
Result:
``` text
┌─toDate32OrDefault('1955-01-01')─┬─toDate32OrDefault('')─┐
1955-01-01 │ 1970-01-01 │
└─────────────────────────────────┴───────────────────────┘
┌─toDate32OrDefault('1930-01-01', toDate32('2020-01-01'))─┬─toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'))─┐
1930-01-01 │ 2020-01-01 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## toDecimal(32\|64\|128\|256) {#todecimal3264128256}
@ -362,7 +364,7 @@ Result:
```
## toDecimal(32\|64\|128\|256)OrDefault {#todecimal3264128256ornull}
## toDecimal(32\|64\|128\|256)OrDefault {#todecimal3264128256ordefault}
Converts an input string to a [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type value. This family of functions include:
@ -855,25 +857,25 @@ Result:
```
## accurateCastOrDefault(x, T[, default_value]) {#type_conversion_function-accurate-cast_or_null}
## accurateCastOrDefault(x, T[, default_value]) {#type_conversion_function-accurate-cast_or_default}
Converts input value `x` to the specified data type `T`. If the cast value is not representable in the target type, returns the default type value, or `default_value` if it is specified.
**Syntax**
```sql
accurateCastOrNull(x, T)
accurateCastOrDefault(x, T)
```
**Parameters**
- `x` — Input value.
- `T` — The name of the returned data type.
- `default_value` - Default value of returned data type.
- `default_value` Default value of returned data type.
**Returned value**
- The value, converted to the specified data type `T`.
- The value converted to the specified data type `T`.
**Example**

View File

@ -10,7 +10,7 @@ A set of queries that allow changing the table structure.
Syntax:
``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COMMENT|MODIFY|MATERIALIZE COLUMN ...
```
In the query, specify a list of one or more comma-separated actions.
@ -25,6 +25,7 @@ The following actions are supported:
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
- [MODIFY COLUMN](#alter_modify-column) — Changes the column's type, default expression and TTL.
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.
- [MATERIALIZE COLUMN](#materialize-column) — Materializes the column in the parts where the column is missing.
These actions are described in detail below.
@ -193,6 +194,39 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
- [REMOVE TTL](ttl.md).
## MATERIALIZE COLUMN {#materialize-column}
Materializes the column in the parts where the column is missing. This is useful when creating a new column with a complicated `DEFAULT` or `MATERIALIZED` expression. Computing the column directly in a `SELECT` query can noticeably increase execution time, so it is reasonable to use `MATERIALIZE COLUMN` for such columns. To perform the same manipulation for an existing column, use the `FINAL` modifier.
Syntax:
```sql
ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
```
**Example**
```sql
DROP TABLE IF EXISTS tmp;
SET mutations_sync = 2;
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);
SELECT groupArray(x), groupArray(s) FROM tmp;
```
**Result:**
```sql
┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','5','6','7','8','9'] │
└───────────────────────┴───────────────────────────────────────────┘
```
**See Also**
- [MATERIALIZED](../../statements/create/table.md#materialized).
## Limitations {#alter-query-limitations}
The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
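
For example, a sketch with a hypothetical table `visits`: the two array columns below together form a nested structure `goals`:

``` sql
ALTER TABLE visits
    ADD COLUMN `goals.id` Array(UInt32),
    ADD COLUMN `goals.price` Array(Int64);
```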

View File

@ -50,14 +50,13 @@ When creating a materialized view with `TO [db].[table]`, you must not use `POPU
A materialized view is implemented as follows: when inserting data to the table specified in `SELECT`, part of the inserted data is converted by this `SELECT` query, and the result is inserted in the view.
!!! important "Important"
Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in `SELECT`'s result ClickHouse will use a default value, even if column is not `Nullable`. A safe practice would be to add aliases for every column when using Materialized views.
Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in the `SELECT` query result, ClickHouse uses a default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice would be to add aliases for every column when using Materialized views.
!!! important "Important"
Materialized views in ClickHouse are implemented more like insert triggers. If theres some aggregation in the view query, its applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) does not change the materialized view.
If you specify `POPULATE`, the existing table data is inserted in the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` . Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using POPULATE, since data inserted in the table during the view creation will not be inserted in it.
If you specify `POPULATE`, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` . Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using `POPULATE`, since data inserted in the table during the view creation will not be inserted in it.
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT` Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data wont be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won't be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
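
A minimal sketch of the recommended aliasing practice, with hypothetical source and destination tables:

``` sql
CREATE TABLE src (id UInt64, value String) ENGINE = MergeTree ORDER BY id;
CREATE TABLE dst (id UInt64, v String) ENGINE = MergeTree ORDER BY id;

-- the alias makes the SELECT column name match the destination column
CREATE MATERIALIZED VIEW mv TO dst AS
SELECT id, value AS v FROM src;
```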

View File

@ -6,7 +6,7 @@ toc_title: PREWHERE
Prewhere is an optimization to apply filtering more efficiently. It is enabled by default even if the `PREWHERE` clause is not specified explicitly. It works by automatically moving part of the [WHERE](../../../sql-reference/statements/select/where.md) condition to the prewhere stage. The role of the `PREWHERE` clause is only to control this optimization if you think you know how to do it better than it happens by default.
With prewhere optimization, at first only the columns necessary for executing prewhere expression are read. Then the other columns are read that are needed for running the rest of the query, but only those blocks where the prewhere expression is “true” at least for some rows. If there are a lot of blocks where prewhere expression is “false” for all rows and prewhere needs less columns than other parts of query, this often allows to read a lot less data from disk for query execution.
With prewhere optimization, at first only the columns necessary for executing the prewhere expression are read. Then the other columns that are needed for running the rest of the query are read, but only in those blocks where the prewhere expression is `true` for at least some rows. If there are a lot of blocks where the prewhere expression is `false` for all rows, and prewhere needs fewer columns than other parts of the query, this often allows reading much less data from disk for query execution.
## Controlling Prewhere Manually {#controlling-prewhere-manually}
@ -14,11 +14,13 @@ The clause has the same meaning as the `WHERE` clause. The difference is in whic
A query may simultaneously specify `PREWHERE` and `WHERE`. In this case, `PREWHERE` precedes `WHERE`.
If the `optimize_move_to_prewhere` setting is set to 0, heuristics to automatically move parts of expressions from `WHERE` to `PREWHERE` are disabled.
If the [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) setting is set to 0, heuristics to automatically move parts of expressions from `WHERE` to `PREWHERE` are disabled.
If query has [FINAL](from.md#select-from-final) modifier, the `PREWHERE` optimization is not always correct. It is enabled only if both settings [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) and [optimize_move_to_prewhere_if_final](../../../operations/settings/settings.md#optimize_move_to_prewhere_if_final) are turned on.
!!! note "Attention"
The `PREWHERE` section is executed before` FINAL`, so the results of `FROM FINAL` queries may be skewed when using` PREWHERE` with fields not in the `ORDER BY` section of a table.
The `PREWHERE` section is executed before `FINAL`, so the results of `FROM ... FINAL` queries may be skewed when using `PREWHERE` with fields not in the `ORDER BY` section of a table.
## Limitations {#limitations}
`PREWHERE` is only supported by tables from the `*MergeTree` family.
`PREWHERE` is only supported by tables from the [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) family.
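
A manual `PREWHERE` sketch with a hypothetical `hits` table: the cheap, highly selective condition goes to `PREWHERE`, while the rest stays in `WHERE`:

``` sql
SELECT count()
FROM hits
PREWHERE UserID = 12345
WHERE URL LIKE '%clickhouse%';
```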

View File

@ -668,7 +668,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
- `policy_name_N` — Policy name. Policy names must be unique.
- `volume_name_N` — Volume name. Volume names must be unique.
- `disk` — a disk within a volume.
- `max_data_part_size_bytes` — the maximum size of a data part that can be stored on any of this volume's disks.
- `max_data_part_size_bytes` — the maximum size of a data part that can be stored on any of this volume's disks. If the estimated size of a merged part is bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume once they reach a large size. Do not use this setting if your policy has only one volume.
- `move_factor` — the share of available free space on the volume; when free space drops below this factor, data starts moving to the next volume, if any (by default, 0.1).
- `prefer_not_to_merge` — disables merging of data parts on this volume. If this setting is enabled, merging data stored on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.

View File

@ -136,3 +136,15 @@ logs - the cluster name in the configuration file with
When the max_parallel_replicas option is set, query execution is parallelized across all replicas within a single shard. For details, see the [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas) section.
## Virtual Columns {#virtual-columns}
- `_shard_num` — contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
!!! note "Note"
Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create a temporary `Distributed` table, the `_shard_num` column is available there too.
**See Also**
- general description of [virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- the [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting
- the [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) and [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) functions

View File

@ -12,21 +12,21 @@ toc_title: "Anonymized Yandex.Metrica Data"
**Downloading and importing hits partitions:**
``` bash
$ curl -O https://datasets.clickhouse.com/hits/partitions/hits_v1.tar
$ tar xvf hits_v1.tar -C /var/lib/clickhouse # path to the ClickHouse data directory
$ # make sure the files have the correct access rights
$ sudo service clickhouse-server restart
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
curl -O https://datasets.clickhouse.com/hits/partitions/hits_v1.tar
tar xvf hits_v1.tar -C /var/lib/clickhouse # path to the ClickHouse data directory
# make sure the files have the correct access rights
sudo service clickhouse-server restart
clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
```
**Downloading and importing visits partitions:**
``` bash
$ curl -O https://datasets.clickhouse.com/visits/partitions/visits_v1.tar
$ tar xvf visits_v1.tar -C /var/lib/clickhouse # path to the ClickHouse data directory
$ # make sure the files have the correct access rights
$ sudo service clickhouse-server restart
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
curl -O https://datasets.clickhouse.com/visits/partitions/visits_v1.tar
tar xvf visits_v1.tar -C /var/lib/clickhouse # path to the ClickHouse data directory
# make sure the files have the correct access rights
sudo service clickhouse-server restart
clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
```
## Obtaining Tables from Compressed TSV Files {#poluchenie-tablits-iz-szhatykh-tsv-failov}
@ -34,29 +34,32 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
**Downloading and importing hits from a compressed TSV file**
``` bash
$ curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
$ # now create the table
$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
$ clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
$ # import the data
$ cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000
$ # optionally, you can optimize the table
$ clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL"
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
# create the hits_v1 table
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
# create the hits_100m_obfuscated table
clickhouse-client --query="CREATE TABLE hits_100m_obfuscated (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, Refresh UInt8, RefererCategoryID UInt16, RefererRegionID UInt32, URLCategoryID UInt16, URLRegionID UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, OriginalURL String, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), LocalEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, RemoteIP UInt32, WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming UInt32, DNSTiming UInt32, ConnectTiming UInt32, ResponseStartTiming UInt32, ResponseEndTiming UInt32, FetchTiming UInt32, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
# import the data
cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000
# optionally, optimize the table
clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL"
clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
```
**Downloading and importing visits from a compressed TSV file**
``` bash
$ curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
$ # now create the table
$ clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
$ clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, 
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
$ # import the data
$ cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
$ # optionally, optimize the table
$ clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
$ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
# now create the table
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, 
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
# import the data
cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
# optionally, optimize the table
clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
```
## Queries {#zaprosy}


@ -73,7 +73,7 @@ ClickHouse can accept (`INSERT`) and return (`SELECT
The `TabSeparated` format supports outputting total values (when WITH TOTALS is used) and extreme values (when the extremes setting is set to 1). In these cases, the total values and the extreme values are output after the main data. The main result, total values, and extreme values are separated from each other by an empty line. Example:
``` sql
SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated``
SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated
```
``` text


@ -71,9 +71,9 @@ ClickHouse checks the conditions for `min_part_size` and `min_part
## encryption {#server-settings-encryption}
Configures a command to obtain the key used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The key (or several keys) must be written in environment variables or set in the configuration file.
Keys can be represented in hexadecimal or string form. Their length must be equal to 16 bytes.
**Example**
@ -146,7 +146,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
</encryption_codecs>
```
All of the above also applies to the `aes_256_gcm_siv` algorithm (but the key must be 32 bytes long).
## custom_settings_prefixes {#custom_settings_prefixes}


@ -1707,7 +1707,7 @@ ClickHouse throws an exception
## optimize_skip_unused_shards {#optimize-skip-unused-shards}
Enables or disables skipping of unused shards for [SELECT](../../sql-reference/statements/select/index.md) queries that have a sharding key condition in the `WHERE/PREWHERE` section. It is assumed that the data is distributed by the sharding key; otherwise the query returns an incorrect result.
Possible values:
@ -3572,4 +3572,34 @@ SELECT * FROM positional_arguments ORDER BY 2,3;
│ 20 │ 20 │ 10 │
│ 10 │ 20 │ 30 │
└─────┴─────┴───────┘
```
## optimize_move_to_prewhere {#optimize_move_to_prewhere}
Enables or disables the automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries.
Works only with tables of the [*MergeTree](../../engines/table-engines/mergetree-family/index.md) family.
Possible values:
- 0 — the automatic `PREWHERE` optimization is disabled.
- 1 — the automatic `PREWHERE` optimization is enabled.
Default value: `1`.
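As a sketch (assuming a table `hits` with a `URL` column), `EXPLAIN SYNTAX` can be used to check whether the condition is moved:
``` sql
SET optimize_move_to_prewhere = 1;
-- With the optimization enabled, the rewritten query is expected to carry the condition in PREWHERE:
EXPLAIN SYNTAX SELECT count() FROM hits WHERE URL LIKE '%clickhouse%';
```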
## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final}
Enables or disables the automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
Works only with tables of the [*MergeTree](../../engines/table-engines/mergetree-family/index.md) family.
Possible values:
- 0 — the automatic `PREWHERE` optimization in `SELECT` queries with the `FINAL` modifier is disabled.
- 1 — the automatic `PREWHERE` optimization in `SELECT` queries with the `FINAL` modifier is enabled.
Default value: `0`.
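A sketch of how the two settings combine (the `ReplacingMergeTree` table `visits_last` is a made-up example):
``` sql
SET optimize_move_to_prewhere = 1;
SET optimize_move_to_prewhere_if_final = 1;
-- Only when both settings are enabled may the condition be moved to PREWHERE for a FINAL query:
SELECT * FROM visits_last FINAL WHERE CounterID = 101500;
```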
**See also**
- the [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting


@ -251,7 +251,7 @@ ClickHouse receives information from the ODBC driver about
### A vulnerability found in how ODBC dictionaries operate {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}
!!! attention "Attention"
When connecting to a database through ODBC, the `Servername` connection parameter can be substituted. In that case, the `USERNAME` and `PASSWORD` values from `odbc.ini` are sent to the remote server and can be compromised.
**Example of unsafe usage**
@ -764,7 +764,7 @@ Setting fields:
- `port` Порт для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри `<replica>`).
- `user` Имя пользователя для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри `<replica>`).
- `password` Пароль для пользователя PostgreSQL.
- `replica` Section of replica configurations. There can be multiple sections.
- `replica` Раздел конфигурации реплик. Может быть несколько.
- `replica/host` — The PostgreSQL host.
- `replica/port` — The PostgreSQL port.
- `replica/priority` — The replica priority. When attempting to connect, ClickHouse tries the replicas in priority order; a lower value means a higher priority (see the sketch after this list).
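A minimal sketch of such a replica configuration (host names and credentials are made-up placeholders):
``` xml
<postgresql>
    <port>5432</port>
    <user>clickhouse</user>
    <password>secret</password>
    <!-- Two replicas; the lower priority value is tried first. -->
    <replica>
        <host>postgres1.example.com</host>
        <priority>1</priority>
    </replica>
    <replica>
        <host>postgres2.example.com</host>
        <priority>2</priority>
    </replica>
</postgresql>
```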


@ -26,6 +26,7 @@ SELECT
## timeZone {#timezone}
Returns the server's time zone.
If the function is executed in the context of a distributed table, it generates a regular column with values relevant to each shard. Otherwise it produces a constant value.
**Syntax**
@ -334,7 +335,7 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;
**Syntax**
``` sql
toStartOfSecond(value[, timezone])
toStartOfSecond(value, [timezone])
```
**Arguments**


@ -6,7 +6,7 @@ toc_title: "Functions for working with H3 indexes"
[H3](https://eng.uber.com/h3/) is a geocoding system that divides the Earth's surface into equal hexagonal cells. The system supports a hierarchy (nesting) of cells, i.e. each "parent" hexagon can be divided into seven equal nested "child" hexagons, and so on.
The nesting level is called the "resolution" and can take values from `0` to `15`, where `0` corresponds to the "base" cells of the topmost level (the largest ones).
For each point with a latitude and longitude, you can get the 64-bit H3 index corresponding to the number of the hexagonal cell that contains the point.
@ -38,7 +38,7 @@ h3IsValid(h3index)
Query:
``` sql
SELECT h3IsValid(630814730351855103) as h3IsValid;
SELECT h3IsValid(630814730351855103) AS h3IsValid;
```
Result:
@ -75,7 +75,7 @@ h3GetResolution(h3index)
Query:
``` sql
SELECT h3GetResolution(639821929606596015) as resolution;
SELECT h3GetResolution(639821929606596015) AS resolution;
```
Result:
@ -109,7 +109,7 @@ h3EdgeAngle(resolution)
Query:
``` sql
SELECT h3EdgeAngle(10) as edgeAngle;
SELECT h3EdgeAngle(10) AS edgeAngle;
```
Result:
@ -143,7 +143,7 @@ h3EdgeLengthM(resolution)
Query:
``` sql
SELECT h3EdgeLengthM(15) as edgeLengthM;
SELECT h3EdgeLengthM(15) AS edgeLengthM;
```
Result:
@ -182,7 +182,7 @@ geoToH3(lon, lat, resolution)
Query:
``` sql
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index;
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```
Result:
@ -295,7 +295,7 @@ h3GetBaseCell(index)
Query:
``` sql
SELECT h3GetBaseCell(612916788725809151) as basecell;
SELECT h3GetBaseCell(612916788725809151) AS basecell;
```
Result:
@ -329,7 +329,7 @@ h3HexAreaM2(resolution)
Query:
``` sql
SELECT h3HexAreaM2(13) as area;
SELECT h3HexAreaM2(13) AS area;
```
Result:
@ -441,7 +441,7 @@ h3ToParent(index, resolution)
Query:
``` sql
SELECT h3ToParent(599405990164561919, 3) as parent;
SELECT h3ToParent(599405990164561919, 3) AS parent;
```
Result:
@ -475,7 +475,7 @@ h3ToString(index)
Query:
``` sql
SELECT h3ToString(617420388352917503) as h3_string;
SELECT h3ToString(617420388352917503) AS h3_string;
```
Result:
@ -512,7 +512,7 @@ stringToH3(index_str)
Query:
``` sql
SELECT stringToH3('89184926cc3ffff') as index;
SELECT stringToH3('89184926cc3ffff') AS index;
```
Result:
@ -548,7 +548,7 @@ h3GetResolution(index)
Query:
``` sql
SELECT h3GetResolution(617420388352917503) as res;
SELECT h3GetResolution(617420388352917503) AS res;
```
Result:
@ -559,3 +559,114 @@ SELECT h3GetResolution(617420388352917503) as res;
└─────┘
```
## h3IsResClassIII {#h3isresclassIII}
Checks whether the [H3](#h3index) index has a resolution with Class III orientation.
**Syntax**
``` sql
h3IsResClassIII(index)
```
**Parameter**
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned values**
- `1` — the index has a resolution with Class III orientation.
- `0` — the index does not have a resolution with Class III orientation.
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT h3IsResClassIII(617420388352917503) AS res;
```
Result:
``` text
┌─res─┐
│ 1 │
└─────┘
```
## h3IsPentagon {#h3ispentagon}
Checks whether the specified [H3](#h3index) index is a pentagonal cell.
**Syntax**
``` sql
h3IsPentagon(index)
```
**Parameter**
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned values**
- `1` — the index represents a pentagonal cell.
- `0` — the index is not a pentagonal cell.
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT h3IsPentagon(644721767722457330) AS pentagon;
```
Result:
``` text
┌─pentagon─┐
│ 0 │
└──────────┘
```
## h3GetFaces {#h3getfaces}
Returns all icosahedron faces intersected by the given [H3](#h3index) index.
**Syntax**
``` sql
h3GetFaces(index)
```
**Parameter**
- `index` — Hexagonal cell index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned value**
- An array containing the icosahedron faces intersected by the given H3 index.
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT h3GetFaces(599686042433355775) AS faces;
```
Result:
``` text
┌─faces─┐
│ [7] │
└───────┘
```
[Original article](https://clickhouse.com/docs/ru/sql-reference/functions/geo/h3) <!--hide-->


@ -141,20 +141,49 @@ SELECT groupBitXor(cityHash64(*)) FROM table
Calculates a 64-bit hash code from an integer of any type.
Works faster than intHash32. Average quality.
## SHA1, SHA224, SHA256, SHA512 {#sha}
Calculates the SHA-1, SHA-224, SHA-256, or SHA-512 hash of a string and returns the resulting set of bytes as a [FixedString](../data-types/fixedstring.md).
**Syntax**
``` sql
SHA1('s')
...
SHA512('s')
```
The function works rather slowly (SHA-1 processes about 5 million short strings per second on a single CPU core, while SHA-224 and SHA-256 process about 2.2 million).
It is recommended to use these functions only in cases when you need a specific hash function and cannot select another one.
Even in these cases, it is recommended to apply the function offline, precomputing the values when inserting into the table, instead of applying it in `SELECT` queries.
**Parameters**
- `s` — Input string for the SHA hash calculation. [String](../data-types/string.md).
**Returned value**
- The SHA hash as a hex-unencoded FixedString: SHA-1 as FixedString(20), SHA-224 as FixedString(28), SHA-256 as FixedString(32), SHA-512 as FixedString(64).
Type: [FixedString](../data-types/fixedstring.md).
**Example**
Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
Query:
``` sql
SELECT hex(SHA1('abc'));
```
Result:
``` text
┌─hex(SHA1('abc'))─────────────────────────┐
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
└──────────────────────────────────────────┘
```
## URLHash(url\[, N\]) {#urlhashurl-n}


@ -8,6 +8,7 @@ toc_title: "Other functions"
## hostName() {#hostname}
Returns a string with the name of the host on which the function was executed. In distributed query processing, this is the name of the remote server host if the function is executed on a remote server.
If the function is executed in the context of a distributed table, it generates a regular column with values relevant to each shard. Otherwise it produces a constant value.
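For example (a sketch; the returned names depend on your environment, and the `test_cluster_two_shards_localhost` cluster name is an assumption that is present in many default configs):
``` sql
SELECT hostName() AS local_host;
-- One row per host when run over a cluster:
SELECT hostName() FROM clusterAllReplicas('test_cluster_two_shards_localhost', system.one);
```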
## getMacro {#getmacro}
@ -643,10 +644,17 @@ SELECT
## uptime() {#uptime}
Returns the server's uptime in seconds.
If the function is executed in the context of a distributed table, it generates a regular column with values relevant to each shard. Otherwise it produces a constant value.
## version() {#version}
Returns the server version as a string.
If the function is executed in the context of a distributed table, it generates a regular column with values relevant to each shard. Otherwise it produces a constant value.
## buildId() {#buildid}
Returns the build ID generated by a compiler for the given ClickHouse server.
If the function is executed in the context of a distributed table, it generates a regular column with values relevant to each shard. Otherwise it produces a constant value.
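These introspection functions can be combined in a single query (the returned values are specific to each server):
``` sql
SELECT
    uptime() AS uptime_seconds,
    version() AS server_version,
    buildId() AS build_id;
```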
## rowNumberInBlock {#function-rownumberinblock}
@ -2304,3 +2312,66 @@ SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0
│ 1 │
└─────────┘
```
## shardNum {#shard-num}
Returns the index of the shard that processes a part of the data for a distributed query. Indexes start from `1`.
If the query is not distributed, the value `0` is returned.
**Syntax**
``` sql
shardNum()
```
**Returned value**
- the shard index or the constant `0`.
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**Example**
The example below uses a configuration with two shards. On each shard, the query is executed against the [system.one](../../operations/system-tables/one.md) table.
Query:
``` sql
CREATE TABLE shard_num_example (dummy UInt8)
ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy);
SELECT dummy, shardNum(), shardCount() FROM shard_num_example;
```
Result:
``` text
┌─dummy─┬─shardNum()─┬─shardCount()─┐
│ 0 │ 2 │ 2 │
│ 0 │ 1 │ 2 │
└───────┴────────────┴──────────────┘
```
**See also**
- the [Distributed](../../engines/table-engines/special/distributed.md) table engine
## shardCount {#shard-count}
Returns the total number of shards for a distributed query.
If the query is not distributed, the value `0` is returned.
**Syntax**
``` sql
shardCount()
```
**Returned value**
- The total number of shards, or `0`.
Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**See also**
- The usage example of the [shardNum()](#shard-num) function also contains a `shardCount()` call.


@ -34,15 +34,15 @@ empty(x)
Query:
```sql
SELECT notempty('text');
SELECT empty('text');
```
Result:
```text
┌─empty('')─┐
│         1 │
└───────────┘
┌─empty('text')─┐
│             0 │
└───────────────┘
```
## notEmpty {#notempty}


@ -90,6 +90,26 @@ SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123');
└─────────────────────────┴───────────────────────────┘
```
## toInt(8\|16\|32\|64\|128\|256)OrDefault {#toint8163264128256orDefault}
Takes an argument of type String and tries to parse it into Int(8\|16\|32\|64\|128\|256). If it fails, the default value is returned.
**Example**
Query:
``` sql
SELECT toInt64OrDefault('123123', cast('-1' as Int64)), toInt8OrDefault('123qwe123', cast('-1' as Int8));
```
Result:
``` text
┌─toInt64OrDefault('123123', CAST('-1', 'Int64'))─┬─toInt8OrDefault('123qwe123', CAST('-1', 'Int8'))─┐
│ 123123 │ -1 │
└─────────────────────────────────────────────────┴──────────────────────────────────────────────────┘
```
## toUInt(8\|16\|32\|64\|256) {#touint8163264}
Converts an input value to the [UInt](../../sql-reference/functions/type-conversion-functions.md) type. The family of functions includes:
@ -132,12 +152,16 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8);
## toUInt(8\|16\|32\|64\|256)OrNull {#touint8163264ornull}
## toUInt(8\|16\|32\|64\|256)OrDefault {#touint8163264256ordefault}
## toFloat(32\|64) {#tofloat3264}
## toFloat(32\|64)OrZero {#tofloat3264orzero}
## toFloat(32\|64)OrNull {#tofloat3264ornull}
## toFloat(32\|64)OrDefault {#tofloat3264ordefault}
## toDate {#todate}
Synonym: `DATE`.
@ -146,23 +170,27 @@ Synonym: `DATE`.
## toDateOrNull {#todateornull}
## toDateOrDefault {#todateordefault}
## toDateTime {#todatetime}
## toDateTimeOrZero {#todatetimeorzero}
## toDateTimeOrNull {#todatetimeornull}
## toDateTimeOrDefault {#todatetimeordefault}
## toDate32 {#todate32}
Converts the argument to a value of the [Date32](../../sql-reference/data-types/date32.md) type. If the value is outside the range, the boundary value of `Date32` is returned. If the argument is of type [Date](../../sql-reference/data-types/date.md), the boundaries of `Date` are taken into account.
**Syntax**
``` sql
toDate32(value)
```
**Arguments**
- `value` — The date value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md) or [Date](../../sql-reference/data-types/date.md).
@ -250,6 +278,28 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull('');
└──────────────────────────────┴────────────────────┘
```
## toDate32OrDefault {#todate32-or-default}
Converts the argument to a value of the [Date32](../../sql-reference/data-types/date32.md) type. If the value is outside the range, the lower boundary value of `Date32` is returned. If the argument is of type [Date](../../sql-reference/data-types/date.md), the boundaries of `Date` are taken into account. If parsing the argument fails, the default value is returned.
**Example**
Query:
``` sql
SELECT
toDate32OrDefault('1930-01-01', toDate32('2020-01-01')),
toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'));
```
Result:
``` text
┌─toDate32OrDefault('1930-01-01', toDate32('2020-01-01'))─┬─toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'))─┐
│ 1930-01-01 │ 2020-01-01 │
└─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘
```
## toDecimal(32\|64\|128\|256) {#todecimal3264128}
Converts `value` to the [Decimal](../../sql-reference/functions/type-conversion-functions.md) data type with precision `S`. `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
@ -312,6 +362,59 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val);
└──────┴────────────────────────────────────────────────────┘
```
## toDecimal(32\|64\|128\|256)OrDefault {#todecimal3264128256ordefault}
Converts an input string to a value of the [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type. The family of functions includes:
- `toDecimal32OrDefault(expr, S)` — returns a value of the `Decimal32(S)` type.
- `toDecimal64OrDefault(expr, S)` — returns a value of the `Decimal64(S)` type.
- `toDecimal128OrDefault(expr, S)` — returns a value of the `Decimal128(S)` type.
- `toDecimal256OrDefault(expr, S)` — returns a value of the `Decimal256(S)` type.
These functions should be used instead of the `toDecimal*()` functions when you want to get the default value instead of an exception if the input value cannot be processed.
**Arguments**
- `expr` — An [expression](../syntax.md#syntax-expressions) that returns a value of the [String](../../sql-reference/functions/type-conversion-functions.md) type. ClickHouse expects a text representation of a decimal number, for example `'1.111'`.
- `S` — The number of decimal places in the resulting value.
**Returned value**
A value of the `Decimal(P,S)` type. The value contains:
- A number with `S` decimal places, if ClickHouse recognized a number in the input string.
- The default value of the `Decimal(P,S)` type, if ClickHouse did not recognize a number in the input string or the input number contains more than `S` decimal places.
**Examples**
Query:
``` sql
SELECT toDecimal32OrDefault(toString(-1.111), 5) AS val, toTypeName(val);
```
Result:
``` text
┌────val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴───────────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT toDecimal32OrDefault(toString(-1.111), 2) AS val, toTypeName(val);
```
Result:
``` text
┌─val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 2))─┐
│ 0 │ Decimal(9, 2) │
└─────┴───────────────────────────────────────────────────────┘
```
## toDecimal(32\|64\|128\|256)OrZero {#todecimal3264128orzero}
Converts the input value to the [Decimal(P,S)](../../sql-reference/functions/type-conversion-functions.md) type. The family of functions includes:
@ -750,6 +853,63 @@ SELECT
└───────┴──────┴──────────────┘
```
## accurateCastOrDefault(x, T[, default_value]) {#type_conversion_function-accurate-cast_or_default}
Converts the input value `x` to the specified data type `T`. If the original value cannot be converted to the target type, returns the type's default value, or `default_value` if it is specified.
**Syntax**
```sql
accurateCastOrDefault(x, T)
```
**Arguments**
- `x` — The input value.
- `T` — The name of the returned data type.
- `default_value` — The default value of the returned data type.
**Returned value**
- The value converted to the specified type `T`.
**Example**
Query:
``` sql
SELECT toTypeName(accurateCastOrDefault(5, 'UInt8'));
```
Result:
``` text
┌─toTypeName(accurateCastOrDefault(5, 'UInt8'))─┐
│ UInt8 │
└───────────────────────────────────────────────┘
```
Query:
``` sql
SELECT
accurateCastOrDefault(-1, 'UInt8') as uint8,
accurateCastOrDefault(-1, 'UInt8', 5) as uint8_default,
accurateCastOrDefault(128, 'Int8') as int8,
accurateCastOrDefault(128, 'Int8', 5) as int8_default,
accurateCastOrDefault('Test', 'FixedString(2)') as fixed_string,
accurateCastOrDefault('Test', 'FixedString(2)', 'Te') as fixed_string_default;
```
Result:
``` text
┌─uint8─┬─uint8_default─┬─int8─┬─int8_default─┬─fixed_string─┬─fixed_string_default─┐
│ 0 │ 5 │ 0 │ 5 │ │ Te │
└───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
```
## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval}
Converts the argument from a numeric data type to the [IntervalType](../../sql-reference/data-types/special-data-types/interval.md) data type.


@ -10,7 +10,7 @@ toc_title: "Column manipulations"
Syntax:
``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COMMENT|MODIFY|MATERIALIZE COLUMN ...
```
Several actions on one table can be specified in a single query, separated by commas.
@ -20,11 +20,12 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN
- [ADD COLUMN](#alter_add-column) — adds a column to the table;
- [DROP COLUMN](#alter_drop-column) — drops the column;
- [RENAME COLUMN](#alter_rename-column) — renames an existing column;
- [CLEAR COLUMN](#alter_clear-column) — resets all values in the column for a given partition;
- [COMMENT COLUMN](#alter_comment-column) — adds a comment to the column;
- [MODIFY COLUMN](#alter_modify-column) — changes the column's type, default value expression, and TTL;
- [MODIFY COLUMN REMOVE](#modify-remove) — removes one of the column's properties;
- [MATERIALIZE COLUMN](#materialize-column) — materializes the column (`MATERIALIZED`) in the parts where values are missing.
A detailed description of each action is given below.
@ -193,6 +194,35 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
- [REMOVE TTL](ttl.md).
## MATERIALIZE COLUMN {#materialize-column}
Materializes a table column in the parts where values are missing. It is useful when you need to create a new column with a complex materialized expression or a default fill expression (`DEFAULT`), because computing such a column on the fly during a `SELECT` query is noticeably expensive. To perform the same operation for an existing column, use the `FINAL` modifier.
Syntax:
```sql
ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
```
**Example**
```sql
DROP TABLE IF EXISTS tmp;
SET mutations_sync = 2;
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);
SELECT groupArray(x), groupArray(s) FROM tmp;
```
**Result:**
```sql
┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','5','6','7','8','9'] │
└───────────────────────┴───────────────────────────────────────────┘
```
## Limitations of the ALTER query {#ogranicheniia-zaprosa-alter}
The `ALTER` query lets you create and delete separate elements (columns) of nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`: a nested data structure is fully equivalent to multiple array columns whose names share the same prefix before the dot.


@ -48,9 +48,12 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na
A materialized view works as follows: when data is inserted into the table specified in its SELECT query, the block of inserted data is transformed by that SELECT query, and the result is inserted into the view.
!!! important "Important"
Materialized views in ClickHouse use **column names** rather than the column order when inserting into the target table. If some column names are missing from the `SELECT` query result, ClickHouse uses the default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice when using materialized views is to add aliases for every column.
Materialized views in ClickHouse behave more like `after insert` triggers. If the materialized view query contains an aggregation, it is applied only to the block of inserted records. Any changes to the existing data of the source table (such as updates, deletes, or dropping a partition) do not change the materialized view.
If `POPULATE` is specified, the data already contained in the source table is inserted into the view when it is created, as if a `CREATE TABLE ... AS SELECT ...` query had been run. Otherwise, the view contains only the data inserted into the table after the view is created. Using `POPULATE` is not recommended, because data inserted into the table while the view is being created will not end up in it.
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`, and so on. Keep in mind that the corresponding transformations are performed independently on each block of inserted data. For example, if `GROUP BY` is set, the data is aggregated during insertion, but only within a single batch of inserted data, and it is not aggregated any further. The exception is when using an ENGINE that performs data aggregation on its own, such as `SummingMergeTree`. A minimal sketch of this behavior follows.
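In this sketch (all names are made up), the view pre-aggregates each inserted block by `user_id`, and the `SummingMergeTree` engine merges the partial counts later in the background:
``` sql
CREATE TABLE events (ts DateTime, user_id UInt64) ENGINE = MergeTree ORDER BY ts;
CREATE MATERIALIZED VIEW events_per_user
ENGINE = SummingMergeTree ORDER BY user_id
AS SELECT user_id, count() AS c FROM events GROUP BY user_id;
-- Each INSERT is aggregated only within its own block before landing in the view:
INSERT INTO events SELECT now(), number % 3 FROM numbers(6);
```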
@ -65,8 +68,8 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na
## LIVE views [experimental feature] {#live-view}
!!! important "Important"
`LIVE VIEW` is an experimental feature. Using it may lead to compatibility loss in future versions.
To use `LIVE VIEW` and `WATCH` queries, enable the [allow_experimental_live_view](../../../operations/settings/settings.md#allow-experimental-live-view) setting.
```sql
CREATE LIVE VIEW [IF NOT EXISTS] [db.]table_name [WITH [TIMEOUT [value_in_sec] [AND]] [REFRESH [value_in_sec]]] AS SELECT ...


@ -8,17 +8,19 @@ Prewhere is an optimization for more efficient
With the prewhere optimization, at first only the columns needed to evaluate the prewhere expression are read. Then the other columns needed to run the rest of the query are read, but only from the blocks where the prewhere expression is "true" for at least some rows. If there are many blocks where the prewhere expression is "false" for all rows, and prewhere needs fewer columns than the other parts of the query, this often allows reading much less data from disk to execute the query.
## Controlling PREWHERE manually {#controlling-prewhere-manually}
It makes sense to use `PREWHERE` when there are filter conditions that use a minority of the columns in the query but filter the data strongly. This reduces the amount of data to read.
A query may specify both `PREWHERE` and `WHERE` at the same time. In this case, `PREWHERE` precedes `WHERE`.
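For instance, a sketch against the `hits_v1` table created earlier in this document, where a compact column filters strongly and the heavy `URL` column is read only from the surviving blocks:
``` sql
SELECT count()
FROM datasets.hits_v1
PREWHERE UserAgent = 7
WHERE URL LIKE '%metrika%';
```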
If the [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) setting is set to 0, the heuristic that automatically moves parts of expressions from `WHERE` to `PREWHERE` is disabled.
If a query contains the [FINAL](from.md#select-from-final) modifier, the `PREWHERE` optimization is not always correct. It takes effect only if both of the settings [optimize_move_to_prewhere](../../../operations/settings/settings.md#optimize_move_to_prewhere) and [optimize_move_to_prewhere_if_final](../../../operations/settings/settings.md#optimize_move_to_prewhere_if_final) are enabled.
!!! note "Attention"
The `PREWHERE` section is executed before `FINAL`, so the results of `FROM ... FINAL` queries may be skewed when `PREWHERE` is used with fields that are not part of the table's `ORDER BY`.
## Limitations {#limitations}
`PREWHERE` is supported only by table engines of the [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) family.


@ -160,7 +160,10 @@ def generate_cmake_flags_files() -> None:
"docs/ru/development/cmake-in-clickhouse.md"]
for lang in other_languages:
os.symlink(output_file_name, os.path.join(root_path, lang))
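# Remove a stale symlink left over from a previous run before recreating it.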
other_file_name = os.path.join(root_path, lang)
if os.path.exists(other_file_name):
os.unlink(other_file_name)
os.symlink(output_file_name, other_file_name)
if __name__ == '__main__':
generate_cmake_flags_files()


@ -14,6 +14,7 @@ module.exports = {
entry: [
path.resolve(scssPath, 'bootstrap.scss'),
path.resolve(scssPath, 'greenhouse.scss'),
path.resolve(scssPath, 'main.scss'),
path.resolve(jsPath, 'main.js'),
],


@ -156,6 +156,11 @@ def build_website(args):
os.path.join(args.src_dir, 'utils', 'list-versions', 'version_date.tsv'),
os.path.join(args.output_dir, 'data', 'version_date.tsv'))
# This file can be requested to install ClickHouse.
shutil.copy2(
os.path.join(args.src_dir, 'docs', '_includes', 'install', 'universal.sh'),
os.path.join(args.output_dir, 'data', 'install.sh'))
for root, _, filenames in os.walk(args.output_dir):
for filename in filenames:
if filename == 'main.html':
@ -218,7 +223,7 @@ def minify_file(path, css_digest, js_digest):
# TODO: restore cssmin
# elif path.endswith('.css'):
# content = cssmin.cssmin(content)
# TODO: restore jsmin
# elif path.endswith('.js'):
# content = jsmin.jsmin(content)
with open(path, 'wb') as f:
@ -226,6 +231,12 @@ def minify_file(path, css_digest, js_digest):
def minify_website(args):
# Output greenhouse css separately from main bundle to be included via the greenhouse iframe
command = f"cat '{args.website_dir}/css/greenhouse.css' > '{args.output_dir}/css/greenhouse.css'"
logging.info(command)
output = subprocess.check_output(command, shell=True)
logging.debug(output)
css_in = ' '.join(get_css_in(args))
css_out = f'{args.output_dir}/css/base.css'
if args.minify:

Some files were not shown because too many files have changed in this diff.