ClickHouse/ClickHouse, commit 95ee742ee7: merge master
@@ -15,15 +15,27 @@ using Poco::Message;
using DB::LogsLevel;
using DB::CurrentThread;

/// Logs a message to a specified logger with that level.

#define LOG_IMPL(logger, priority, PRIORITY, ...) do \

namespace
{
    template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
    template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
}

/// Logs a message to a specified logger with that level.
/// If more than one argument is provided,
/// the first argument is interpreted as a template with {}-substitutions
/// and the remaining arguments are treated as values to substitute.
/// If only one argument is provided, it is treated as a message without substitutions.

#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
{ \
    const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
        (CurrentThread::getGroup()->client_logs_level >= (priority)); \
    if ((logger)->is((PRIORITY)) || is_clients_log) \
    { \
        std::string formatted_message = fmt::format(__VA_ARGS__); \
        std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
        if (auto channel = (logger)->getChannel()) \
        { \
            std::string file_function; \
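The numArgs/firstArg helpers are what let the single-argument and multi-argument forms coexist in one macro. A minimal sketch of that dispatch in isolation (DEMO_LOG is a hypothetical stand-in; it assumes the fmt library is available and drops the logger, priority, and client-log plumbing of LOG_IMPL):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <fmt/format.h>

    namespace
    {
        template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
        template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
    }

    /// One argument: the message is used verbatim. Several arguments: the first
    /// is a fmt template and the rest are substituted into its {} placeholders.
    #define DEMO_LOG(...) do \
    { \
        std::string formatted_message = numArgs(__VA_ARGS__) > 1 \
            ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
        std::cout << formatted_message << '\n'; \
    } while (false)

    int main()
    {
        DEMO_LOG("plain message without substitutions");
        DEMO_LOG("read {} rows, {} bytes", 100, 4096);
    }

Note that both branches of the ternary must compile for every call site, which is why firstArg simply forwards its first argument and ignores the rest.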
cmake/autogenerated_versions.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
# These strings are autochanged by release_lib.sh:
SET(VERSION_REVISION 54435)
SET(VERSION_MAJOR 20)
SET(VERSION_MINOR 5)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
SET(VERSION_DESCRIBE v20.5.1.1-prestable)
SET(VERSION_STRING 20.5.1.1)
# end of autochange
@@ -1,57 +1,26 @@
set(_PROTOBUF_PROTOC $<TARGET_FILE:protoc>)
set(_GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)
option (ENABLE_GRPC "Use gRPC" ${ENABLE_LIBRARIES})

function(PROTOBUF_GENERATE_GRPC_CPP SRCS HDRS)
if(NOT ARGN)
message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files")
return()
endif()
if (ENABLE_GRPC)
option (USE_INTERNAL_GRPC_LIBRARY "Set to FALSE to use system gRPC library instead of bundled" ${NOT_UNBUNDLED})

if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if (USE_INTERNAL_GRPC_LIBRARY)
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/grpc/include/grpc++/grpc++.h")
message(WARNING "submodule contrib/grpc is missing. To fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_GRPC_LIBRARY OFF)
elseif (NOT USE_PROTOBUF)
message(WARNING "gRPC requires protobuf which is disabled")
set (USE_INTERNAL_GRPC_LIBRARY OFF)
else()
set (GRPC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc/include")
set (GRPC_LIBRARY "libgrpc++")
set (USE_GRPC ON)
endif()
else()
find_package(grpc)
if (GRPC_INCLUDE_DIR AND GRPC_LIBRARY)
set (USE_GRPC ON)
endif()
endif()
endif()

if(DEFINED PROTOBUF_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()

set(${SRCS})
set(${HDRS})
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)

list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h")

add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc"
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h"
COMMAND ${_PROTOBUF_PROTOC}
ARGS --grpc_out=${CMAKE_CURRENT_BINARY_DIR}
--plugin=protoc-gen-grpc=${_GRPC_CPP_PLUGIN_EXECUTABLE}
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL}
COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}"
VERBATIM)
endforeach()

set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()
message(STATUS "Using gRPC=${USE_GRPC}: ${GRPC_INCLUDE_DIR} : ${GRPC_LIBRARY}")
@@ -28,68 +28,6 @@ elseif(NOT MISSING_INTERNAL_PROTOBUF_LIBRARY)
set(Protobuf_LITE_LIBRARY libprotobuf-lite)

set(Protobuf_PROTOC_EXECUTABLE "$<TARGET_FILE:protoc>")

if(NOT DEFINED PROTOBUF_GENERATE_CPP_APPEND_PATH)
set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
endif()

function(PROTOBUF_GENERATE_CPP SRCS HDRS)
if(NOT ARGN)
message(SEND_ERROR "Error: PROTOBUF_GENERATE_CPP() called without any proto files")
return()
endif()

if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()

if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
endif()

if(DEFINED Protobuf_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()

set(${SRCS})
set(${HDRS})
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)

list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")

add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc"
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h"
COMMAND ${Protobuf_PROTOC_EXECUTABLE}
ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
COMMENT "Running C++ protocol buffer compiler on ${FIL}"
VERBATIM )
endforeach()

set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()
endif()

if(OS_FREEBSD AND SANITIZE STREQUAL "address")
@@ -102,6 +40,7 @@ if(OS_FREEBSD AND SANITIZE STREQUAL "address")
endif()
endif()

include (${ClickHouse_SOURCE_DIR}/cmake/protobuf_generate_cpp.cmake)
endif()

message(STATUS "Using protobuf=${USE_PROTOBUF}: ${Protobuf_INCLUDE_DIR} : ${Protobuf_LIBRARY} : ${Protobuf_PROTOC_EXECUTABLE}")
cmake/protobuf_generate_cpp.cmake (new file, 171 lines)
@@ -0,0 +1,171 @@
# This file declares functions adding custom commands for generating C++ files from *.proto files:
# function (protobuf_generate_cpp SRCS HDRS)
# function (protobuf_generate_grpc_cpp SRCS HDRS)

if (NOT USE_PROTOBUF)
    message (WARNING "Could not use protobuf_generate_cpp() without the protobuf library")
    return()
endif()

if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE)
    set (PROTOBUF_PROTOC_EXECUTABLE "$<TARGET_FILE:protoc>")
endif()

if (NOT DEFINED GRPC_CPP_PLUGIN_EXECUTABLE)
    set (GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)
endif()

if (NOT DEFINED PROTOBUF_GENERATE_CPP_APPEND_PATH)
    set (PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
endif()

function(protobuf_generate_cpp_impl SRCS HDRS MODES OUTPUT_FILE_EXTS PLUGIN)
    if(NOT ARGN)
        message(SEND_ERROR "Error: protobuf_generate_cpp() called without any proto files")
        return()
    endif()

    if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
        # Create an include path for each file specified
        foreach(FIL ${ARGN})
            get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
            get_filename_component(ABS_PATH ${ABS_FIL} PATH)
            list(FIND protobuf_include_path ${ABS_PATH} _contains_already)
            if(${_contains_already} EQUAL -1)
                list(APPEND protobuf_include_path -I ${ABS_PATH})
            endif()
        endforeach()
    else()
        set(protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
    endif()

    if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
        set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
    endif()

    if(DEFINED Protobuf_IMPORT_DIRS)
        foreach(DIR ${Protobuf_IMPORT_DIRS})
            get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
            list(FIND protobuf_include_path ${ABS_PATH} _contains_already)
            if(${_contains_already} EQUAL -1)
                list(APPEND protobuf_include_path -I ${ABS_PATH})
            endif()
        endforeach()
    endif()

    set (intermediate_dir ${CMAKE_CURRENT_BINARY_DIR}/intermediate)

    set (protoc_args)
    foreach (mode ${MODES})
        list (APPEND protoc_args "--${mode}_out" ${intermediate_dir})
    endforeach()
    if (PLUGIN)
        list (APPEND protoc_args "--plugin=${PLUGIN}")
    endif()

    set(srcs)
    set(hdrs)
    set(all_intermediate_outputs)

    foreach(input_name ${ARGN})
        get_filename_component(abs_name ${input_name} ABSOLUTE)
        get_filename_component(name ${input_name} NAME_WE)

        set (intermediate_outputs)
        foreach (ext ${OUTPUT_FILE_EXTS})
            set (filename "${name}${ext}")
            set (output "${CMAKE_CURRENT_BINARY_DIR}/${filename}")
            set (intermediate_output "${intermediate_dir}/${filename}")
            list (APPEND intermediate_outputs "${intermediate_output}")
            list (APPEND all_intermediate_outputs "${intermediate_output}")

            if (${ext} MATCHES ".*\\.h")
                list(APPEND hdrs "${output}")
            else()
                list(APPEND srcs "${output}")
            endif()

            add_custom_command(
                OUTPUT ${output}
                COMMAND ${CMAKE_COMMAND} -DPROTOBUF_GENERATE_CPP_SCRIPT_MODE=1 -DUSE_PROTOBUF=1 -DDIR=${CMAKE_CURRENT_BINARY_DIR} -DFILENAME=${filename} -DCOMPILER_ID=${CMAKE_CXX_COMPILER_ID} -P ${ClickHouse_SOURCE_DIR}/cmake/protobuf_generate_cpp.cmake
                DEPENDS ${intermediate_output})
        endforeach()

        add_custom_command(
            OUTPUT ${intermediate_outputs}
            COMMAND ${Protobuf_PROTOC_EXECUTABLE}
            ARGS ${protobuf_include_path} ${protoc_args} ${abs_name}
            DEPENDS ${abs_name} ${Protobuf_PROTOC_EXECUTABLE} ${PLUGIN}
            COMMENT "Running C++ protocol buffer compiler on ${name}"
            VERBATIM )
    endforeach()

    set_source_files_properties(${srcs} ${hdrs} ${all_intermediate_outputs} PROPERTIES GENERATED TRUE)
    set(${SRCS} ${srcs} PARENT_SCOPE)
    set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()

if (PROTOBUF_GENERATE_CPP_SCRIPT_MODE)
    set (output "${DIR}/${FILENAME}")
    set (intermediate_dir ${DIR}/intermediate)
    set (intermediate_output "${intermediate_dir}/${FILENAME}")

    if (COMPILER_ID STREQUAL "Clang")
        set (pragma_push "#pragma clang diagnostic push\n")
        set (pragma_pop "#pragma clang diagnostic pop\n")
        set (pragma_disable_warnings "#pragma clang diagnostic ignored \"-Weverything\"\n")
    elseif (COMPILER_ID MATCHES "GNU")
        set (pragma_push "#pragma GCC diagnostic push\n")
        set (pragma_pop "#pragma GCC diagnostic pop\n")
        set (pragma_disable_warnings "#pragma GCC diagnostic ignored \"-Wall\"\n"
            "#pragma GCC diagnostic ignored \"-Wextra\"\n"
            "#pragma GCC diagnostic ignored \"-Warray-bounds\"\n"
            "#pragma GCC diagnostic ignored \"-Wold-style-cast\"\n"
            "#pragma GCC diagnostic ignored \"-Wshadow\"\n"
            "#pragma GCC diagnostic ignored \"-Wsuggest-override\"\n"
            "#pragma GCC diagnostic ignored \"-Wcast-qual\"\n"
            "#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n")
    endif()

    if (${FILENAME} MATCHES ".*\\.h")
        file(WRITE "${output}"
            "#pragma once\n"
            ${pragma_push}
            ${pragma_disable_warnings}
            "#include \"${intermediate_output}\"\n"
            ${pragma_pop}
        )
    else()
        file(WRITE "${output}"
            ${pragma_disable_warnings}
            "#include \"${intermediate_output}\"\n"
        )
    endif()
    return()
endif()

function(protobuf_generate_cpp SRCS HDRS)
    set (modes cpp)
    set (output_file_exts ".pb.cc" ".pb.h")
    set (plugin)

    protobuf_generate_cpp_impl(srcs hdrs "${modes}" "${output_file_exts}" "${plugin}" ${ARGN})

    set(${SRCS} ${srcs} PARENT_SCOPE)
    set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()

function(protobuf_generate_grpc_cpp SRCS HDRS)
    set (modes cpp grpc)
    set (output_file_exts ".pb.cc" ".pb.h" ".grpc.pb.cc" ".grpc.pb.h")
    set (plugin "protoc-gen-grpc=${GRPC_CPP_PLUGIN_EXECUTABLE}")

    protobuf_generate_cpp_impl(srcs hdrs "${modes}" "${output_file_exts}" "${plugin}" ${ARGN})

    set(${SRCS} ${srcs} PARENT_SCOPE)
    set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()
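For context, this is how the two public functions would typically be consumed from a CMakeLists.txt; the proto file names and the demo_protos target are illustrative, not taken from this commit:

    include (${ClickHouse_SOURCE_DIR}/cmake/protobuf_generate_cpp.cmake)

    # Plain protobuf messages: generates Query.pb.cc / Query.pb.h.
    protobuf_generate_cpp(QUERY_PROTO_SRCS QUERY_PROTO_HDRS Query.proto)

    # Messages plus gRPC stubs: additionally generates *.grpc.pb.cc / *.grpc.pb.h.
    protobuf_generate_grpc_cpp(SERVICE_PROTO_SRCS SERVICE_PROTO_HDRS Service.proto)

    add_library(demo_protos
        ${QUERY_PROTO_SRCS} ${QUERY_PROTO_HDRS}
        ${SERVICE_PROTO_SRCS} ${SERVICE_PROTO_HDRS})
    # Generated wrappers land in the binary dir, so that is the include path.
    target_include_directories(demo_protos PUBLIC ${CMAKE_CURRENT_BINARY_DIR})

The point of the script-mode branch above is the wrapper files it writes: the real protoc output goes to an intermediate/ directory, and the file the build actually compiles is a thin wrapper that #includes it between warning-suppressing pragmas, so generated code builds cleanly under -Weverything or -Wall without relaxing project-wide flags.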
@@ -1,12 +1,4 @@
# These strings are autochanged by release_lib.sh:
set(VERSION_REVISION 54435)
set(VERSION_MAJOR 20)
set(VERSION_MINOR 5)
set(VERSION_PATCH 1)
set(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
set(VERSION_DESCRIBE v20.5.1.1-prestable)
set(VERSION_STRING 20.5.1.1)
# end of autochange
include(${CMAKE_SOURCE_DIR}/cmake/autogenerated_versions.txt)

set(VERSION_EXTRA "" CACHE STRING "")
set(VERSION_TWEAK "" CACHE STRING "")
cmake/yandex/ya.make.versions.inc (new file, 25 lines)
@@ -0,0 +1,25 @@
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/autogenerated_versions.txt)

# TODO: not sure if this is customizable per-binary
SET(VERSION_NAME "ClickHouse")

# TODO: not quite sure how to replace a dash with a space in ya.make
SET(VERSION_FULL "${VERSION_NAME}-${VERSION_STRING}")

CFLAGS (GLOBAL -DDBMS_NAME=\"ClickHouse\")
CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=${VERSION_PATCH})
CFLAGS (GLOBAL -DVERSION_FULL=\"\\\"${VERSION_FULL}\\\"\")
CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH})

# TODO: not supported yet; not sure if ya.make supports arithmetic.
CFLAGS (GLOBAL -DVERSION_INTEGER=0)

CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\")
CFLAGS (GLOBAL -DVERSION_OFFICIAL=\"-arcadia\")
CFLAGS (GLOBAL -DVERSION_REVISION=${VERSION_REVISION})
CFLAGS (GLOBAL -DVERSION_STRING=\"\\\"${VERSION_STRING}\\\"\")
contrib/CMakeLists.txt (vendored, 14 changed lines)
@@ -21,7 +21,6 @@ add_subdirectory (consistent-hashing-sumbur)
add_subdirectory (consistent-hashing)
add_subdirectory (croaring)
add_subdirectory (FastMemcpy)
add_subdirectory (grpc-cmake)
add_subdirectory (jemalloc-cmake)
add_subdirectory (libcpuid-cmake)
add_subdirectory (murmurhash)
@@ -260,20 +259,17 @@ if (USE_INTERNAL_BROTLI_LIBRARY)
endif ()

if (USE_INTERNAL_PROTOBUF_LIBRARY)
if (MAKE_STATIC_LIBRARIES)
set(protobuf_BUILD_SHARED_LIBS OFF CACHE INTERNAL "" FORCE)
else ()
set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
endif ()
set(protobuf_WITH_ZLIB 0 CACHE INTERNAL "" FORCE) # actually will use zlib, but skip find
set(protobuf_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)
add_subdirectory(protobuf/cmake)
add_subdirectory(protobuf-cmake)
endif ()

if (USE_INTERNAL_HDFS3_LIBRARY)
add_subdirectory(libhdfs3-cmake)
endif ()

if (USE_INTERNAL_GRPC_LIBRARY)
add_subdirectory(grpc-cmake)
endif ()

if (USE_INTERNAL_AWS_S3_LIBRARY OR USE_SENTRY)
set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
set (save_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
@@ -618,7 +618,12 @@ if (USE_INTERNAL_CCTZ)

add_library(tzdata STATIC ${TZ_OBJS})
set_target_properties(tzdata PROPERTIES LINKER_LANGUAGE C)
target_link_libraries(cctz -Wl,--whole-archive tzdata -Wl,--no-whole-archive) # whole-archive prevents symbols from being discarded
# whole-archive prevents symbols from being discarded for an unknown reason.
# CMake can reorder target_link_libraries arguments relative to other
# libraries on the linker command line. To avoid this, we hardcode the
# whole-archive flags and the library into a single string.
add_dependencies(cctz tzdata)
target_link_libraries(cctz INTERFACE "-Wl,--whole-archive $<TARGET_FILE:tzdata> -Wl,--no-whole-archive")
endif ()

else ()
@@ -4,6 +4,8 @@ cmake_minimum_required(VERSION 3.5.1)
set(GRPC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/grpc)
set(GRPC_INCLUDE_DIR ${GRPC_SOURCE_DIR}/include/)
set(GRPC_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/grpc)

if(UNIX)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
set(_gRPC_PLATFORM_LINUX ON)
@@ -14,72 +16,28 @@ if(UNIX)
endif()
endif()

set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_gRPC_C_CXX_FLAGS} -w")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_gRPC_C_CXX_FLAGS} -w")

set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf")

if(gRPC_BACKWARDS_COMPATIBILITY_MODE)
add_definitions(-DGPR_BACKWARDS_COMPATIBILITY_MODE)
if (_gRPC_PLATFORM_MAC)
# some C++11 constructs are not supported before OS X 10.9
set(CMAKE_OSX_DEPLOYMENT_TARGET 10.9)
endif()
if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} rt m pthread)
endif()

if (_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC)
set(_gRPC_CORE_NOSTDCXX_FLAGS -fno-exceptions -fno-rtti)
else()
set(_gRPC_CORE_NOSTDCXX_FLAGS "")
endif()

# address_sorting.cmake
include(${GRPC_SOURCE_DIR}/cmake/address_sorting.cmake)
set(_gRPC_ADDRESS_SORTING_INCLUDE_DIR "${GRPC_SOURCE_DIR}/third_party/address_sorting/include")
set(_gRPC_ADDRESS_SORTING_LIBRARIES address_sorting)

# cares.cmake
set(CARES_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/cares/cares)
set(CARES_BINARY_DIR ${GRPC_BINARY_DIR}/third_party/cares/cares)
set(CARES_SHARED OFF CACHE BOOL "disable shared library")
set(CARES_STATIC ON CACHE BOOL "link cares statically")
if(gRPC_BACKWARDS_COMPATIBILITY_MODE)
# See https://github.com/grpc/grpc/issues/17255
set(HAVE_LIBNSL OFF CACHE BOOL "avoid cares dependency on libnsl")
set(CARES_SHARED ${BUILD_SHARED_LIBS} CACHE BOOL "" FORCE)
if(BUILD_SHARED_LIBS)
set(CARES_STATIC OFF CACHE BOOL "" FORCE)
else()
set(CARES_STATIC ON CACHE BOOL "" FORCE)
endif()
set(_gRPC_CARES_LIBRARIES c-ares)
add_subdirectory(${CARES_ROOT_DIR} ${CARES_BINARY_DIR})
if(TARGET c-ares)
set(_gRPC_CARES_LIBRARIES c-ares)
endif()

# protobuf.cmake
set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../protobuf)

set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
if(NOT protobuf_WITH_ZLIB)
set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.")
endif()
set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")

set(_gRPC_PROTOBUF_LIBRARIES libprotobuf)
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc)
set(_gRPC_PROTOBUF_PROTOC protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
set(_gRPC_PROTOBUF_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}/src")

if(gRPC_INSTALL)
message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"")
set(gRPC_INSTALL FALSE)
endif()

# ssl.cmake
set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../ssl)
if(TARGET ssl)
set(_gRPC_SSL_LIBRARIES ssl)
set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
endif()

# upb.cmake
set(UPB_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/upb)
@@ -87,23 +45,20 @@ set(_gRPC_UPB_INCLUDE_DIR "${UPB_ROOT_DIR}")
set(_gRPC_UPB_GRPC_GENERATED_DIR "${GRPC_SOURCE_DIR}/src/core/ext/upb-generated")
set(_gRPC_UPB_LIBRARIES upb)

# protobuf.cmake
set(_gRPC_PROTOBUF_INCLUDE_DIR ${Protobuf_INCLUDE_DIR})
set(_gRPC_PROTOBUF_LIBRARIES ${Protobuf_LIBRARY})
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${Protobuf_PROTOC_LIBRARY})
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})

# ssl.cmake
set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
set(_gRPC_SSL_LIBRARIES ${OPENSSL_SSL_LIBRARY})

# zlib.cmake
set(ZLIB_ROOT_DIR ${GRPC_SOURCE_DIR}/../zlib-ng)
include_directories("${ZLIB_ROOT_DIR}")
## add_subdirectory(${ZLIB_ROOT_DIR} ${ZLIB_ROOT_DIR})
if(TARGET zlibstatic)
set(_gRPC_ZLIB_LIBRARIES zlibstatic)
set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}" "${GRPC_SOURCE_DIR}/third_party/zlib")
endif()
set(_gRPC_ZLIB_INCLUDE_DIR ${ZLIB_INCLUDE_DIR})
set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} rt m pthread)
endif()

add_library(address_sorting
    ${GRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting.c
@@ -112,7 +67,6 @@ add_library(address_sorting
)

target_include_directories(address_sorting
    PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${GRPC_SOURCE_DIR}/include>
    PRIVATE ${GRPC_SOURCE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
@@ -124,11 +78,11 @@ target_include_directories(address_sorting
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(address_sorting
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
    ${_gRPC_ALLTARGETS_LIBRARIES}
)

add_library(gpr
    ${GRPC_SOURCE_DIR}/src/core/lib/gpr/alloc.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/gpr/atm.cc
@@ -184,6 +138,7 @@ target_include_directories(gpr
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)

target_link_libraries(gpr
    ${_gRPC_ALLTARGETS_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
@@ -569,7 +524,6 @@ add_library(grpc
    ${GRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc
)

target_compile_options(grpc PUBLIC -fpermissive)

target_include_directories(grpc
    PUBLIC ${GRPC_INCLUDE_DIR}
@@ -583,8 +537,8 @@ target_include_directories(grpc
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)

target_link_libraries(grpc
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_SSL_LIBRARIES}
    ${_gRPC_ZLIB_LIBRARIES}
    ${_gRPC_CARES_LIBRARIES}
@@ -597,352 +551,6 @@ if (_gRPC_PLATFORM_MAC)
target_link_libraries(grpc "-framework CoreFoundation")
endif()

add_library(grpc_cronet
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/plugin_registry/grpc_cronet_plugin_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/init.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/avl/avl.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/handshaker.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/handshaker_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/compression_args.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression_gzip.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression_identity.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/http/parser.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epollex_linux.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor/mpmcqueue.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor/threadpool.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/is_epollexclusive_available.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resource_quota.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/time_averaged_stats.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_custom.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_uv.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/udp_server.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/json/json.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/json/json_string.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_intern.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/call.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/server.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/version.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/byte_stream.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/static_metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/status_metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/transport/cronet_api_dummy.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/transport/cronet_transport.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/context_list.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_table.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_map.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/alpn/alpn.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/message_compress_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/connector.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/health/health_check_client.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_connect_handshaker.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/parse_address.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/proxy_mapper.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver_registry.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver_result_parsing.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolving_lb_policy.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/server_address.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/decode.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/encode.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/msg.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/port.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/table.c
    ${GRPC_SOURCE_DIR}/third_party/upb/upb/upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/gogoproto/gogo.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c
    ${GRPC_SOURCE_DIR}/src/core/lib/http/httpcli_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/alts_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials_metadata.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/credentials_generic.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/google_default_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/iam/iam_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/json_token.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_verifier.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/local/local_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/ssl/ssl_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/spiffe_credentials.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/alts/alts_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_linux.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/local/local_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils_config.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/target_authority_table.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/init_secure.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/aes_gcm.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/gsec.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_counter.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_crypter.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_frame_protector.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/frame_handler.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_handshaker_client.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_shared_resource.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc
    ${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_utils.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c
    ${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
    ${GRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/authority.cc
    ${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_cache.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security.cc
    ${GRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc
)

target_include_directories(grpc_cronet
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${GRPC_SOURCE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
    PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
    PRIVATE ${_gRPC_UPB_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(grpc_cronet
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_SSL_LIBRARIES}
    ${_gRPC_ZLIB_LIBRARIES}
    ${_gRPC_CARES_LIBRARIES}
    ${_gRPC_ADDRESS_SORTING_LIBRARIES}
    ${_gRPC_ALLTARGETS_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
    gpr
)
if (_gRPC_PLATFORM_MAC)
    target_link_libraries(grpc_cronet "-framework CoreFoundation")
endif()

add_library(grpc_unsecure
    ${GRPC_SOURCE_DIR}/src/core/lib/surface/init.cc
@@ -1249,19 +857,18 @@ add_library(grpc_unsecure
    ${GRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
)

target_include_directories(grpc_unsecure
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${GRPC_SOURCE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
    PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
    PRIVATE ${_gRPC_UPB_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)

target_link_libraries(grpc_unsecure
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_ZLIB_LIBRARIES}
@@ -1271,10 +878,12 @@ target_link_libraries(grpc_unsecure
    ${_gRPC_PROTOBUF_LIBRARIES}
    gpr
)

if (_gRPC_PLATFORM_MAC)
    target_link_libraries(grpc_unsecure "-framework CoreFoundation")
endif()

add_library(grpc++
    ${GRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc
    ${GRPC_SOURCE_DIR}/src/cpp/client/secure_credentials.cc
@@ -1331,8 +940,6 @@ add_library(grpc++
    ${GRPC_SOURCE_DIR}/src/cpp/codegen/codegen_init.cc
)

target_compile_options(grpc++ PUBLIC -w)

target_include_directories(grpc++
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${GRPC_SOURCE_DIR}
@@ -1344,10 +951,9 @@ target_include_directories(grpc++
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTO_GENS_DIR}
)

target_link_libraries(grpc++
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_SSL_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
    ${_gRPC_ALLTARGETS_LIBRARIES}
@@ -1355,6 +961,7 @@ target_link_libraries(grpc++
    gpr
)

add_library(grpc++_unsecure
    ${GRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc
    ${GRPC_SOURCE_DIR}/src/cpp/common/insecure_create_auth_context.cc
@@ -1404,21 +1011,19 @@ add_library(grpc++_unsecure
    ${GRPC_SOURCE_DIR}/src/cpp/codegen/codegen_init.cc
)

target_compile_options(grpc++_unsecure PUBLIC -w)

target_include_directories(grpc++_unsecure
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${GRPC_SOURCE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
    PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
    PRIVATE ${_gRPC_UPB_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTO_GENS_DIR}
)

target_link_libraries(grpc++_unsecure
    ${_gRPC_BASELIB_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
@@ -1427,6 +1032,16 @@ target_link_libraries(grpc++_unsecure
    grpc_unsecure
)

if (_gRPC_SSL_INCLUDE_DIR AND _gRPC_SSL_LIBRARIES)
    add_library(libgrpc ALIAS grpc)
    add_library(libgrpc++ ALIAS grpc++)
else()
    add_library(libgrpc ALIAS grpc_unsecure)
    add_library(libgrpc++ ALIAS grpc++_unsecure)
endif()

add_library(grpc_plugin_support
    ${GRPC_SOURCE_DIR}/src/compiler/cpp_generator.cc
)
@@ -1436,23 +1051,22 @@ target_include_directories(grpc_plugin_support
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
    PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
    PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
    PRIVATE ${_gRPC_UPB_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTO_GENS_DIR}
)

target_link_libraries(grpc_plugin_support
    ${_gRPC_PROTOBUF_PROTOC_LIBRARIES}
    ${_gRPC_PROTOBUF_LIBRARIES}
    ${_gRPC_ALLTARGETS_LIBRARIES}
)

add_executable(grpc_cpp_plugin
    ${GRPC_SOURCE_DIR}/src/compiler/cpp_plugin.cc
)
@@ -1461,16 +1075,13 @@ target_include_directories(grpc_cpp_plugin
    PRIVATE ${GRPC_SOURCE_DIR}
    PUBLIC ${GRPC_INCLUDE_DIR}
    PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
    PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
    PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
    PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
    PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
    PRIVATE ${_gRPC_UPB_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
    PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
    PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
    PRIVATE ${_gRPC_PROTO_GENS_DIR}
)

target_link_libraries(grpc_cpp_plugin
@@ -1479,4 +1090,3 @@ target_link_libraries(grpc_cpp_plugin
    ${_gRPC_ALLTARGETS_LIBRARIES}
    grpc_plugin_support
)

@@ -17,7 +17,13 @@ if (ENABLE_JEMALLOC)
#
# By enabling percpu_arena, the number of arenas is limited to the number of
# CPUs, and hence this problem should go away.
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu" CACHE STRING "Change default configuration string of JEMalloc" )
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0")
# The CACHE variable is left empty to allow changing the defaults without
# having to purge the CMake cache.
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
    set (JEMALLOC_CONFIG_MALLOC_CONF "${JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE}")
endif()
message (STATUS "jemalloc malloc_conf: ${JEMALLOC_CONFIG_MALLOC_CONF}")
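A hedged sketch of how the override hook is meant to be used at configure time; only the JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE cache variable comes from this diff, and the option value shown is illustrative:

    # Replaces the baked-in malloc_conf without purging the CMake cache;
    # any configuration string jemalloc understands can be passed here.
    cmake .. -DJEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE="percpu_arena:percpu,dirty_decay_ms:5000"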
|
||||
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc")
|
||||
@ -55,6 +61,7 @@ if (ENABLE_JEMALLOC)
|
||||
${LIBRARY_DIR}/src/ticker.c
|
||||
${LIBRARY_DIR}/src/tsd.c
|
||||
${LIBRARY_DIR}/src/witness.c
|
||||
${LIBRARY_DIR}/src/safety_check.c
|
||||
)
|
||||
if (OS_DARWIN)
|
||||
list(APPEND SRCS ${LIBRARY_DIR}/src/zone.c)
|
||||
@ -89,6 +96,8 @@ if (ENABLE_JEMALLOC)
|
||||
endif ()
|
||||
|
||||
target_compile_options(jemalloc PRIVATE -Wno-redundant-decls)
|
||||
# for RTLD_NEXT
|
||||
target_compile_options(jemalloc PRIVATE -D_GNU_SOURCE)
|
||||
else ()
|
||||
find_library(LIBRARY_JEMALLOC jemalloc)
|
||||
find_path(INCLUDE_JEMALLOC jemalloc/jemalloc.h)
|
||||
|
@ -5,6 +5,12 @@
/* Defined if alloc_size attribute is supported. */
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE

/* Defined if format_arg(...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_ARG

/* Defined if format(gnu_printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF

/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

@ -4,12 +4,13 @@
#include <limits.h>
#include <strings.h>

#define JEMALLOC_VERSION "5.1.0-56-g41b7372eadee941b9164751b8d4963f915d3ceae"
#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
#define JEMALLOC_VERSION_MAJOR 5
#define JEMALLOC_VERSION_MINOR 1
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 56
#define JEMALLOC_VERSION_GID "41b7372eadee941b9164751b8d4963f915d3ceae"
#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756

#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
@ -68,6 +69,7 @@
#    define JEMALLOC_EXPORT __declspec(dllimport)
#  endif
# endif
# define JEMALLOC_FORMAT_ARG(i)
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
@ -95,6 +97,11 @@
#  ifndef JEMALLOC_EXPORT
#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
#  endif
#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
#    define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
#  else
#    define JEMALLOC_FORMAT_ARG(i)
#  endif
#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
@ -17,6 +17,7 @@
#  define je_malloc_stats_print malloc_stats_print
#  define je_malloc_usable_size malloc_usable_size
#  define je_mallocx mallocx
#  define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
#  define je_nallocx nallocx
#  define je_posix_memalign posix_memalign
#  define je_rallocx rallocx

@ -65,13 +65,13 @@ typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
    bool, unsigned);

struct extent_hooks_s {
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
};
@ -1,12 +1,6 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_


#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
@ -25,7 +19,7 @@
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
@ -41,7 +35,7 @@
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses. This may be less than the
@ -55,25 +49,13 @@

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1

/*
 * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
 * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */

/*
 * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
 * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
@ -85,19 +67,13 @@
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/*
 * Defined if OSSpin*() functions are available, as provided by Darwin, and
 * documented in the spinlock(3) manual page.
 */
/* #undef JEMALLOC_OSSPIN */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
#define JEMALLOC_HAVE_SECURE_GETENV
// #define JEMALLOC_HAVE_SECURE_GETENV

/*
 * Defined if issetugid(2) is available.
@ -243,6 +219,12 @@
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
@ -297,7 +279,7 @@
 * MADV_FREE, though typically with higher
 * system overhead.
 */
// #define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

@ -379,4 +361,7 @@
 */
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -21,7 +21,7 @@
#  include "jemalloc/jemalloc.h"
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#if defined(JEMALLOC_OSATOMIC)
#include <libkern/OSAtomic.h>
#endif

@ -161,7 +161,26 @@ static const bool config_log =
    false
#endif
    ;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/*
 * Are extra safety checks enabled; things like checking the size of sized
 * deallocations, double-frees, etc.
 */
static const bool config_opt_safety_checks =
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
    true
#elif defined(JEMALLOC_DEBUG)
    /*
     * This lets us only guard safety checks by one flag instead of two; fast
     * checks can guard solely by config_opt_safety_checks and run in debug mode
     * too.
     */
    true
#else
    false
#endif
    ;

#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif

@ -1,123 +0,0 @@
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>

#define JEMALLOC_VERSION "5.1.0-97-gcd2931ad9bbd78208565716ab102e86d858c2fff"
#define JEMALLOC_VERSION_MAJOR 5
#define JEMALLOC_VERSION_MINOR 1
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 97
#define JEMALLOC_VERSION_GID "cd2931ad9bbd78208565716ab102e86d858c2fff"
#define JEMALLOC_VERSION_GID_IDENT cd2931ad9bbd78208565716ab102e86d858c2fff

#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
#  define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
#else
#  define MALLOCX_ALIGN(a) \
     ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
     ffs((int)(((size_t)(a))>>32))+31))
#endif
#define MALLOCX_ZERO ((int)0x40)
/*
 * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
 * encodes MALLOCX_TCACHE_NONE.
 */
#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
 * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
 */
#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)

/*
 * Use as arena index in "arena.<i>.{purge,decay,dss}" and
 * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
 * definition is intentionally specified in raw decimal format to support
 * cpp-based string concatenation, e.g.
 *
 *   #define STRINGIFY_HELPER(x) #x
 *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
 *
 *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
 *       0);
 */
#define MALLCTL_ARENAS_ALL 4096
/*
 * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
 * destroyed arenas.
 */
#define MALLCTL_ARENAS_DESTROYED 4097

#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
#  define JEMALLOC_CXX_THROW throw()
#else
#  define JEMALLOC_CXX_THROW
#endif

#if defined(_MSC_VER)
#  define JEMALLOC_ATTR(s)
#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
#  define JEMALLOC_ALLOC_SIZE(s)
#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  ifndef JEMALLOC_EXPORT
#    ifdef DLLEXPORT
#      define JEMALLOC_EXPORT __declspec(dllexport)
#    else
#      define JEMALLOC_EXPORT __declspec(dllimport)
#    endif
#  endif
#  define JEMALLOC_FORMAT_PRINTF(s, i)
#  define JEMALLOC_NOINLINE __declspec(noinline)
#  ifdef __cplusplus
#    define JEMALLOC_NOTHROW __declspec(nothrow)
#  else
#    define JEMALLOC_NOTHROW
#  endif
#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
#  if _MSC_VER >= 1900 && !defined(__EDG__)
#    define JEMALLOC_ALLOCATOR __declspec(allocator)
#  else
#    define JEMALLOC_ALLOCATOR
#  endif
#elif defined(JEMALLOC_HAVE_ATTR)
#  define JEMALLOC_ATTR(s) __attribute__((s))
#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
#  else
#    define JEMALLOC_ALLOC_SIZE(s)
#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  endif
#  ifndef JEMALLOC_EXPORT
#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
#  endif
#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
#  else
#    define JEMALLOC_FORMAT_PRINTF(s, i)
#  endif
#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
#  define JEMALLOC_RESTRICT_RETURN
#  define JEMALLOC_ALLOCATOR
#else
#  define JEMALLOC_ATTR(s)
#  define JEMALLOC_ALIGNED(s)
#  define JEMALLOC_ALLOC_SIZE(s)
#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  define JEMALLOC_EXPORT
#  define JEMALLOC_FORMAT_PRINTF(s, i)
#  define JEMALLOC_NOINLINE
#  define JEMALLOC_NOTHROW
#  define JEMALLOC_SECTION(s)
#  define JEMALLOC_RESTRICT_RETURN
#  define JEMALLOC_ALLOCATOR
#endif
@ -1,77 +0,0 @@
typedef struct extent_hooks_s extent_hooks_t;

/*
 * void *
 * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
 *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
 */
typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
    bool *, unsigned);

/*
 * bool
 * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
    unsigned);

/*
 * void
 * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     bool committed, unsigned arena_ind);
 */
typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
    unsigned);

/*
 * bool
 * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    unsigned);

/*
 * bool
 * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
    size_t, unsigned);

/*
 * bool
 * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    unsigned);

/*
 * bool
 * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
 */
typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    bool, unsigned);

/*
 * bool
 * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
 *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
 */
typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
    bool, unsigned);

struct extent_hooks_s {
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
};
@ -1,11 +1,6 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
@ -24,7 +19,7 @@
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
@ -54,25 +49,13 @@

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1

/*
 * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
 * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */

/*
 * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
 * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
@ -84,20 +67,13 @@
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/*
 * Defined if OSSpin*() functions are available, as provided by Darwin, and
 * documented in the spinlock(3) manual page.
 */
/* #undef JEMALLOC_OSSPIN */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
// Don't want dependency on newer GLIBC
//#define JEMALLOC_HAVE_SECURE_GETENV
// #define JEMALLOC_HAVE_SECURE_GETENV

/*
 * Defined if issetugid(2) is available.
@ -160,6 +136,9 @@
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

@ -240,6 +219,12 @@
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
@ -252,6 +237,12 @@
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
@ -288,7 +279,7 @@
 * MADV_FREE, though typically with higher
 * system overhead.
 */
//#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

@ -370,4 +361,7 @@
 */
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */

@ -21,7 +21,7 @@
#  include "jemalloc/jemalloc.h"
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#if defined(JEMALLOC_OSATOMIC)
#include <libkern/OSAtomic.h>
#endif

@ -161,7 +161,26 @@ static const bool config_log =
    false
#endif
    ;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/*
 * Are extra safety checks enabled; things like checking the size of sized
 * deallocations, double-frees, etc.
 */
static const bool config_opt_safety_checks =
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
    true
#elif defined(JEMALLOC_DEBUG)
    /*
     * This lets us only guard safety checks by one flag instead of two; fast
     * checks can guard solely by config_opt_safety_checks and run in debug mode
     * too.
     */
    true
#else
    false
#endif
    ;

#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif

@ -1,43 +0,0 @@
/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
/* Defined if __attribute__((...)) syntax is supported. */
#define JEMALLOC_HAVE_ATTR

/* Defined if alloc_size attribute is supported. */
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE

/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#define JEMALLOC_OVERRIDE_MEMALIGN
#define JEMALLOC_OVERRIDE_VALLOC

/*
 * At least Linux omits the "const" in:
 *
 *   size_t malloc_usable_size(const void *ptr);
 *
 * Match the operating system's prototype.
 */
#define JEMALLOC_USABLE_SIZE_CONST

/*
 * If defined, specify throw() for the public function prototypes when compiling
 * with C++. The only justification for this is to match the prototypes that
 * glibc defines.
 */
#define JEMALLOC_USE_CXX_THROW

#ifdef _MSC_VER
#  ifdef _WIN64
#    define LG_SIZEOF_PTR_WIN 3
#  else
#    define LG_SIZEOF_PTR_WIN 2
#  endif
#endif

/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR 3
@ -1,66 +0,0 @@
/*
 * The je_ prefix on the following public symbol declarations is an artifact
 * of namespace management, and should be omitted in application code unless
 * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
 */
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
    const char *s);

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_malloc(size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
    JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
    JEMALLOC_CXX_THROW;

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
    int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
    size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
    int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
    int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
    JEMALLOC_ATTR(pure);

JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
    size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
    void (*write_cb)(void *, const char *), void *je_cbopaque,
    const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
    JEMALLOC_ATTR(malloc);
#endif
13
contrib/protobuf-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,13 @@
set(protobuf_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/protobuf)
set(protobuf_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/protobuf)

set(protobuf_WITH_ZLIB 0 CACHE INTERNAL "" FORCE) # actually will use zlib, but skip find
set(protobuf_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)

if (MAKE_STATIC_LIBRARIES)
    set(protobuf_BUILD_SHARED_LIBS OFF CACHE INTERNAL "" FORCE)
else ()
    set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
endif ()

add_subdirectory(${protobuf_SOURCE_DIR}/cmake ${protobuf_BINARY_DIR})
@ -21,7 +21,7 @@ RUN apt-get update \
        locales \
        ca-certificates \
        wget \
        tzata \
        tzdata \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
@ -36,7 +36,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry grpcio grpcio-tools
RUN pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry grpcio grpcio-tools cassandra-driver

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce
@ -317,7 +317,7 @@ function report
rm -r report ||:
mkdir report report/tmp ||:

rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv ||:
rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:

build_log_column_definitions

@ -434,7 +434,7 @@ create table wall_clock engine Memory as select *
from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');

create table slow_on_client_tsv engine File(TSV, 'report/slow-on-client.tsv') as
    select client, server, floor(client/server, 3) p, query_display_name
    select client, server, floor(client/server, 3) p, test, query_display_name
    from query_time left join query_display_names using (test, query_index)
    where p > 1.02 order by p desc;

@ -163,6 +163,8 @@ for query_index, q in enumerate(test_queries):
        prewarm_id = f'{query_prefix}.prewarm0'
        res = c.execute(q, query_id = prewarm_id)
        print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
    except KeyboardInterrupt:
        raise
    except:
        # If prewarm fails for some query -- skip it, and try to test the others.
        # This might happen if the new test introduces some function that the
@ -189,7 +189,7 @@ if args.report == 'main':
    slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
    error_tests += len(slow_on_client_rows)
    printSimpleTable('Slow on client',
        ['Client time, s', 'Server time, s', 'Ratio', 'Query'],
        ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
        slow_on_client_rows)

def print_changes():
@ -12,7 +12,7 @@ readonly CLICKHOUSE_PACKAGES_ARG="${2}"
CLICKHOUSE_SERVER_IMAGE="${3}"

if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
readonly CLICKHOUSE_PACKAGES_DIR="$(realpath ${2})" # or --no-rebuild
    readonly CLICKHOUSE_PACKAGES_DIR="$(realpath ${2})" # or --no-rebuild
fi


@ -26,19 +26,19 @@ fi
# TODO: optionally mount most recent clickhouse-test and queries directory from local machine

if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
docker build \
-f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
--target clickhouse-test-runner-base \
-t clickhouse-test-runner-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/test/stateless"
    docker build \
        -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
        --target clickhouse-test-runner-base \
        -t clickhouse-test-runner-base:preinstall \
        "${CLICKHOUSE_DOCKER_DIR}/test/stateless"

docker rm -f clickhouse-test-runner-installing-packages || true
docker run \
-v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse-test-runner-installing-packages \
clickhouse-test-runner-base:preinstall
docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
docker rm -f clickhouse-test-runner-installing-packages || true
    docker rm -f clickhouse-test-runner-installing-packages || true
    docker run \
        -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
        --name clickhouse-test-runner-installing-packages \
        clickhouse-test-runner-base:preinstall
    docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
    docker rm -f clickhouse-test-runner-installing-packages || true
fi

# # Create a bind-volume to the clickhouse-test script file
@ -47,38 +47,38 @@ fi

# Build server image (optional) from local packages
if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
    CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"

if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
docker build \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server-base \
-t clickhouse-server-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/server"
    if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
        docker build \
            -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
            --target clickhouse-server-base \
            -t clickhouse-server-base:preinstall \
            "${CLICKHOUSE_DOCKER_DIR}/server"

docker rm -f clickhouse_server_base_installing_server || true
docker run -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse_server_base_installing_server \
clickhouse-server-base:preinstall
docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall
        docker rm -f clickhouse_server_base_installing_server || true
        docker run -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
            --name clickhouse_server_base_installing_server \
            clickhouse-server-base:preinstall
        docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall

docker build \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server \
-t "${CLICKHOUSE_SERVER_IMAGE}" \
"${CLICKHOUSE_DOCKER_DIR}/server"
fi
        docker build \
            -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
            --target clickhouse-server \
            -t "${CLICKHOUSE_SERVER_IMAGE}" \
            "${CLICKHOUSE_DOCKER_DIR}/server"
    fi
fi

docker rm -f test-runner || true
docker-compose down
CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
create \
--build --force-recreate
    docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
        create \
        --build --force-recreate

CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
run \
--name test-runner \
test-runner
    docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
        run \
        --name test-runner \
        test-runner

@ -1,6 +1,6 @@
---
toc_priority: 62
toc_title: Overview of ClickHouse Architecture
toc_title: Architecture Overview
---

# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
@ -1,6 +1,6 @@
---
toc_priority: 63
toc_title: Browse Source Code
toc_priority: 71
toc_title: Source Code
---

# Browse ClickHouse Source Code {#browse-clickhouse-source-code}

@ -1,19 +1,17 @@
---
toc_priority: 61
toc_title: The Beginner ClickHouse Developer Instruction
toc_title: For Beginners
---

# The Beginner ClickHouse Developer Instruction

Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.

# If You Use Windows {#if-you-use-windows}

If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine, please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.

# If You Use a 32-bit System {#if-you-use-a-32-bit-system}

ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system before you continue reading.

# Creating a Repository on GitHub {#creating-a-repository-on-github}
## Creating a Repository on GitHub {#creating-a-repository-on-github}

To start working with the ClickHouse repository you will need a GitHub account.

@ -33,7 +31,7 @@ To do that in Ubuntu you would run in the command line terminal:
A brief manual on using Git can be found here: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf.
For a detailed manual on Git see https://git-scm.com/book/en/v2.

# Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}
## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}

Next, you need to download the source files onto your working machine. This is called “cloning a repository” because it creates a local copy of the repository on your working machine.

@ -77,7 +75,7 @@ You can also add original ClickHouse repo’s address to your local repository t

After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.

## Working with Submodules {#working-with-submodules}
### Working with Submodules {#working-with-submodules}

Working with submodules in git can be painful. The following commands will help to manage them:

@ -107,7 +105,7 @@ The next commands would help you to reset all submodules to the initial state (!
    git submodule foreach git submodule foreach git reset --hard
    git submodule foreach git submodule foreach git clean -xfd

# Build System {#build-system}
## Build System {#build-system}

ClickHouse uses CMake and Ninja for building.

@ -127,11 +125,11 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta

Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.

# Optional External Libraries {#optional-external-libraries}
## Optional External Libraries {#optional-external-libraries}

ClickHouse uses several external libraries for building. None of them need to be installed separately, as they are built together with ClickHouse from the sources located in the submodules. You can check the list in `contrib`.

# C++ Compiler {#c-compiler}
## C++ Compiler {#c-compiler}

Compilers GCC starting from version 9 and Clang version 8 or above are supported for building ClickHouse.

@ -145,7 +143,7 @@ Mac OS X build is supported only for Clang. Just run `brew install llvm`

If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended.

# The Building Process {#the-building-process}
## The Building Process {#the-building-process}

Now that you are ready to build ClickHouse, we recommend you to create a separate directory `build` inside `ClickHouse` that will contain all of the build artefacts:

@ -202,7 +200,7 @@ Upon successful build you get an executable file `ClickHouse/<build_dir>/program

    ls -l programs/clickhouse

# Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse}
## Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse}

To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run:

@ -229,7 +227,7 @@ You can also run your custom-built ClickHouse binary with the config file from t

    sudo service clickhouse-server stop
    sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml

# IDE (Integrated Development Environment) {#ide-integrated-development-environment}
## IDE (Integrated Development Environment) {#ide-integrated-development-environment}

If you do not know which IDE to use, we recommend that you use CLion. CLion is commercial software, but it offers a 30-day free trial period. It is also free of charge for students. CLion can be used both on Linux and on Mac OS X.

@ -239,7 +237,7 @@ As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate

Just in case, it is worth mentioning that CLion creates the `build` path on its own, selects `debug` for the build type on its own, uses the version of CMake that is defined in CLion rather than the one installed by you, and finally runs build tasks with `make` instead of `ninja`. This is normal behaviour, just keep it in mind to avoid confusion.

# Writing Code {#writing-code}
## Writing Code {#writing-code}

The description of ClickHouse architecture can be found here: https://clickhouse.tech/docs/en/development/architecture/

@ -249,7 +247,7 @@ Writing tests: https://clickhouse.tech/docs/en/development/tests/

List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md

# Test Data {#test-data}
## Test Data {#test-data}

Developing ClickHouse often requires loading realistic datasets. This is particularly important for performance testing. We have a specially prepared set of anonymized data from Yandex.Metrica. It additionally requires some 3GB of free disk space. Note that this data is not required to accomplish most of the development tasks.

@ -272,7 +270,7 @@ Developing ClickHouse often requires loading realistic datasets. It is particula

    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv
    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv

# Creating Pull Request {#creating-pull-request}
## Creating Pull Request {#creating-pull-request}

Navigate to your fork repository in GitHub’s UI. If you have been developing in a branch, you need to select that branch. There will be a “Pull request” button located on the screen. In essence, this means “create a request for accepting my changes into the main repository”.

@ -37,6 +37,8 @@ The supported formats are:
| [Avro](#data-format-avro) | ✔ | ✔ |
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [Arrow](#data-format-arrow) | ✔ | ✔ |
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✗ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
@ -985,9 +987,9 @@ See also [how to read/write length-delimited protobuf messages in popular langua

## Avro {#data-format-avro}

[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
[Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.

ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
ClickHouse Avro format supports reading and writing [Avro data files](https://avro.apache.org/docs/current/spec.html#Object+Container+Files).

### Data Types Matching {#data_types-matching}

@ -1009,7 +1011,7 @@ The table below shows supported data types and how they match ClickHouse [data t
| `long (timestamp-millis)` \* | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
| `long (timestamp-micros)` \* | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |

\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)
\* [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)

Unsupported Avro data types: `record` (non-root), `map`

@ -1095,7 +1097,7 @@ SELECT * FROM topic1_stream;

## Parquet {#data-format-parquet}

[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
[Apache Parquet](https://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.

### Data Types Matching {#data_types-matching-2}

@ -1141,6 +1143,16 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_

To exchange data with Hadoop, you can use [HDFS table engine](../engines/table-engines/integrations/hdfs.md).

## Arrow {#data-format-arrow}

[Apache Arrow](https://arrow.apache.org/) comes with two built-in columnar storage formats. ClickHouse supports read and write operations for these formats.

`Arrow` is Apache Arrow's "file mode" format. It is designed for in-memory random access.

## ArrowStream {#data-format-arrow-stream}

`ArrowStream` is Apache Arrow's "stream mode" format. It is designed for in-memory stream processing.

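For example (a minimal sketch; the table name `some_table` is hypothetical), data can be exported in either format with the usual `FORMAT` clause:

```sql
SELECT * FROM some_table FORMAT Arrow;
SELECT * FROM some_table FORMAT ArrowStream;
```
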
## ORC {#data-format-orc}

[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.

@ -25,6 +25,7 @@ toc_title: Integrations
- Message queues
    - [Kafka](https://kafka.apache.org)
        - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/))
        - [stream-loader-clickhouse](https://github.com/adform/stream-loader)
- Stream processing
    - [Flink](https://flink.apache.org)
        - [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)

@ -41,6 +41,7 @@ toc_title: Adopters
| [Integros](https://integros.com){.favicon} | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
| [Kodiak Data](https://www.kodiakdata.com/){.favicon} | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
| [Kontur](https://kontur.ru){.favicon} | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
| [Lawrence Berkeley National Laboratory](https://www.lbl.gov){.favicon} | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
| [LifeStreet](https://lifestreet.com/){.favicon} | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
| [Mail.ru Cloud Solutions](https://mcs.mail.ru/){.favicon} | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
| [MessageBird](https://www.messagebird.com){.favicon} | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |

@ -586,11 +586,11 @@ If the table doesn’t exist, ClickHouse will create it. If the structure of the
</query_log>
```

## query\_thread\_log {#server_configuration_parameters-query-thread-log}
## query\_thread\_log {#server_configuration_parameters-query_thread_log}

Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.

Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).

Use the following parameters to configure logging:

@ -404,6 +404,65 @@ Possible values:

Default value: 0.

## partial_merge_join_optimizations {#partial_merge_join_optimizations}

Disables optimizations in the partial merge join algorithm for [JOIN](../../sql-reference/statements/select/join.md) queries.

By default, this setting enables improvements that could lead to wrong results. If you see suspicious results in your queries, disable the optimizations with this setting. The optimizations can differ between versions of the ClickHouse server.

Possible values:

- 0 — Optimizations disabled.
- 1 — Optimizations enabled.

Default value: 1.

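For example, a minimal sketch (the table names `t1` and `t2` are hypothetical): force the partial merge algorithm for a session and turn its optimizations off if the results look suspicious:

```sql
SET join_algorithm = 'partial_merge';     -- use the partial merge JOIN algorithm
SET partial_merge_join_optimizations = 0; -- disable its optimizations for this session

SELECT t1.id, t2.value
FROM t1
INNER JOIN t2 ON t1.id = t2.id;
```
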
## partial_merge_join_rows_in_right_blocks {#partial_merge_join_rows_in_right_blocks}

Limits the size of right-hand join data blocks in the partial merge join algorithm for [JOIN](../../sql-reference/statements/select/join.md) queries.

The ClickHouse server:

1. Splits right-hand join data into blocks with up to the specified number of rows.
2. Indexes each block with its minimum and maximum values.
3. Unloads prepared blocks to disk if possible.

Possible values:

- Any positive integer. Recommended range of values: [1000, 100000].

Default value: 65536.

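As an illustrative sketch (the value is an example, not a recommendation), smaller right-hand blocks trade speed for lower memory usage:

```sql
SET partial_merge_join_rows_in_right_blocks = 10000;
```
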
## any_join_distinct_right_table_keys {#any_join_distinct_right_table_keys}

Enables legacy ClickHouse server behavior in `ANY INNER|LEFT JOIN` operations.

!!! note "Warning"
    Use this setting only for the purpose of backward compatibility if your use cases depend on the legacy `JOIN` behavior.

When the legacy behavior is enabled:

- Results of `t1 ANY LEFT JOIN t2` and `t2 ANY RIGHT JOIN t1` operations are not equal, because ClickHouse uses the logic with many-to-one left-to-right table keys mapping.
- Results of `ANY INNER JOIN` operations contain all rows from the left table, like the `SEMI LEFT JOIN` operations do.

When the legacy behavior is disabled:

- Results of `t1 ANY LEFT JOIN t2` and `t2 ANY RIGHT JOIN t1` operations are equal, because ClickHouse uses the logic which provides one-to-many keys mapping in `ANY RIGHT JOIN` operations.
- Results of `ANY INNER JOIN` operations contain one row per key from both the left and right tables.

Possible values:

- 0 — Legacy behavior is disabled.
- 1 — Legacy behavior is enabled.

Default value: 0.

See also:

- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)

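A minimal sketch of switching the legacy behavior on for one session (the tables `t1` and `t2` are hypothetical):

```sql
SET any_join_distinct_right_table_keys = 1;
-- With the legacy logic, these two queries may return different results:
SELECT * FROM t1 ANY LEFT JOIN t2 USING (id);
SELECT * FROM t2 ANY RIGHT JOIN t1 USING (id);
```
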
## max\_block\_size {#setting-max_block_size}

In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block remain negligible, but not too large, so that a query with a LIMIT completes quickly after processing the first block. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.

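An illustrative sketch (the value and the table name are examples only): smaller blocks can make short `LIMIT` queries return faster:

```sql
SET max_block_size = 4096;
SELECT * FROM some_table LIMIT 10;
```
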
@ -539,7 +598,7 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'

Setting up query threads logging.

Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.
Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.

Example:

@ -1265,4 +1324,63 @@ Possible values:

Default value: 16.

## low_cardinality_max_dictionary_size {#low_cardinality_max_dictionary_size}

Sets the maximum size in rows of a shared global dictionary for the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type that can be written to a storage file system. This setting prevents issues with RAM in case of unlimited dictionary growth. All the data that can't be encoded due to the maximum dictionary size limitation is written by ClickHouse in an ordinary way.

Possible values:

- Any positive integer.

Default value: 8192.

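For example (the value is illustrative): raise the limit when a `LowCardinality` column legitimately holds many distinct values:

```sql
SET low_cardinality_max_dictionary_size = 65536;
```
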
## low_cardinality_use_single_dictionary_for_part {#low_cardinality_use_single_dictionary_for_part}

Turns the use of a single dictionary for a data part on or off.

By default, the ClickHouse server monitors the size of dictionaries, and if a dictionary overflows, the server starts to write the next one. To prohibit creating several dictionaries, set `low_cardinality_use_single_dictionary_for_part = 1`.

Possible values:

- 1 — Creating several dictionaries for the data part is prohibited.
- 0 — Creating several dictionaries for the data part is not prohibited.

Default value: 0.

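For example:

```sql
-- Prohibit creating several dictionaries per data part:
SET low_cardinality_use_single_dictionary_for_part = 1;
```
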
## low_cardinality_allow_in_native_format {#low_cardinality_allow_in_native_format}

Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type with the [Native](../../interfaces/formats.md#native) format.

If usage of `LowCardinality` is restricted, the ClickHouse server converts `LowCardinality` columns to ordinary ones for `SELECT` queries, and converts ordinary columns to `LowCardinality` columns for `INSERT` queries.

This setting is required mainly for third-party clients which don't support the `LowCardinality` data type.

Possible values:

- 1 — Usage of `LowCardinality` is not restricted.
- 0 — Usage of `LowCardinality` is restricted.

Default value: 1.

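For example: make `SELECT` queries return plain types for a client that lacks `LowCardinality` support:

```sql
SET low_cardinality_allow_in_native_format = 0;
```
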
## allow_suspicious_low_cardinality_types {#allow_suspicious_low_cardinality_types}

Allows or restricts using [LowCardinality](../../sql-reference/data-types/lowcardinality.md) with data types with a fixed size of 8 bytes or less: numeric data types and `FixedString(8_bytes_or_less)`.

For small fixed values, using `LowCardinality` is usually inefficient, because ClickHouse stores a numeric index for each row. As a result:

- Disk space usage can rise.
- RAM consumption can be higher, depending on the dictionary size.
- Some functions can work slower due to extra encoding/decoding operations.

Merge times in [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)-engine tables can grow for all the reasons described above.

Possible values:

- 1 — Usage of `LowCardinality` is not restricted.
- 0 — Usage of `LowCardinality` is restricted.

Default value: 0.
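A minimal sketch of the effect: wrapping a small fixed-size type in `LowCardinality` is rejected with an exception unless the setting is enabled (the table and column names are hypothetical):

``` sql
SET allow_suspicious_low_cardinality_types = 1;

CREATE TABLE lc_small
(
    `n` LowCardinality(UInt8)  -- 1-byte type; rejected while the setting is 0
)
ENGINE = MergeTree()
ORDER BY n;
```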
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

@ -5,7 +5,7 @@ toc_title: System Tables

# System Tables {#system-tables}
## Introduction

## Introduction {#system-tables-introduction}

System tables provide information about:

@ -18,9 +18,12 @@ System tables:

- Available only for reading data.
- Can't be dropped or altered, but can be detached.

The `metric_log`, `query_log`, `query_thread_log`, `trace_log` system tables store data in a storage filesystem. Other system tables store their data in RAM. ClickHouse server creates such system tables at the start.

Most system tables store their data in RAM. A ClickHouse server creates such system tables at the start.

### Sources of System Metrics

The [metric_log](#system_tables-metric_log), [query_log](#system_tables-query_log), [query_thread_log](#system_tables-query_thread_log), [trace_log](#system_tables-trace_log) system tables store data in a storage filesystem. You can alter them or remove them from a disk manually. If you remove one of these tables from a disk, the ClickHouse server creates the table again at the time of the next recording. The storage period for these tables is not limited, and the ClickHouse server doesn't delete their data automatically. You need to arrange the removal of outdated logs yourself. For example, you can use [TTL](../sql-reference/statements/alter.md#manipulations-with-table-ttl) settings to remove outdated log records.
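For instance, assuming the default table layout, a TTL expression could drop records older than 30 days (a sketch; adjust the column and interval to your setup):

``` sql
ALTER TABLE system.query_log MODIFY TTL event_date + INTERVAL 30 DAY;
```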
### Sources of System Metrics {#system-tables-sources-of-system-metrics}

For collecting system metrics ClickHouse server uses:

@ -587,97 +590,150 @@ Columns:
- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
- `source_line` (UInt64) — Source line from which the logging was done.

## system.query\_log {#system_tables-query_log}

## system.query_log {#system_tables-query_log}

Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.

Contains information about executed queries, for example, start time, duration of processing, error messages.

!!! note "Note"
    The table doesn't contain input data for `INSERT` queries.

ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

You can change the settings of query logging in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.

To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.

You can disable query logging by setting [log_queries = 0](settings/settings.md#settings-log-queries). We don't recommend turning off logging because the information in this table is important for solving issues.

The flushing period of logs is set in the `flush_interval_milliseconds` parameter of the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing of logs, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.

ClickHouse doesn't delete logs from the table automatically. See [Introduction](#system-tables-introduction) for more details.

The `system.query_log` table registers two kinds of queries:

1. Initial queries that were run directly by the client.
2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.

Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query, as illustrated below:

1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created.
2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created.
3. If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created.
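For example, the distribution of these event types for today can be checked directly (a minimal illustrative query):

``` sql
SELECT type, count()
FROM system.query_log
WHERE event_date = today()
GROUP BY type;
```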
Columns:

- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` (Date) — Query starting date.
- `event_time` (DateTime) — Query starting time.
- `query_start_time` (DateTime) — Start time of query execution.
- `query_duration_ms` (UInt64) — Duration of query execution.
- `read_rows` (UInt64) — Number of read rows.
- `read_bytes` (UInt64) — Number of read bytes.
- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` (UInt64) — Number of rows in the result.
- `result_bytes` (UInt64) — Number of bytes in the result.
- `memory_usage` (UInt64) — Memory consumption by the query.
- `query` (String) — Query string.
- `exception` (String) — Exception message.
- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string if the query completed successfully.
- `is_initial_query` (UInt8) — Query type. Possible values:
- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participating in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participating in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store the query result.
- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `query` ([String](../sql-reference/data-types/string.md)) — Query string.
- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message.
- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string if the query completed successfully.
- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query for distributed query execution.
- `user` (String) — Name of the user who initiated the current query.
- `query_id` (String) — ID of the query.
- `address` (IPv6) — IP address that was used to make the query.
- `port` (UInt16) — The client port that was used to make the query.
- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
- `initial_address` (IPv6) — IP address that the parent query was launched from.
- `initial_port` (UInt16) — The client port that was used to make the parent query.
- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
    - 0 — Query was initiated by another query as part of distributed query execution.
- `user` ([String](../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
- `initial_user` ([String](../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md).
- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
- `os_user` ([String](../sql-reference/data-types/string.md)) — Operating system username who runs [clickhouse-client](../interfaces/cli.md).
- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../sql-reference/data-types/string.md)) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — `GET` method was used.
    - 2 — `POST` method was used.
- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. Descriptions of them can be found in the [system.events](#system_tables-events) table.
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure different metrics. Descriptions of them can be found in the [system.events](#system_tables-events) table.
- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
Each query creates one or two rows in the `query_log` table, depending on the status of the query:

**Example**

1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
2. If an error occurred during query processing, two events with types 1 and 4 are created.
3. If an error occurred before launching the query, a single event with type 3 is created.

``` sql
SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
```
By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

``` text
Row 1:
──────
type: QueryStart
event_date: 2020-05-13
event_time: 2020-05-13 14:02:28
query_start_time: 2020-05-13 14:02:28
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 0
result_bytes: 0
memory_usage: 0
query: SELECT 1
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
address: ::ffff:127.0.0.1
port: 57720
initial_user: default
initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address: ::ffff:127.0.0.1
initial_port: 57720
interface: 1
os_user: bayonet
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse client
client_revision: 54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method: 0
http_user_agent:
quota_key:
revision: 54434
thread_ids: []
ProfileEvents.Names: []
ProfileEvents.Values: []
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','random','1','10000000000']
```

When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
**See Also**

!!! note "Note"
    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.

- [system.query_thread_log](#system_tables-query_thread_log) — This table contains information about each query execution thread.

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).
## system.query\_thread\_log {#system_tables-query-thread-log}

## system.query_thread_log {#system_tables-query_thread_log}

The table contains information about each query execution thread.

ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
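As a quick illustration, the parameter can be enabled for the current session:

``` sql
SET log_query_threads = 1;
```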
@ -729,14 +785,14 @@ Columns:

- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events)
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

!!! note "Note"
    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.

You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `partition_by` parameter).

You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `partition_by` parameter).

## system.trace\_log {#system_tables-trace_log}
@ -116,7 +116,7 @@ Check:

Check:

- The [tcp\_port\_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) setting.
- Settings for [SSL sertificates](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
- Settings for [SSL certificates](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).

Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`.
@ -1,5 +1,5 @@

---
toc_priority: 52
toc_priority: 53
toc_title: AggregateFunction
---

@ -1,5 +1,5 @@

---
toc_priority: 51
toc_priority: 52
toc_title: Array(T)
---

59
docs/en/sql-reference/data-types/lowcardinality.md
Normal file
@ -0,0 +1,59 @@
---
toc_priority: 51
toc_title: LowCardinality
---

# LowCardinality Data Type {#lowcardinality-data-type}

Changes the internal representation of other data types to be dictionary-encoded.

## Syntax {#lowcardinality-syntax}

```sql
LowCardinality(data_type)
```

**Parameters**

- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md), and numbers except [Decimal](decimal.md). `LowCardinality` is not efficient for some data types, see the [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types) setting description.

## Description {#lowcardinality-dscr}

`LowCardinality` is a superstructure that changes a data storage method and rules of data processing. ClickHouse applies [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) to `LowCardinality` columns. Operating with dictionary-encoded data significantly increases performance of [SELECT](../statements/select/index.md) queries for many applications.

The efficiency of using the `LowCardinality` data type depends on data diversity. If a dictionary contains fewer than 10,000 distinct values, then ClickHouse mostly shows higher efficiency of data reading and storing. If a dictionary contains more than 100,000 distinct values, then ClickHouse can perform worse in comparison with using ordinary data types.

Consider using `LowCardinality` instead of [Enum](enum.md) when working with strings. `LowCardinality` provides more flexibility in use and often reveals the same or higher efficiency.

## Example

Create a table with a `LowCardinality` column:

```sql
CREATE TABLE lc_t
(
    `id` UInt16,
    `strings` LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY id
```
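The column is then used like an ordinary `String` column; for example (the values are illustrative):

```sql
INSERT INTO lc_t VALUES (1, 'one'), (2, 'two'), (3, 'two');

SELECT strings, count()
FROM lc_t
GROUP BY strings;
```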
## Related Settings and Functions

Settings:

- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)

Functions:

- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)

## See Also

- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
@ -1,5 +1,5 @@

---
toc_priority: 54
toc_priority: 55
toc_title: Nullable
---

@ -1,5 +1,5 @@

---
toc_priority: 53
toc_priority: 54
toc_title: Tuple(T1, T2, ...)
---
@ -516,7 +516,7 @@ Result:

**See Also**

- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/)
- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
- [RFC 1123](https://tools.ietf.org/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)

@ -529,4 +529,129 @@ Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it r

Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
## toLowCardinality {#tolowcardinality}

Converts the input parameter to the [LowCardinality](../data-types/lowcardinality.md) version of the same data type.

To convert data from the `LowCardinality` data type, use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.

**Syntax**

```sql
toLowCardinality(expr)
```

**Parameters**

- `expr` — [Expression](../syntax.md#syntax-expressions) resulting in one of the [supported data types](../data-types/index.md#data_types).

**Returned values**

- Result of `expr`.

Type: `LowCardinality(expr_result_type)`
**Example**

Query:

```sql
SELECT toLowCardinality('1')
```

Result:

```text
┌─toLowCardinality('1')─┐
│ 1                     │
└───────────────────────┘
```
## toUnixTimestamp64Milli
## toUnixTimestamp64Micro
## toUnixTimestamp64Nano

Converts a `DateTime64` to an `Int64` value with fixed sub-second precision. The input value is scaled up or down appropriately depending on its precision. Note that the output value is a timestamp in UTC, not in the timezone of the `DateTime64`.

**Syntax**

``` sql
toUnixTimestamp64Milli(value)
```

**Parameters**

- `value` — DateTime64 value with any precision.

**Returned value**

- `value` converted to the `Int64` data type.
**Examples**

Query:

``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Milli(dt64)
```

Result:

``` text
┌─toUnixTimestamp64Milli(dt64)─┐
│                1568650812345 │
└──────────────────────────────┘
```

``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Nano(dt64)
```

Result:

``` text
┌─toUnixTimestamp64Nano(dt64)─┐
│         1568650812345678000 │
└─────────────────────────────┘
```
## fromUnixTimestamp64Milli
## fromUnixTimestamp64Micro
## fromUnixTimestamp64Nano

Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and an optional timezone. The input value is scaled up or down appropriately depending on its precision. Note that the input value is treated as a UTC timestamp, not a timestamp in the given (or implicit) timezone.

**Syntax**

``` sql
fromUnixTimestamp64Milli(value [, timezone])
```

**Parameters**

- `value` — `Int64` value with any precision.
- `timezone` — `String` (optional) timezone name of the result.

**Returned value**

- `value` converted to the `DateTime64` data type.
**Examples**

``` sql
WITH CAST(1234567891011, 'Int64') AS i64
SELECT fromUnixTimestamp64Milli(i64, 'UTC')
```

``` text
┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐
│              2009-02-13 23:31:31.011 │
└──────────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) <!--hide-->
@ -117,6 +117,10 @@ Returns the part of the domain that includes top-level subdomains up to the “f

For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.

### port(URL[, default_port = 0]) {#port}

Returns the port or `default_port` if there is no port in the URL (or in case of validation error).
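For example (a sketch of the behavior described above):

``` sql
SELECT port('https://clickhouse.tech:9440/docs');    -- returns 9440
SELECT port('https://clickhouse.tech/docs', 443);    -- no port in the URL: returns the default_port, 443
```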
### path {#path}

Returns the path. Example: `/top/news.html` The path does not include the query string.
@ -44,6 +44,8 @@ Modifies how matching by "join keys" is performed

!!! note "Note"
    The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.

Also the behavior of the ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
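A sketch of overriding the default strictness for a session:

``` sql
SET join_default_strictness = 'ALL';  -- a bare JOIN is now treated as ALL JOIN
```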
### ASOF JOIN Usage
@ -574,11 +574,11 @@ ClickHouse will check the `min_part_size` and `min_part_size_rat

</query_log>
```

## query\_thread\_log {#server_configuration_parameters-query-thread-log}

## query\_thread\_log {#server_configuration_parameters-query_thread_log}

Configures logging of the threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.

Queries are logged not to a separate file, but to the [system.query\_thread\_log](../../operations/server-configuration-parameters/settings.md#system_tables-query-thread-log) system table. You can change the name of this table in the `table` parameter (see below).

Queries are logged not to a separate file, but to the [system.query\_thread\_log](../../operations/server-configuration-parameters/settings.md#system_tables-query_thread_log) system table. You can change the name of this table in the `table` parameter (see below).

The following parameters are used to configure logging:
@ -536,7 +536,7 @@ log_queries=1

Enables logging of information about query execution threads.

The log of information about threads executing queries received with this setting is written according to the rules of the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.

The log of information about threads executing queries received with this setting is written according to the rules of the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.

Example:
@ -1,4 +1,7 @@

# System Tables {#sistemnye-tablitsy}

# System Tables {#system-tables}

## Introduction {#system-tables-introduction}

System tables are used to implement part of the system's functionality, and also provide access to information about how the system works.
You can't delete a system table (though you can perform DETACH).
@ -544,182 +547,156 @@ CurrentMetric_ReplicatedChecks: 0

- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
- `source_line` (UInt64) — Source line from which the logging was done.

## system.query\_log {#system_tables-query_log}

## system.query_log {#system_tables-query_log}

Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.

Contains information about executed queries, for example, start time, duration of processing, error messages.

!!! note "Note"
    The table doesn't contain input data for `INSERT` queries.

ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter is set. The parameter defines the logging rules, such as the logging interval or the name of the table the queries will be logged in.

Logging settings can be changed in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.

To enable logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md#settings) section.

Logging can be disabled with the [log_queries = 0](settings/settings.md#settings-log-queries) setting. If possible, don't turn off logging, because the information in this table is important for solving issues.

The period for flushing logs to the table is set by the `flush_interval_milliseconds` parameter in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration section. To force flushing of logs from the memory buffer into the table, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.

ClickHouse doesn't delete logs from the table automatically. See [Introduction](#system-tables-introduction).

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration (the `partition_by` parameter).

If the table is deleted manually, it is automatically created again on the fly. All logs up to the moment of deletion are removed.

The `system.query_log` table contains information about two kinds of queries:

1. Initial queries that were executed directly by the client.
2. Child queries initiated by other queries (for distributed query execution). For child queries, information about the initial query is contained in the `initial_*` columns.

Depending on the status (the `type` column), each query creates one or two rows in the `query_log` table:

1. If the query completed successfully, two events of types `QueryStart` and `QueryFinish` are created.
2. If an error occurred during query processing, two events with types `QueryStart` and `ExceptionWhileProcessing` are created.
3. If the error occurred before the query was launched, a single event with type `ExceptionBeforeStart` is created.

Columns:
- `type` (`Enum8`) — Type of the event that occurred when executing the query. Values:
- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of the event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query processing.
    - `'ExceptionWhileProcessing' = 4` — Exception during query processing.
- `event_date` (Date) — Query starting date.
- `event_time` (DateTime) — Query starting time.
- `query_start_time` (DateTime) — Start time of query processing.
- `query_duration_ms` (UInt64) — Duration of query processing.
- `read_rows` (UInt64) — Number of rows read.
- `read_bytes` (UInt64) — Number of bytes read.
- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` (UInt64) — For `INSERT` queries, the volume of written data in bytes. For other queries, the column value is 0.
- `result_rows` (UInt64) — Number of rows in the result.
- `result_bytes` (UInt64) — Volume of the result in bytes.
- `memory_usage` (UInt64) — RAM consumption by the query.
- `query` (String) — Query text.
- `exception` (String) — Exception message, if the query ended with an exception.
- `stack_trace` (String) — Stack trace (a list of functions called in sequence before the error). An empty string if the query completed successfully.
- `is_initial_query` (UInt8) — Kind of query. Possible values:
- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query processing.
- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participating in the query. Includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its own `read_rows` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participating in the query. Includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its own `read_bytes` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the volume of written data in bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Volume of RAM in bytes used to store the query result.
- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM consumption by the query.
- `query` ([String](../sql-reference/data-types/string.md)) — Query text.
- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message, if the query ended with an exception.
- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Exception code.
- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string if the query completed successfully.
- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Kind of query. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query in a distributed query.
- `user` (String) — User who launched the current query.
- `query_id` (String) — Query ID.
- `address` (IPv6) — IP address the query came from.
- `port` (UInt16) — Client port the query was made from.
- `initial_user` (String) — User who launched the initial query (for distributed queries).
- `initial_query_id` (String) — ID of the parent query.
- `initial_address` (IPv6) — IP address the parent query came from.
- `initial_port` (UInt16) — Client port the parent query was made from.
- `interface` (UInt8) — Interface the query came from. Possible values:
    - 0 — Query was initiated by another query while executing a distributed query.
- `user` ([String](../sql-reference/data-types/string.md)) — User who launched the current query.
- `query_id` ([String](../sql-reference/data-types/string.md)) — Query ID.
- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address the query came from.
- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — Client port the query was made from.
- `initial_user` ([String](../sql-reference/data-types/string.md)) — User who launched the initial query (for distributed queries).
- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the parent query.
- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address the parent query came from.
- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — Client port the parent query was made from.
- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — Interface the query came from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` (String) — Name of the OS user who launched [clickhouse-client](../interfaces/cli.md).
- `client_hostname` (String) — Hostname of the machine that [clickhouse-client](../interfaces/cli.md) or another TCP client connected from.
- `client_name` (String) — [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_revision` (UInt32) — Revision of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` (UInt32) — Major version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` (UInt32) — Minor version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` (UInt32) — Patch version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
- `os_user` ([String](../sql-reference/data-types/string.md)) — Name of the operating system user who launched [clickhouse-client](../interfaces/cli.md).
- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the machine that [clickhouse-client](../interfaces/cli.md) or another TCP client connected from.
- `client_name` ([String](../sql-reference/data-types/string.md)) — [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `http_method` ([UInt8](../sql-reference/data-types/int-uint.md)) — HTTP method that initiated the query. Possible values:
    - 0 — Query launched from the TCP interface.
    - 1 — `GET`.
    - 2 — `POST`.
- `http_user_agent` (String) — `UserAgent` HTTP header.
- `quota_key` (String) — The "quota key" from the [quotas](quotas.md) settings (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `thread_numbers` (Array(UInt32)) — Numbers of threads participating in query processing.
- `ProfileEvents.Names` (Array(String)) — Counters that measure various metrics. Their descriptions can be found in the [system.events](#system_tables-events) table.
- `ProfileEvents.Values` (Array(UInt64)) — Values of the metrics listed in the `ProfileEvents.Names` column.
- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging of setting changes, set the `log_query_settings` parameter to 1.
- `Settings.Values` (Array(String)) — Values of the settings listed in the `Settings.Names` column.
- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — `UserAgent` HTTP header.
- `quota_key` ([String](../sql-reference/data-types/string.md)) — The "quota key" from the [quotas](quotas.md) settings (see `keyed`).
- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Numbers of threads participating in query processing.
- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure various metrics. Their descriptions can be found in the [system.events](#system_tables-events) table.
- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Values of the metrics listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging of setting changes, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of the settings listed in the `Settings.Names` column.
Each query creates one or two rows in the `query_log` table, depending on the status of the query:

**Example**

1. If the query completed successfully, two events of types 1 and 2 are created (see the `type` column).
2. If an error occurred during query processing, two events with types 1 and 4 are created.
3. If the error occurred before the query was launched, a single event with type 3 is created.

``` sql
SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
```

By default, rows are added to the log table at intervals of 7.5 seconds. The interval can be set in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter (see the `flush_interval_milliseconds` parameter). To force flushing of logs from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

``` text
Row 1:
──────
type: QueryStart
event_date: 2020-05-13
event_time: 2020-05-13 14:02:28
query_start_time: 2020-05-13 14:02:28
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 0
result_bytes: 0
memory_usage: 0
query: SELECT 1
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
address: ::ffff:127.0.0.1
port: 57720
initial_user: default
initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address: ::ffff:127.0.0.1
initial_port: 57720
interface: 1
os_user: bayonet
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse client
client_revision: 54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method: 0
http_user_agent:
quota_key:
revision: 54434
thread_ids: []
ProfileEvents.Names: []
ProfileEvents.Values: []
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','random','1','10000000000']
```

If the table is deleted manually, it is recreated automatically on the fly. All logs up to the moment of deletion are removed.

**See Also**

!!! note "Note"
    The storage period for logs is unlimited. Logs aren't deleted from the table automatically. You need to arrange the removal of outdated logs yourself.

- [system.query_thread_log](#system_tables-query_thread_log) — this table contains information about each thread of executed queries.

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration (the `partition_by` parameter).
## system.query\_log {#system_tables-query_log}
|
||||
|
||||
Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.
|
||||
|
||||
!!! note "Note"
|
||||
The table doesn’t contain input data for `INSERT` queries.
|
||||
|
||||
ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
|
||||
|
||||
To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
|
||||
|
||||
The `system.query_log` table registers two kinds of queries:
|
||||
|
||||
1. Initial queries that were run directly by the client.
|
||||
2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.
|
||||
|
||||
Columns:
|
||||
|
||||
- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` (Date) — Query starting date.
- `event_time` (DateTime) — Query starting time.
- `query_start_time` (DateTime) — Start time of query execution.
- `query_duration_ms` (UInt64) — Duration of query execution in milliseconds.
- `read_rows` (UInt64) — Number of read rows.
- `read_bytes` (UInt64) — Number of read bytes.
- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` (UInt64) — Number of rows in the result.
- `result_bytes` (UInt64) — Number of bytes in the result.
- `memory_usage` (UInt64) — Memory consumption by the query.
- `query` (String) — Query string.
- `exception` (String) — Exception message.
- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string if the query completed successfully.
- `is_initial_query` (UInt8) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query for distributed query execution.
- `user` (String) — Name of the user who initiated the current query.
- `query_id` (String) — ID of the query.
- `address` (IPv6) — IP address that was used to make the query.
- `port` (UInt16) — The client port that was used to make the query.
- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
- `initial_address` (IPv6) — IP address that the parent query was launched from.
- `initial_port` (UInt16) — The client port that was used to make the parent query.
- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` (String) — OS username of the user who runs [clickhouse-client](../interfaces/cli.md).
- `client_hostname` (String) — Hostname of the client machine where [clickhouse-client](../interfaces/cli.md) or another TCP client runs.
- `client_name` (String) — Name of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_revision` (UInt32) — Revision of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` (UInt32) — Major version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` (UInt32) — Minor version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — The `GET` method was used.
    - 2 — The `POST` method was used.
- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
- `quota_key` (String) — The "quota key" specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `thread_numbers` (Array(UInt32)) — Numbers of the threads that participate in the query execution.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Values of the metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` (Array(String)) — Values of the settings that are listed in the `Settings.Names` column.
Each query creates one or two rows in the `query_log` table, depending on the status of the query:

1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
2. If an error occurred during query processing, two events with types 1 and 4 are created.
3. If an error occurred before launching the query, a single event with type 3 is created.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
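For example, a quick way to look at the most recent entries right after forcing a flush (a minimal sketch; the exact rows you get depend on your workload):

``` sql
-- Flush the in-memory buffer into the table, then inspect the latest events.
SYSTEM FLUSH LOGS;

SELECT type, event_time, query_duration_ms, read_rows, query
FROM system.query_log
WHERE event_date = today()
ORDER BY event_time DESC
LIMIT 5;
```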
When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

!!! note "Note"
    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).

## system.query_thread_log {#system_tables-query_thread_log}
Contains information about each query execution thread.

ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter is set. This parameter defines the logging rules, such as the logging interval and the name of the table that queries are logged to.

To enable logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md#settings) section.
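A minimal sketch of turning thread logging on for a session and reading the result back (it assumes the server-side `query_thread_log` section is already configured):

``` sql
SET log_query_threads = 1;

SELECT 1;           -- any query; its threads get logged

SYSTEM FLUSH LOGS;  -- force the buffer into the table

SELECT *
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 5;
```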
@ -770,16 +747,16 @@ ClickHouse создаёт таблицу только в том случае, к

- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Values of the metrics for this thread that are listed in the `ProfileEvents.Names` column.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

When the table is deleted manually, it is automatically recreated on the fly. Note that all logs existing at the moment of deletion will be removed.

!!! note "Note"
    The storage period for logs is unlimited. Logs are not deleted from the table automatically. You need to organize the removal of outdated logs yourself.

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) configuration (the `partition_by` parameter).

## system.query_thread_log {#system_tables-query_thread_log}

Contains information about each query execution thread.
@ -73,10 +73,10 @@ Upd. Включено для системных таблиц.

Q1. Committed, but there is technical debt, which is being fixed now.
Done. No, not done: there is still technical debt there.

### 1.9. + Using TTL to thin out data {#ispolzovanie-ttl-dlia-prorezhivaniia-dannykh}

Will be done by Nikolai Sorokin, HSE and Yandex.
Upd. There is a pull request. Upd. Done.

Currently the user can define an expression on a table that determines how long the data is stored. Usually this expression is defined relative to the value of a date column, for example: delete the data after three months. https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/#table_engine-mergetree-ttl

@ -124,7 +124,7 @@ Q2.
Upd. Oleg will only do the HDFS part.
Upd. The implementation on top of S3 works at the PoC level.

### 1.13. + Speeding up queries with FINAL {#uskorenie-zaprosov-s-final}

Requires 2.1. Done by [Nikolai Kochetov](https://github.com/KochetovNicolai). Needed for Yandex.Metrica. Q2.
Upd: PR [#10463](https://github.com/ClickHouse/ClickHouse/pull/10463)
@ -203,10 +203,11 @@ Upd. SharedContext вынесен из Context.

Upd. In the queue. Ivan Lezhankin.

### 2.9. + Logging in format style {#loggirovnie-v-format-stile}

Done by [Ivan Lezhankin](https://github.com/abyss7). Low priority.
[#6049](https://github.com/ClickHouse/ClickHouse/issues/6049#issuecomment-570836998)

Done.

### 2.10. Requesting slices from tables instead of columns {#zaprashivat-u-tablits-ne-stolbtsy-a-srezy}

@ -282,24 +283,20 @@ Upd. Сейчас обсуждается, как сделать другую з

### 4.3. Limiting the number of simultaneous downloads from replicas {#ogranichenie-chisla-odnovremennykh-skachivanii-s-replik}

Dmitry Grigoriev, HSE.
Initially done by Oleg Alekseenkov, but the solution is not ready yet, although there is not much left to finish.

### 4.4. Limiting network bandwidth during replication {#ogranichenie-setevoi-polosy-pri-replikatsii}

Needed for Metrica.

### 4.5. Ability to resume transferring a data part during replication after a failure {#vozmozhnost-prodolzhit-peredachu-kuska-dannykh-pri-replikatsii-posle-sboia}

Dmitry Grigoriev, HSE.

### 4.6. P2P transfer for GLOBAL subqueries {#p2p-peredacha-dlia-global-podzaprosov}

### 4.7. Lazy loading of sets for IN and JOIN using k/v requests {#lenivaia-zagruzka-mnozhestv-dlia-in-i-join-s-pomoshchiu-kv-zaprosov}

### 4.8. Splitting the background pool for fetch and merge {#razdelit-background-pool-dlia-fetch-i-merge}

Dmitry Grigoriev, HSE.
In the queue. Fix the problem that a recovering replica stops merging. Partially compensated by 4.3.


@ -329,6 +326,7 @@ Upd. Сделано. Эффективность работы под вопрос
Metrica, BK, Market and Altinity already use versions newer than the LTS ones.
Upd. A second LTS version has appeared: 20.3.


## 6. Instrumentation {#instrumentirovanie}

### 6.1. + Fixes for the sampling query profiler {#ispravleniia-sempliruiushchego-profailera-zaprosov}
@ -425,11 +423,11 @@ Upd. Рассмотрели все проверки подряд.

UBSan is enabled in functional tests but not in integration tests. Requires 7.7.

### 7.11. + Enabling \*San in unit tests {#vkliuchenie-san-v-unit-testakh}

We have few unit tests compared to functional tests, and their use is not mandatory. But they are still important, and there is no reason not to run them under all kinds of sanitizers.

Ilya Yatsishin. Done.

### 7.12. Showing test coverage of new code in PRs {#pokazyvat-testovoe-pokrytie-novogo-koda-v-pr}

@ -528,6 +526,8 @@ Upd. Есть сборки, [пример](https://clickhouse-builds.s3.yandex.n

Darya Petrova, UrFU.

Working prototype: https://pulls-dashboard-demo.herokuapp.com/dashboard/ClickHouse/ClickHouse

A large number of developers work on ClickHouse at the same time, submitting their changes as pull requests. When there are many unmerged pull requests, organizing the work becomes difficult: it is unclear which pull request to look at first.

It is proposed to implement a simple single-page web application that displays the list of pull requests with the following information:
@ -627,6 +627,7 @@ Upd. Готово (все директории кроме contrib).
### 7.32. Obfuscation of production queries {#obfuskatsiia-prodakshen-zaprosov}

Roman Ilgovsky. Needed for Yandex.Metrica.
There is a pull request, almost done: https://github.com/ClickHouse/ClickHouse/pull/10973

Given an SQL query, it is required to output the structure of the tables on which this query will be executed and to fill those tables with random data in such a way that the result of the query depends on the choice of the data subset.

@ -1397,11 +1398,11 @@ Constraints позволяют задать выражение, истиннос
Vasily Morozov, Arslan Gumerov, Albert Kidrachev, HSE.
Last year another person started this task but did not make sufficient progress.

+ 1. Top sort optimization.

ClickHouse uses a suboptimal variant of top sort: the top N records are extracted from each block, and then all the blocks are merged. But extracting the top N records from each subsequent block is pointless if we already know that fewer of them will enter the global top N. Naturally, a variation on a priority queue (heap) should be implemented, with fast skipping of whole blocks if not a single row would enter the accumulated top.
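For reference, this optimization targets ordinary top-N queries of the following shape (the table and column names here are made up for illustration):

``` sql
-- A typical top-N query: each processed block only needs to contribute
-- rows that can still displace something in the current global top 10.
SELECT user_id, revenue
FROM hits
ORDER BY revenue DESC
LIMIT 10;
```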
+ 2. Recursive variant of sorting by tuples.

For sorting by tuples, ordinary sorting with a comparator is used, which makes virtual calls to `IColumn::compareAt` in a loop over the tuple elements. This is suboptimal, both because of the short loop over a number of elements unknown at compile time and because of the virtual calls. To avoid virtual calls, there is the `IColumn::getPermutation` method. It is used when sorting by a single column. There is an idea that something similar can be applied to sorting by a tuple: for example, add an `updatePermutation` method that takes offset and limit arguments and refines the permutation within the ranges of values where the previous column had equal values.
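The queries affected are the ones that order by several columns at once, for example (an illustrative table again):

``` sql
-- Sorting by a tuple: the proposed updatePermutation would only refine
-- the order by user_id inside ranges where event_date values are equal.
SELECT event_date, user_id
FROM hits
ORDER BY event_date, user_id
LIMIT 100;
```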
@ -21,7 +21,7 @@ mkdocs-htmlproofer-plugin==0.0.3
mkdocs-macros-plugin==0.4.9
nltk==3.5
nose==1.3.7
protobuf==3.12.2
numpy==1.18.4
Pygments==2.5.2
pymdown-extensions==7.1
@ -1,101 +1,146 @@
---
toc_priority: 48
toc_title: "Access Rights and Account Management"
---

# Access Rights and Account Management {#access-rights}

ClickHouse supports access control management based on the [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) approach.

ClickHouse access entities:
- [User accounts](#user-account-management)
- [Roles](#role-management)
- [Row policies](#row-policy-management)
- [Settings profiles](#settings-profiles-management)
- [Quotas](#quotas-management)

You can configure access entities in the following ways:

- Via an SQL-driven workflow.

    You need to [enable](#enabling-access-control) this functionality.

- Via the server [configuration files](configuration-files.md) `users.xml` and `config.xml`.

We recommend using the SQL-driven workflow. Both of the configuration methods work simultaneously, so if you manage permissions and accounts via server configuration files, you can smoothly switch to the SQL-driven workflow.

!!! note "Warning"
    You cannot manage the same access entity with both configuration methods at the same time.
## Usage {#access-control-usage}

By default, ClickHouse provides the `default` account, which has all the privileges but cannot use the SQL-driven access control and account management. The `default` account is used mainly in cases when the user name is not defined, for example, at login from a client or in distributed queries. In distributed query processing, the default account is used if the configuration of the server or the cluster does not specify the [user and password](../engines/table-engines/special/distributed.md).

If you are just starting to use ClickHouse, consider the following scenario:

1. [Enable](#enabling-access-control) the SQL-driven access control and account management for the `default` user.
2. Log in as the `default` user and create all the required users. Do not forget to create an administrator account (`GRANT ALL ON *.* WITH GRANT OPTION TO admin_user_account`); a sketch is given right after this list.
3. [Restrict](settings/permissions-for-queries.md#permissions_for_queries) the permissions of the `default` user and disable the SQL-driven access control and account management for it.
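A minimal sketch of step 2, with a hypothetical `admin` account name and a placeholder password (adapt both to your deployment):

``` sql
-- Create an administrator account and grant it everything,
-- including the right to grant privileges further.
CREATE USER admin IDENTIFIED WITH sha256_password BY 'choose_a_strong_password';
GRANT ALL ON *.* TO admin WITH GRANT OPTION;
```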
### Properties of the Current Solution {#access-control-properties}

- You can grant permissions for databases and tables even if they do not exist.
- If a table is deleted, the privileges that correspond to this table are not revoked. This means that if you later create a new table with the same name, all the privileges remain valid. To revoke the privileges that correspond to the deleted table, execute, for example, the `REVOKE ALL PRIVILEGES ON db.table FROM ALL` query.
- There are no lifetime settings for privileges.

## User Account {#user-account-management}

A user account is an access entity that allows to authorize someone in ClickHouse. A user account contains:

- Identification information.
- [Privileges](../sql-reference/statements/grant.md#grant-privileges) that define the scope of queries the user can execute.
- Hosts that are allowed to connect to the ClickHouse server.
- Assigned and default roles.
- Settings with their constraints that apply by default at the user's login.
- Assigned settings profiles.

Privileges can be granted to a user account by the [GRANT](../sql-reference/statements/grant.md) query or by assigning [roles](#role-management). To revoke privileges from a user, use the [REVOKE](../sql-reference/statements/revoke.md) query. To list the privileges of a user, use the [SHOW GRANTS](../sql-reference/statements/show.md#show-grants-statement) statement.

Management queries:

- [CREATE USER](../sql-reference/statements/create.md#create-user-statement)
- [ALTER USER](../sql-reference/statements/alter.md#alter-user-statement)
- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)

### Settings Applying {#access-control-settings-applying}

Settings can be configured in different ways: for a user account, in its assigned roles and in settings profiles. At a user's login, if a setting is configured in multiple access entities, its value and constraints are applied as follows (from higher to lower priority):

1. User account settings.
2. The settings of the default roles of the user account. If a setting is configured in several roles, the order of applying is undefined.
3. The settings from settings profiles assigned to the user or to its default roles. If a setting is configured in several profiles, the order of applying is undefined.
4. Settings applied to the entire server by default or from the [default profile](server-configuration-parameters/settings.md#default-profile).
## Role {#role-management}

A role is a container for access entities that can be granted to a user account.

A role contains:

- [Privileges](../sql-reference/statements/grant.md#grant-privileges)
- Settings and constraints
- A list of assigned roles

Management queries:

- [CREATE ROLE](../sql-reference/statements/create.md#create-role-statement)
- [ALTER ROLE](../sql-reference/statements/alter.md#alter-role-statement)
- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
- [SET DEFAULT ROLE](../sql-reference/statements/misc.md#set-default-role-statement)
- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement)

Privileges can be granted to a role by the [GRANT](../sql-reference/statements/grant.md) query. To revoke privileges from a role, use the [REVOKE](../sql-reference/statements/revoke.md) query.
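For example, a sketch with made-up names (a role that can read one database, granted to one user):

``` sql
CREATE ROLE accountant;
GRANT SELECT ON accounting.* TO accountant;
GRANT accountant TO john;
```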
## Row Policy {#row-policy-management}

A row policy is a filter that defines which rows of a table are available to a user or a role. A row policy contains filters for one particular table, as well as the list of roles and/or users that should use this policy.

Management queries:

- [CREATE ROW POLICY](../sql-reference/statements/create.md#create-row-policy-statement)
- [ALTER ROW POLICY](../sql-reference/statements/alter.md#alter-row-policy-statement)
- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
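A sketch with illustrative table, policy and role names:

``` sql
-- Only rows with region = 'EU' stay visible to the accountant role.
CREATE ROW POLICY eu_only ON accounting.payments
    FOR SELECT USING region = 'EU' TO accountant;
```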
## Settings Profile {#settings-profiles-management}

A settings profile is a collection of [settings](settings/index.md). A settings profile contains settings and constraints, as well as the list of roles and/or users this profile applies to.

Management queries:

- [CREATE SETTINGS PROFILE](../sql-reference/statements/create.md#create-settings-profile-statement)
- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter.md#alter-settings-profile-statement)
- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
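A sketch with an illustrative profile name and limit:

``` sql
-- Cap memory usage for everyone holding the accountant role.
CREATE SETTINGS PROFILE memory_cap
    SETTINGS max_memory_usage = 10000000000 TO accountant;
```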
## Quota {#quotas-management}

A quota limits resource usage. See [Quotas](quotas.md).

A quota contains a set of limits for some durations, as well as the list of roles and/or users that should use this quota.

Management queries:

- [CREATE QUOTA](../sql-reference/statements/create.md#create-quota-statement)
- [ALTER QUOTA](../sql-reference/statements/alter.md#alter-quota-statement)
- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
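A sketch with an illustrative quota name and limits; check the `CREATE QUOTA` reference of your server version for the exact syntax:

``` sql
-- Allow at most 100 queries per hour for the accountant role.
CREATE QUOTA hourly_queries
    FOR INTERVAL 1 HOUR MAX queries = 100 TO accountant;
```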
## Enabling SQL-driven Access Control and Account Management {#enabling-access-control}

- Set up a directory for configuration storage.

    ClickHouse stores access entity configurations in the folder set in the [access_control_path](server-configuration-parameters/settings.md#access_control_path) server configuration parameter.

- Enable SQL-driven access control and account management for at least one user account.

    By default, SQL-driven access control and account management is disabled for all users. You need to configure at least one user in the `users.xml` configuration file and set the value of the [access_management](settings/settings-users.md#access_management-user-setting) setting to 1.

[Original article](https://clickhouse.tech/docs/en/operations/access_rights/) <!--hide-->
@ -1,3 +1,13 @@
---
toc_priority: 43
toc_title: "Operations"
---

# Operations {#operations}

The ClickHouse operations manual consists of the following main sections:

- Installation requirements

[Original article](https://clickhouse.tech/docs/en/operations/) <!--hide-->
@ -1,3 +1,8 @@
---
toc_priority: 45
toc_title: "Monitoring"
---

# Monitoring {#jian-kong}

You can monitor:

@ -13,7 +18,7 @@ ClickHouse 本身不会去监控硬件资源的状态。

- Load and temperature of processors.

    You can use [dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html) or other tools.

- Utilization of disk storage, RAM and network.

@ -21,17 +26,17 @@ ClickHouse 本身不会去监控硬件资源的状态。

The ClickHouse server has embedded instruments for self-state monitoring.

To track server events, watch the server logs. See the [logger](server-configuration-parameters/settings.md#server_configuration_parameters-logger) section of the configuration file.

Metrics collected by ClickHouse:

- Various metrics of how the server uses computational resources.
- Common statistics of query processing.

You can find all the metrics in the [system.metrics](system-tables.md#system_tables-metrics), [system.events](system-tables.md#system_tables-events) and [system.asynchronous_metrics](system-tables.md#system_tables-asynchronous_metrics) system tables.
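For example, a quick look at the current values from any client:

``` sql
-- Instantaneous server state.
SELECT metric, value FROM system.metrics LIMIT 10;

-- Cumulative event counters, e.g. everything query-related.
SELECT event, value FROM system.events WHERE event LIKE '%Quer%';
```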
You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) of the configuration file. Before configuring the export of metrics, set up Graphite by following its [official guide](https://graphite.readthedocs.io/en/latest/install.html).

Additionally, you can monitor server availability through the HTTP API. Send an HTTP GET request to `/ping`. If the server is available, it responds with `200 OK`.

To monitor servers in a cluster configuration, set the [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and not lagging behind the other replicas. If a replica lags, the request returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the size of the backlog.
@ -1,8 +1,6 @@
---
toc_priority: 44
toc_title: "Requirements"
---

# Requirements {#requirements}
@ -13,20 +11,20 @@ toc_title: "\u8981\u6C42"

ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently in configurations with a large number of cores but a lower clock rate than in configurations with fewer cores and a higher clock rate. For example, 16 cores at 2600 MHz are preferable to 8 cores at 3600 MHz.

Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical workload.

## RAM {#ram}

We recommend using a minimum of 4GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.

The required volume of RAM depends on:

- The complexity of queries.
- The amount of data processed in queries.

To calculate the required volume of RAM, estimate the size of the temporary data for [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) and the other operations you use.

ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) for details.
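A sketch of enabling spilling for a heavy aggregation; the thresholds and the `hits` table are illustrative, tune them to your hardware:

``` sql
-- Spill GROUP BY state to disk once it exceeds ~10 GB,
-- keeping the overall memory limit above that threshold.
SET max_bytes_before_external_group_by = 10000000000;
SET max_memory_usage = 20000000000;

SELECT user_id, count() FROM hits GROUP BY user_id;
```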
## Swap File {#swap-file}

@ -42,20 +40,20 @@ ClickHouse可以使用外部存储器来存储临时数据。 看 [在外部存

    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.

- The data compression ratio.

    To estimate the data compression ratio, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the stored table. For example, clickstream data is usually compressed by 6-10 times.
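One way to read the actual ratio off a running server (the `system.parts` columns used below are standard):

``` sql
-- Compressed vs. uncompressed size per table, active parts only.
SELECT
    table,
    formatReadableSize(sum(data_compressed_bytes)) AS compressed,
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed,
    round(sum(data_uncompressed_bytes) / sum(data_compressed_bytes), 2) AS ratio
FROM system.parts
WHERE active
GROUP BY table;
```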
To calculate the final volume of data to be stored, apply the compression ratio to the estimated data volume. If you plan to store data in several replicas, multiply the estimated volume by the number of replicas.

## Network {#network}

If possible, use networks of 10G class or higher.

Network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Network speed also affects the replication process.

## Software {#software}

ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed on the system.

ClickHouse can also work in other operating system families. See the [Getting Started](../getting-started/index.md) section of the documentation for details.
@ -1,23 +1,21 @@
---
toc_priority: 46
toc_title: "Troubleshooting"
---

# Troubleshooting {#troubleshooting}

- [Installation](#troubleshooting-installation-errors)
- [Connecting to the server](#troubleshooting-accepts-no-connections)
- [Query processing](#troubleshooting-does-not-process-queries)
- [Efficiency of query processing](#troubleshooting-too-slow)

## Installation {#troubleshooting-installation-errors}

### You Cannot Get Deb Packages from the ClickHouse Repository with Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}

- Check the firewall settings.
- If you cannot access the repository for any reason, download the packages as described in [Getting Started](../getting-started/index.md) and install them manually using the `sudo dpkg -i <packages>` command. You also need the `tzdata` package.

## Connecting to the Server {#troubleshooting-accepts-no-connections}

@ -44,7 +42,7 @@ $ sudo service clickhouse-server start

**Check the logs**

The main log of `clickhouse-server` is in `/var/log/clickhouse-server/clickhouse-server.log` by default.

If the server started successfully, you should see the strings:

@ -57,13 +55,13 @@ $ sudo service clickhouse-server start
2019.01.11 15:23:25.549505 [ 45 ] {} <Error> ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
```

If you do not see an error at the end of the file, look through the entire file starting from the following string:

``` text
<Information> Application: starting up.
```
If you try to start a second instance of `clickhouse-server` on the server, you will see the following log:

``` text
2019.01.11 15:25:11.151730 [ 1 ] {} <Information> : Starting ClickHouse 19.1.0 with revision 54413
@ -79,9 +77,9 @@ Revision: 54413
2019.01.11 15:25:11.156716 [ 2 ] {} <Information> BaseDaemon: Stop SignalListener thread
```

**Check the system.d logs**

If you did not find any useful information in the `clickhouse-server` logs, or there are no logs at all, you can view the `system.d` logs using the command:

``` bash
$ sudo journalctl -u clickhouse-server
@ -99,9 +97,9 @@ $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-se

Check:

- Docker settings.

    If you run ClickHouse in Docker in an IPv6 network, make sure that `network=host` is set.

- Endpoint settings.

@ -117,10 +115,10 @@ $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-se

Check:

- The [tcp\_port\_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) setting.
- The [SSL certificate](server-configuration-parameters/settings.md#server_configuration_parameters-openssl) settings.

Use the proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`.

- User settings.

@ -135,7 +133,7 @@ $ curl 'http://localhost:8123/' --data-binary "SELECT a"
Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
```

If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse returns the server stack trace with a description of the error.

You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors.
@ -1,11 +1,9 @@
---
toc_priority: 47
toc_title: "Update"
---

# ClickHouse Update {#clickhouse-update}

If ClickHouse was installed from deb packages, execute the following commands on the server:

@ -15,6 +13,6 @@ $ sudo apt-get install clickhouse-client clickhouse-server
$ sudo service clickhouse-server restart
```

If you installed ClickHouse using something other than the recommended deb packages, use the appropriate update method.

ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time.
@ -207,7 +207,7 @@ if (TARGET clickhouse-server AND TARGET copy-headers)
endif ()

if (ENABLE_TESTS AND USE_GTEST)
    set (CLICKHOUSE_ALL_TESTS_TARGETS local_date_time_comparison unit_tests_libcommon unit_tests_dbms hashing_write_buffer hashing_read_buffer in_join_subqueries_preprocessor)
    add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_ALL_TESTS_TARGETS})
    add_dependencies(clickhouse-bundle clickhouse-tests)
endif()
@ -6,14 +6,9 @@ set(CLICKHOUSE_CLIENT_SOURCES

set(CLICKHOUSE_CLIENT_LINK PRIVATE clickhouse_common_config clickhouse_functions clickhouse_aggregate_functions clickhouse_common_io clickhouse_parsers string_utils ${Boost_PROGRAM_OPTIONS_LIBRARY})

# Always use internal readpassphrase
add_subdirectory(readpassphrase)
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase)

clickhouse_program_add(client)
@ -39,7 +39,6 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/typeid_cast.h>
#include <Common/Config/ConfigProcessor.h>
#include <Core/Types.h>
#include <Core/QueryProcessingStage.h>
#include <Core/ExternalTable.h>
@ -77,6 +76,10 @@
#include <common/argsToConfig.h>
#include <Common/TerminalSize.h>

#if !defined(ARCADIA_BUILD)
# include <Common/config_version.h>
#endif

#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif
@ -9,7 +9,7 @@
#include <Common/Exception.h>
#include <common/setTerminalEcho.h>
#include <ext/scope_guard.h>
#include "readpassphrase/readpassphrase.h"

namespace DB
{
@ -1,3 +0,0 @@
#pragma once

#cmakedefine HAVE_READPASSPHRASE
@ -1,13 +1,7 @@

# wget https://raw.githubusercontent.com/openssh/openssh-portable/master/openbsd-compat/readpassphrase.c
# wget https://raw.githubusercontent.com/openssh/openssh-portable/master/openbsd-compat/readpassphrase.h

add_library(readpassphrase ${CMAKE_CURRENT_SOURCE_DIR}/readpassphrase.c)
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro)
@ -1,6 +1,6 @@
#pragma once

/* #undef HAVE_READPASSPHRASE */

#if !defined(HAVE_READPASSPHRASE)
# ifndef _PATH_TTY
@ -25,13 +25,11 @@

#include "includes.h"

#include <termios.h>
#include <signal.h>
#include <ctype.h>
#include <fcntl.h>
#include "readpassphrase.h"
#include <errno.h>
#include <string.h>
#include <unistd.h>
@ -193,19 +191,7 @@ restart:
}
//DEF_WEAK(readpassphrase);

static void handler(int s)
{
    signo[s] = 1;
}
@ -23,39 +23,22 @@
/* OPENBSD ORIGINAL: include/readpassphrase.h */

#pragma once

#if defined(__cplusplus)
extern "C" {
#endif

#define RPP_ECHO_OFF    0x00 /* Turn off echo (default). */
#define RPP_ECHO_ON     0x01 /* Leave echo on. */
#define RPP_REQUIRE_TTY 0x02 /* Fail if there is no tty. */
#define RPP_FORCELOWER  0x04 /* Force input to lower case. */
#define RPP_FORCEUPPER  0x08 /* Force input to upper case. */
#define RPP_SEVENBIT    0x10 /* Strip the high bit from input. */
#define RPP_STDIN       0x20 /* Read from stdin, not /dev/tty */

char * readpassphrase(const char *, char *, size_t, int);

#if defined(__cplusplus)
}
#endif
7
programs/client/readpassphrase/ya.make
Normal file
@ -0,0 +1,7 @@
LIBRARY()

SRCS(
    readpassphrase.c
)

END()
@ -114,7 +114,7 @@ void ClusterCopierApp::mainImpl()
    registerDisks();

    static const std::string default_database = "_local";
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, *context));
    context->setCurrentDatabase(default_database);

    /// Initialize query scope just in case.
@ -118,13 +118,13 @@ void LocalServer::tryInitPath()
}


static void attachSystemTables(const Context & context)
{
    DatabasePtr system_database = DatabaseCatalog::instance().tryGetDatabase(DatabaseCatalog::SYSTEM_DATABASE);
    if (!system_database)
    {
        /// TODO: add attachTableDelayed into DatabaseMemory to speedup loading
        system_database = std::make_shared<DatabaseMemory>(DatabaseCatalog::SYSTEM_DATABASE, context);
        DatabaseCatalog::instance().attachDatabase(DatabaseCatalog::SYSTEM_DATABASE, system_database);
    }

@ -202,7 +202,7 @@ try
     * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
     */
    std::string default_database = config().getString("default_database", "_local");
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, *context));
    context->setCurrentDatabase(default_database);
    applyCmdOptions();

@ -213,14 +213,14 @@ try

        LOG_DEBUG(log, "Loading metadata from {}", context->getPath());
        loadMetadataSystem(*context);
        attachSystemTables(*context);
        loadMetadata(*context);
        DatabaseCatalog::instance().loadDatabases();
        LOG_DEBUG(log, "Loaded metadata.");
    }
    else
    {
        attachSystemTables(*context);
    }

    processQueries();
@ -8,11 +8,8 @@
#include <string>
#include <utility> /// pair

#if !defined(ARCADIA_BUILD)
#    include "config_tools.h"
#endif

#include <Common/StringUtils/StringUtils.h>
@ -22,31 +19,31 @@


/// Universal executable for various clickhouse applications
#if ENABLE_CLICKHOUSE_SERVER
int mainEntryClickHouseServer(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_CLIENT
int mainEntryClickHouseClient(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_LOCAL
int mainEntryClickHouseLocal(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_BENCHMARK
int mainEntryClickHouseBenchmark(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_COMPRESSOR
int mainEntryClickHouseCompressor(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_FORMAT
int mainEntryClickHouseFormat(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_COPIER
int mainEntryClickHouseClusterCopier(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_OBFUSCATOR
int mainEntryClickHouseObfuscator(int argc, char ** argv);
#endif

@ -60,31 +57,31 @@ using MainFunc = int (*)(int, char**);
/// Add an item here to register new application
std::pair<const char *, MainFunc> clickhouse_applications[] =
{
#if ENABLE_CLICKHOUSE_LOCAL
    {"local", mainEntryClickHouseLocal},
#endif
#if ENABLE_CLICKHOUSE_CLIENT
    {"client", mainEntryClickHouseClient},
#endif
#if ENABLE_CLICKHOUSE_BENCHMARK
    {"benchmark", mainEntryClickHouseBenchmark},
#endif
#if ENABLE_CLICKHOUSE_SERVER
    {"server", mainEntryClickHouseServer},
#endif
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
    {"extract-from-config", mainEntryClickHouseExtractFromConfig},
#endif
#if ENABLE_CLICKHOUSE_COMPRESSOR
    {"compressor", mainEntryClickHouseCompressor},
#endif
#if ENABLE_CLICKHOUSE_FORMAT
    {"format", mainEntryClickHouseFormat},
#endif
#if ENABLE_CLICKHOUSE_COPIER
    {"copier", mainEntryClickHouseClusterCopier},
#endif
#if ENABLE_CLICKHOUSE_OBFUSCATOR
    {"obfuscator", mainEntryClickHouseObfuscator},
#endif
};
@ -127,9 +124,10 @@ enum class InstructionFail
    SSSE3 = 2,
    SSE4_1 = 3,
    SSE4_2 = 4,
    POPCNT = 5,
    AVX = 6,
    AVX2 = 7,
    AVX512 = 8
};

const char * instructionFailToString(InstructionFail fail)
@ -146,6 +144,8 @@ const char * instructionFailToString(InstructionFail fail)
            return "SSE4.1";
        case InstructionFail::SSE4_2:
            return "SSE4.2";
        case InstructionFail::POPCNT:
            return "POPCNT";
        case InstructionFail::AVX:
            return "AVX";
        case InstructionFail::AVX2:
@ -189,6 +189,16 @@ void checkRequiredInstructionsImpl(volatile InstructionFail & fail)
    __asm__ volatile ("pcmpgtq %%xmm0, %%xmm0" : : : "xmm0");
#endif

    /// Defined by -msse4.2
#if defined(__POPCNT__)
    fail = InstructionFail::POPCNT;
    {
        uint64_t a = 0;
        uint64_t b = 0;
        __asm__ volatile ("popcnt %1, %0" : "=r"(a) :"r"(b) :);
    }
#endif

#if defined(__AVX__)
    fail = InstructionFail::AVX;
    __asm__ volatile ("vaddpd %%ymm0, %%ymm0, %%ymm0" : : : "ymm0");
@ -1,21 +1,6 @@
set(CLICKHOUSE_SERVER_SOURCES
    MetricsTransmitter.cpp
    Server.cpp
)

set (CLICKHOUSE_SERVER_LINK
@ -53,15 +53,19 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Common/Config/ConfigReloader.h>
#include <Server/HTTPHandlerFactory.h>
#include "MetricsTransmitter.h"
#include <Common/StatusFile.h>
#include <Server/TCPHandlerFactory.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/ThreadFuzzer.h>
#include <Server/MySQLHandlerFactory.h>
#include <daemon/SentryWriter.h>

#if !defined(ARCADIA_BUILD)
#   include "config_core.h"
@ -1,6 +1,6 @@
#pragma once

#include <Server/IServer.h>

#include <daemon/BaseDaemon.h>
@ -11,19 +11,8 @@ PEERDIR(
SRCS(
    clickhouse-server.cpp

    MetricsTransmitter.cpp
    Server.cpp
)

END()
@ -1,3 +1,27 @@
PROGRAM(clickhouse)

CFLAGS(
    -DENABLE_CLICKHOUSE_CLIENT
    -DENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
    -DENABLE_CLICKHOUSE_SERVER
)

PEERDIR(
    clickhouse/base/daemon
    clickhouse/base/loggers
    clickhouse/programs/client/readpassphrase
    clickhouse/src
)

SRCS(
    main.cpp

    client/Client.cpp
    client/ConnectionParameters.cpp
    client/Suggest.cpp
    extract-from-config/ExtractFromConfig.cpp
    server/Server.cpp
    server/MetricsTransmitter.cpp
)

END()
@ -7,8 +7,8 @@ namespace DB
{
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int NOT_IMPLEMENTED;
}

@ -36,8 +36,11 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const

        case DOUBLE_SHA1_PASSWORD:
            return password_hash;

        case MAX_TYPE:
            break;
    }
    throw Exception("getPasswordDoubleSHA1(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

@ -71,8 +74,11 @@ bool Authentication::isCorrectPassword(const String & password_) const

            return encodeSHA1(first_sha1) == password_hash;
        }

        case MAX_TYPE:
            break;
    }
    throw Exception("Cannot check if the password is correct for authentication type " + toString(type), ErrorCodes::NOT_IMPLEMENTED);
}

}
@ -5,6 +5,7 @@
#include <Common/OpenSSLHelpers.h>
#include <Poco/SHA1Engine.h>
#include <boost/algorithm/hex.hpp>
#include <boost/algorithm/string/case_conv.hpp>


namespace DB
@ -14,6 +15,7 @@ namespace ErrorCodes
    extern const int SUPPORT_IS_DISABLED;
    extern const int BAD_ARGUMENTS;
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
}


@ -35,6 +37,15 @@ public:
        /// SHA1(SHA1(password)).
        /// This kind of hash is used by the `mysql_native_password` authentication plugin.
        DOUBLE_SHA1_PASSWORD,

        MAX_TYPE,
    };

    struct TypeInfo
    {
        const char * const raw_name;
        const String name; /// Lowercased with underscores, e.g. "sha256_password".
        static const TypeInfo & get(Type type_);
    };

    using Digest = std::vector<uint8_t>;
@ -85,6 +96,48 @@ private:
};


inline const Authentication::TypeInfo & Authentication::TypeInfo::get(Type type_)
{
    static constexpr auto make_info = [](const char * raw_name_)
    {
        String init_name = raw_name_;
        boost::to_lower(init_name);
        return TypeInfo{raw_name_, std::move(init_name)};
    };

    switch (type_)
    {
        case NO_PASSWORD:
        {
            static const auto info = make_info("NO_PASSWORD");
            return info;
        }
        case PLAINTEXT_PASSWORD:
        {
            static const auto info = make_info("PLAINTEXT_PASSWORD");
            return info;
        }
        case SHA256_PASSWORD:
        {
            static const auto info = make_info("SHA256_PASSWORD");
            return info;
        }
        case DOUBLE_SHA1_PASSWORD:
        {
            static const auto info = make_info("DOUBLE_SHA1_PASSWORD");
            return info;
        }
        case MAX_TYPE: break;
    }
    throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
}

inline String toString(Authentication::Type type_)
{
    return Authentication::TypeInfo::get(type_).raw_name;
}


inline Authentication::Digest Authentication::encodeSHA256(const std::string_view & text [[maybe_unused]])
{
#if USE_SSL
@ -122,8 +175,10 @@ inline void Authentication::setPassword(const String & password_)

        case DOUBLE_SHA1_PASSWORD:
            return setPasswordHashBinary(encodeDoubleSHA1(password_));

        case MAX_TYPE: break;
    }
    throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}


@ -186,8 +241,10 @@ inline void Authentication::setPasswordHashBinary(const Digest & hash)
            password_hash = hash;
            return;
        }

        case MAX_TYPE: break;
    }
    throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

}
@ -68,15 +68,27 @@ void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlMa
|
||||
{
|
||||
all = ast.all;
|
||||
|
||||
auto name_to_id = [id_mode{ast.id_mode}, manager](const String & name) -> UUID
|
||||
auto name_to_id = [&ast, manager](const String & name) -> UUID
|
||||
{
|
||||
if (id_mode)
|
||||
if (ast.id_mode)
|
||||
return parse<UUID>(name);
|
||||
assert(manager);
|
||||
auto id = manager->find<User>(name);
|
||||
if (id)
|
||||
return *id;
|
||||
return manager->getID<Role>(name);
|
||||
if (ast.can_contain_users && ast.can_contain_roles)
|
||||
{
|
||||
auto id = manager->find<User>(name);
|
||||
if (id)
|
||||
return *id;
|
||||
return manager->getID<Role>(name);
|
||||
}
|
||||
else if (ast.can_contain_users)
|
||||
{
|
||||
return manager->getID<User>(name);
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(ast.can_contain_roles);
|
||||
return manager->getID<Role>(name);
|
||||
}
|
||||
};
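
The rewritten name_to_id lambda now honours the AST's can_contain_users/can_contain_roles flags instead of always probing users first. A reduced sketch of the same resolution order; Manager and its std::map-backed lookups are simplified stand-ins, not the real AccessControlManager API:

#include <map>
#include <optional>
#include <stdexcept>
#include <string>

/// Hypothetical stand-in for the slice of AccessControlManager used here.
struct Manager
{
    std::map<std::string, int> users, roles;

    std::optional<int> findUser(const std::string & name) const
    {
        auto it = users.find(name);
        if (it == users.end())
            return std::nullopt;
        return it->second;
    }
    int getRoleID(const std::string & name) const
    {
        auto it = roles.find(name);
        if (it == roles.end())
            throw std::runtime_error("unknown role " + name);
        return it->second;
    }
};

/// Mirrors the resolution order of the rewritten name_to_id lambda.
int resolve(const Manager & m, const std::string & name,
            bool can_contain_users, bool can_contain_roles)
{
    if (can_contain_users && can_contain_roles)
    {
        if (auto id = m.findUser(name)) /// prefer a user...
            return *id;
        return m.getRoleID(name);       /// ...fall back to a role
    }
    if (can_contain_users)
    {
        if (auto id = m.findUser(name))
            return *id;
        throw std::runtime_error("unknown user " + name);
    }
    return m.getRoleID(name);           /// roles only
}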

    if (!ast.names.empty() && !all)

@ -143,6 +143,14 @@ const IAccessStorage & MultipleAccessStorage::getStorage(const UUID & id) const
    return const_cast<MultipleAccessStorage *>(this)->getStorage(id);
}

void MultipleAccessStorage::addStorage(std::unique_ptr<Storage> nested_storage)
{
    /// Note that IStorage::storage_name is not changed. This is OK, as the method
    /// is considered a temporary solution allowing third-party Arcadia applications
    /// that use CH as a library to register their own access storages. Do not remove
    /// this method without providing an alternative :)
    nested_storages.emplace_back(std::move(nested_storage));
}

AccessEntityPtr MultipleAccessStorage::readImpl(const UUID & id) const
{
@ -25,6 +25,8 @@ public:
    const Storage & getStorage(const UUID & id) const;
    Storage & getStorage(const UUID & id);

    void addStorage(std::unique_ptr<Storage> nested_storage);

    Storage & getStorageByIndex(size_t i) { return *(nested_storages[i]); }
    const Storage & getStorageByIndex(size_t i) const { return *(nested_storages[i]); }

@ -52,18 +52,20 @@ namespace

        String user_config = "users." + user_name;

        bool has_password = config.has(user_config + ".password");
        bool has_no_password = config.has(user_config + ".no_password");
        bool has_password_plaintext = config.has(user_config + ".password");
        bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex");
        bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex");

        if (has_password + has_password_sha256_hex + has_password_double_sha1_hex > 1)
            throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex' is used to specify password for user " + user_name + ". Must be only one of them.",
        size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex;
        if (num_password_fields > 1)
            throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password' are used to specify password for user " + user_name + ". Must be only one of them.",
                ErrorCodes::BAD_ARGUMENTS);

        if (!has_password && !has_password_sha256_hex && !has_password_double_sha1_hex)
            throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
        if (num_password_fields < 1)
            throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);

        if (has_password)
        if (has_password_plaintext)
        {
            user->authentication = Authentication{Authentication::PLAINTEXT_PASSWORD};
            user->authentication.setPassword(config.getString(user_config + ".password"));
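
The validation above leans on bool-to-integer promotion: summing the four flags counts how many password fields are present, so both the "more than one" and the "none" case fall out of a single counter. A tiny standalone check in the same style (names hypothetical):

#include <cstddef>
#include <stdexcept>

/// Returns the index of the single set option; throws on zero or multiple.
std::size_t checkExactlyOne(bool a, bool b, bool c, bool d)
{
    std::size_t num_set = a + b + c + d; /// bools promote to 0/1
    if (num_set > 1)
        throw std::invalid_argument("more than one option is set");
    if (num_set < 1)
        throw std::invalid_argument("no option is set");
    return a ? 0 : b ? 1 : c ? 2 : 3;
}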

@ -23,18 +23,10 @@ struct EntropyData
{
    using Weight = UInt64;

    using HashingMap = HashMap<
        Value, Weight,
        HashCRC32<Value>,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>>;
    using HashingMap = HashMapWithStackMemory<Value, Weight, HashCRC32<Value>, 4>;

    /// For the case of pre-hashed values.
    using TrivialMap = HashMap<
        Value, Weight,
        UInt128TrivialHash,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>>;
    using TrivialMap = HashMapWithStackMemory<Value, Weight, UInt128TrivialHash, 4>;

    using Map = std::conditional_t<std::is_same_v<UInt128, Value>, TrivialMap, HashingMap>;
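
EntropyData above (and QuantileExactWeighted further down) now spell these maps through a HashMapWithStackMemory alias parameterized by an initial size degree. Judging purely from the lines this commit removes, the alias presumably bundles the grower and the stack allocator roughly like this (a sketch inferred from the removed spellings, not the verified ClickHouse definition). Note that the removed spellings reserved stack space for 1 << 3 pairs while passing 4 to the grower; the alias ties both to a single degree.

#include <Common/HashTable/HashMap.h> /// assuming the usual ClickHouse hash-table headers

/// Sketch only: the exact ClickHouse definition may differ in cell type and byte count.
template <typename Key, typename Mapped, typename Hash, size_t initial_size_degree>
using HashMapWithStackMemorySketch = HashMap<
    Key, Mapped, Hash,
    HashTableGrower<initial_size_degree>,
    HashTableAllocatorWithStackMemory<sizeof(std::pair<Key, Mapped>) * (1ULL << initial_size_degree)>>;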

@ -28,12 +28,7 @@ template <typename T>
struct AggregateFunctionGroupUniqArrayData
{
    /// When creating, the hash table must be small.
    using Set = HashSet<
        T,
        DefaultHash<T>,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(T) * (1 << 4)>
    >;
    using Set = HashSetWithStackMemory<T, DefaultHash<T>, 4>;

    Set value;
};
@ -126,9 +121,10 @@ public:
/// Generic implementation that uses the serialized representation as the object descriptor.
struct AggregateFunctionGroupUniqArrayGenericData
{
    static constexpr size_t INIT_ELEMS = 2; /// adjustable
    static constexpr size_t ELEM_SIZE = sizeof(HashSetCellWithSavedHash<StringRef, StringRefHash>);
    using Set = HashSetWithSavedHash<StringRef, StringRefHash, HashTableGrower<INIT_ELEMS>, HashTableAllocatorWithStackMemory<INIT_ELEMS * ELEM_SIZE>>;
    static constexpr size_t INITIAL_SIZE_DEGREE = 3; /// adjustable

    using Set = HashSetWithSavedHashWithStackMemory<StringRef, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    Set value;
};

@ -23,13 +23,8 @@ namespace DB
template <typename T>
struct AggregateFunctionTopKData
{
    using Set = SpaceSaving
    <
        T,
        HashCRC32<T>,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(T) * (1 << 4)>
    >;
    using Set = SpaceSaving<T, HashCRC32<T>>;

    Set value;
};

@ -109,13 +104,7 @@ public:
/// Generic implementation that uses the serialized representation as the object descriptor.
struct AggregateFunctionTopKGenericData
{
    using Set = SpaceSaving
    <
        StringRef,
        StringRefHash,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(StringRef) * (1 << 4)>
    >;
    using Set = SpaceSaving<StringRef, StringRefHash>;

    Set value;
};
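
The two SpaceSaving simplifications rely on the same mechanism as the map aliases: once the grower and allocator template parameters carry defaults equal to what every call site spelled out, the call sites collapse to SpaceSaving<T, Hash>. A self-contained illustration of that collapse (all names hypothetical):

#include <cstddef>
#include <type_traits>

template <std::size_t N> struct Grower {};
template <std::size_t Bytes> struct StackAlloc {};
struct IntHash {};

/// Defaults mirror what the removed lines passed explicitly.
template <typename Key, typename Hash,
          typename G = Grower<4>,
          typename A = StackAlloc<sizeof(Key) * (1 << 4)>>
struct SpaceSavingSketch {};

/// The short spelling names exactly the same type as the long one.
static_assert(std::is_same_v<
    SpaceSavingSketch<int, IntHash>,
    SpaceSavingSketch<int, IntHash, Grower<4>, StackAlloc<sizeof(int) * (1 << 4)>>>);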

@ -33,12 +33,7 @@ struct QuantileExactWeighted
    using Hasher = std::conditional_t<std::is_same_v<Value, Decimal128>, Int128Hash, HashCRC32<UnderlyingType>>;

    /// When creating, the hash table must be small.
    using Map = HashMap<
        UnderlyingType, Weight,
        Hasher,
        HashTableGrower<4>,
        HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>
    >;
    using Map = HashMapWithStackMemory<UnderlyingType, Weight, Hasher, 4>;

    Map map;

@ -58,6 +58,7 @@ add_subdirectory (TableFunctions)
add_subdirectory (Processors)
add_subdirectory (Formats)
add_subdirectory (Compression)
add_subdirectory (Server)


set(dbms_headers)
@ -145,6 +146,7 @@ add_object_library(clickhouse_storages_distributed Storages/Distributed)
add_object_library(clickhouse_storages_mergetree Storages/MergeTree)
add_object_library(clickhouse_storages_liveview Storages/LiveView)
add_object_library(clickhouse_client Client)
add_object_library(clickhouse_server Server)
add_object_library(clickhouse_formats Formats)
add_object_library(clickhouse_processors Processors)
add_object_library(clickhouse_processors_executors Processors/Executors)
@ -549,6 +549,8 @@ void ColumnAggregateFunction::getPermutation(bool /*reverse*/, size_t /*limit*/,
        res[i] = i;
}

void ColumnAggregateFunction::updatePermutation(bool, size_t, int, Permutation &, EqualRanges &) const {}

void ColumnAggregateFunction::gather(ColumnGathererStream & gatherer)
{
    gatherer.gather(*this);
@ -193,6 +193,7 @@ public:
    }

    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
    void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const override;

    /** More efficient manipulation methods */
    Container & getData()
@ -737,6 +737,76 @@ void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_h
    }
}

void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const
{
    if (limit >= size() || limit >= equal_range.back().second)
        limit = 0;

    size_t n = equal_range.size();

    if (limit)
        --n;

    EqualRanges new_ranges;
    for (size_t i = 0; i < n; ++i)
    {
        const auto & [first, last] = equal_range[i];

        if (reverse)
            std::sort(res.begin() + first, res.begin() + last, Less<false>(*this, nan_direction_hint));
        else
            std::sort(res.begin() + first, res.begin() + last, Less<true>(*this, nan_direction_hint));
        auto new_first = first;

        for (auto j = first + 1; j < last; ++j)
        {
            if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }

        if (last - new_first > 1)
            new_ranges.emplace_back(new_first, last);
    }

    if (limit)
    {
        const auto & [first, last] = equal_range.back();
        if (reverse)
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<false>(*this, nan_direction_hint));
        else
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<true>(*this, nan_direction_hint));
        auto new_first = first;
        for (auto j = first + 1; j < limit; ++j)
        {
            if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }
        auto new_last = limit;
        for (auto j = limit; j < last; ++j)
        {
            if (compareAt(res[new_first], res[j], *this, nan_direction_hint) == 0)
            {
                std::swap(res[new_last], res[j]);
                ++new_last;
            }
        }
        if (new_last - new_first > 1)
        {
            new_ranges.emplace_back(new_first, new_last);
        }
    }
    equal_range = std::move(new_ranges);
}
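
All three updatePermutation implementations added by this commit (ColumnArray here, ColumnDecimal and ColumnFixedString below) share one algorithm: within each currently-equal range of the permutation, sort by the column's comparator, then split the range into finer runs of equal rows, keeping only runs longer than one. A reduced, self-contained version over a plain vector, with the limit/partial_sort branch omitted for brevity (all names hypothetical):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

using Permutation = std::vector<std::size_t>;
using EqualRanges = std::vector<std::pair<std::size_t, std::size_t>>; /// half-open [first, last)

/// Sort each equal range of res by data[res[i]] and refine the ranges,
/// mirroring the structure of ColumnArray::updatePermutation above.
void updatePermutation(const std::vector<int> & data, Permutation & res, EqualRanges & ranges)
{
    EqualRanges new_ranges;
    for (const auto & [first, last] : ranges)
    {
        std::sort(res.begin() + first, res.begin() + last,
                  [&](std::size_t a, std::size_t b) { return data[a] < data[b]; });

        std::size_t new_first = first;
        for (std::size_t j = first + 1; j < last; ++j)
        {
            if (data[res[new_first]] != data[res[j]]) /// a run of equal keys ends here
            {
                if (j - new_first > 1)                /// singletons need no further work
                    new_ranges.emplace_back(new_first, j);
                new_first = j;
            }
        }
        if (last - new_first > 1)
            new_ranges.emplace_back(new_first, last);
    }
    ranges = std::move(new_ranges);
}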

ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const
{
@ -73,6 +73,7 @@ public:
    template <typename Type> ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
    int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
    void reserve(size_t n) override;
    size_t byteSize() const override;
    size_t allocatedBytes() const override;
@ -120,6 +120,8 @@ void ColumnConst::getPermutation(bool /*reverse*/, size_t /*limit*/, int /*nan_d
        res[i] = i;
}

void ColumnConst::updatePermutation(bool, size_t, int, Permutation &, EqualRanges &) const {}

void ColumnConst::updateWeakHash32(WeakHash32 & hash) const
{
    if (hash.getData().size() != s)
@ -170,6 +170,7 @@ public:
    ColumnPtr permute(const Permutation & perm, size_t limit) const override;
    ColumnPtr index(const IColumn & indexes, size_t limit) const override;
    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;

    size_t byteSize() const override
    {
@ -108,6 +108,76 @@ void ColumnDecimal<T>::getPermutation(bool reverse, size_t limit, int , IColumn:
    permutation(reverse, limit, res);
}

template <typename T>
void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const
{
    if (limit >= data.size() || limit >= equal_range.back().second)
        limit = 0;

    size_t n = equal_range.size();
    if (limit)
        --n;

    EqualRanges new_ranges;
    for (size_t i = 0; i < n; ++i)
    {
        const auto & [first, last] = equal_range[i];
        if (reverse)
            std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
                [this](size_t a, size_t b) { return data[a] > data[b]; });
        else
            std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
                [this](size_t a, size_t b) { return data[a] < data[b]; });
        auto new_first = first;
        for (auto j = first + 1; j < last; ++j)
        {
            if (data[res[new_first]] != data[res[j]])
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }
        if (last - new_first > 1)
            new_ranges.emplace_back(new_first, last);
    }

    if (limit)
    {
        const auto & [first, last] = equal_range.back();
        if (reverse)
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
                [this](size_t a, size_t b) { return data[a] > data[b]; });
        else
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
                [this](size_t a, size_t b) { return data[a] < data[b]; });
        auto new_first = first;
        for (auto j = first + 1; j < limit; ++j)
        {
            if (data[res[new_first]] != data[res[j]])
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }
        auto new_last = limit;
        for (auto j = limit; j < last; ++j)
        {
            if (data[res[new_first]] == data[res[j]])
            {
                std::swap(res[new_last], res[j]);
                ++new_last;
            }
        }
        if (new_last - new_first > 1)
            new_ranges.emplace_back(new_first, new_last);
    }
    equal_range = std::move(new_ranges);
}
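
The limit branch is the subtle part: std::partial_sort only guarantees order for the first limit positions, so the last range is refined up to limit and the unsorted tail is then rescanned, swapping rows equal to the run at the boundary into adjacent positions to keep that run contiguous. A small demonstration of the partial_sort guarantee this relies on:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> v{5, 1, 4, 1, 3};
    const std::size_t limit = 3;

    /// Only v[0..limit) is guaranteed sorted; the tail order is unspecified,
    /// which is why updatePermutation must rescan it for equal elements.
    std::partial_sort(v.begin(), v.begin() + limit, v.end());
    assert(std::is_sorted(v.begin(), v.begin() + limit));
    assert(v[0] == 1 && v[1] == 1 && v[2] == 3);
}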

template <typename T>
ColumnPtr ColumnDecimal<T>::permute(const IColumn::Permutation & perm, size_t limit) const
{
@ -108,6 +108,7 @@ public:
    void updateWeakHash32(WeakHash32 & hash) const override;
    int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
    void updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const override;

    MutableColumnPtr cloneResized(size_t size) const override;

@ -152,6 +153,8 @@ public:
    const T & getElement(size_t n) const { return data[n]; }
    T & getElement(size_t n) { return data[n]; }

    UInt32 getScale() const { return scale; }

protected:
    Container data;
    UInt32 scale;
@ -162,6 +162,71 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir
    }
}

void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const
{
    if (limit >= size() || limit >= equal_range.back().second)
        limit = 0;

    size_t k = equal_range.size();
    if (limit)
        --k;

    EqualRanges new_ranges;

    for (size_t i = 0; i < k; ++i)
    {
        const auto & [first, last] = equal_range[i];
        if (reverse)
            std::sort(res.begin() + first, res.begin() + last, less<false>(*this));
        else
            std::sort(res.begin() + first, res.begin() + last, less<true>(*this));
        auto new_first = first;
        for (auto j = first + 1; j < last; ++j)
        {
            if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }
        if (last - new_first > 1)
            new_ranges.emplace_back(new_first, last);
    }
    if (limit)
    {
        const auto & [first, last] = equal_range.back();
        if (reverse)
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
        else
            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
        auto new_first = first;
        for (auto j = first + 1; j < limit; ++j)
        {
            if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) != 0)
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);

                new_first = j;
            }
        }
        auto new_last = limit;
        for (auto j = limit; j < last; ++j)
        {
            if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) == 0)
            {
                std::swap(res[new_last], res[j]);
                ++new_last;
            }
        }
        if (new_last - new_first > 1)
            new_ranges.emplace_back(new_first, new_last);
    }
    equal_range = std::move(new_ranges);
}

void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
    const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
@ -118,6 +118,8 @@ public:

    void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;

    void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;

    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

    ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;
@ -121,6 +121,11 @@ public:
        throw Exception("getPermutation is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
    }

    void updatePermutation(bool, size_t, int, Permutation &, EqualRanges &) const override
    {
        throw Exception("updatePermutation is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
    }

    void gather(ColumnGathererStream &) override
    {
        throw Exception("Method gather is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);