Merge branch 'master' of github.com:yandex/ClickHouse into group_by_in_order_optimization

commit 38c585f867 by Dmitry, 2020-05-26 21:27:50 +03:00
383 changed files with 6543 additions and 3736 deletions


@ -9,7 +9,7 @@ Changelog category (leave one):
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Other
- Non-significant (changelog entry is not required)
- Not for changelog (changelog entry is not required)
Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):

.gitmodules vendored

@ -157,3 +157,6 @@
[submodule "contrib/openldap"]
path = contrib/openldap
url = https://github.com/openldap/openldap.git
[submodule "contrib/fmtlib"]
path = contrib/fmtlib
url = https://github.com/fmtlib/fmt.git


@ -79,6 +79,7 @@ target_link_libraries (common
Poco::Util
Poco::Foundation
replxx
fmt
PRIVATE
cctz


@ -2,16 +2,14 @@
/// Macros for convenient usage of Poco logger.
#include <sstream>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <Poco/Logger.h>
#include <Poco/Message.h>
#include <Poco/Version.h>
#include <Common/CurrentThread.h>
#ifndef QUERY_PREVIEW_LENGTH
#define QUERY_PREVIEW_LENGTH 160
#endif
/// TODO Remove this.
using Poco::Logger;
using Poco::Message;
using DB::LogsLevel;
@ -19,21 +17,20 @@ using DB::CurrentThread;
/// Logs a message to a specified logger with that level.
#define LOG_SIMPLE(logger, message, priority, PRIORITY) do \
#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
{ \
const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
(CurrentThread::getGroup()->client_logs_level >= (priority)); \
if ((logger)->is((PRIORITY)) || is_clients_log) \
{ \
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
std::string formatted_message = fmt::format(__VA_ARGS__); \
if (auto channel = (logger)->getChannel()) \
{ \
std::string file_function; \
file_function += __FILE__; \
file_function += "; "; \
file_function += __PRETTY_FUNCTION__; \
Message poco_message((logger)->name(), oss_internal_rare.str(), \
Message poco_message((logger)->name(), formatted_message, \
(PRIORITY), file_function.c_str(), __LINE__); \
channel->log(poco_message); \
} \
@ -41,10 +38,9 @@ using DB::CurrentThread;
} while (false)
#define LOG_TRACE(logger, message) LOG_SIMPLE(logger, message, LogsLevel::trace, Message::PRIO_TRACE)
#define LOG_DEBUG(logger, message) LOG_SIMPLE(logger, message, LogsLevel::debug, Message::PRIO_DEBUG)
#define LOG_INFO(logger, message) LOG_SIMPLE(logger, message, LogsLevel::information, Message::PRIO_INFORMATION)
#define LOG_WARNING(logger, message) LOG_SIMPLE(logger, message, LogsLevel::warning, Message::PRIO_WARNING)
#define LOG_ERROR(logger, message) LOG_SIMPLE(logger, message, LogsLevel::error, Message::PRIO_ERROR)
#define LOG_FATAL(logger, message) LOG_SIMPLE(logger, message, LogsLevel::error, Message::PRIO_FATAL)
#define LOG_TRACE(logger, ...) LOG_IMPL(logger, LogsLevel::trace, Message::PRIO_TRACE, __VA_ARGS__)
#define LOG_DEBUG(logger, ...) LOG_IMPL(logger, LogsLevel::debug, Message::PRIO_DEBUG, __VA_ARGS__)
#define LOG_INFO(logger, ...) LOG_IMPL(logger, LogsLevel::information, Message::PRIO_INFORMATION, __VA_ARGS__)
#define LOG_WARNING(logger, ...) LOG_IMPL(logger, LogsLevel::warning, Message::PRIO_WARNING, __VA_ARGS__)
#define LOG_ERROR(logger, ...) LOG_IMPL(logger, LogsLevel::error, Message::PRIO_ERROR, __VA_ARGS__)
#define LOG_FATAL(logger, ...) LOG_IMPL(logger, LogsLevel::error, Message::PRIO_FATAL, __VA_ARGS__)
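The net effect for call sites, visible throughout the rest of this commit, is a move from ostream-style concatenation to fmt-style format strings. A minimal standalone sketch of the change (assuming only the vendored fmt; `LOG_SKETCH` is a hypothetical stand-in that skips the level and client-log checks the real LOG_IMPL performs):

```
#include <fmt/format.h>
#include <iostream>

// Hypothetical stand-in for LOG_IMPL: format the message with fmt and
// print it instead of routing it through a Poco channel.
#define LOG_SKETCH(...) (std::cout << fmt::format(__VA_ARGS__) << '\n')

int main()
{
    int sig = 15;
    // Before: LOG_TRACE(log, "Received signal " << sig);
    // After:
    LOG_SKETCH("Received signal {}", sig);  // prints: Received signal 15
}
```

Note that in LOG_IMPL above, fmt::format runs only after the priority check passes, so messages below the active log level are never formatted.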


@ -24,6 +24,7 @@ PEERDIR(
contrib/libs/cxxsupp/libcxx-filesystem
contrib/libs/poco/Net
contrib/libs/poco/Util
contrib/libs/fmt
contrib/restricted/boost
contrib/restricted/cityhash-1.0.2
)


@ -180,7 +180,7 @@ public:
// levels and more info, but for completeness we log all signals
// here at trace level.
// Don't use strsignal here, because it's not thread-safe.
LOG_TRACE(log, "Received signal " << sig);
LOG_TRACE(log, "Received signal {}", sig);
if (sig == Signals::StopThread)
{
@ -236,7 +236,7 @@ private:
void onTerminate(const std::string & message, UInt32 thread_num) const
{
LOG_FATAL(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " << message);
LOG_FATAL(log, "(version {}{}) (from thread {}) {}", VERSION_STRING, VERSION_OFFICIAL, thread_num, message);
}
void onFault(
@ -257,9 +257,9 @@ private:
message << " (no query)";
else
message << " (query_id: " << query_id << ")";
message << " Received signal " << strsignal(sig) << " (" << sig << ")" << ".";
message << " Received signal " << strsignal(sig) << " (" << sig << ").";
LOG_FATAL(log, message.rdbuf());
LOG_FATAL(log, message.str());
}
LOG_FATAL(log, signalToErrorMessage(sig, info, context));
@ -274,7 +274,7 @@ private:
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
bare_stacktrace << ' ' << stack_trace.getFrames()[i];
LOG_FATAL(log, bare_stacktrace.rdbuf());
LOG_FATAL(log, bare_stacktrace.str());
}
/// Write symbolized stack trace line by line for better grep-ability.
@ -302,7 +302,7 @@ static void sanitizerDeathCallback()
message << " (query_id: " << query_id << ")";
message << " Sanitizer trap.";
LOG_FATAL(log, message.rdbuf());
LOG_FATAL(log, message.str());
}
/// Just in case print our own stack trace. In case when llvm-symbolizer does not work.
@ -314,7 +314,7 @@ static void sanitizerDeathCallback()
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
bare_stacktrace << ' ' << stack_trace.getFrames()[i];
LOG_FATAL(log, bare_stacktrace.rdbuf());
LOG_FATAL(log, bare_stacktrace.str());
}
/// Write symbolized stack trace line by line for better grep-ability.
@ -379,7 +379,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
}
catch (...)
{
LOG_WARNING(logger, __PRETTY_FUNCTION__ << ": when creating " << path << ", " << DB::getCurrentExceptionMessage(true));
LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, DB::getCurrentExceptionMessage(true));
}
return false;
}
@ -498,11 +498,10 @@ void debugIncreaseOOMScore()
}
catch (const Poco::Exception & e)
{
LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '" +
e.displayText() + "'.");
LOG_WARNING(&Logger::root(), "Failed to adjust OOM score: '{}'.", e.displayText());
return;
}
LOG_INFO(&Logger::root(), "Set OOM score adjustment to " + new_score);
LOG_INFO(&Logger::root(), "Set OOM score adjustment to {}", new_score);
}
#else
void debugIncreaseOOMScore() {}
@ -734,7 +733,7 @@ void BaseDaemon::handleNotification(Poco::TaskFailedNotification *_tfn)
task_failed = true;
Poco::AutoPtr<Poco::TaskFailedNotification> fn(_tfn);
Logger *lg = &(logger());
LOG_ERROR(lg, "Task '" << fn->task()->name() << "' failed. Daemon is shutting down. Reason - " << fn->reason().displayText());
LOG_ERROR(lg, "Task '{}' failed. Daemon is shutting down. Reason - {}", fn->task()->name(), fn->reason().displayText());
ServerApplication::terminate();
}
@ -850,7 +849,7 @@ void BaseDaemon::handleSignal(int signal_id)
void BaseDaemon::onInterruptSignals(int signal_id)
{
is_cancelled = true;
LOG_INFO(&logger(), "Received termination signal (" << strsignal(signal_id) << ")");
LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id));
if (sigint_signals_counter >= 2)
{


@ -52,8 +52,7 @@ private:
}
catch (const Poco::Exception & e)
{
LOG_WARNING(&Poco::Util::Application::instance().logger(),
"Fail to write to Graphite " << host << ":" << port << ". e.what() = " << e.what() << ", e.message() = " << e.message());
LOG_WARNING(&Poco::Util::Application::instance().logger(), "Fail to write to Graphite {}:{}. e.what() = {}, e.message() = {}", host, port, e.what(), e.message());
}
}


@ -4,6 +4,7 @@
#include <ctime>
#include <string>
#include <iomanip>
#include <sstream>
namespace ext


@ -1,57 +1,26 @@
set(_PROTOBUF_PROTOC $<TARGET_FILE:protoc>)
set(_GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)
option (ENABLE_GRPC "Use gRPC" ${ENABLE_LIBRARIES})
function(PROTOBUF_GENERATE_GRPC_CPP SRCS HDRS)
if(NOT ARGN)
message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files")
return()
endif()
if (ENABLE_GRPC)
option (USE_INTERNAL_GRPC_LIBRARY "Set to FALSE to use system gRPC library instead of bundled" ${NOT_UNBUNDLED})
if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if (USE_INTERNAL_GRPC_LIBRARY)
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/grpc/include/grpc++/grpc++.h")
message(WARNING "submodule contrib/grpc is missing. To fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_GRPC_LIBRARY OFF)
elseif (NOT USE_PROTOBUF)
message(WARNING "gRPC requires protobuf which is disabled")
set (USE_INTERNAL_GRPC_LIBRARY OFF)
else()
set (GRPC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc/include")
set (GRPC_LIBRARY "libgrpc++")
set (USE_GRPC ON)
endif()
else()
find_package(grpc)
if (GRPC_INCLUDE_DIR AND GRPC_LIBRARY)
set (USE_GRPC ON)
endif()
endif()
endif()
if(DEFINED PROTOBUF_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()
set(${SRCS})
set(${HDRS})
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h")
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc"
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h"
COMMAND ${_PROTOBUF_PROTOC}
ARGS --grpc_out=${CMAKE_CURRENT_BINARY_DIR}
--plugin=protoc-gen-grpc=${_GRPC_CPP_PLUGIN_EXECUTABLE}
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL}
COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}"
VERBATIM)
endforeach()
set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()
message(STATUS "Using gRPC=${USE_GRPC}: ${GRPC_INCLUDE_DIR} : ${GRPC_LIBRARY}")


@ -28,68 +28,6 @@ elseif(NOT MISSING_INTERNAL_PROTOBUF_LIBRARY)
set(Protobuf_LITE_LIBRARY libprotobuf-lite)
set(Protobuf_PROTOC_EXECUTABLE "$<TARGET_FILE:protoc>")
if(NOT DEFINED PROTOBUF_GENERATE_CPP_APPEND_PATH)
set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
endif()
function(PROTOBUF_GENERATE_CPP SRCS HDRS)
if(NOT ARGN)
message(SEND_ERROR "Error: PROTOBUF_GENERATE_CPP() called without any proto files")
return()
endif()
if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
endif()
if(DEFINED Protobuf_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()
set(${SRCS})
set(${HDRS})
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc"
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h"
COMMAND ${Protobuf_PROTOC_EXECUTABLE}
ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
COMMENT "Running C++ protocol buffer compiler on ${FIL}"
VERBATIM )
endforeach()
set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()
endif()
if(OS_FREEBSD AND SANITIZE STREQUAL "address")
@ -102,6 +40,7 @@ if(OS_FREEBSD AND SANITIZE STREQUAL "address")
endif()
endif()
include (${ClickHouse_SOURCE_DIR}/cmake/protobuf_generate_cpp.cmake)
endif()
message(STATUS "Using protobuf=${USE_PROTOBUF}: ${Protobuf_INCLUDE_DIR} : ${Protobuf_LIBRARY} : ${Protobuf_PROTOC_EXECUTABLE}")


@ -0,0 +1,166 @@
# This file declares functions that add custom commands for generating C++ files from *.proto files:
# function (protobuf_generate_cpp SRCS HDRS)
# function (protobuf_generate_grpc_cpp SRCS HDRS)
if (NOT USE_PROTOBUF)
message (WARNING "Cannot use protobuf_generate_cpp() without the protobuf library")
return()
endif()
if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE)
set (PROTOBUF_PROTOC_EXECUTABLE "$<TARGET_FILE:protoc>")
endif()
if (NOT DEFINED GRPC_CPP_PLUGIN_EXECUTABLE)
set (GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)
endif()
if (NOT DEFINED PROTOBUF_GENERATE_CPP_APPEND_PATH)
set (PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
endif()
function(protobuf_generate_cpp_impl SRCS HDRS MODES OUTPUT_FILE_EXTS PLUGIN)
if(NOT ARGN)
message(SEND_ERROR "Error: protobuf_generate_cpp() called without any proto files")
return()
endif()
if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
endif()
if(DEFINED Protobuf_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()
set (intermediate_dir ${CMAKE_CURRENT_BINARY_DIR}/intermediate)
set (protoc_args)
foreach (mode ${MODES})
list (APPEND protoc_args "--${mode}_out" ${intermediate_dir})
endforeach()
if (PLUGIN)
list (APPEND protoc_args "--plugin=${PLUGIN}")
endif()
set(srcs)
set(hdrs)
set(all_intermediate_outputs)
foreach(input_name ${ARGN})
get_filename_component(abs_name ${input_name} ABSOLUTE)
get_filename_component(name ${input_name} NAME_WE)
set (intermediate_outputs)
foreach (ext ${OUTPUT_FILE_EXTS})
set (filename "${name}${ext}")
set (output "${CMAKE_CURRENT_BINARY_DIR}/${filename}")
set (intermediate_output "${intermediate_dir}/${filename}")
list (APPEND intermediate_outputs "${intermediate_output}")
list (APPEND all_intermediate_outputs "${intermediate_output}")
if (${ext} MATCHES ".*\\.h")
list(APPEND hdrs "${output}")
else()
list(APPEND srcs "${output}")
endif()
add_custom_command(
OUTPUT ${output}
COMMAND ${CMAKE_COMMAND} -DPROTOBUF_GENERATE_CPP_SCRIPT_MODE=1 -DUSE_PROTOBUF=1 -DDIR=${CMAKE_CURRENT_BINARY_DIR} -DFILENAME=${filename} -DCOMPILER_ID=${CMAKE_CXX_COMPILER_ID} -P ${ClickHouse_SOURCE_DIR}/cmake/protobuf_generate_cpp.cmake
DEPENDS ${intermediate_output})
endforeach()
add_custom_command(
OUTPUT ${intermediate_outputs}
COMMAND ${Protobuf_PROTOC_EXECUTABLE}
ARGS ${protobuf_include_path} ${protoc_args} ${abs_name}
DEPENDS ${abs_name} ${Protobuf_PROTOC_EXECUTABLE} ${PLUGIN}
COMMENT "Running C++ protocol buffer compiler on ${name}"
VERBATIM )
endforeach()
set_source_files_properties(${srcs} ${hdrs} ${all_intermediate_outputs} PROPERTIES GENERATED TRUE)
set(${SRCS} ${srcs} PARENT_SCOPE)
set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()
if (PROTOBUF_GENERATE_CPP_SCRIPT_MODE)
set (output "${DIR}/${FILENAME}")
set (intermediate_dir ${DIR}/intermediate)
set (intermediate_output "${intermediate_dir}/${FILENAME}")
if (COMPILER_ID STREQUAL "GNU")
set (pragma_push "#pragma GCC diagnostic push\n")
set (pragma_pop "#pragma GCC diagnostic pop\n")
set (pragma_disable_warnings "#pragma GCC diagnostic ignored \"-Wall\"\n"
"#pragma GCC diagnostic ignored \"-Wextra\"\n"
"#pragma GCC diagnostic ignored \"-Warray-bounds\"\n")
elseif (COMPILER_ID MATCHES "Clang")
set (pragma_push "#pragma clang diagnostic push\n")
set (pragma_pop "#pragma clang diagnostic pop\n")
set (pragma_disable_warnings "#pragma clang diagnostic ignored \"-Weverything\"\n")
endif()
if (${FILENAME} MATCHES ".*\\.h")
file(WRITE "${output}"
"#pragma once\n"
${pragma_push}
${pragma_disable_warnings}
"#include \"${intermediate_output}\"\n"
${pragma_pop}
)
else()
file(WRITE "${output}"
${pragma_disable_warnings}
"#include \"${intermediate_output}\"\n"
)
endif()
return()
endif()
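To make the wrapper scheme concrete: for a hypothetical generated header `foo.pb.h` compiled with clang, the file this script mode writes into the build directory would look roughly like the sketch below (the intermediate path is illustrative):

```
#pragma once
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#include "/path/to/build/dir/intermediate/foo.pb.h"
#pragma clang diagnostic pop
```

User code includes this clean wrapper, while the warning-heavy generated code stays in `intermediate/` with its diagnostics silenced.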
function(protobuf_generate_cpp SRCS HDRS)
set (modes cpp)
set (output_file_exts ".pb.cc" ".pb.h")
set (plugin)
protobuf_generate_cpp_impl(srcs hdrs "${modes}" "${output_file_exts}" "${plugin}" ${ARGN})
set(${SRCS} ${srcs} PARENT_SCOPE)
set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()
function(protobuf_generate_grpc_cpp SRCS HDRS)
set (modes cpp grpc)
set (output_file_exts ".pb.cc" ".pb.h" ".grpc.pb.cc" ".grpc.pb.h")
set (plugin "protoc-gen-grpc=${GRPC_CPP_PLUGIN_EXECUTABLE}")
protobuf_generate_cpp_impl(srcs hdrs "${modes}" "${output_file_exts}" "${plugin}" ${ARGN})
set(${SRCS} ${srcs} PARENT_SCOPE)
set(${HDRS} ${hdrs} PARENT_SCOPE)
endfunction()


@ -162,4 +162,10 @@ elseif (COMPILER_GCC)
add_cxx_compile_options(-Wunused)
# Warn if vector operation is not implemented via SIMD capabilities of the architecture
add_cxx_compile_options(-Wvector-operation-performance)
# XXX: gcc10 gets stuck with this option while compiling the GatherUtils code
# (anyway there are builds with clang that will warn)
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10)
add_cxx_compile_options(-Wno-sequence-point)
endif()
endif ()


@ -21,7 +21,6 @@ add_subdirectory (consistent-hashing-sumbur)
add_subdirectory (consistent-hashing)
add_subdirectory (croaring)
add_subdirectory (FastMemcpy)
add_subdirectory (grpc-cmake)
add_subdirectory (jemalloc-cmake)
add_subdirectory (libcpuid-cmake)
add_subdirectory (murmurhash)
@ -260,20 +259,17 @@ if (USE_INTERNAL_BROTLI_LIBRARY)
endif ()
if (USE_INTERNAL_PROTOBUF_LIBRARY)
if (MAKE_STATIC_LIBRARIES)
set(protobuf_BUILD_SHARED_LIBS OFF CACHE INTERNAL "" FORCE)
else ()
set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
endif ()
set(protobuf_WITH_ZLIB 0 CACHE INTERNAL "" FORCE) # actually will use zlib, but skip find
set(protobuf_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)
add_subdirectory(protobuf/cmake)
add_subdirectory(protobuf-cmake)
endif ()
if (USE_INTERNAL_HDFS3_LIBRARY)
add_subdirectory(libhdfs3-cmake)
endif ()
if (USE_INTERNAL_GRPC_LIBRARY)
add_subdirectory(grpc-cmake)
endif ()
if (USE_INTERNAL_AWS_S3_LIBRARY)
set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
set (save_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
@ -317,3 +313,5 @@ endif()
if (USE_FASTOPS)
add_subdirectory (fastops-cmake)
endif()
add_subdirectory (fmtlib-cmake)

contrib/cppkafka vendored

@ -1 +1 @@
Subproject commit 9b184d881c15cc50784b28688c7c99d3d764db24
Subproject commit f555ee36aaa74d17ca0dab3ce472070a610b2966

contrib/fmtlib vendored Submodule

@ -0,0 +1 @@
Subproject commit 297c3b2ed551a4989826fc8c4780bf533e964bd9


@ -0,0 +1,20 @@
set (SRCS
../fmtlib/src/format.cc
../fmtlib/src/os.cc
../fmtlib/include/fmt/chrono.h
../fmtlib/include/fmt/color.h
../fmtlib/include/fmt/compile.h
../fmtlib/include/fmt/core.h
../fmtlib/include/fmt/format.h
../fmtlib/include/fmt/format-inl.h
../fmtlib/include/fmt/locale.h
../fmtlib/include/fmt/os.h
../fmtlib/include/fmt/ostream.h
../fmtlib/include/fmt/posix.h
../fmtlib/include/fmt/printf.h
../fmtlib/include/fmt/ranges.h
)
add_library(fmt ${SRCS})
target_include_directories(fmt SYSTEM PUBLIC ../fmtlib/include)
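The Poco logger macros header earlier in this diff also pulls in <fmt/ostream.h>; with the fmt revision vendored here (pre-9), that makes any type with an `operator<<` usable directly as a `{}` argument, so existing stream overloads keep working. A small sketch under that assumption:

```
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <iostream>
#include <ostream>

// Hypothetical type with an existing stream operator.
struct QueryId { int value; };

std::ostream & operator<<(std::ostream & os, const QueryId & q)
{
    return os << "query-" << q.value;
}

int main()
{
    // With the pre-9 fmt vendored here, ostream-insertable types work
    // directly as {} arguments; newer fmt releases need fmt::streamed().
    std::cout << fmt::format("Canceling {}\n", QueryId{42});
}
```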

contrib/grpc vendored

@ -1 +1 @@
Subproject commit c1d176528fd8da9dd4066d16554bcd216d29033f
Subproject commit 8aea4e168e78f3eb9828080740fc8cb73d53bf79


@ -4,6 +4,8 @@ cmake_minimum_required(VERSION 3.5.1)
set(GRPC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/grpc)
set(GRPC_INCLUDE_DIR ${GRPC_SOURCE_DIR}/include/)
set(GRPC_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/grpc)
if(UNIX)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
set(_gRPC_PLATFORM_LINUX ON)
@ -14,72 +16,28 @@ if(UNIX)
endif()
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_gRPC_C_CXX_FLAGS} -w")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_gRPC_C_CXX_FLAGS} -w")
set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf")
if(gRPC_BACKWARDS_COMPATIBILITY_MODE)
add_definitions(-DGPR_BACKWARDS_COMPATIBILITY_MODE)
if (_gRPC_PLATFORM_MAC)
# some C++11 constructs not supported before OS X 10.9
set(CMAKE_OSX_DEPLOYMENT_TARGET 10.9)
endif()
if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} rt m pthread)
endif()
if (_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC)
set(_gRPC_CORE_NOSTDCXX_FLAGS -fno-exceptions -fno-rtti)
else()
set(_gRPC_CORE_NOSTDCXX_FLAGS "")
endif()
# address_sorting.cmake
include(${GRPC_SOURCE_DIR}/cmake/address_sorting.cmake)
set(_gRPC_ADDRESS_SORTING_INCLUDE_DIR "${GRPC_SOURCE_DIR}/third_party/address_sorting/include")
set(_gRPC_ADDRESS_SORTING_LIBRARIES address_sorting)
# cares.cmake
set(CARES_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/cares/cares)
set(CARES_BINARY_DIR ${GRPC_BINARY_DIR}/third_party/cares/cares)
set(CARES_SHARED OFF CACHE BOOL "disable shared library")
set(CARES_STATIC ON CACHE BOOL "link cares statically")
if(gRPC_BACKWARDS_COMPATIBILITY_MODE)
# See https://github.com/grpc/grpc/issues/17255
set(HAVE_LIBNSL OFF CACHE BOOL "avoid cares dependency on libnsl")
set(CARES_SHARED ${BUILD_SHARED_LIBS} CACHE BOOL "" FORCE)
if(BUILD_SHARED_LIBS)
set(CARES_STATIC OFF CACHE BOOL "" FORCE)
else()
set(CARES_STATIC ON CACHE BOOL "" FORCE)
endif()
set(_gRPC_CARES_LIBRARIES c-ares)
add_subdirectory(${CARES_ROOT_DIR} ${CARES_BINARY_DIR})
if(TARGET c-ares)
set(_gRPC_CARES_LIBRARIES c-ares)
endif()
# protobuf.cmake
set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../protobuf)
set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
if(NOT protobuf_WITH_ZLIB)
set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.")
endif()
set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")
set(_gRPC_PROTOBUF_LIBRARIES libprotobuf)
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc)
set(_gRPC_PROTOBUF_PROTOC protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
set(_gRPC_PROTOBUF_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}/src")
if(gRPC_INSTALL)
message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"")
set(gRPC_INSTALL FALSE)
endif()
# ssl.cmake
set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../ssl)
if(TARGET ssl)
set(_gRPC_SSL_LIBRARIES ssl)
set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
endif()
# upb.cmake
set(UPB_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/upb)
@ -87,23 +45,20 @@ set(_gRPC_UPB_INCLUDE_DIR "${UPB_ROOT_DIR}")
set(_gRPC_UPB_GRPC_GENERATED_DIR "${GRPC_SOURCE_DIR}/src/core/ext/upb-generated")
set(_gRPC_UPB_LIBRARIES upb)
# protobuf.cmake
set(_gRPC_PROTOBUF_INCLUDE_DIR ${Protobuf_INCLUDE_DIR})
set(_gRPC_PROTOBUF_LIBRARIES ${Protobuf_LIBRARY})
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${Protobuf_PROTOC_LIBRARY})
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})
# ssl.cmake
set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
set(_gRPC_SSL_LIBRARIES ${OPENSSL_SSL_LIBRARY})
# zlib.cmake
set(ZLIB_ROOT_DIR ${GRPC_SOURCE_DIR}/../zlib-ng)
include_directories("${ZLIB_ROOT_DIR}")
## add_subdirectory(${ZLIB_ROOT_DIR} ${ZLIB_ROOT_DIR})
if(TARGET zlibstatic)
set(_gRPC_ZLIB_LIBRARIES zlibstatic)
set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}" "${GRPC_SOURCE_DIR}/third_party/zlib")
endif()
set(_gRPC_ZLIB_INCLUDE_DIR ${ZLIB_INCLUDE_DIR})
set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} rt m pthread)
endif()
add_library(address_sorting
${GRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting.c
@ -112,7 +67,6 @@ add_library(address_sorting
)
target_include_directories(address_sorting
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${GRPC_SOURCE_DIR}/include>
PRIVATE ${GRPC_SOURCE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
@ -124,11 +78,11 @@ target_include_directories(address_sorting
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(address_sorting
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
)
add_library(gpr
${GRPC_SOURCE_DIR}/src/core/lib/gpr/alloc.cc
${GRPC_SOURCE_DIR}/src/core/lib/gpr/atm.cc
@ -184,6 +138,7 @@ target_include_directories(gpr
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(gpr
${_gRPC_ALLTARGETS_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
@ -569,7 +524,6 @@ add_library(grpc
${GRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc
)
target_compile_options(grpc PUBLIC -fpermissive)
target_include_directories(grpc
PUBLIC ${GRPC_INCLUDE_DIR}
@ -583,8 +537,8 @@ target_include_directories(grpc
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(grpc
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_SSL_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_CARES_LIBRARIES}
@ -597,352 +551,6 @@ if (_gRPC_PLATFORM_MAC)
target_link_libraries(grpc "-framework CoreFoundation")
endif()
add_library(grpc_cronet
${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/plugin_registry/grpc_cronet_plugin_registry.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/init.cc
${GRPC_SOURCE_DIR}/src/core/lib/avl/avl.cc
${GRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/handshaker.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/handshaker_registry.cc
${GRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/compression_args.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression_gzip.cc
${GRPC_SOURCE_DIR}/src/core/lib/compression/stream_compression_identity.cc
${GRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc
${GRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc
${GRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc
${GRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc
${GRPC_SOURCE_DIR}/src/core/lib/http/parser.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epollex_linux.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor/mpmcqueue.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/executor/threadpool.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/is_epollexclusive_available.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/resource_quota.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/time_averaged_stats.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_custom.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_uv.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/udp_server.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc
${GRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc
${GRPC_SOURCE_DIR}/src/core/lib/json/json.cc
${GRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc
${GRPC_SOURCE_DIR}/src/core/lib/json/json_string.cc
${GRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_intern.cc
${GRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/call.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/server.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/version.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/byte_stream.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/metadata.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/static_metadata.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/status_metadata.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc
${GRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc
${GRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc
${GRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/transport/cronet_api_dummy.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/cronet/transport/cronet_transport.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/context_list.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_table.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_map.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/alpn/alpn.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/message_compress_filter.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/connector.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/health/health_check_client.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_connect_handshaker.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy_registry.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/parse_address.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/proxy_mapper.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver_registry.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver_result_parsing.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolving_lb_policy.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/server_address.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc
${GRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/decode.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/encode.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/msg.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/port.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/table.c
${GRPC_SOURCE_DIR}/third_party/upb/upb/upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/gogoproto/gogo.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c
${GRPC_SOURCE_DIR}/src/core/lib/http/httpcli_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/alts_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials_metadata.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/credentials_generic.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/google_default_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/iam/iam_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/json_token.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_verifier.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/local/local_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/ssl/ssl_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/spiffe_credentials.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/alts/alts_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_linux.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/local/local_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils_config.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/target_authority_table.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc
${GRPC_SOURCE_DIR}/src/core/lib/surface/init_secure.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/aes_gcm.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/gsec.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_counter.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_crypter.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_frame_protector.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/frame_handler.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_handshaker_client.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_shared_resource.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc
${GRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_utils.cc
${GRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c
${GRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
${GRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/authority.cc
${GRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc
${GRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc
${GRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc
${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc
${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_cache.cc
${GRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc
${GRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security.cc
${GRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc
)
target_include_directories(grpc_cronet
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${GRPC_SOURCE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_UPB_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(grpc_cronet
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_SSL_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_CARES_LIBRARIES}
${_gRPC_ADDRESS_SORTING_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
gpr
)
if (_gRPC_PLATFORM_MAC)
target_link_libraries(grpc_cronet "-framework CoreFoundation")
endif()
add_library(grpc_unsecure
${GRPC_SOURCE_DIR}/src/core/lib/surface/init.cc
@ -1249,19 +857,18 @@ add_library(grpc_unsecure
${GRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
)
target_include_directories(grpc_unsecure
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${GRPC_SOURCE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_UPB_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(grpc_unsecure
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
@ -1271,10 +878,12 @@ target_link_libraries(grpc_unsecure
${_gRPC_PROTOBUF_LIBRARIES}
gpr
)
if (_gRPC_PLATFORM_MAC)
target_link_libraries(grpc_unsecure "-framework CoreFoundation")
endif()
add_library(grpc++
${GRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc
${GRPC_SOURCE_DIR}/src/cpp/client/secure_credentials.cc
@ -1331,8 +940,6 @@ add_library(grpc++
${GRPC_SOURCE_DIR}/src/cpp/codegen/codegen_init.cc
)
target_compile_options(grpc++ PUBLIC -w)
target_include_directories(grpc++
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${GRPC_SOURCE_DIR}
@ -1344,10 +951,9 @@ target_include_directories(grpc++
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(grpc++
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_SSL_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
@ -1355,6 +961,7 @@ target_link_libraries(grpc++
gpr
)
add_library(grpc++_unsecure
${GRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc
${GRPC_SOURCE_DIR}/src/cpp/common/insecure_create_auth_context.cc
@ -1404,21 +1011,19 @@ add_library(grpc++_unsecure
${GRPC_SOURCE_DIR}/src/cpp/codegen/codegen_init.cc
)
target_compile_options(grpc++_unsecure PUBLIC -w)
target_include_directories(grpc++_unsecure
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${GRPC_SOURCE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_UPB_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(grpc++_unsecure
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
@ -1427,6 +1032,16 @@ target_link_libraries(grpc++_unsecure
grpc_unsecure
)
if (_gRPC_SSL_INCLUDE_DIR AND _gRPC_SSL_LIBRARIES)
add_library(libgrpc ALIAS grpc)
add_library(libgrpc++ ALIAS grpc++)
else()
add_library(libgrpc ALIAS grpc_unsecure)
add_library(libgrpc++ ALIAS grpc++_unsecure)
endif()
add_library(grpc_plugin_support
${GRPC_SOURCE_DIR}/src/compiler/cpp_generator.cc
)
@ -1436,23 +1051,22 @@ target_include_directories(grpc_plugin_support
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_UPB_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(grpc_plugin_support
${_gRPC_PROTOBUF_PROTOC_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
)
add_executable(grpc_cpp_plugin
${GRPC_SOURCE_DIR}/src/compiler/cpp_plugin.cc
)
@ -1461,16 +1075,13 @@ target_include_directories(grpc_cpp_plugin
PRIVATE ${GRPC_SOURCE_DIR}
PUBLIC ${GRPC_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_UPB_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_GRPC_GENERATED_DIR}
PRIVATE ${_gRPC_UPB_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(grpc_cpp_plugin
@ -1479,4 +1090,3 @@ target_link_libraries(grpc_cpp_plugin
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_plugin_support
)


@ -9,6 +9,17 @@ if (ENABLE_JEMALLOC)
option (USE_INTERNAL_JEMALLOC "Use internal jemalloc library" ${NOT_UNBUNDLED})
if (USE_INTERNAL_JEMALLOC)
# ThreadPool selects jobs randomly, so a thread may have performed a
# memory-heavy task earlier and then stay inactive for a while; until it
# becomes active again, that memory is not freed, because by default each
# thread has its own arena, although there should be no more than
# 4*CPU arenas (see the opt.narenas description).
#
# Enabling percpu_arena limits the number of arenas to the number of CPUs,
# so this problem should go away.
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu" CACHE STRING "Change default configuration string of JEMalloc" )
message (STATUS "jemalloc malloc_conf: ${JEMALLOC_CONFIG_MALLOC_CONF}")
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc")
set (SRCS
@ -52,11 +63,19 @@ if (ENABLE_JEMALLOC)
add_library(jemalloc ${SRCS})
target_include_directories(jemalloc PRIVATE ${LIBRARY_DIR}/include)
target_include_directories(jemalloc SYSTEM PUBLIC include)
set(JEMALLOC_INCLUDE)
if (ARCH_AMD64)
target_include_directories(jemalloc SYSTEM PUBLIC include_linux_x86_64)
set(JEMALLOC_INCLUDE_PREFIX include_linux_x86_64)
elseif (ARCH_ARM)
target_include_directories(jemalloc SYSTEM PUBLIC include_linux_aarch64)
set(JEMALLOC_INCLUDE_PREFIX include_linux_aarch64)
endif ()
target_include_directories(jemalloc SYSTEM PUBLIC
${JEMALLOC_INCLUDE_PREFIX})
configure_file(${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h.in
${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h)
target_include_directories(jemalloc SYSTEM PRIVATE
${CMAKE_CURRENT_BINARY_DIR}/${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal)
target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE)
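To confirm the effect of the baked-in `percpu_arena:percpu` setting described above, jemalloc's introspection API can be queried at runtime. A hedged sketch, assuming an unprefixed jemalloc build is linked in:

```
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main()
{
    unsigned narenas = 0;
    size_t sz = sizeof(narenas);
    // "opt.narenas" is the arena count chosen at startup; with
    // "percpu_arena:percpu" baked into malloc_conf it should equal
    // the number of CPUs rather than 4*CPU.
    if (mallctl("opt.narenas", &narenas, &sz, nullptr, 0) == 0)
        std::printf("jemalloc arenas: %u\n", narenas);
    return 0;
}
```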


@ -5,3 +5,4 @@ Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution.


@ -369,7 +369,7 @@
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1


@ -5,3 +5,4 @@ Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution.


@ -360,7 +360,7 @@
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1


@ -0,0 +1,13 @@
set(protobuf_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/protobuf)
set(protobuf_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/protobuf)
set(protobuf_WITH_ZLIB 0 CACHE INTERNAL "" FORCE) # actually will use zlib, but skip find
set(protobuf_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)
if (MAKE_STATIC_LIBRARIES)
set(protobuf_BUILD_SHARED_LIBS OFF CACHE INTERNAL "" FORCE)
else ()
set(protobuf_BUILD_SHARED_LIBS ON CACHE INTERNAL "" FORCE)
endif ()
add_subdirectory(${protobuf_SOURCE_DIR}/cmake ${protobuf_BINARY_DIR})


@ -76,6 +76,14 @@ directory.
More complex setup is possible, but inconvenient and requires some scripting.
See `manual-run.sh` for inspiration.
#### Compare two published releases
Use `compare-releases.sh`. It will download and extract static + dbg + test
packages for both releases, and then call the main comparison script
`compare.sh`, starting from the `configure` stage.
```
compare-releases.sh 19.16.19.85 20.4.2.9
```
#### Statistical considerations
Generating randomization distribution for medians is tricky. Suppose we have N


@ -0,0 +1,82 @@
#!/bin/bash
set -ex
set -o pipefail
trap "exit" INT TERM
trap 'kill $(jobs -pr) ||:' EXIT
left_version=${1}
right_version=${2}
if [ "$left_version" == "" ] || [ "$right_version" == "" ]
then
>&2 echo Usage: $(basename "$0") left_version right_version
exit 1
fi
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
repo_dir=${repo_dir:-$(readlink -f "$script_dir/../../..")}
function download_package() # (version, path)
{
version="$1"
path="$2"
cd "$path"
wget -nv -nd -nc "https://repo.clickhouse.tech/deb/stable/main/clickhouse-common-static-dbg_${version}_amd64.deb" ||:
wget -nv -nd -nc "https://repo.clickhouse.tech/deb/stable/main/clickhouse-common-static_${version}_amd64.deb" ||:
wget -nv -nd -nc "https://repo.clickhouse.tech/deb/stable/main/clickhouse-test_${version}_all.deb" ||:
mkdir tmp ||:
for x in *.deb; do dpkg-deb -x "$x" tmp ; done
mv tmp/usr/bin/clickhouse ./clickhouse
mkdir .debug
mv tmp/usr/lib/debug/usr/bin/clickhouse .debug/clickhouse
mv tmp/usr/share/clickhouse-test/performance .
ln -s clickhouse clickhouse-local
ln -s clickhouse clickhouse-client
ln -s clickhouse clickhouse-server
rm -rf tmp
}
function download
{
rm -r left right db0 ||:
mkdir left right db0 ||:
"$script_dir/download.sh" ||: &
download_package "$left_version" left &
download_package "$right_version" right &
wait
rm -rf {right,left}/tmp
}
function configure
{
# Configs
cp -av "$script_dir/config" right
cp -av "$script_dir/config" left
cp -av "$repo_dir"/programs/server/config* right/config
cp -av "$repo_dir"/programs/server/user* right/config
cp -av "$repo_dir"/programs/server/config* left/config
cp -av "$repo_dir"/programs/server/user* left/config
}
function run
{
left/clickhouse-local --query "select * from system.build_options format PrettySpace" | sed 's/ *$//' | fold -w 80 -s > left-commit.txt
right/clickhouse-local --query "select * from system.build_options format PrettySpace" | sed 's/ *$//' | fold -w 80 -s > right-commit.txt
PATH=right:"$PATH" \
CHPC_TEST_PATH=right/performance \
stage=configure \
"$script_dir/compare.sh" &> >(tee compare.log)
}
download
configure
run
rm output.7z
7z a output.7z ./*.{log,tsv,html,txt,rep,svg} {right,left}/{performance,db/preprocessed_configs}

View File

@ -155,9 +155,6 @@ function run_tests
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
# the grep is to filter out set -x output and keep only time output
{ time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue
# The test completed with zero status, so we treat stderr as warnings
mv "$test_name-err.log" "$test_name-warn.log"
done
unset TIMEFORMAT
@ -217,13 +214,8 @@ function get_profiles
clickhouse-client --port 9002 --query "select 1"
}
# Build and analyze randomization distribution for all queries.
function analyze_queries
function build_log_column_definitions
{
rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.tsv ./*-report.tsv raw-queries.tsv ||:
rm -rfv analyze ||:
mkdir analyze ||:
# FIXME This loop builds column definitions from TSVWithNamesAndTypes in an
# absolutely atrocious way. This should be done by the file() function itself.
for x in {right,left}-{addresses,{query,query-thread,trace,metric}-log}.tsv
@ -233,6 +225,16 @@ do
<(sed -n '2{s/\t/\n/g;p;q}' "$x" ) \
| tr '\n' ', ' | sed 's/,$//' > "$x.columns"
done
}
# Build and analyze randomization distribution for all queries.
function analyze_queries
{
rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.tsv ./*-report.tsv raw-queries.tsv ||:
rm -rf analyze ||:
mkdir analyze analyze/tmp ||:
build_log_column_definitions
# Split the raw test output into files suitable for analysis.
IFS=$'\n'
@ -272,12 +274,13 @@ create table query_metrics engine File(TSV, -- do not add header -- will parse w
, query_runs.time
] metrics
from (
select *, 0 version from left_query_log
select query_duration_ms, memory_usage, query_id, 0 version from left_query_log
union all
select *, 1 version from right_query_log
select query_duration_ms, memory_usage, query_id, 1 version from right_query_log
) query_logs
right join query_runs
using (query_id, version)
order by test, query_index
;
"
@ -291,8 +294,8 @@ query_index=1
IFS=$'\n'
for prefix in $(cut -f1,2 "analyze/query-run-metrics.tsv" | sort | uniq)
do
file="analyze/q$query_index.tmp"
grep -F "$prefix " "analyze/query-run-metrics.tsv" > "$file" &
file="analyze/tmp/$(echo "$prefix" | sed 's/\t/_/g').tsv"
grep "^$prefix " "analyze/query-run-metrics.tsv" > "$file" &
printf "%s\0\n" \
"clickhouse-local \
--file \"$file\" \
@ -301,23 +304,23 @@ do
>> \"analyze/query-reports.tsv\"" \
2>> analyze/errors.log \
>> analyze/commands.txt
query_index=$((query_index + 1))
done
wait
unset IFS
parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt
parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log
}
# Analyze results
function report
{
rm -r report ||:
mkdir report ||:
mkdir report report/tmp ||:
rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv ||:
build_log_column_definitions
cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:
@ -329,7 +332,8 @@ create view query_display_names as select * from
create table query_metric_stats engine File(TSVWithNamesAndTypes,
'report/query-metric-stats.tsv') as
select *, metric_name
select metric_name, left, right, diff, stat_threshold, test, query_index,
query_display_name
from file ('analyze/query-reports.tsv', TSV, 'left Array(float),
right Array(float), diff Array(float), stat_threshold Array(float),
test text, query_index int') reports
@ -375,7 +379,8 @@ create table queries_old_format engine File(TSVWithNamesAndTypes, 'queries.rep')
-- save all test runs as JSON for the new comparison page
create table all_query_runs_json engine File(JSON, 'report/all-query-runs.json') as
select test, query_display_name query,
select test, query_index, query_display_name query,
left, right, diff, stat_threshold, report_threshold,
versions_runs[1] runs_left, versions_runs[2] runs_right
from (
select
@ -391,7 +396,17 @@ create table all_query_runs_json engine File(JSON, 'report/all-query-runs.json')
)
group by test, query_index
) runs
left join query_display_names using (test, query_index)
left join query_display_names
on runs.test = query_display_names.test
and runs.query_index = query_display_names.query_index
left join file('analyze/report-thresholds.tsv',
TSV, 'test text, report_threshold float') thresholds
on runs.test = thresholds.test
left join query_metric_stats
on runs.test = query_metric_stats.test
and runs.query_index = query_metric_stats.query_index
where
query_metric_stats.metric_name = 'server_time'
;
create table changed_perf_tsv engine File(TSV, 'report/changed-perf.tsv') as
@ -462,9 +477,10 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts
order by test, query_index;
" 2> >(tee -a report/errors.log 1>&2)
# Prepare source data for metrics and flamegraphs for unstable queries.
for version in {right,left}
do
do
rm -rf data
clickhouse-local --query "
create view queries_for_flamegraph as
@ -527,7 +543,13 @@ create view trace_log as select *
from file('$version-trace-log.tsv', TSVWithNamesAndTypes,
'$(cat "$version-trace-log.tsv.columns")');
create view addresses_src as select *
create view addresses_src as select addr,
-- Some functions change name between builds, e.g. '__clone' or 'clone' or
-- even '__GI__clone@@GLIBC_2.32'. This breaks differential flame graphs, so
-- filter them out here.
[name, 'clone.S (filtered by script)', 'pthread_cond_timedwait (filtered by script)']
-- this line is a subscript operator of the above array
[1 + multiSearchFirstIndex(name, ['clone.S', 'pthread_cond_timedwait'])] name
from file('$version-addresses.tsv', TSVWithNamesAndTypes,
'$(cat "$version-addresses.tsv.columns")');
@ -539,7 +561,8 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
select
test, query_index, query_id,
count() value,
joinGet(addresses_join_$version, 'name', arrayJoin(trace)) metric
joinGet(addresses_join_$version, 'name', arrayJoin(trace))
|| '(' || toString(trace_type) || ')' metric
from trace_log
join unstable_query_runs using query_id
group by test, query_index, query_id, metric
@ -547,7 +570,7 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
;
create table metric_deviation engine File(TSVWithNamesAndTypes,
'metric-deviation.$version.rep') as
'report/metric-deviation.$version.tsv') as
-- first goes the key used to split the file with grep
select test, query_index, query_display_name,
d, q, metric
@ -562,16 +585,18 @@ create table metric_devation engine File(TSVWithNamesAndTypes,
group by test, query_index, metric
having d > 0.5
) metrics
left join unstable_query_runs using (test, query_index)
left join query_display_names using (test, query_index)
order by test, query_index, d desc
;
create table stacks engine File(TSV, 'stacks.$version.rep') as
create table stacks engine File(TSV, 'report/stacks.$version.tsv') as
select
-- first goes the key used to split the file with grep
test, query_index, any(query_display_name),
test, query_index, trace_type, any(query_display_name),
-- next go the stacks in flamegraph format: 'func1;...;funcN count'
arrayStringConcat(
arrayMap(x -> joinGet(addresses_join_$version, 'name', x),
arrayMap(
addr -> joinGet(addresses_join_$version, 'name', addr),
arrayReverse(trace)
),
';'
@ -579,30 +604,58 @@ create table stacks engine File(TSV, 'stacks.$version.rep') as
count() c
from trace_log
join unstable_query_runs using query_id
group by test, query_index, trace
group by test, query_index, trace_type, trace
order by test, query_index, trace_type, trace
;
" 2> >(tee -a report/errors.log 1>&2) # do not run in parallel because they use the same data dir for StorageJoins which leads to weird errors.
done
wait
# Create per-query flamegraphs and files with metrics
# Create per-query flamegraphs
IFS=$'\n'
for version in {right,left}
do
for query in $(cut -d' ' -f1,2,3 "stacks.$version.rep" | sort | uniq)
for query in $(cut -d' ' -f1-4 "report/stacks.$version.tsv" | sort | uniq)
do
query_file=$(echo "$query" | cut -c-120 | sed 's/[/ ]/_/g')
echo "$query_file" >> report/query-files.txt
# Build separate .svg flamegraph for each query.
# -F is somewhat unsafe because it might match something other than the
# beginning of the string, but this is unlikely, and escaping the query
# for grep is a pain.
grep -F "$query " "report/stacks.$version.tsv" \
| cut -f 5- \
| sed 's/\t/ /g' \
| tee "report/tmp/$query_file.stacks.$version.tsv" \
| ~/fg/flamegraph.pl --hash > "$query_file.$version.svg" &
done
done
wait
unset IFS
# Create differential flamegraphs.
IFS=$'\n'
for query_file in $(cat report/query-files.txt)
do
~/fg/difffolded.pl "report/tmp/$query_file.stacks.left.tsv" \
"report/tmp/$query_file.stacks.right.tsv" \
| tee "report/tmp/$query_file.stacks.diff.tsv" \
| ~/fg/flamegraph.pl > "$query_file.diff.svg" &
done
unset IFS
wait
# Create per-query files with metrics. Note that the key is different from flamegraphs.
IFS=$'\n'
for version in {right,left}
do
for query in $(cut -d' ' -f1-3 "report/metric-deviation.$version.tsv" | sort | uniq)
do
query_file=$(echo "$query" | cut -c-120 | sed 's/[/ ]/_/g')
# Build separate .svg flamegraph for each query.
grep -F "$query " "stacks.$version.rep" \
| cut -d' ' -f 2- \
| sed 's/\t/ /g' \
| tee "$query_file.stacks.$version.rep" \
| ~/fg/flamegraph.pl > "$query_file.$version.svg" &
# Copy metric stats into separate files as well.
grep -F "$query " "metric-deviation.$version.rep" \
| cut -f2- > "$query_file.$version.metrics.rep" &
# Ditto the above comment about -F.
grep -F "$query " "report/metric-deviation.$version.tsv" \
| cut -f4- > "$query_file.$version.metrics.rep" &
done
done
wait

View File

@ -46,7 +46,13 @@ function download
done
mkdir ~/fg ||:
cd ~/fg && wget -nv -nd -c "https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl" && chmod +x ~/fg/flamegraph.pl &
(
cd ~/fg
wget -nv -nd -c "https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl"
wget -nv -nd -c "https://raw.githubusercontent.com/brendangregg/FlameGraph/master/difffolded.pl"
chmod +x ~/fg/difffolded.pl
chmod +x ~/fg/flamegraph.pl
) &
wait
}

View File

@ -81,8 +81,13 @@ if [ "$REF_PR" == "" ]; then echo Reference PR is not specified ; exit 1 ; fi
fi
) | tee right-commit.txt
# Prepare the list of changed tests for use by compare.sh
git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"~ master)" -- tests/performance | tee changed-tests.txt
if [ "$PR_TO_TEST" != "0" ]
then
# Prepare the list of tests changed in the PR for use by compare.sh. Compare to
# merge base, because master might be far in the future and have unrelated test
# changes.
git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST" master)" -- tests/performance | tee changed-tests.txt
fi
# Set python output encoding so that we can print queries with Russian letters.
export PYTHONIOENCODING=utf-8

View File

@ -11,6 +11,9 @@ import string
import time
import traceback
def tsv_escape(s):
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
stage_start_seconds = time.perf_counter()
def report_stage_end(stage_name):
@ -112,8 +115,9 @@ for t in tables:
try:
res = c.execute("select 1 from {} limit 1".format(t))
except:
print('skipped\t' + traceback.format_exception_only(*sys.exc_info()[:2])[-1])
traceback.print_exc()
exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
skipped_message = ' '.join(exception_message.split('\n')[:2])
print(f'skipped\t{tsv_escape(skipped_message)}')
sys.exit(0)
report_stage_end('preconditions')
@ -135,9 +139,6 @@ for c in connections:
report_stage_end('fill')
# Run test queries
def tsv_escape(s):
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
test_query_templates = [q.text for q in root.findall('query')]
test_queries = substitute_parameters(test_query_templates)
@ -162,6 +163,8 @@ for query_index, q in enumerate(test_queries):
prewarm_id = f'{query_prefix}.prewarm0'
res = c.execute(q, query_id = prewarm_id)
print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
except KeyboardInterrupt:
raise
except:
# If prewarm fails for some query -- skip it, and try to test the others.
# This might happen if the new test introduces some function that the

View File

@ -182,6 +182,16 @@ if args.report == 'main':
print_tested_commits()
run_error_rows = tsvRows('run-errors.tsv')
error_tests += len(run_error_rows)
printSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
error_tests += len(slow_on_client_rows)
printSimpleTable('Slow on client',
['Client time, s', 'Server time, s', 'Ratio', 'Query'],
slow_on_client_rows)
def print_changes():
rows = tsvRows('report/changed-perf.tsv')
if not rows:
@ -221,12 +231,6 @@ if args.report == 'main':
print_changes()
slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
error_tests += len(slow_on_client_rows)
printSimpleTable('Slow on client',
['Client time, s', 'Server time, s', 'Ratio', 'Query'],
slow_on_client_rows)
def print_unstable_queries():
global unstable_queries
global very_unstable_queries
@ -265,10 +269,6 @@ if args.report == 'main':
print_unstable_queries()
run_error_rows = tsvRows('run-errors.tsv')
error_tests += len(run_error_rows)
printSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
printSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)

View File

@ -1,6 +1,6 @@
---
toc_priority: 62
toc_title: Overview of ClickHouse Architecture
toc_title: Architecture Overview
---
# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}

View File

@ -1,6 +1,6 @@
---
toc_priority: 63
toc_title: Browse Source Code
toc_priority: 71
toc_title: Source Code
---
# Browse ClickHouse Source Code {#browse-clickhouse-source-code}

View File

@ -1,19 +1,17 @@
---
toc_priority: 61
toc_title: The Beginner ClickHouse Developer Instruction
toc_title: For Beginners
---
# The Beginner ClickHouse Developer Instruction
Building ClickHouse is supported on Linux, FreeBSD and Mac OS X.
# If You Use Windows {#if-you-use-windows}
If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.
# If You Use a 32-bit System {#if-you-use-a-32-bit-system}
ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system and you can continue reading.
# Creating a Repository on GitHub {#creating-a-repository-on-github}
## Creating a Repository on GitHub {#creating-a-repository-on-github}
To start working with ClickHouse repository you will need a GitHub account.
@ -33,7 +31,7 @@ To do that in Ubuntu you would run in the command line terminal:
A brief manual on using Git can be found here: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf.
For a detailed manual on Git see https://git-scm.com/book/en/v2.
# Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}
## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}
Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine.
@ -77,7 +75,7 @@ You can also add original ClickHouse repos address to your local repository t
After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.
## Working with Submodules {#working-with-submodules}
### Working with Submodules {#working-with-submodules}
Working with submodules in Git can be painful. The following commands will help to manage them:
@ -107,7 +105,7 @@ The next commands would help you to reset all submodules to the initial state (!
git submodule foreach git submodule foreach git reset --hard
git submodule foreach git submodule foreach git clean -xfd
# Build System {#build-system}
## Build System {#build-system}
ClickHouse uses CMake and Ninja for building.
@ -127,11 +125,11 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta
Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.
# Optional External Libraries {#optional-external-libraries}
## Optional External Libraries {#optional-external-libraries}
ClickHouse uses several external libraries for building. None of them needs to be installed separately, as they are built together with ClickHouse from the sources located in the submodules. You can check the list in `contrib`.
# C++ Compiler {#c-compiler}
## C++ Compiler {#c-compiler}
GCC starting from version 9 and Clang version 8 or above are supported for building ClickHouse.
@ -145,7 +143,7 @@ Mac OS X build is supported only for Clang. Just run `brew install llvm`
If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended.
# The Building Process {#the-building-process}
## The Building Process {#the-building-process}
Now that you are ready to build ClickHouse we recommend you to create a separate directory `build` inside `ClickHouse` that will contain all of the build artefacts:
@ -202,7 +200,7 @@ Upon successful build you get an executable file `ClickHouse/<build_dir>/program
ls -l programs/clickhouse
# Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse}
## Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse}
To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run:
@ -229,7 +227,7 @@ You can also run your custom-built ClickHouse binary with the config file from t
sudo service clickhouse-server stop
sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml
# IDE (Integrated Development Environment) {#ide-integrated-development-environment}
## IDE (Integrated Development Environment) {#ide-integrated-development-environment}
If you do not know which IDE to use, we recommend that you use CLion. CLion is commercial software, but it offers 30 days free trial period. It is also free of charge for students. CLion can be used both on Linux and on Mac OS X.
@ -239,7 +237,7 @@ As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate
Just in case, it is worth mentioning that CLion creates the `build` path on its own, selects `debug` as the build type on its own, uses the version of CMake that is defined in CLion rather than the one installed by you, and finally uses `make` to run build tasks instead of `ninja`. This is normal behaviour, just keep that in mind to avoid confusion.
# Writing Code {#writing-code}
## Writing Code {#writing-code}
The description of ClickHouse architecture can be found here: https://clickhouse.tech/docs/en/development/architecture/
@ -249,7 +247,7 @@ Writing tests: https://clickhouse.tech/docs/en/development/tests/
List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy_tasks_sorted_en.md
# Test Data {#test-data}
## Test Data {#test-data}
Developing ClickHouse often requires loading realistic datasets. It is particularly important for performance testing. We have a specially prepared set of anonymized data from Yandex.Metrica. It additionally requires some 3GB of free disk space. Note that this data is not required to accomplish most of the development tasks.
@ -272,7 +270,7 @@ Developing ClickHouse often requires loading realistic datasets. It is particula
clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv
clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv
# Creating Pull Request {#creating-pull-request}
## Creating Pull Request {#creating-pull-request}
Navigate to your fork repository in GitHub's UI. If you have been developing in a branch, you need to select that branch. There will be a “Pull request” button located on the screen. In essence, this means “create a request for accepting my changes into the main repository”.

View File

@ -1,6 +1,8 @@
---
toc_folder_title: Engines
toc_priority: 25
toc_title: hidden
toc_hidden: true
---
{## [Original article](https://clickhouse.tech/docs/en/engines/) ##}

View File

@ -1,6 +1,8 @@
---
toc_folder_title: F.A.Q.
toc_priority: 76
toc_title: hidden
toc_hidden: true
---

View File

@ -404,6 +404,36 @@ Possible values:
Default value: 0.
## any_join_distinct_right_table_keys {#any_join_distinct_right_table_keys}
Enables legacy ClickHouse server behavior in `ANY INNER|LEFT JOIN` operations.
!!! note "Warning"
Use this setting only for the purpose of backward compatibility if your use cases depend on legacy `JOIN` behavior.
When the legacy behavior is enabled:
- Results of `t1 ANY LEFT JOIN t2` and `t2 ANY RIGHT JOIN t1` operations are not equal because ClickHouse uses the logic with many-to-one left-to-right table keys mapping.
- Results of `ANY INNER JOIN` operations contain all rows from the left table like the `SEMI LEFT JOIN` operations do.
When the legacy behavior is disabled:
- Results of `t1 ANY LEFT JOIN t2` and `t2 ANY RIGHT JOIN t1` operations are equal because ClickHouse uses the logic which provides one-to-many keys mapping in `ANY RIGHT JOIN` operations.
- Results of `ANY INNER JOIN` operations contain one row per key from both left and right tables.
Possible values:
- 0 — Legacy behavior is disabled.
- 1 — Legacy behavior is enabled.
Default value: 0.
See also:
- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)
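As a minimal sketch of the difference (the table names, engine choice, and data below are illustrative assumptions, not part of this changeset):

```sql
CREATE TABLE t1 (k UInt32, v String) ENGINE = Memory;
CREATE TABLE t2 (k UInt32, v String) ENGINE = Memory;
INSERT INTO t1 VALUES (1, 'a'), (1, 'b');
INSERT INTO t2 VALUES (1, 'x');

-- Legacy behavior: ANY INNER JOIN keeps all matching left rows,
-- like SEMI LEFT JOIN, so this returns two rows.
SET any_join_distinct_right_table_keys = 1;
SELECT * FROM t1 ANY INNER JOIN t2 USING (k);

-- Default behavior: one row per key, so this returns one row.
SET any_join_distinct_right_table_keys = 0;
SELECT * FROM t1 ANY INNER JOIN t2 USING (k);
```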
## max\_block\_size {#setting-max_block_size}
In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for the block size (in a count of rows) to load from tables. The block size shouldn't be too small, so that the per-block expenditures stay negligible, but also not too large, so that a query with LIMIT completes quickly after the first block is processed. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.
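For illustration, a query-level override might look like this (a sketch; the numbers are arbitrary):

```sql
-- Read the source in smaller blocks of at most 4096 rows each.
SELECT max(number)
FROM numbers(1000000)
SETTINGS max_block_size = 4096;
```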
@ -1265,4 +1295,63 @@ Possible values:
Default value: 16.
## low_cardinality_max_dictionary_size {#low_cardinality_max_dictionary_size}
Sets a maximum size in rows of a shared global dictionary for the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type that can be written to a storage file system. This setting prevents issues with RAM in case of unlimited dictionary growth. Any data that cannot be encoded due to the maximum dictionary size limit is written by ClickHouse in the ordinary way.
Possible values:
- Any positive integer.
Default value: 8192.
## low_cardinality_use_single_dictionary_for_part {#low_cardinality_use_single_dictionary_for_part}
Turns on or turns off using a single dictionary for the data part.
By default, the ClickHouse server monitors the size of dictionaries, and if a dictionary overflows, the server starts to write the next one. To prohibit creating several dictionaries, set `low_cardinality_use_single_dictionary_for_part = 1`.
Possible values:
- 1 — Creating several dictionaries for the data part is prohibited.
- 0 — Creating several dictionaries for the data part is not prohibited.
Default value: 0.
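A hedged sketch of tuning both dictionary settings for a session (the values are arbitrary):

```sql
-- Cap the shared global dictionary at 4096 rows instead of the default 8192.
SET low_cardinality_max_dictionary_size = 4096;
-- Prohibit creating several dictionaries per data part.
SET low_cardinality_use_single_dictionary_for_part = 1;

-- Inspect the current values.
SELECT name, value
FROM system.settings
WHERE name LIKE 'low_cardinality%';
```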
## low_cardinality_allow_in_native_format {#low_cardinality_allow_in_native_format}
Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type with the [Native](../../interfaces/formats.md#native) format.
If usage of `LowCardinality` is restricted, the ClickHouse server converts `LowCardinality` columns to ordinary ones for `SELECT` queries, and converts ordinary columns to `LowCardinality` columns for `INSERT` queries.
This setting is required mainly for third-party clients that don't support the `LowCardinality` data type.
Possible values:
- 1 — Usage of `LowCardinality` is not restricted.
- 0 — Usage of `LowCardinality` is restricted.
Default value: 1.
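As an illustrative sketch, a client that cannot read `LowCardinality` columns could request plain columns in `Native` output:

```sql
-- With the setting disabled, the LowCardinality(String) column is
-- converted to a plain String in the Native output.
SELECT toLowCardinality('x') AS s
SETTINGS low_cardinality_allow_in_native_format = 0
FORMAT Native;
```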
## allow_suspicious_low_cardinality_types {#allow_suspicious_low_cardinality_types}
Allows or restricts using [LowCardinality](../../sql-reference/data-types/lowcardinality.md) with data types with fixed size of 8 bytes or less: numeric data types and `FixedString(8_bytes_or_less)`.
For small fixed values, using `LowCardinality` is usually inefficient, because ClickHouse stores a numeric index for each row. As a result:
- Disk space usage can rise.
- RAM consumption can be higher, depending on a dictionary size.
- Some functions can work slower due to extra decoding/encoding operations.
Merge times in [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)-engine tables can grow due to all the reasons described above.
Possible values:
- 1 — Usage of `LowCardinality` is not restricted.
- 0 — Usage of `LowCardinality` is restricted.
Default value: 0.
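For example (a sketch; the table name is made up), creating such a column is rejected until the setting is enabled:

```sql
-- Rejected by default with an exception, so the table is not created:
CREATE TABLE lc_small (n LowCardinality(UInt8)) ENGINE = Memory;

-- Allowed after enabling the setting:
SET allow_suspicious_low_cardinality_types = 1;
CREATE TABLE lc_small (n LowCardinality(UInt8)) ENGINE = Memory;
```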
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

View File

@ -5,11 +5,41 @@ toc_title: System Tables
# System Tables {#system-tables}
System tables are used for implementing part of the systems functionality, and for providing access to information about how the system is working.
You cant delete a system table (but you can perform DETACH).
System tables dont have files with data on the disk or files with metadata. The server creates all the system tables when it starts.
System tables are read-only.
They are located in the system database.
## Introduction
System tables provide information about:
- Server states, processes, and environment.
- Server's internal processes.
System tables:
- Located in the `system` database.
- Available only for reading data.
- Can't be dropped or altered, but can be detached.
The `metric_log`, `query_log`, `query_thread_log`, and `trace_log` system tables store their data on the storage filesystem. The other system tables store their data in RAM. The ClickHouse server creates all such system tables at startup.
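A quick, illustrative way to explore them from a client:

```sql
-- System tables live in the `system` database and are read-only.
SHOW TABLES FROM system LIKE '%log%';
```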
### Sources of System Metrics
For collecting system metrics ClickHouse server uses:
- `CAP_NET_ADMIN` capability.
- [procfs](https://en.wikipedia.org/wiki/Procfs) (only in Linux).
**procfs**
If ClickHouse server doesn't have `CAP_NET_ADMIN` capability, it tries to fall back to `ProcfsMetricsProvider`. `ProcfsMetricsProvider` allows collecting per-query system metrics (for CPU and I/O).
If procfs is supported and enabled on the system, ClickHouse server collects these metrics:
- `OSCPUVirtualTimeMicroseconds`
- `OSCPUWaitMicroseconds`
- `OSIOWaitMicroseconds`
- `OSReadChars`
- `OSWriteChars`
- `OSReadBytes`
- `OSWriteBytes`
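If collection works, these counters can be inspected through the `system.events` table (an illustrative check, not part of the original text):

```sql
-- Aggregate OS-level profile event counters collected via procfs.
SELECT event, value
FROM system.events
WHERE event LIKE 'OS%';
```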
## system.asynchronous\_metrics {#system_tables-asynchronous_metrics}

View File

@ -116,7 +116,7 @@ Check:
Check:
- The [tcp\_port\_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) setting.
- Settings for [SSL sertificates](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
- Settings for [SSL certificates](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`.

View File

@ -1,5 +1,5 @@
---
toc_priority: 52
toc_priority: 53
toc_title: AggregateFunction
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 51
toc_priority: 52
toc_title: Array(T)
---

View File

@ -0,0 +1,59 @@
---
toc_priority: 51
toc_title: LowCardinality
---
# LowCardinality Data Type {#lowcardinality-data-type}
Changes the internal representation of other data types to be dictionary-encoded.
## Syntax {#lowcardinality-syntax}
```sql
LowCardinality(data_type)
```
**Parameters**
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md), and numbers excepting [Decimal](decimal.md). `LowCardinality` is not efficient for some data types, see the [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types) setting description.
## Description {#lowcardinality-dscr}
`LowCardinality` is a superstructure that changes a data storage method and rules of data processing. ClickHouse applies [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) to `LowCardinality`-columns. Operating with dictionary encoded data significantly increases performance of [SELECT](../statements/select/index.md) queries for many applications.
The efficiency of using the `LowCardinality` data type depends on data diversity. If a dictionary contains less than 10,000 distinct values, then ClickHouse mostly shows higher efficiency of data reading and storing. If a dictionary contains more than 100,000 distinct values, then ClickHouse can perform worse in comparison with using ordinary data types.
Consider using `LowCardinality` instead of [Enum](enum.md) when working with strings. `LowCardinality` provides more flexibility in use and often reveals the same or higher efficiency.
## Example
Create a table with a `LowCardinality`-column:
```sql
CREATE TABLE lc_t
(
`id` UInt16,
`strings` LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY id
```
## Related Settings and Functions
Settings:
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
Functions:
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
## See Also
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).

View File

@ -1,5 +1,5 @@
---
toc_priority: 54
toc_priority: 55
toc_title: Nullable
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 53
toc_priority: 54
toc_title: Tuple(T1, T2, ...)
---

View File

@ -11,7 +11,7 @@ All the functions accept zero arguments or one argument.
If an argument is passed, it can be any type, and its value is not used for anything.
The only purpose of this argument is to prevent common subexpression elimination, so that two different instances of the same function return different columns with different random numbers.
## rand {#rand}
## rand, rand32 {#rand}
Returns a pseudo-random UInt32 number, evenly distributed among all UInt32-type numbers.
Uses a linear congruential generator.

View File

@ -516,7 +516,7 @@ Result:
**See Also**
- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/)
- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
- [RFC 1123](https://tools.ietf.org/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)
@ -529,4 +529,129 @@ Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it r
Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
## toLowCardinality {#tolowcardinality}
Converts the input parameter to the [LowCardinality](../data-types/lowcardinality.md) version of the same data type.
To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.
**Syntax**
```sql
toLowCardinality(expr)
```
**Parameters**
- `expr` — [Expression](../syntax.md#syntax-expressions) resulting in one of the [supported data types](../data-types/index.md#data_types).
**Returned values**
- Result of `expr`.
Type: `LowCardinality(expr_result_type)`
**Example**
Query:
```sql
SELECT toLowCardinality('1')
```
Result:
```text
┌─toLowCardinality('1')─┐
│ 1 │
└───────────────────────┘
```
## toUnixTimestamp64Milli
## toUnixTimestamp64Micro
## toUnixTimestamp64Nano
Converts a `DateTime64` to an `Int64` value with fixed sub-second precision. The input value is scaled up or down appropriately depending on its precision. Please note that the output value is a timestamp in UTC, not in the timezone of the `DateTime64`.
**Syntax**
``` sql
toUnixTimestamp64Milli(value)
```
**Parameters**
- `value` — DateTime64 value with any precision.
**Returned value**
- `value` converted to the `Int64` data type.
**Examples**
Query:
``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Milli(dt64)
```
Result:
``` text
┌─toUnixTimestamp64Milli(dt64)─┐
│ 1568650812345 │
└──────────────────────────────┘
```
``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Nano(dt64)
```
Result:
``` text
┌─toUnixTimestamp64Nano(dt64)─┐
│ 1568650812345678000 │
└─────────────────────────────┘
```
## fromUnixTimestamp64Milli
## fromUnixTimestamp64Micro
## fromUnixTimestamp64Nano
Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and an optional timezone. The input value is scaled up or down appropriately depending on its precision. Please note that the input value is treated as a UTC timestamp, not a timestamp in the given (or implicit) timezone.
**Syntax**
``` sql
fromUnixTimestamp64Milli(value [, timezone])
```
**Parameters**
- `value``Int64` value with any precision.
- `timezone``String` (optional) timezone name of the result.
**Returned value**
- `value` converted to the `DateTime64` data type.
**Examples**
``` sql
WITH CAST(1234567891011, 'Int64') AS i64
SELECT fromUnixTimestamp64Milli(i64, 'UTC')
```
``` text
┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐
│ 2009-02-13 23:31:31.011 │
└──────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) <!--hide-->

View File

@ -44,6 +44,8 @@ Modifies how matching by "join keys" is performed
!!! note "Note"
The default strictness value can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
Also, the behavior of the ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
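For instance, a session-level override is just (a sketch):

```sql
-- Make bare JOINs behave as ALL JOIN for this session.
SET join_default_strictness = 'ALL';
```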
### ASOF JOIN Usage

View File

@ -291,30 +291,30 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE minmax GRANULARITY
INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4
```
#### Поддержка для функций {#podderzhka-dlia-funktsii}
#### Поддержка для функций {#functions-support}
Условия в секции `WHERE` содержат вызовы функций, оперирующих со столбцами. Если столбец - часть индекса, ClickHouse пытается использовать индекс при выполнении функции. Для разных видов индексов, ClickHouse поддерживает различные наборы функций, которые могут использоваться индексами.
Индекс `set` используется со всеми функциями. Наборы функций для остальных индексов представлены в таблице ниже.
| Function (operator) / Index | primary key | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter |
|----------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
| [equals (=, ==)](../../../engines/table-engines/mergetree-family/mergetree.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, \<\>)](../../../engines/table-engines/mergetree-family/mergetree.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../engines/table-engines/mergetree-family/mergetree.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ |
| [notLike](../../../engines/table-engines/mergetree-family/mergetree.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [startsWith](../../../engines/table-engines/mergetree-family/mergetree.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [endsWith](../../../engines/table-engines/mergetree-family/mergetree.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [multiSearchAny](../../../engines/table-engines/mergetree-family/mergetree.md#function-multisearchany) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [in](../../../engines/table-engines/mergetree-family/mergetree.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../engines/table-engines/mergetree-family/mergetree.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (\<)](../../../engines/table-engines/mergetree-family/mergetree.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greater (\>)](../../../engines/table-engines/mergetree-family/mergetree.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [lessOrEquals (\<=)](../../../engines/table-engines/mergetree-family/mergetree.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greaterOrEquals (\>=)](../../../engines/table-engines/mergetree-family/mergetree.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../engines/table-engines/mergetree-family/mergetree.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../engines/table-engines/mergetree-family/mergetree.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
| Функция (оператор) / Индекс | primary key | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
Функции с постоянным агрументом, который меньше, чем размер ngram не могут использовать индекс `ngrambf_v1` для оптимизации запроса.

View File

@ -30,7 +30,7 @@ PyYAML==5.3.1
repackage==0.7.3
requests==2.23.0
singledispatch==3.4.0.3
six==1.14.0
six==1.15.0
soupsieve==2.0.1
termcolor==1.1.0
tornado==5.1.1

View File

@ -1,101 +1,146 @@
# 访问权限 {#access-rights}
---
toc_priority: 48
toc_title: "访问权限和账户管理"
---
用户和访问权限在用户配置中设置。 这通常是 `users.xml`.
# 访问权限和账户管理 {#access-rights}
ClickHouse支持基于[RBAC](https://en.wikipedia.org/wiki/Role-based_access_control)的访问控制管理。
用户被记录在 `users` 科。 这里是一个片段 `users.xml` 文件:
ClickHouse权限实体包括
- [用户账户](#user-account-management)
- [角色](#role-management)
- [行策略](#row-policy-management)
- [设置描述](#settings-profiles-management)
- [配额](#quotas-management)
``` xml
<!-- Users and ACL. -->
<users>
<!-- If the user name is not specified, the 'default' user is used. -->
<default>
<!-- Password could be specified in plaintext or in SHA256 (in hex format).
你可以通过如下方式配置权限实体:
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
- 通过SQL驱动的工作流方式.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
你需要[开启](#enabling-access-control)这个功能.
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
-->
<password></password>
- 服务端[配置文件](configuration-files.md) `users.xml``config.xml`.
<!-- A list of networks that access is allowed from.
Each list item has one of the following forms:
<ip> The IP address or subnet mask. For example: 198.51.100.0/24 or 2001:DB8::/32.
<host> Host name. For example: example01. A DNS query is made for verification, and all addresses obtained are compared with the address of the customer.
<host_regexp> Regular expression for host names. For example, ^example\d\d-\d\d-\d\.yandex\.ru$
To check it, a DNS PTR request is made for the client's address and a regular expression is applied to the result.
Then another DNS query is made for the result of the PTR query, and all received address are compared to the client address.
We strongly recommend that the regex ends with \.yandex\.ru$.
我们建议你使用SQL工作流的方式。当然配置的方式也可以同时起作用, 所以如果你正在用服务端配置的方式来管理权限和账户你可以平滑的切换到SQL驱动的工作流方式。
If you are installing ClickHouse yourself, specify here:
<networks>
<ip>::/0</ip>
</networks>
-->
<networks incl="networks" />
!!! note "警告"
你无法同时使用两个配置的方式来管理同一个权限实体。
<!-- Settings profile for the user. -->
<profile>default</profile>
<!-- Quota for the user. -->
<quota>default</quota>
</default>
## 用法 {#access-control-usage}
<!-- For requests from the Yandex.Metrica user interface via the API for data on specific counters. -->
<web>
<password></password>
<networks incl="networks" />
<profile>web</profile>
<quota>default</quota>
<allow_databases>
<database>test</database>
</allow_databases>
</web>
```
默认ClickHouse提供了一个 `default` 账号这个账号有所有的权限但是不能使用SQL驱动方式的访问权限和账户管理。`default`主要用在用户名还未设置的情况,比如从客户端登录或者执行分布式查询。在分布式查询中如果服务端或者集群没有指定[用户名密码](../engines/table-engines/special/distributed.md)那默认的账户就会被使用。
您可以看到两个用户的声明: `default`和`web`. 我们添加了 `web` 用户分开。
如果你刚开始使用ClickHouse考虑如下场景
`default` 在用户名未通过的情况下选择用户。 该 `default` 如果服务器或群集的配置没有指定分布式查询处理则user也用于分布式查询处理 `user``password` (见上的部分 [分布](../engines/table-engines/special/distributed.md) 发动机)。
1. 为 `default` 用户[开启SQL驱动方式的访问权限和账户管理](#enabling-access-control) .
2. 使用 `default` 用户登录并且创建所需要的所有用户。 不要忘记创建管理员账户 (`GRANT ALL ON *.* WITH GRANT OPTION TO admin_user_account`)。
3. [限制](settings/permissions-for-queries.md#permissions_for_queries) `default` 用户的权限并且禁用SQL驱动方式的访问权限和账户管理。
The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas; otherwise, distributed queries will fail.
### 当前解决方案的特性 {#access-control-properties}
密码以明文不推荐或SHA-256形式指定。 哈希没有腌制。 在这方面,您不应将这些密码视为提供了针对潜在恶意攻击的安全性。 相反,他们是必要的保护员工。
- 你甚至可以在数据库和表不存在的时候授予权限。
- 如果表被删除,和这张表关联的特权不会被删除。这意味着如果你创建一张同名的表,所有的特权仍旧有效。如果想删除这张表关联的特权,你可以执行 `REVOKE ALL PRIVILEGES ON db.table FROM ALL` 查询。
- 特权没有生命周期。
指定允许访问的网络列表。 在此示例中,将从单独的文件加载两个用户的网络列表 (`/etc/metrika.xml`)包含 `networks` 替代。 这里是它的一个片段:
## 用户账户 {#user-account-management}
``` xml
<yandex>
...
<networks>
<ip>::/64</ip>
<ip>203.0.113.0/24</ip>
<ip>2001:DB8::/32</ip>
...
</networks>
</yandex>
```
用户账户是权限实体用来授权操作ClickHouse用户账户包含
您可以直接在以下内容中定义此网络列表 `users.xml`,或在文件中 `users.d` directory (for more information, see the section «[配置文件](configuration-files.md#configuration_files)»).
- 标识符信息。
- [特权](../sql-reference/statements/grant.md#grant-privileges)用来定义用户可以执行的查询的范围。
- 可以连接到ClickHouse的主机。
- 指定或者默认的角色。
- 用户登录的时候默认的限制设置。
- 指定的设置描述。
该配置包括解释如何从任何地方打开访问的注释。
特权可以通过[GRANT](../sql-reference/statements/grant.md)查询授权给用户或者通过[角色](#role-management)授予。如果想撤销特权,可以使用[REVOKE](../sql-reference/statements/revoke.md)查询。查询用户所有的特权,使用[SHOW GRANTS](../sql-reference/statements/show.md#show-grants-statement)语句。
对于在生产中使用,仅指定 `ip` 元素IP地址及其掩码因为使用 `host``hoost_regexp` 可能会导致额外的延迟。
查询管理:
Next the user settings profile is specified (see the section «[设置配置文件](settings/settings-profiles.md)»). You can specify the default profile, `default'`. 配置文件可以有任何名称。 您可以为不同的用户指定相同的配置文件。 您可以在设置配置文件中编写的最重要的事情是 `readonly=1`,这确保只读访问。
Then specify the quota to be used (see the section «[配额](quotas.md#quotas)»). You can specify the default quota: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users in this case, resource usage is calculated for each user individually.
- [CREATE USER](../sql-reference/statements/create.md#create-user-statement)
- [ALTER USER](../sql-reference/statements/alter.md#alter-user-statement)
- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)
在可选 `<allow_databases>` 您还可以指定用户可以访问的数据库列表。 默认情况下,所有数据库都可供用户使用。 您可以指定 `default` 数据库。 在这种情况下,默认情况下,用户将接收对数据库的访问权限。
### 设置应用规则 {#access-control-settings-applying}
访问 `system` 始终允许数据库(因为此数据库用于处理查询)。
对于一个用户账户来说,设置可以通过多种方式配置:通过角色扮演和设置描述。对于一个登陆的账号来说,如果一个设置对应了多个不同的权限实体,这些设置的应用规则如下(优先权从高到底):
用户可以通过以下方式获取其中所有数据库和表的列表 `SHOW` 查询或系统表,即使不允许访问单个数据库。
1. 用户账户设置。
2. 用户账号默认的角色设置。如果这个设置配置了多个角色,那设置的应用是没有规定的顺序。
3. 从设置描述分批给用户或者角色的设置。如果这个设置配置了多个角色,那设置的应用是没有规定的顺序。
4. 对所有服务器有效的默认或者[default profile](server-configuration-parameters/settings.md#default-profile)的设置。
数据库访问是不相关的 [只读](settings/permissions-for-queries.md#settings_readonly) 设置。 您不能授予对一个数据库的完全访问权限,并 `readonly` 进入另一个。
[原始文章](https://clickhouse.tech/docs/en/operations/access_rights/) <!--hide-->
## 角色 {#role-management}
角色是权限实体的集合,可以被授予用户账号。
角色包括:
- [特权](../sql-reference/statements/grant.md#grant-privileges)
- 设置和限制
- 分配的角色列表
查询管理:
- [CREATE ROLE](../sql-reference/statements/create.md#create-role-statement)
- [ALTER ROLE](../sql-reference/statements/alter.md#alter-role-statement)
- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
- [SET DEFAULT ROLE](../sql-reference/statements/misc.md#set-default-role-statement)
- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement)
使用[GRANT](../sql-reference/statements/grant.md) 查询可以把特权授予给角色。用[REVOKE](../sql-reference/statements/revoke.md)来撤回特权。
## 行策略 {#row-policy-management}
行策略是一个过滤器,用来定义哪些行数据可以被账户或者角色访问。对一个特定的表来说,行策略包括过滤器和使用这个策略的账户和角色。
查询管理:
- [CREATE ROW POLICY](../sql-reference/statements/create.md#create-row-policy-statement)
- [ALTER ROW POLICY](../sql-reference/statements/alter.md#alter-row-policy-statement)
- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
## 设置描述 {#settings-profiles-management}
设置描述是[设置](settings/index.md)的汇总。设置汇总包括设置和限制,当然也包括这些描述的对象:角色和账户。
查询管理:
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create.md#create-settings-profile-statement)
- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter.md#alter-settings-profile-statement)
- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
## 配额 {#quotas-management}
配额用来限制资源的使用情况。参考[配额](quotas.md).
配额包括特定时间的限制条件和使用这个配额的账户和角色。
Management queries:
- [CREATE QUOTA](../sql-reference/statements/create.md#create-quota-statement)
- [ALTER QUOTA](../sql-reference/statements/alter.md#alter-quota-statement)
- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
## 开启SQL驱动方式的访问权限和账户管理 {#enabling-access-control}
- 为配置的存储设置一个目录.
ClickHouse把访问实体的相关配置存储在[访问控制目录](server-configuration-parameters/settings.md#access_control_path),而这个目录可以通过服务端进行配置.
- 为至少一个账户开启SQL驱动方式的访问权限和账户管理.
默认情况SQL驱动方式的访问权限和账户管理对所有用户都是关闭的。你需要在 `users.xml` 中配置至少一个用户,并且把[权限管理](settings/settings-users.md#access_management-user-setting)的值设置为1。
[Original article](https://clickhouse.tech/docs/en/operations/access_rights/) <!--hide-->

View File

@ -1,3 +1,13 @@
---
toc_priority: 43
toc_title: "操作"
---
# 操作 {#operations}
Clickhouse运维手册主要包含下面几部分
- 安装要求
[原始文章](https://clickhouse.tech/docs/en/operations/) <!--hide-->

View File

@ -1,3 +1,8 @@
---
toc_priority: 45
toc_title: "监控"
---
# 监控 {#jian-kong}
可以监控到:
@ -13,7 +18,7 @@ ClickHouse 本身不会去监控硬件资源的状态。
- 处理器上的负载和温度。
可以使用 [dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html) 或者其他工具。
可以使用[dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html)或者其他工具。
- 磁盘存储RAM和网络的使用率。
@ -21,17 +26,17 @@ ClickHouse 本身不会去监控硬件资源的状态。
ClickHouse服务本身具有用于自我状态监视指标。
要跟踪服务器事件,请观察服务器日志。 请参阅配置文件的\[logger\]server\_settings/settings.md\#server\_settings-logger部分。
要跟踪服务器事件,请观察服务器日志。 请参阅配置文件的 [logger](server-configuration-parameters/settings.md#server_configuration_parameters-logger)部分。
ClickHouse 收集的指标项:
- 服务用于计算的资源占用的各种指标。
- 关于查询处理的常见统计信息。
可以在 [系统指标](system-tables.md#system_tables-metrics) [系统。活动](system-tables.md#system_tables-events) 以及[系统。asynchronous\_metrics](system-tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
可以在 [系统指标](system-tables.md#system_tables-metrics) [系统事件](system-tables.md#system_tables-events) 以及[系统异步指标](system-tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
可以配置ClickHouse 往 [石墨](https://github.com/graphite-project)导入指标。 参考 [石墨部分](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) 配置文件。在配置指标导出之前需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
此外您可以通过HTTP API监视服务器可用性。 将HTTP GET请求发送到 `/ping`。 如果服务器可用,它将以 `200 OK` 响应。
要监视服务器集群的配置,应设置[max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas_status`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas_status`的请求将返回200 OK。 如果副本滞后,请求将返回 `503 HTTP_SERVICE_UNAVAILABLE`,包括有关待办事项大小的信息。
要监视服务器集群的配置,应设置[max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas_status`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas_status`的请求将返回200 OK。 如果副本滞后,请求将返回 `503 HTTP_SERVICE_UNAVAILABLE`,包括有关待办事项大小的信息。

View File

@ -1,8 +1,6 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 44
toc_title: "\u8981\u6C42"
toc_title: "要求"
---
# 要求 {#requirements}
@ -13,20 +11,20 @@ toc_title: "\u8981\u6C42"
ClickHouse实现并行数据处理并使用所有可用的硬件资源。 在选择处理器时考虑到ClickHouse在具有大量内核但时钟速率较低的配置中的工作效率要高于具有较少内核和较高时钟速率的配置。 例如具有2600MHz的16核心优于具有3600MHz的8核心。
建议使用 **涡轮增压** 和 **超线程** 技术。 它显着提高了典型工作负载的性能。
建议使用 **睿频加速** 和 **超线程** 技术。 它显着提高了典型工作负载的性能。
## RAM {#ram}
我们建议使用至少4GB的RAM来执行非平凡的查询。 ClickHouse服务器可以使用少得多的RAM运行但它需要处理查询的内存。
我们建议使用至少4GB的RAM来执行重要的查询。 ClickHouse服务器可以使用少得多的RAM运行但它需要处理查询的内存。
RAM所需的体积取决于:
- 查询的复杂性。
- 查询中处理的数据量。
- 查询中处理的数据量。
要计算所需的RAM体积您应该估计临时数据的大小 [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) 和您使用的其他操作。
ClickHouse可以使用外部存储器来存储临时数据。 看 [在外部存储器中分组](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) 有关详细信息。
ClickHouse可以使用外部存储器来存储临时数据。看 [在外部存储器中分组](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) 有关详细信息。
## 交换文件 {#swap-file}
@ -42,20 +40,20 @@ ClickHouse可以使用外部存储器来存储临时数据。 看 [在外部存
您可以采取数据的样本并从中获取行的平均大小。 然后将该值乘以计划存储的行数。
- 数据压缩系数。
- 数据压缩系数。
要估计数据压缩系数请将数据的样本加载到ClickHouse中并将数据的实际大小与存储的表的大小进行比较。 例如点击流数据通常被压缩6-10
要估计数据压缩系数请将数据的样本加载到ClickHouse中并将数据的实际大小与存储的表的大小进行比较。 例如点击流数据通常被压缩6-10
要计算要存储的最终数据量,请将压缩系数应用于估计的数据量。 如果计划将数据存储在多个副本中,则将估计的乘以副本数。
要计算要存储的最终数据量,请将压缩系数应用于估计的数据量。 如果计划将数据存储在多个副本中,则将估计的乘以副本数。
## 网络 {#network}
如果可能的话使用10G或更高级别的网络。
网络带宽对于处理具有大量中间数据的分布式查询至关重要。 此外,网络速度会影响复制过程。
网络带宽对于处理具有大量中间结果数据的分布式查询至关重要。 此外,网络速度会影响复制过程。
## 软件 {#software}
ClickHouse主要是为Linux系列操作系统开发的。 推荐的Linux发行版是Ubuntu。 `tzdata` 软件包应安装在系统中。
ClickHouse主要是为Linux系列操作系统开发的。 推荐的Linux发行版是Ubuntu。 `tzdata` 软件包应安装在系统中。
ClickHouse也可以在其他操作系统系列中工作。 查看详细信息 [开始](../getting-started/index.md) 文档的部分。

View File

@ -1,23 +1,21 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 46
toc_title: "\u7591\u96BE\u89E3\u7B54"
toc_title: "常见问题"
---
# 疑难解答 {#troubleshooting}
# 常见问题 {#troubleshooting}
- [安装方式](#troubleshooting-installation-errors)
- [安装](#troubleshooting-installation-errors)
- [连接到服务器](#troubleshooting-accepts-no-connections)
- [查询处理](#troubleshooting-does-not-process-queries)
- [查询处理效率](#troubleshooting-too-slow)
## 安装方式 {#troubleshooting-installation-errors}
## 安装 {#troubleshooting-installation-errors}
### 您无法使用Apt-get从ClickHouse存储库获取Deb软件包 {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}
- 检查防火墙设置。
- 如果出于任何原因无法访问存储库,请按照以下文件中的描述下载软件包 [开始](../getting-started/index.md) 文章并使用手动安装它们 `sudo dpkg -i <packages>` 指挥部 您还需要 `tzdata` 包。
- 如果出于任何原因无法访问存储库,请按照[开始](../getting-started/index.md)中的描述下载软件包,并使用命令 `sudo dpkg -i <packages>` 手动安装它们。除此之外你还需要 `tzdata` 包。
## 连接到服务器 {#troubleshooting-accepts-no-connections}
@ -44,7 +42,7 @@ $ sudo service clickhouse-server start
**检查日志**
主日志 `clickhouse-server` 是在 `/var/log/clickhouse-server/clickhouse-server.log` 默认情况下。
`clickhouse-server` 的主日志默认位于 `/var/log/clickhouse-server/clickhouse-server.log`。
如果服务器成功启动,您应该看到字符串:
@ -57,13 +55,13 @@ $ sudo service clickhouse-server start
2019.01.11 15:23:25.549505 [ 45 ] {} <Error> ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
```
如果在文件末尾没有看到错误,请从字符串开始查看整个文件:
如果在文件末尾没有看到错误,请从如下字符串开始查看整个文件:
``` text
<Information> Application: starting up.
```
如果您尝试启动第二个实例 `clickhouse-server` 在服务器上,您将看到以下日志:
如果您尝试在服务器上启动第二个 `clickhouse-server` 实例,您将看到以下日志:
``` text
2019.01.11 15:25:11.151730 [ 1 ] {} <Information> : Starting ClickHouse 19.1.0 with revision 54413
@ -79,9 +77,9 @@ Revision: 54413
2019.01.11 15:25:11.156716 [ 2 ] {} <Information> BaseDaemon: Stop SignalListener thread
```
**请参阅系统。d日志**
**查看 system.d 日志**
如果你没有找到任何有用的信息 `clickhouse-server` 日志或没有任何日志,您可以查看 `system.d` 使用命令记录:
如果在 `clickhouse-server` 日志中没有找到任何有用的信息,或者根本没有任何日志,您可以使用以下命令查看 `system.d` 日志:
``` bash
$ sudo journalctl -u clickhouse-server
@ -99,9 +97,9 @@ $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-se
检查:
- 码头工人设置。
- Docker设置。
如果您在IPv6网络中的Docker中运行ClickHouse请确保 `network=host` 设置。
如果您在IPv6网络的Docker中运行ClickHouse,请确保设置了 `network=host`。
- 端点设置。
@ -117,10 +115,10 @@ $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-se
检查:
- [tcp\_port\_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) 设置。
- 设置 [SSL序列](server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
- [tcp\_port\_secure](server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) 设置。
- [SSL证书](server-configuration-parameters/settings.md#server_configuration_parameters-openssl) 设置。
连接时使用正确的参数。 例如,使用 `port_secure` 参数 `clickhouse_client`.
连接时使用正确的参数。例如,使用 `clickhouse_client` 时请带上 `port_secure` 参数。
- 用户设置。
@ -135,7 +133,7 @@ $ curl 'http://localhost:8123/' --data-binary "SELECT a"
Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
```
如果你开始 `clickhouse-client` `stack-trace` 参数ClickHouse返回包含错误描述的服务器堆栈跟踪。
如果你启动 `clickhouse-client` 时使用了 `stack-trace` 参数,ClickHouse将返回包含错误描述的服务器堆栈跟踪信息。
您可能会看到一条关于连接中断的消息。 在这种情况下,可以重复查询。 如果每次执行查询时连接中断,请检查服务器日志中是否存在错误。
View File
@ -1,11 +1,9 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 47
toc_title: "\u70B9\u51FB\u66F4\u65B0"
toc_title: "更新"
---
# 点击更新 {#clickhouse-update}
# 更新 {#clickhouse-update}
如果从deb包安装ClickHouse请在服务器上执行以下命令:
@ -15,6 +13,6 @@ $ sudo apt-get install clickhouse-client clickhouse-server
$ sudo service clickhouse-server restart
```
如果您使用除推荐的deb包之外的其他内容安装ClickHouse请使用适当的更新方法。
如果您使用除推荐的deb包之外的其他方式安装ClickHouse请使用适当的更新方法。
ClickHouse不支持分布式更新。 该操作应在每个单独的服务器上连续执行。 不要同时更新群集上的所有服务器,否则群集将在一段时间内不可用。
ClickHouse不支持分布式更新。该操作应在每个单独的服务器上连续执行。不要同时更新群集上的所有服务器否则群集将在一段时间内不可用。
View File
@ -207,7 +207,7 @@ if (TARGET clickhouse-server AND TARGET copy-headers)
endif ()
if (ENABLE_TESTS AND USE_GTEST)
set (CLICKHOUSE_ALL_TESTS_TARGETS local_date_time_comparison unit_tests_libcommon unit_tests_dbms hashing_write_buffer hashing_read_buffer in_join_subqueries_preprocessor expression_analyzer)
set (CLICKHOUSE_ALL_TESTS_TARGETS local_date_time_comparison unit_tests_libcommon unit_tests_dbms hashing_write_buffer hashing_read_buffer in_join_subqueries_preprocessor)
add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_ALL_TESTS_TARGETS})
add_dependencies(clickhouse-bundle clickhouse-tests)
endif()
View File
@ -424,7 +424,7 @@ private:
std::cerr << percent << "%\t\t";
for (const auto & info : infos)
{
std::cerr << info->sampler.quantileNearest(percent / 100.0) << " sec." << "\t";
std::cerr << info->sampler.quantileNearest(percent / 100.0) << " sec.\t";
}
std::cerr << "\n";
};
@ -459,7 +459,7 @@ private:
auto print_percentile = [&json_out](Stats & info, auto percent, bool with_comma = true)
{
json_out << "\"" << percent << "\"" << ": " << info.sampler.quantileNearest(percent / 100.0) << (with_comma ? ",\n" : "\n");
json_out << "\"" << percent << "\": " << info.sampler.quantileNearest(percent / 100.0) << (with_comma ? ",\n" : "\n");
};
json_out << "{\n";
@ -469,7 +469,7 @@ private:
const auto & info = infos[i];
json_out << double_quote << connections[i]->getDescription() << ": {\n";
json_out << double_quote << "statistics" << ": {\n";
json_out << double_quote << "statistics: {\n";
print_key_value("QPS", info->queries / info->work_time);
print_key_value("RPS", info->read_rows / info->work_time);
@ -479,7 +479,7 @@ private:
print_key_value("num_queries", info->queries.load(), false);
json_out << "},\n";
json_out << double_quote << "query_time_percentiles" << ": {\n";
json_out << double_quote << "query_time_percentiles: {\n";
for (int percent = 0; percent <= 90; percent += 10)
print_percentile(*info, percent);
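A note on the two merged-literal hunks above: assuming `double_quote` here is the WriteBuffer manipulator from `IO/Operators.h`, it double-quotes only the next streamed value, so folding the `": {"` suffix into the quoted key would have put the colon and brace inside the quotes and produced invalid JSON. The merged literals therefore carry the quotes explicitly. A minimal sketch of the manipulator's behavior, under that assumption:

``` cpp
#include <iostream>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>

int main()
{
    DB::WriteBufferFromOwnString buf;
    buf << DB::double_quote << "statistics" << ": {";
    // buf.str() == "\"statistics\": {"  -- only the key is quoted.
    // By contrast, buf << DB::double_quote << "statistics: {" would yield
    // "\"statistics: {\"" with the colon and brace inside the quotes.
    std::cout << buf.str() << '\n';
}
```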
View File
@ -26,7 +26,7 @@ void ClusterCopier::init()
if (response.error != Coordination::ZOK)
return;
UInt64 version = ++task_description_version;
LOG_DEBUG(log, "Task description should be updated, local version " << version);
LOG_DEBUG(log, "Task description should be updated, local version {}", version);
};
task_description_path = task_zookeeper_path + "/description";
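The hunks in this file mechanically convert stream-style log messages to fmt format strings. As a standalone, illustrative sketch of the fmt behavior the converted calls rely on (values are made up for the demo):

``` cpp
#include <iostream>
#include <fmt/format.h>

int main()
{
    // Positional {} placeholders are filled left to right.
    std::cout << fmt::format("Will process {} table tasks", 42) << '\n';

    // Arguments keep their own types, so manual toString() calls become unnecessary.
    std::cout << fmt::format("Too many workers ({}, maximum {})", 9, 8) << '\n';

    // Literal braces must be doubled, or fmt will treat them as a replacement field.
    std::cout << fmt::format("literal {{}} next to value {}", 1) << '\n';
}
```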
@ -47,7 +47,7 @@ void ClusterCopier::init()
task_table.initShards(task_cluster->random_engine);
}
LOG_DEBUG(log, "Will process " << task_cluster->table_tasks.size() << " table tasks");
LOG_DEBUG(log, "Will process {} table tasks", task_cluster->table_tasks.size());
/// Do not initialize tables, will make deferred initialization in process()
@ -85,7 +85,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
{
TaskTable & task_table = task_shard->task_table;
LOG_INFO(log, "Discover partitions of shard " << task_shard->getDescription());
LOG_INFO(log, "Discover partitions of shard {}", task_shard->getDescription());
auto get_partitions = [&] () { return getShardPartitions(timeouts, *task_shard); };
auto existing_partitions_names = retry(get_partitions, 60);
@ -132,8 +132,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
{
if (!task_table.enabled_partitions_set.count(partition_name))
{
LOG_DEBUG(log, "Partition " << partition_name << " will not be processed, since it is not in "
<< "enabled_partitions of " << task_table.table_id);
LOG_DEBUG(log, "Partition {} will not be processed, since it is not in enabled_partitions of {}", partition_name, task_table.table_id);
}
}
}
@ -165,11 +164,10 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
for (const String & missing_partition : missing_partitions)
ss << " " << missing_partition;
LOG_WARNING(log, "There are no " << missing_partitions.size() << " partitions from enabled_partitions in shard "
<< task_shard->getDescription() << " :" << ss.str());
LOG_WARNING(log, "There are no {} partitions from enabled_partitions in shard {} :{}", missing_partitions.size(), task_shard->getDescription(), ss.str());
}
LOG_DEBUG(log, "Will copy " << task_shard->partition_tasks.size() << " partitions from shard " << task_shard->getDescription());
LOG_DEBUG(log, "Will copy {} partitions from shard {}", task_shard->partition_tasks.size(), task_shard->getDescription());
}
void ClusterCopier::discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads)
@ -181,7 +179,7 @@ void ClusterCopier::discoverTablePartitions(const ConnectionTimeouts & timeouts,
for (const TaskShardPtr & task_shard : task_table.all_shards)
thread_pool.scheduleOrThrowOnError([this, timeouts, task_shard]() { discoverShardPartitions(timeouts, task_shard); });
LOG_DEBUG(log, "Waiting for " << thread_pool.active() << " setup jobs");
LOG_DEBUG(log, "Waiting for {} setup jobs", thread_pool.active());
thread_pool.wait();
}
}
@ -205,7 +203,8 @@ void ClusterCopier::uploadTaskDescription(const std::string & task_path, const s
if (code && force)
zookeeper->createOrUpdate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent);
LOG_DEBUG(log, "Task description " << ((code && !force) ? "not " : "") << "uploaded to " << local_task_description_path << " with result " << code << " ("<< zookeeper->error2string(code) << ")");
LOG_DEBUG(log, "Task description {} uploaded to {} with result {} ({})",
((code && !force) ? "not " : ""), local_task_description_path, code, zookeeper->error2string(code));
}
void ClusterCopier::reloadTaskDescription()
@ -221,7 +220,7 @@ void ClusterCopier::reloadTaskDescription()
if (code)
throw Exception("Can't get description node " + task_description_path, ErrorCodes::BAD_ARGUMENTS);
LOG_DEBUG(log, "Loading description, zxid=" << task_description_current_stat.czxid);
LOG_DEBUG(log, "Loading description, zxid={}", task_description_current_stat.czxid);
auto config = getConfigurationFromXMLString(task_config_str);
/// Setup settings
@ -251,9 +250,7 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)
{
for (TaskTable & task_table : task_cluster->table_tasks)
{
LOG_INFO(log, "Process table task " << task_table.table_id << " with "
<< task_table.all_shards.size() << " shards, "
<< task_table.local_shards.size() << " of them are local ones");
LOG_INFO(log, "Process table task {} with {} shards, {} of them are local ones", task_table.table_id, task_table.all_shards.size(), task_table.local_shards.size());
if (task_table.all_shards.empty())
continue;
@ -357,8 +354,7 @@ zkutil::EphemeralNodeHolder::Ptr ClusterCopier::createTaskWorkerNodeAndWaitIfNee
if (static_cast<UInt64>(stat.numChildren) >= task_cluster->max_workers)
{
LOG_DEBUG(log, "Too many workers (" << stat.numChildren << ", maximum " << task_cluster->max_workers << ")"
<< ". Postpone processing " << description);
LOG_DEBUG(log, "Too many workers ({}, maximum {}). Postpone processing {}", stat.numChildren, task_cluster->max_workers, description);
if (unprioritized)
current_sleep_time = std::min(max_sleep_time, current_sleep_time + default_sleep_time);
@ -419,7 +415,7 @@ bool ClusterCopier::checkAllPiecesInPartitionAreDone(const TaskTable & task_tabl
{
bool piece_is_done = checkPartitionPieceIsDone(task_table, partition_name, piece_number, shards_with_partition);
if (!piece_is_done)
LOG_DEBUG(log, "Partition " << partition_name << " piece " + toString(piece_number) + " is not already done.");
LOG_DEBUG(log, "Partition {} piece {} is not already done.", partition_name, piece_number);
answer &= piece_is_done;
}
@ -435,8 +431,7 @@ bool ClusterCopier::checkAllPiecesInPartitionAreDone(const TaskTable & task_tabl
bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, const String & partition_name,
size_t piece_number, const TasksShard & shards_with_partition)
{
LOG_DEBUG(log, "Check that all shards processed partition " << partition_name
<< " piece " + toString(piece_number) + " successfully");
LOG_DEBUG(log, "Check that all shards processed partition {} piece {} successfully", partition_name, piece_number);
auto zookeeper = context.getZooKeeper();
@ -465,8 +460,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons
TaskStateWithOwner status = TaskStateWithOwner::fromString(res.data);
if (status.state != TaskState::Finished)
{
LOG_INFO(log, "The task " << res.data << " is being rewritten by "
<< status.owner << ". Partition piece will be rechecked");
LOG_INFO(log, "The task {} is being rewritten by {}. Partition piece will be rechecked", res.data, status.owner);
return false;
}
@ -484,7 +478,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons
if (!is_clean)
{
LOG_INFO(log, "Partition " << partition_name << " become dirty");
LOG_INFO(log, "Partition {} become dirty", partition_name);
return false;
}
@ -501,8 +495,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons
}
catch (const Coordination::Exception & e)
{
LOG_INFO(log, "A ZooKeeper error occurred while checking partition " << partition_name << " piece number "
<< toString(piece_number) << ". Will recheck the partition. Error: " << e.displayText());
LOG_INFO(log, "A ZooKeeper error occurred while checking partition {} piece number {}. Will recheck the partition. Error: {}", partition_name, toString(piece_number), e.displayText());
return false;
}
@ -511,12 +504,12 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons
{
if (zxid1[shard_num] != zxid2[shard_num])
{
LOG_INFO(log, "The task " << piece_status_paths[shard_num] << " is being modified now. Partition piece will be rechecked");
LOG_INFO(log, "The task {} is being modified now. Partition piece will be rechecked", piece_status_paths[shard_num]);
return false;
}
}
LOG_INFO(log, "Partition " << partition_name << " piece number " << toString(piece_number) << " is copied successfully");
LOG_INFO(log, "Partition {} piece number {} is copied successfully", partition_name, toString(piece_number));
return true;
}
@ -530,7 +523,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
inject_fault = value < move_fault_probability;
}
LOG_DEBUG(log, "Try to move " << partition_name << " to destionation table");
LOG_DEBUG(log, "Try to move {} to destionation table", partition_name);
auto zookeeper = context.getZooKeeper();
@ -548,7 +541,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Someone is already moving pieces " << current_partition_attach_is_active);
LOG_DEBUG(log, "Someone is already moving pieces {}", current_partition_attach_is_active);
return TaskStatus::Active;
}
@ -565,16 +558,13 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
TaskStateWithOwner status = TaskStateWithOwner::fromString(status_data);
if (status.state == TaskState::Finished)
{
LOG_DEBUG(log, "All pieces for partition from this task " << current_partition_attach_is_active
<< " has been successfully moved to destination table by " << status.owner);
LOG_DEBUG(log, "All pieces for partition from this task {} has been successfully moved to destination table by {}", current_partition_attach_is_active, status.owner);
return TaskStatus::Finished;
}
/// Task is abandoned, because previously we created ephemeral node, possibly in other copier's process.
/// Initialize DROP PARTITION
LOG_DEBUG(log, "Moving piece for partition " << current_partition_attach_is_active
<< " has not been successfully finished by " << status.owner
<< ". Will try to move by myself.");
LOG_DEBUG(log, "Moving piece for partition {} has not been successfully finished by {}. Will try to move by myself.", current_partition_attach_is_active, status.owner);
/// Remove is_done marker.
zookeeper->remove(current_partition_attach_is_done);
@ -591,9 +581,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
/// Move partition to original destination table.
for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number)
{
LOG_DEBUG(log, "Trying to move partition " << partition_name
<< " piece " << toString(current_piece_number)
<< " to original table");
LOG_DEBUG(log, "Trying to move partition {} piece {} to original table", partition_name, toString(current_piece_number));
ASTPtr query_alter_ast;
String query_alter_ast_string;
@ -614,7 +602,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
" ATTACH PARTITION " + partition_name +
" FROM " + getQuotedTable(helping_table);
LOG_DEBUG(log, "Executing ALTER query: " << query_alter_ast_string);
LOG_DEBUG(log, "Executing ALTER query: {}", query_alter_ast_string);
try
{
@ -626,13 +614,11 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
PoolMode::GET_MANY,
ClusterExecutionMode::ON_EACH_NODE);
LOG_INFO(log, "Number of nodes that executed ALTER query successfully : " << toString(num_nodes));
LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes));
}
catch (...)
{
LOG_DEBUG(log, "Error while moving partition " << partition_name
<< " piece " << toString(current_piece_number)
<< "to original table");
LOG_DEBUG(log, "Error while moving partition {} piece {} to original table", partition_name, toString(current_piece_number));
throw;
}
@ -647,7 +633,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
query_deduplicate_ast_string += " OPTIMIZE TABLE " + getQuotedTable(original_table) +
" PARTITION " + partition_name + " DEDUPLICATE;";
LOG_DEBUG(log, "Executing OPTIMIZE DEDUPLICATE query: " << query_alter_ast_string);
LOG_DEBUG(log, "Executing OPTIMIZE DEDUPLICATE query: {}", query_alter_ast_string);
UInt64 num_nodes = executeQueryOnCluster(
task_table.cluster_push,
@ -656,14 +642,12 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
&task_cluster->settings_push,
PoolMode::GET_MANY);
LOG_INFO(log, "Number of shard that executed OPTIMIZE DEDUPLICATE query successfully : "
<< toString(num_nodes));
LOG_INFO(log, "Number of shard that executed OPTIMIZE DEDUPLICATE query successfully : {}", toString(num_nodes));
}
}
catch (...)
{
LOG_DEBUG(log, "Error while executing OPTIMIZE DEDUPLICATE partition " << partition_name
<< "in the original table");
LOG_DEBUG(log, "Error while executing OPTIMIZE DEDUPLICATE partition {}in the original table", partition_name);
throw;
}
}
@ -759,8 +743,7 @@ bool ClusterCopier::tryDropPartitionPiece(
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " is cleaning now by somebody, sleep");
LOG_DEBUG(log, "Partition {} piece {} is cleaning now by somebody, sleep", task_partition.name, toString(current_piece_number));
std::this_thread::sleep_for(default_sleep_time);
return false;
}
@ -773,8 +756,7 @@ bool ClusterCopier::tryDropPartitionPiece(
{
if (stat.numChildren != 0)
{
LOG_DEBUG(log, "Partition " << task_partition.name << " contains " << stat.numChildren
<< " active workers while trying to drop it. Going to sleep.");
LOG_DEBUG(log, "Partition {} contains {} active workers while trying to drop it. Going to sleep.", task_partition.name, stat.numChildren);
std::this_thread::sleep_for(default_sleep_time);
return false;
}
@ -794,7 +776,7 @@ bool ClusterCopier::tryDropPartitionPiece(
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Partition " << task_partition.name << " is being filled now by somebody, sleep");
LOG_DEBUG(log, "Partition {} is being filled now by somebody, sleep", task_partition.name);
return false;
}
@ -832,7 +814,7 @@ bool ClusterCopier::tryDropPartitionPiece(
/// It is important, DROP PARTITION must be done synchronously
settings_push.replication_alter_partitions_sync = 2;
LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
LOG_DEBUG(log, "Execute distributed DROP PARTITION: {}", query);
/// We have to drop partition_piece on each replica
size_t num_shards = executeQueryOnCluster(
cluster_push, query,
@ -841,7 +823,7 @@ bool ClusterCopier::tryDropPartitionPiece(
PoolMode::GET_MANY,
ClusterExecutionMode::ON_EACH_NODE);
LOG_INFO(log, "DROP PARTITION was successfully executed on " << num_shards << " nodes of a cluster.");
LOG_INFO(log, "DROP PARTITION was successfully executed on {} nodes of a cluster.", num_shards);
/// Update the locking node
if (!my_clock.is_stale())
@ -859,13 +841,12 @@ bool ClusterCopier::tryDropPartitionPiece(
return false;
}
LOG_INFO(log, "Partition " << task_partition.name << " piece " << toString(current_piece_number)
<< " was dropped on cluster " << task_table.cluster_push_name);
LOG_INFO(log, "Partition {} piece {} was dropped on cluster {}", task_partition.name, toString(current_piece_number), task_table.cluster_push_name);
if (zookeeper->tryCreate(current_shards_path, host_id, zkutil::CreateMode::Persistent) == Coordination::ZNODEEXISTS)
zookeeper->set(current_shards_path, host_id);
}
LOG_INFO(log, "Partition " << task_partition.name << " piece " << toString(current_piece_number) << " is safe for work now.");
LOG_INFO(log, "Partition {} piece {} is safe for work now.", task_partition.name, toString(current_piece_number));
return true;
}
@ -889,7 +870,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
++cluster_partition.total_tries;
LOG_DEBUG(log, "Processing partition " << partition_name << " for the whole cluster");
LOG_DEBUG(log, "Processing partition {} for the whole cluster", partition_name);
/// Process each source shard having current partition and copy current partition
/// NOTE: shards are sorted by "distance" to current host
@ -911,7 +892,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
{
const size_t number_of_splits = task_table.number_of_splits;
shard->partition_tasks.emplace(partition_name, ShardPartition(*shard, partition_name, number_of_splits));
LOG_DEBUG(log, "Discovered partition " << partition_name << " in shard " << shard->getDescription());
LOG_DEBUG(log, "Discovered partition {} in shard {}", partition_name, shard->getDescription());
/// To save references in the future.
auto shard_partition_it = shard->partition_tasks.find(partition_name);
PartitionPieces & shard_partition_pieces = shard_partition_it->second.pieces;
@ -924,7 +905,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
}
else
{
LOG_DEBUG(log, "Found that shard " << shard->getDescription() << " does not contain current partition " << partition_name);
LOG_DEBUG(log, "Found that shard {} does not contain current partition {}", shard->getDescription(), partition_name);
continue;
}
}
@ -1030,21 +1011,20 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
task_table.rows_copied += cluster_partition.rows_copied;
double elapsed = cluster_partition.elapsed_time_seconds;
LOG_INFO(log, "It took " << std::fixed << std::setprecision(2) << elapsed << " seconds to copy partition " << partition_name
<< ": " << formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied) << " uncompressed bytes"
<< ", " << formatReadableQuantity(cluster_partition.rows_copied) << " rows"
<< " and " << cluster_partition.blocks_copied << " source blocks are copied");
LOG_INFO(log, "It took {} seconds to copy partition {}: {} uncompressed bytes, {} rows and {} source blocks are copied",
elapsed, partition_name,
formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied),
formatReadableQuantity(cluster_partition.rows_copied),
cluster_partition.blocks_copied);
if (cluster_partition.rows_copied)
{
LOG_INFO(log, "Average partition speed: "
<< formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied / elapsed) << " per second.");
LOG_INFO(log, "Average partition speed: {} per second.", formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied / elapsed));
}
if (task_table.rows_copied)
{
LOG_INFO(log, "Average table " << task_table.table_id << " speed: "
<< formatReadableSizeWithDecimalSuffix(task_table.bytes_copied / elapsed) << " per second.");
LOG_INFO(log, "Average table {} speed: {} per second.", task_table.table_id, formatReadableSizeWithDecimalSuffix(task_table.bytes_copied / elapsed));
}
}
}
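One behavioral nuance in the hunk above: the old stream version printed `elapsed` under `std::fixed << std::setprecision(2)`, while the migrated `{}` prints the value at full precision. Whether that was intended isn't clear from the diff; if the two-decimal output is wanted, fmt's format spec restores it, as this illustrative snippet shows:

``` cpp
#include <iostream>
#include <fmt/format.h>

int main()
{
    double elapsed = 12.3456;
    std::cout << fmt::format("It took {} seconds", elapsed) << '\n';     // 12.3456
    std::cout << fmt::format("It took {:.2f} seconds", elapsed) << '\n'; // 12.35, matches the old output
}
```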
@ -1055,8 +1035,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
if (!table_is_done)
{
LOG_INFO(log, "Table " + task_table.table_id + " is not processed yet."
<< "Copied " << finished_partitions << " of " << required_partitions << ", will retry");
LOG_INFO(log, "Table {} is not processed yet.Copied {} of {}, will retry", task_table.table_id, finished_partitions, required_partitions);
}
return table_is_done;
@ -1104,9 +1083,11 @@ TaskStatus ClusterCopier::iterateThroughAllPiecesInPartition(const ConnectionTim
{
for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num)
{
LOG_INFO(log, "Attempt number " << try_num << " to process partition " << task_partition.name
<< " piece number " << piece_number << " on shard number " << task_partition.task_shard.numberInCluster()
<< " with index " << task_partition.task_shard.indexInCluster());
LOG_INFO(log, "Attempt number {} to process partition {} piece number {} on shard number {} with index {}.",
try_num, task_partition.name, piece_number,
task_partition.task_shard.numberInCluster(),
task_partition.task_shard.indexInCluster());
res = processPartitionPieceTaskImpl(timeouts, task_partition, piece_number, is_unprioritized_task);
/// Exit if success
@ -1210,7 +1191,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
/// Load balancing
auto worker_node_holder = createTaskWorkerNodeAndWaitIfNeed(zookeeper, current_task_piece_status_path, is_unprioritized_task);
LOG_DEBUG(log, "Processing " << current_task_piece_status_path);
LOG_DEBUG(log, "Processing {}", current_task_piece_status_path);
const String piece_status_path = partition_piece.getPartitionPieceShardsPath();
@ -1221,14 +1202,12 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
/// Do not start if partition piece is dirty, try to clean it
if (is_clean)
{
LOG_DEBUG(log, "Partition " << task_partition.name
<< " piece " + toString(current_piece_number) + " appears to be clean");
LOG_DEBUG(log, "Partition {} piece {} appears to be clean", task_partition.name, current_piece_number);
zookeeper->createAncestors(current_task_piece_status_path);
}
else
{
LOG_DEBUG(log, "Partition " << task_partition.name
<< " piece " + toString(current_piece_number) + " is dirty, try to drop it");
LOG_DEBUG(log, "Partition {} piece {} is dirty, try to drop it", task_partition.name, current_piece_number);
try
{
@ -1253,7 +1232,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
{
if (e.code == Coordination::ZNODEEXISTS)
{
LOG_DEBUG(log, "Someone is already processing " << current_task_piece_is_active_path);
LOG_DEBUG(log, "Someone is already processing {}", current_task_piece_is_active_path);
return TaskStatus::Active;
}
@ -1269,16 +1248,13 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
TaskStateWithOwner status = TaskStateWithOwner::fromString(status_data);
if (status.state == TaskState::Finished)
{
LOG_DEBUG(log, "Task " << current_task_piece_status_path
<< " has been successfully executed by " << status.owner);
LOG_DEBUG(log, "Task {} has been successfully executed by {}", current_task_piece_status_path, status.owner);
return TaskStatus::Finished;
}
/// Task is abandoned, because previously we created ephemeral node, possibly in other copier's process.
/// Initialize DROP PARTITION
LOG_DEBUG(log, "Task " << current_task_piece_status_path
<< " has not been successfully finished by " << status.owner
<< ". Partition will be dropped and refilled.");
LOG_DEBUG(log, "Task {} has not been successfully finished by {}. Partition will be dropped and refilled.", current_task_piece_status_path, status.owner);
create_is_dirty_node(clean_state_clock);
return TaskStatus::Error;
@ -1293,11 +1269,9 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id);
auto res = zookeeper->tryCreate(current_task_piece_status_path, state_finished, zkutil::CreateMode::Persistent);
if (res == Coordination::ZNODEEXISTS)
LOG_DEBUG(log, "Partition " << task_partition.name << " piece "
+ toString(current_piece_number) + " is absent on current replica of a shard. But other replicas have already marked it as done.");
LOG_DEBUG(log, "Partition {} piece {} is absent on current replica of a shard. But other replicas have already marked it as done.", task_partition.name, current_piece_number);
if (res == Coordination::ZOK)
LOG_DEBUG(log, "Partition " << task_partition.name << " piece "
+ toString(current_piece_number) + " is absent on current replica of a shard. Will mark it as done. Other replicas will do the same.");
LOG_DEBUG(log, "Partition {} piece {} is absent on current replica of a shard. Will mark it as done. Other replicas will do the same.", task_partition.name, current_piece_number);
return TaskStatus::Finished;
}
@ -1325,18 +1299,14 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
if (count != 0)
{
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< current_piece_number << "is not empty. In contains " << count << " rows.");
LOG_INFO(log, "Partition {} piece {}is not empty. In contains {} rows.", task_partition.name, current_piece_number, count);
Coordination::Stat stat_shards{};
zookeeper->get(partition_piece.getPartitionPieceShardsPath(), &stat_shards);
/// NOTE: partition is still fresh if dirt discovery happens before cleaning
if (stat_shards.numChildren == 0)
{
LOG_WARNING(log, "There are no workers for partition " << task_partition.name
<< " piece " << toString(current_piece_number)
<< ", but destination table contains " << count << " rows"
<< ". Partition will be dropped and refilled.");
LOG_WARNING(log, "There are no workers for partition {} piece {}, but destination table contains {} rows. Partition will be dropped and refilled.", task_partition.name, toString(current_piece_number), count);
create_is_dirty_node(clean_state_clock);
return TaskStatus::Error;
@ -1353,14 +1323,12 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
CleanStateClock new_clean_state_clock (zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path);
if (clean_state_clock != new_clean_state_clock)
{
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " clean state changed, cowardly bailing");
LOG_INFO(log, "Partition {} piece {} clean state changed, cowardly bailing", task_partition.name, toString(current_piece_number));
return TaskStatus::Error;
}
else if (!new_clean_state_clock.is_clean())
{
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " is dirty and will be dropped and refilled");
LOG_INFO(log, "Partition {} piece {} is dirty and will be dropped and refilled", task_partition.name, toString(current_piece_number));
create_is_dirty_node(new_clean_state_clock);
return TaskStatus::Error;
}
@ -1387,12 +1355,11 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
String query = queryToString(create_query_push_ast);
LOG_DEBUG(log, "Create destination tables. Query: " << query);
LOG_DEBUG(log, "Create destination tables. Query: {}", query);
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
create_query_push_ast, &task_cluster->settings_push,
PoolMode::GET_MANY);
LOG_DEBUG(log, "Destination tables " << getQuotedTable(task_table.table_push)
<< " have been created on " << shards << " shards of " << task_table.cluster_push->getShardCount());
LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount());
}
/// Do the copying
@ -1407,8 +1374,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
// Select all fields
ASTPtr query_select_ast = get_select_query(task_shard.table_read_shard, "*", /*enable_splitting*/ true, inject_fault ? "1" : "");
LOG_DEBUG(log, "Executing SELECT query and pull from " << task_shard.getDescription()
<< " : " << queryToString(query_select_ast));
LOG_DEBUG(log, "Executing SELECT query and pull from {} : {}", task_shard.getDescription(), queryToString(query_select_ast));
ASTPtr query_insert_ast;
{
@ -1419,7 +1385,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
const auto & settings = context.getSettingsRef();
query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Executing INSERT query: " << query);
LOG_DEBUG(log, "Executing INSERT query: {}", query);
}
try
@ -1501,8 +1467,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
}
}
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " copied. But not moved to original destination table.");
LOG_INFO(log, "Partition {} piece {} copied. But not moved to original destination table.", task_partition.name, toString(current_piece_number));
/// Try create original table (if not exists) on each shard
@ -1513,12 +1478,11 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
String query = queryToString(create_query_push_ast);
LOG_DEBUG(log, "Create destination tables. Query: " << query);
LOG_DEBUG(log, "Create destination tables. Query: {}", query);
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query,
create_query_push_ast, &task_cluster->settings_push,
PoolMode::GET_MANY);
LOG_DEBUG(log, "Destination tables " << getQuotedTable(task_table.table_push)
<< " have been created on " << shards << " shards of " << task_table.cluster_push->getShardCount());
LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount());
}
catch (...)
{
@ -1531,14 +1495,12 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
CleanStateClock new_clean_state_clock (zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path);
if (clean_state_clock != new_clean_state_clock)
{
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " clean state changed, cowardly bailing");
LOG_INFO(log, "Partition {} piece {} clean state changed, cowardly bailing", task_partition.name, toString(current_piece_number));
return TaskStatus::Error;
}
else if (!new_clean_state_clock.is_clean())
{
LOG_INFO(log, "Partition " << task_partition.name << " piece "
<< toString(current_piece_number) << " became dirty and will be dropped and refilled");
LOG_INFO(log, "Partition {} piece {} became dirty and will be dropped and refilled", task_partition.name, toString(current_piece_number));
create_is_dirty_node(new_clean_state_clock);
return TaskStatus::Error;
}
@ -1582,7 +1544,7 @@ void ClusterCopier::dropHelpingTables(const TaskTable & task_table)
const ClusterPtr & cluster_push = task_table.cluster_push;
Settings settings_push = task_cluster->settings_push;
LOG_DEBUG(log, "Execute distributed DROP TABLE: " << query);
LOG_DEBUG(log, "Execute distributed DROP TABLE: {}", query);
/// We have to drop partition_piece on each replica
UInt64 num_nodes = executeQueryOnCluster(
cluster_push, query,
@ -1591,7 +1553,7 @@ void ClusterCopier::dropHelpingTables(const TaskTable & task_table)
PoolMode::GET_MANY,
ClusterExecutionMode::ON_EACH_NODE);
LOG_DEBUG(log, "DROP TABLE query was successfully executed on " << toString(num_nodes) << " nodes.");
LOG_DEBUG(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes));
}
}
@ -1609,7 +1571,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
const ClusterPtr & cluster_push = task_table.cluster_push;
Settings settings_push = task_cluster->settings_push;
LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query);
LOG_DEBUG(log, "Execute distributed DROP PARTITION: {}", query);
/// We have to drop partition_piece on each replica
UInt64 num_nodes = executeQueryOnCluster(
cluster_push, query,
@ -1618,9 +1580,9 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
PoolMode::GET_MANY,
ClusterExecutionMode::ON_EACH_NODE);
LOG_DEBUG(log, "DROP PARTITION query was successfully executed on " << toString(num_nodes) << " nodes.");
LOG_DEBUG(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes));
}
LOG_DEBUG(log, "All helping tables dropped partition " << partition_name);
LOG_DEBUG(log, "All helping tables dropped partition {}", partition_name);
}
String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings * settings)
@ -1724,7 +1686,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
const auto & settings = context.getSettingsRef();
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Computing destination partition set, executing query: " << query);
LOG_DEBUG(log, "Computing destination partition set, executing query: {}", query);
Context local_context = context;
local_context.setSettings(task_cluster->settings_pull);
@ -1744,7 +1706,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
}
}
LOG_DEBUG(log, "There are " << res.size() << " destination partitions in shard " << task_shard.getDescription());
LOG_DEBUG(log, "There are {} destination partitions in shard {}", res.size(), task_shard.getDescription());
return res;
}
@ -1765,8 +1727,7 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts,
query += " LIMIT 1";
LOG_DEBUG(log, "Checking shard " << task_shard.getDescription() << " for partition "
<< partition_quoted_name << " existence, executing query: " << query);
LOG_DEBUG(log, "Checking shard {} for partition {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, query);
ParserQuery parser_query(query.data() + query.size());
const auto & settings = context.getSettingsRef();
@ -1805,9 +1766,7 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi
query += " LIMIT 1";
LOG_DEBUG(log, "Checking shard " << task_shard.getDescription() << " for partition "
<< partition_quoted_name << " piece " << std::to_string(current_piece_number)
<< "existence, executing query: " << query);
LOG_DEBUG(log, "Checking shard {} for partition {} piece {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query);
ParserQuery parser_query(query.data() + query.size());
const auto & settings = context.getSettingsRef();
@ -1817,11 +1776,9 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi
local_context.setSettings(task_cluster->settings_pull);
auto result = InterpreterFactory::get(query_ast, local_context)->execute().in->read().rows();
if (result != 0)
LOG_DEBUG(log, "Partition " << partition_quoted_name << " piece number "
<< std::to_string(current_piece_number) << " is PRESENT on shard " << task_shard.getDescription());
LOG_DEBUG(log, "Partition {} piece number {} is PRESENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription());
else
LOG_DEBUG(log, "Partition " << partition_quoted_name << " piece number "
<< std::to_string(current_piece_number) << " is ABSENT on shard " << task_shard.getDescription());
LOG_DEBUG(log, "Partition {} piece number {} is ABSENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription());
return result != 0;
}
@ -1938,8 +1895,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(
if (execution_mode == ClusterExecutionMode::ON_EACH_NODE && successful_nodes != origin_replicas_number)
{
LOG_INFO(log, "There was an error while executing ALTER on each node. Query was executed on "
<< toString(successful_nodes) << " nodes. But had to be executed on " << toString(origin_replicas_number.load()));
LOG_INFO(log, "There was an error while executing ALTER on each node. Query was executed on {} nodes. But had to be executed on {}", toString(successful_nodes), toString(origin_replicas_number.load()));
}
View File
@ -95,11 +95,7 @@ void ClusterCopierApp::mainImpl()
ThreadStatus thread_status;
auto * log = &logger();
LOG_INFO(log, "Starting clickhouse-copier ("
<< "id " << process_id << ", "
<< "host_id " << host_id << ", "
<< "path " << process_path << ", "
<< "revision " << ClickHouseRevision::get() << ")");
LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::get());
SharedContextHolder shared_context = Context::createShared();
auto context = std::make_unique<Context>(Context::createGlobal(shared_context.get()));
View File
@ -183,11 +183,11 @@ public:
switch (rsp.type)
{
case Coordination::CREATED:
LOG_DEBUG(logger, "CleanStateClock change: CREATED, at " << rsp.path);
LOG_DEBUG(logger, "CleanStateClock change: CREATED, at {}", rsp.path);
stale->store(true);
break;
case Coordination::CHANGED:
LOG_DEBUG(logger, "CleanStateClock change: CHANGED, at" << rsp.path);
LOG_DEBUG(logger, "CleanStateClock change: CHANGED, at {}", rsp.path);
stale->store(true);
}
}
View File
@ -211,7 +211,7 @@ try
/// Lock path directory before read
status.emplace(context->getPath() + "status");
LOG_DEBUG(log, "Loading metadata from " << context->getPath());
LOG_DEBUG(log, "Loading metadata from {}", context->getPath());
loadMetadataSystem(*context);
attachSystemTables();
loadMetadata(*context);
View File
@ -62,7 +62,7 @@ namespace
void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
{
Poco::Net::HTMLForm params(request, request.stream());
LOG_TRACE(log, "Request URI: " + request.getURI());
LOG_TRACE(log, "Request URI: {}", request.getURI());
auto process_error = [&response, this](const std::string & message)
{
@ -89,11 +89,11 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
if (params.has("schema"))
{
schema_name = params.get("schema");
LOG_TRACE(log, "Will fetch info for table '" << schema_name + "." + table_name << "'");
LOG_TRACE(log, "Will fetch info for table '{}'", schema_name + "." + table_name);
}
else
LOG_TRACE(log, "Will fetch info for table '" << table_name << "'");
LOG_TRACE(log, "Got connection str '" << connection_string << "'");
LOG_TRACE(log, "Will fetch info for table '{}'", table_name);
LOG_TRACE(log, "Got connection str '{}'", connection_string);
try
{
@ -124,7 +124,7 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
select->format(settings);
std::string query = ss.str();
LOG_TRACE(log, "Inferring structure with query '" << query << "'");
LOG_TRACE(log, "Inferring structure with query '{}'", query);
if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLPrepare(hstmt, reinterpret_cast<SQLCHAR *>(query.data()), query.size())))
throw POCO_SQL_ODBC_CLASS::DescriptorException(session.dbc());
View File
@ -10,7 +10,7 @@ namespace DB
Poco::Net::HTTPRequestHandler * HandlerFactory::createRequestHandler(const Poco::Net::HTTPServerRequest & request)
{
Poco::URI uri{request.getURI()};
LOG_TRACE(log, "Request URI: " + uri.toString());
LOG_TRACE(log, "Request URI: {}", uri.toString());
if (uri.getPath() == "/ping" && request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET)
return new PingHandler(keep_alive_timeout);
View File
@ -25,7 +25,7 @@ namespace DB
void IdentifierQuoteHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
{
Poco::Net::HTMLForm params(request, request.stream());
LOG_TRACE(log, "Request URI: " + request.getURI());
LOG_TRACE(log, "Request URI: {}", request.getURI());
auto process_error = [&response, this](const std::string & message)
{
View File
@ -84,7 +84,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
Poco::Net::HTMLForm params(request);
if (mode == "read")
params.read(request.stream());
LOG_TRACE(log, "Request URI: " + request.getURI());
LOG_TRACE(log, "Request URI: {}", request.getURI());
if (mode == "read" && !params.has("query"))
{
@ -132,7 +132,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
std::string format = params.get("format", "RowBinary");
std::string connection_string = params.get("connection_string");
LOG_TRACE(log, "Connection string: '" << connection_string << "'");
LOG_TRACE(log, "Connection string: '{}'", connection_string);
WriteBufferFromHTTPServerResponse out(request, response, keep_alive_timeout);
@ -152,7 +152,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
}
std::string db_name = params.get("db_name");
std::string table_name = params.get("table_name");
LOG_TRACE(log, "DB name: '" << db_name << "', table name: '" << table_name << "'");
LOG_TRACE(log, "DB name: '{}', table name: '{}'", db_name, table_name);
auto quoting_style = IdentifierQuotingStyle::None;
#if USE_ODBC
@ -171,7 +171,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
else
{
std::string query = params.get("query");
LOG_TRACE(log, "Query: " << query);
LOG_TRACE(log, "Query: {}", query);
BlockOutputStreamPtr writer = FormatFactory::instance().getOutput(format, out, *sample_block, context);
auto pool = getPool(connection_string);
View File
@ -48,12 +48,7 @@ namespace
#endif
)
{
LOG_ERROR(log,
"Cannot resolve listen_host (" << host << "), error " << e.code() << ": " << e.message()
<< ". "
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
"specify IPv4 address to listen in <listen_host> element of configuration "
"file. Example: <listen_host>0.0.0.0</listen_host>");
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in <listen_host> element of configuration file. Example: <listen_host>0.0.0.0</listen_host>", host, e.code(), e.message());
}
throw;
@ -188,7 +183,7 @@ int ODBCBridge::main(const std::vector<std::string> & /*args*/)
new HandlerFactory("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context), server_pool, socket, http_params);
server.start();
LOG_INFO(log, "Listening http://" + address.toString());
LOG_INFO(log, "Listening http://{}", address.toString());
SCOPE_EXIT({
LOG_DEBUG(log, "Received termination signal.");
@ -198,7 +193,7 @@ int ODBCBridge::main(const std::vector<std::string> & /*args*/)
{
if (server.currentConnections() == 0)
break;
LOG_DEBUG(log, "Waiting for " << server.currentConnections() << " connections, try " << count);
LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count);
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}
});
View File
@ -241,7 +241,7 @@ void HTTPHandler::processQuery(
CurrentThread::QueryScope query_scope(context);
LOG_TRACE(log, "Request URI: " << request.getURI());
LOG_TRACE(log, "Request URI: {}", request.getURI());
std::istream & istr = request.stream();
View File
@ -30,13 +30,10 @@ HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string &
Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler(const Poco::Net::HTTPServerRequest & request)
{
LOG_TRACE(log, "HTTP Request for " << name << ". "
<< "Method: " << request.getMethod()
<< ", Address: " << request.clientAddress().toString()
<< ", User-Agent: " << (request.has("User-Agent") ? request.get("User-Agent") : "none")
<< (request.hasContentLength() ? (", Length: " + std::to_string(request.getContentLength())) : (""))
<< ", Content Type: " << request.getContentType()
<< ", Transfer Encoding: " << request.getTransferEncoding());
LOG_TRACE(log, "HTTP Request for {}. Method: {}, Address: {}, User-Agent: {}{}, Content Type: {}, Transfer Encoding: {}",
name, request.getMethod(), request.clientAddress().toString(), request.has("User-Agent") ? request.get("User-Agent") : "none",
(request.hasContentLength() ? (", Length: " + std::to_string(request.getContentLength())) : ("")),
request.getContentType(), request.getTransferEncoding());
for (auto & handler_factory : child_factories)
{
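The factory hunk above shows the one wrinkle in these conversions: an optional fragment (the `, Length: ...` piece) cannot be expressed as a plain placeholder, so it is prebuilt as a string and spliced in as its own argument. A hedged standalone sketch of the same pattern, with demo values:

``` cpp
#include <iostream>
#include <string>
#include <fmt/format.h>

int main()
{
    bool has_content_length = true; // assumption for the demo
    size_t content_length = 128;

    // Build the optional piece first, then pass it as an ordinary argument.
    std::string length_part = has_content_length
        ? fmt::format(", Length: {}", content_length)
        : std::string();
    std::cout << fmt::format("HTTP Request. Method: {}{}", "GET", length_part) << '\n';
}
```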
View File
@ -53,7 +53,7 @@ void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & reque
{
HTMLForm params(request);
LOG_TRACE(log, "Request URI: " << request.getURI());
LOG_TRACE(log, "Request URI: {}", request.getURI());
String endpoint_name = params.get("endpoint");
bool compress = params.get("compress") == "true";
@ -103,7 +103,7 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ
response.setStatusAndReason(Poco::Net::HTTPServerResponse::HTTP_UNAUTHORIZED);
if (!response.sent())
writeString(message, *used_output.out);
LOG_WARNING(log, "Query processing failed request: '" << request.getURI() << "' authentication failed");
LOG_WARNING(log, "Query processing failed request: '{}' authentication failed", request.getURI());
}
}
catch (Exception & e)
View File
@ -83,19 +83,15 @@ void MySQLHandler::run()
if (!connection_context.mysql.max_packet_size)
connection_context.mysql.max_packet_size = MAX_PACKET_LENGTH;
LOG_TRACE(log, "Capabilities: " << handshake_response.capability_flags
<< ", max_packet_size: "
<< handshake_response.max_packet_size
<< ", character_set: "
<< static_cast<int>(handshake_response.character_set)
<< ", user: "
<< handshake_response.username
<< ", auth_response length: "
<< handshake_response.auth_response.length()
<< ", database: "
<< handshake_response.database
<< ", auth_plugin_name: "
<< handshake_response.auth_plugin_name);
LOG_TRACE(log,
"Capabilities: {}, max_packet_size: {}, character_set: {}, user: {}, auth_response length: {}, database: {}, auth_plugin_name: {}",
handshake_response.capability_flags,
handshake_response.max_packet_size,
static_cast<int>(handshake_response.character_set),
handshake_response.username,
handshake_response.auth_response.length(),
handshake_response.database,
handshake_response.auth_plugin_name);
client_capability_flags = handshake_response.capability_flags;
if (!(client_capability_flags & CLIENT_PROTOCOL_41))
@ -129,7 +125,9 @@ void MySQLHandler::run()
// For commands which are executed without MemoryTracker.
LimitReadBuffer limited_payload(payload, 10000, true, "too long MySQL packet.");
LOG_DEBUG(log, "Received command: " << static_cast<int>(static_cast<unsigned char>(command)) << ". Connection id: " << connection_id << ".");
LOG_DEBUG(log, "Received command: {}. Connection id: {}.",
static_cast<int>(static_cast<unsigned char>(command)), connection_id);
try
{
switch (command)
@ -197,7 +195,7 @@ void MySQLHandler::finishHandshake(MySQLProtocol::HandshakeResponse & packet)
read_bytes(3); /// We can find out whether it is SSLRequest of HandshakeResponse by first 3 bytes.
size_t payload_size = unalignedLoad<uint32_t>(buf) & 0xFFFFFFu;
LOG_TRACE(log, "payload size: " << payload_size);
LOG_TRACE(log, "payload size: {}", payload_size);
if (payload_size == SSL_REQUEST_PAYLOAD_SIZE)
{
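For context on the hunk above: a MySQL wire packet starts with a 4-byte header, a 3-byte little-endian payload length plus a 1-byte sequence id, which is why the code masks a 4-byte load with `0xFFFFFFu`. An illustrative sketch of the same decoding (assuming a little-endian host, as the `unalignedLoad` trick itself does):

``` cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    // Example header: payload length 44 (0x00002c), sequence id 1.
    unsigned char header[4] = {0x2c, 0x00, 0x00, 0x01};

    uint32_t raw = 0;
    std::memcpy(&raw, header, 4);            // same idea as unalignedLoad<uint32_t>
    uint32_t payload_size = raw & 0xFFFFFFu; // keep only the low 3 bytes
    std::printf("payload %u bytes, seq %u\n", payload_size, header[3]);
}
```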
@ -234,18 +232,18 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl
}
catch (const Exception & exc)
{
LOG_ERROR(log, "Authentication for user " << user_name << " failed.");
LOG_ERROR(log, "Authentication for user {} failed.", user_name);
packet_sender->sendPacket(ERR_Packet(exc.code(), "00000", exc.message()), true);
throw;
}
LOG_INFO(log, "Authentication for user " << user_name << " succeeded.");
LOG_INFO(log, "Authentication for user {} succeeded.", user_name);
}
void MySQLHandler::comInitDB(ReadBuffer & payload)
{
String database;
readStringUntilEOF(database, payload);
LOG_DEBUG(log, "Setting current database to " << database);
LOG_DEBUG(log, "Setting current database to {}", database);
connection_context.setCurrentDatabase(database);
packet_sender->sendPacket(OK_Packet(0, client_capability_flags, 0, 0, 1), true);
}
View File
@ -32,7 +32,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_TRACE(log, "Failed to create SSL context. SSL will be disabled. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE(log, "Failed to create SSL context. SSL will be disabled. Error: {}", getCurrentExceptionMessage(false));
ssl_enabled = false;
}
@ -43,7 +43,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_TRACE(log, "Failed to read RSA key pair from server certificate. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE(log, "Failed to read RSA key pair from server certificate. Error: {}", getCurrentExceptionMessage(false));
generateRSAKeys();
}
#endif
@ -122,7 +122,7 @@ void MySQLHandlerFactory::generateRSAKeys()
Poco::Net::TCPServerConnection * MySQLHandlerFactory::createConnection(const Poco::Net::StreamSocket & socket)
{
size_t connection_id = last_connection_id++;
LOG_TRACE(log, "MySQL connection. Id: " << connection_id << ". Address: " << socket.peerAddress().toString());
LOG_TRACE(log, "MySQL connection. Id: {}. Address: {}", connection_id, socket.peerAddress().toString());
#if USE_SSL
return new MySQLHandlerSSL(server, socket, ssl_enabled, connection_id, *public_key, *private_key);
#else
View File
@ -91,7 +91,7 @@ namespace
void setupTmpPath(Logger * log, const std::string & path)
{
LOG_DEBUG(log, "Setting up " << path << " to store temporary data in it");
LOG_DEBUG(log, "Setting up {} to store temporary data in it", path);
Poco::File(path).createDirectories();
@ -101,11 +101,11 @@ void setupTmpPath(Logger * log, const std::string & path)
{
if (it->isFile() && startsWith(it.name(), "tmp"))
{
LOG_DEBUG(log, "Removing old temporary file " << it->path());
LOG_DEBUG(log, "Removing old temporary file {}", it->path());
it->remove();
}
else
LOG_DEBUG(log, "Skipped file in temporary path " << it->path());
LOG_DEBUG(log, "Skipped file in temporary path {}", it->path());
}
}
@ -276,7 +276,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
LOG_TRACE(log, "Will mlockall to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlockall(MCL_CURRENT))
LOG_WARNING(log, "Failed mlockall: " + errnoToString(ErrorCodes::SYSTEM_ERROR));
LOG_WARNING(log, "Failed mlockall: {}", errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed");
}
@ -284,8 +284,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled."
" It could happen due to incorrect ClickHouse package installation."
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep " << executable_path << "'."
" Note that it will not work on 'nosuid' mounted filesystems.");
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems.", executable_path);
}
}
}
@ -349,7 +349,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (rlim.rlim_cur == rlim.rlim_max)
{
LOG_DEBUG(log, "rlimit on number of file descriptors is " << rlim.rlim_cur);
LOG_DEBUG(log, "rlimit on number of file descriptors is {}", rlim.rlim_cur);
}
else
{
@ -357,12 +357,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max);
int rc = setrlimit(RLIMIT_NOFILE, &rlim);
if (rc != 0)
LOG_WARNING(log,
"Cannot set max number of file descriptors to " << rlim.rlim_cur
<< ". Try to specify max_open_files according to your system limits. error: "
<< strerror(errno));
LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, strerror(errno));
else
LOG_DEBUG(log, "Set max number of file descriptors to " << rlim.rlim_cur << " (was " << old << ").");
LOG_DEBUG(log, "Set max number of file descriptors to {} (was {}).", rlim.rlim_cur, old);
}
}
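The hunk above raises the soft `RLIMIT_NOFILE` toward the configured maximum. A self-contained sketch of the same getrlimit/setrlimit dance (illustrative only, not the server's exact logic):

``` cpp
#include <cstdio>
#include <sys/resource.h>

int main()
{
    rlimit rlim{};
    if (0 != getrlimit(RLIMIT_NOFILE, &rlim))
        return 1;

    if (rlim.rlim_cur < rlim.rlim_max)
    {
        const rlim_t old_limit = rlim.rlim_cur;
        rlim.rlim_cur = rlim.rlim_max; // raise the soft limit up to the hard limit
        if (0 != setrlimit(RLIMIT_NOFILE, &rlim))
            std::perror("setrlimit");
        else
            std::printf("raised RLIMIT_NOFILE from %llu to %llu\n",
                        (unsigned long long) old_limit, (unsigned long long) rlim.rlim_cur);
    }
}
```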
@ -372,7 +369,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
/// Initialize DateLUT early, to not interfere with running time of first query.
LOG_DEBUG(log, "Initializing DateLUT.");
DateLUT::instance();
LOG_TRACE(log, "Initialized DateLUT with time zone '" << DateLUT::instance().getTimeZone() << "'.");
LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone());
/// Storage with temporary data for processing of heavy queries.
@ -431,9 +428,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (this_host.empty())
{
this_host = getFQDNOrHostName();
LOG_DEBUG(log,
"Configuration parameter '" + String(host_tag) + "' doesn't exist or exists and empty. Will use '" + this_host
+ "' as replica host.");
LOG_DEBUG(log, "Configuration parameter '{}' doesn't exist or exists and empty. Will use '{}' as replica host.",
host_tag, this_host);
}
String port_str = config().getString(port_tag);
@ -538,8 +534,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Uncompressed cache size was lowered to " << formatReadableSizeWithBinarySuffix(uncompressed_cache_size)
<< " because the system has low amount of memory");
LOG_INFO(log, "Uncompressed cache size was lowered to {} because the system has low amount of memory", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setUncompressedCache(uncompressed_cache_size);
@ -554,8 +549,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (mark_cache_size > max_cache_size)
{
mark_cache_size = max_cache_size;
LOG_INFO(log, "Mark cache size was lowered to " << formatReadableSizeWithBinarySuffix(uncompressed_cache_size)
<< " because the system has low amount of memory");
LOG_INFO(log, "Mark cache size was lowered to {} because the system has low amount of memory", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setMarkCache(mark_cache_size);
@ -579,20 +573,19 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (max_server_memory_usage == 0)
{
max_server_memory_usage = default_max_server_memory_usage;
LOG_INFO(log, "Setting max_server_memory_usage was set to " << formatReadableSizeWithBinarySuffix(max_server_memory_usage));
LOG_INFO(log, "Setting max_server_memory_usage was set to {}", formatReadableSizeWithBinarySuffix(max_server_memory_usage));
}
else if (max_server_memory_usage > default_max_server_memory_usage)
{
max_server_memory_usage = default_max_server_memory_usage;
LOG_INFO(log, "Setting max_server_memory_usage was lowered to " << formatReadableSizeWithBinarySuffix(max_server_memory_usage)
<< " because the system has low amount of memory");
LOG_INFO(log, "Setting max_server_memory_usage was lowered to {} because the system has low amount of memory", formatReadableSizeWithBinarySuffix(max_server_memory_usage));
}
total_memory_tracker.setOrRaiseHardLimit(max_server_memory_usage);
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
LOG_INFO(log, "Loading metadata from " + path);
LOG_INFO(log, "Loading metadata from {}", path);
try
{
@ -694,17 +687,19 @@ int Server::main(const std::vector<std::string> & /*args*/)
" neither clickhouse-server process has CAP_NET_ADMIN capability."
" 'taskstats' performance statistics will be disabled."
" It could happen due to incorrect ClickHouse package installation."
" You can try to resolve the problem manually with 'sudo setcap cap_net_admin=+ep " << executable_path << "'."
" You can try to resolve the problem manually with 'sudo setcap cap_net_admin=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems."
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.");
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.",
executable_path);
}
if (!hasLinuxCapability(CAP_SYS_NICE))
{
LOG_INFO(log, "It looks like the process has no CAP_SYS_NICE capability, the setting 'os_thread_nice' will have no effect."
" It could happen due to incorrect ClickHouse package installation."
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep " << executable_path << "'."
" Note that it will not work on 'nosuid' mounted filesystems.");
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep {}'."
" Note that it will not work on 'nosuid' mounted filesystems.",
executable_path);
}
#else
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");
@ -746,11 +741,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif
)
{
LOG_ERROR(log,
"Cannot resolve listen_host (" << host << "), error " << e.code() << ": " << e.message() << ". "
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
"specify IPv4 address to listen in <listen_host> element of configuration "
"file. Example: <listen_host>0.0.0.0</listen_host>");
"file. Example: <listen_host>0.0.0.0</listen_host>",
host, e.code(), e.message());
}
throw;
@ -802,11 +797,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (listen_try)
{
LOG_ERROR(log, message
<< ". If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
LOG_ERROR(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
"specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
"file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
" Example for disabled IPv4: <listen_host>::</listen_host>");
" Example for disabled IPv4: <listen_host>::</listen_host>",
message);
}
else
{
@ -826,7 +821,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
createHandlerFactory(*this, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
LOG_INFO(log, "Listening for http://" + address.toString());
LOG_INFO(log, "Listening for http://{}", address.toString());
});
/// HTTPS
@ -840,7 +835,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
createHandlerFactory(*this, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
LOG_INFO(log, "Listening for https://" + address.toString());
LOG_INFO(log, "Listening for https://{}", address.toString());
#else
UNUSED(port);
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
@ -861,7 +856,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
socket,
new Poco::Net::TCPServerParams));
LOG_INFO(log, "Listening for connections with native protocol (tcp): " + address.toString());
LOG_INFO(log, "Listening for connections with native protocol (tcp): {}", address.toString());
});
/// TCP with SSL
@ -877,7 +872,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
server_pool,
socket,
new Poco::Net::TCPServerParams));
LOG_INFO(log, "Listening for connections with secure native protocol (tcp_secure): " + address.toString());
LOG_INFO(log, "Listening for connections with secure native protocol (tcp_secure): {}", address.toString());
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
@ -895,7 +890,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"), server_pool, socket, http_params));
LOG_INFO(log, "Listening for replica communication (interserver): http://" + address.toString());
LOG_INFO(log, "Listening for replica communication (interserver): http://{}", address.toString());
});
create_server("interserver_https_port", [&](UInt16 port)
@ -908,7 +903,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"), server_pool, socket, http_params));
LOG_INFO(log, "Listening for secure replica communication (interserver): https://" + address.toString());
LOG_INFO(log, "Listening for secure replica communication (interserver): https://{}", address.toString());
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
@ -928,7 +923,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
socket,
new Poco::Net::TCPServerParams));
LOG_INFO(log, "Listening for MySQL compatibility protocol: " + address.toString());
LOG_INFO(log, "Listening for MySQL compatibility protocol: {}", address.toString());
});
/// Prometheus (if defined and not setup yet with http_port)
@ -941,7 +936,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
createHandlerFactory(*this, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
LOG_INFO(log, "Listening for Prometheus: http://" + address.toString());
LOG_INFO(log, "Listening for Prometheus: http://{}", address.toString());
});
}
@ -966,12 +961,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
dns_cache_updater->start();
{
std::stringstream message;
message << "Available RAM: " << formatReadableSizeWithBinarySuffix(memory_amount) << ";"
<< " physical cores: " << getNumberOfPhysicalCPUCores() << ";"
// on ARM processors it can show only enabled at current moment cores
<< " logical cores: " << std::thread::hardware_concurrency() << ".";
LOG_INFO(log, message.str());
LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
formatReadableSizeWithBinarySuffix(memory_amount),
getNumberOfPhysicalCPUCores(), // on ARM processors this may show only the cores enabled at the moment
std::thread::hardware_concurrency());
}
LOG_INFO(log, "Ready for connections.");
@ -989,9 +982,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
current_connections += server->currentConnections();
}
LOG_INFO(log,
"Closed all listening sockets."
<< (current_connections ? " Waiting for " + toString(current_connections) + " outstanding connections." : ""));
if (current_connections)
LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
else
LOG_INFO(log, "Closed all listening sockets.");
/// Killing remaining queries.
global_context->getProcessList().killAllQueries();
@ -1013,9 +1007,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
}
}
LOG_INFO(
log, "Closed connections." << (current_connections ? " But " + toString(current_connections) + " remains."
" Tip: To increase wait time add to config: <shutdown_wait_unfinished>60</shutdown_wait_unfinished>" : ""));
if (current_connections)
LOG_INFO(log, "Closed connections. But {} remain."
" Tip: To increase wait time add to config: <shutdown_wait_unfinished>60</shutdown_wait_unfinished>", current_connections);
else
LOG_INFO(log, "Closed connections.");
dns_cache_updater.reset();
main_config_reloader.reset();
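
The bulk of this commit converts ostream-style log message construction to fmt-style format strings, as in the hunks above. A minimal side-by-side sketch of the two styles, assuming only the fmt library that this commit adds as a submodule:

```cpp
#include <fmt/format.h>
#include <iostream>
#include <sstream>

int main()
{
    std::string address = "127.0.0.1:8123";

    /// Old style: build the message through an ostringstream.
    std::ostringstream oss;
    oss << "Listening for http://" << address;

    /// New style: positional {} placeholders filled by fmt::format.
    std::string msg = fmt::format("Listening for http://{}", address);

    std::cout << oss.str() << '\n' << msg << '\n';   /// identical output
}
```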

View File

@ -28,7 +28,7 @@
#include <Compression/CompressionFactory.h>
#include <common/logger_useful.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
#include "TCPHandler.h"
@ -115,8 +115,7 @@ void TCPHandler::runImpl()
if (!DatabaseCatalog::instance().isDatabaseExist(default_database))
{
Exception e("Database " + backQuote(default_database) + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE);
LOG_ERROR(log, "Code: " << e.code() << ", e.displayText() = " << e.displayText()
<< ", Stack trace:\n\n" << e.getStackTraceString());
LOG_ERROR(log, "Code: {}, e.displayText() = {}, Stack trace:\n\n{}", e.code(), e.displayText(), e.getStackTraceString());
sendException(e, connection_context.getSettingsRef().calculate_text_stack_trace);
return;
}
@ -379,8 +378,7 @@ void TCPHandler::runImpl()
watch.stop();
LOG_INFO(log, std::fixed << std::setprecision(3)
<< "Processed in " << watch.elapsedSeconds() << " sec.");
LOG_INFO(log, "Processed in {} sec.", watch.elapsedSeconds());
/// It is important to destroy query context here. We do not want it to live arbitrarily longer than the query.
query_context.reset();
@ -566,7 +564,7 @@ void TCPHandler::processOrdinaryQueryWithProcessors()
}
{
PullingPipelineExecutor executor(pipeline);
PullingAsyncPipelineExecutor executor(pipeline);
CurrentMetrics::Increment query_thread_metric_increment{CurrentMetrics::QueryThread};
Block block;
@ -732,14 +730,12 @@ void TCPHandler::receiveHello()
readStringBinary(user, *in);
readStringBinary(password, *in);
LOG_DEBUG(log, "Connected " << client_name
<< " version " << client_version_major
<< "." << client_version_minor
<< "." << client_version_patch
<< ", revision: " << client_revision
<< (!default_database.empty() ? ", database: " + default_database : "")
<< (!user.empty() ? ", user: " + user : "")
<< ".");
LOG_DEBUG(log, "Connected {} version {}.{}.{}, revision: {}{}{}.",
client_name,
client_version_major, client_version_minor, client_version_patch,
client_revision,
(!default_database.empty() ? ", database: " + default_database : ""),
(!user.empty() ? ", user: " + user : ""));
connection_context.setUser(user, password, socket().peerAddress());
}
@ -1205,8 +1201,7 @@ void TCPHandler::run()
/// Timeout - not an error.
if (!strcmp(e.what(), "Timeout"))
{
LOG_DEBUG(log, "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
<< ", e.displayText() = " << e.displayText() << ", e.what() = " << e.what());
LOG_DEBUG(log, "Poco::Exception. Code: {}, e.code() = {}, e.displayText() = {}, e.what() = {}", ErrorCodes::POCO_EXCEPTION, e.code(), e.displayText(), e.what());
}
else
throw;
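
One detail of the conversion above: stream manipulators such as std::fixed << std::setprecision(3) have equivalents in fmt's format-spec syntax. The converted "Processed in {} sec." uses a bare {}, which falls back to fmt's default float representation; a {:.3f} spec would reproduce the old fixed-precision output. A small sketch:

```cpp
#include <fmt/format.h>
#include <iostream>

int main()
{
    double elapsed = 0.123456;

    /// "{}" uses fmt's default (shortest round-trip) float representation...
    std::cout << fmt::format("Processed in {} sec.", elapsed) << '\n';

    /// ...while "{:.3f}" reproduces std::fixed << std::setprecision(3).
    std::cout << fmt::format("Processed in {:.3f} sec.", elapsed) << '\n';
}
```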

View File

@ -35,7 +35,7 @@ public:
{
try
{
LOG_TRACE(log, "TCP Request. Address: " << socket.peerAddress().toString());
LOG_TRACE(log, "TCP Request. Address: {}", socket.peerAddress().toString());
return new TCPHandler(server, socket);
}
catch (const Poco::Net::NetException &)

View File

@ -227,7 +227,7 @@
and to prevent clickhouse executable from being paged out under high IO load.
Enabling this option is recommended but will increase startup time by up to a few seconds.
-->
<mlock_executable>false</mlock_executable>
<mlock_executable>true</mlock_executable>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.tech/docs/en/operations/table_engines/distributed/

View File

@ -251,12 +251,11 @@ public:
void logTree(Poco::Logger * log) const
{
LOG_TRACE(log, "Tree(" << level << "): name=" << (node_name ? *node_name : "NULL")
<< ", access=" << access.toString()
<< ", final_access=" << final_access.toString()
<< ", min_access=" << min_access.toString()
<< ", max_access=" << max_access.toString()
<< ", num_children=" << (children ? children->size() : 0));
LOG_TRACE(log, "Tree({}): name={}, access={}, final_access={}, min_access={}, max_access={}, num_children={}",
level, node_name ? *node_name : "NULL", access.toString(),
final_access.toString(), min_access.toString(), max_access.toString(),
(children ? children->size() : 0));
if (children)
{
for (auto & child : *children | boost::adaptors::map_values)

View File

@ -310,8 +310,8 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
/// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
LOG_WARNING(
&Logger::get("AddressPatterns"),
"Failed to check if the allowed client hosts contain address " << client_address.toString() << ". " << e.displayText()
<< ", code = " << e.code());
"Failed to check if the allowed client hosts contain address {}. {}, code = {}",
client_address.toString(), e.displayText(), e.code());
return false;
}
};
@ -343,8 +343,8 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
/// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
LOG_WARNING(
&Logger::get("AddressPatterns"),
"Failed to check if the allowed client hosts contain address " << client_address.toString() << ". " << e.displayText()
<< ", code = " << e.code());
"Failed to check if the allowed client hosts contain address {}. {}, code = {}",
client_address.toString(), e.displayText(), e.code());
return false;
}
};

View File

@ -200,7 +200,7 @@ bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const Acc
bool is_granted = access->isGranted(flags, args...);
if (trace_log)
LOG_TRACE(trace_log, "Access " << (is_granted ? "granted" : "denied") << ": " << (AccessRightsElement{flags, args...}.toString()));
LOG_TRACE(trace_log, "Access {}: {}", (is_granted ? "granted" : "denied"), (AccessRightsElement{flags, args...}.toString()));
if (is_granted)
return true;
@ -219,7 +219,7 @@ bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const Acc
if constexpr (mode == THROW_IF_ACCESS_DENIED)
throw Exception(user_name + ": " + msg, error_code);
else if constexpr (mode == LOG_WARNING_IF_ACCESS_DENIED)
LOG_WARNING(log_, user_name + ": " + msg + formatSkippedMessage(args...));
LOG_WARNING(log_, "{}: {}{}", user_name, msg, formatSkippedMessage(args...));
};
if (!user)
@ -451,15 +451,18 @@ boost::shared_ptr<const AccessRights> ContextAccess::calculateResultAccess(bool
if (trace_log && (params.readonly == readonly_) && (params.allow_ddl == allow_ddl_) && (params.allow_introspection == allow_introspection_))
{
LOG_TRACE(trace_log, "List of all grants: " << merged_access->toString() << (grant_option ? " WITH GRANT OPTION" : ""));
if (grant_option)
LOG_TRACE(trace_log, "List of all grants: {} WITH GRANT OPTION", merged_access->toString());
else
LOG_TRACE(trace_log, "List of all grants: {}", merged_access->toString());
if (roles_info && !roles_info->getCurrentRolesNames().empty())
{
LOG_TRACE(
trace_log,
"Current_roles: " << boost::algorithm::join(roles_info->getCurrentRolesNames(), ", ")
<< ", enabled_roles: " << boost::algorithm::join(roles_info->getEnabledRolesNames(), ", "));
LOG_TRACE(trace_log, "Current_roles: {}, enabled_roles: {}",
boost::algorithm::join(roles_info->getCurrentRolesNames(), ", "),
boost::algorithm::join(roles_info->getEnabledRolesNames(), ", "));
}
LOG_TRACE(trace_log, "Settings: readonly=" << readonly_ << ", allow_ddl=" << allow_ddl_ << ", allow_introspection_functions=" << allow_introspection_);
LOG_TRACE(trace_log, "Settings: readonly={}, allow_ddl={}, allow_introspection_functions={}", readonly_, allow_ddl_, allow_introspection_);
}
res = std::move(merged_access);

View File

@ -367,7 +367,7 @@ bool DiskAccessStorage::readLists()
auto file_path = getListFilePath(directory_path, type);
if (!std::filesystem::exists(file_path))
{
LOG_WARNING(getLogger(), "File " + file_path.string() + " doesn't exist");
LOG_WARNING(getLogger(), "File {} doesn't exist", file_path.string());
ok = false;
break;
}
@ -496,7 +496,7 @@ void DiskAccessStorage::listsWritingThreadFunc()
/// and then saves the files "users.list", "roles.list", etc. to the same directory.
bool DiskAccessStorage::rebuildLists()
{
LOG_WARNING(getLogger(), "Recovering lists in directory " + directory_path);
LOG_WARNING(getLogger(), "Recovering lists in directory {}", directory_path);
clear();
for (const auto & directory_entry : std::filesystem::directory_iterator(directory_path))

View File

@ -52,18 +52,20 @@ namespace
String user_config = "users." + user_name;
bool has_password = config.has(user_config + ".password");
bool has_no_password = config.has(user_config + ".no_password");
bool has_password_plaintext = config.has(user_config + ".password");
bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex");
bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex");
if (has_password + has_password_sha256_hex + has_password_double_sha1_hex > 1)
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex' is used to specify password for user " + user_name + ". Must be only one of them.",
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex;
if (num_password_fields > 1)
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password' are used to specify password for user " + user_name + ". Must be only one of them.",
ErrorCodes::BAD_ARGUMENTS);
if (!has_password && !has_password_sha256_hex && !has_password_double_sha1_hex)
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
if (num_password_fields < 1)
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
if (has_password)
if (has_password_plaintext)
{
user->authentication = Authentication{Authentication::PLAINTEXT_PASSWORD};
user->authentication.setPassword(config.getString(user_config + ".password"));
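
The validation above counts password fields by summing bools, relying on bool-to-int promotion. A condensed sketch of the same exclusivity check, with hypothetical flags standing in for the config.has() lookups:

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>

/// Hypothetical stand-ins for config.has(user_config + ".<field>").
void validatePasswordFields(const std::string & user_name,
    bool has_no_password, bool has_password_plaintext,
    bool has_password_sha256_hex, bool has_password_double_sha1_hex)
{
    /// Each bool promotes to 0 or 1, so the sum is the number of fields set.
    size_t num_password_fields = has_no_password + has_password_plaintext
        + has_password_sha256_hex + has_password_double_sha1_hex;

    if (num_password_fields > 1)
        throw std::runtime_error("More than one password field is set for user " + user_name);
    if (num_password_fields < 1)
        throw std::runtime_error("No password field is set for user " + user_name);
}

int main()
{
    /// Passes: exactly one field ('password') is present.
    validatePasswordFields("default", false, true, false, false);
}
```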

View File

@ -103,6 +103,7 @@ class QuantileTDigest
struct RadixSortTraits
{
using Element = Centroid;
using Result = Element;
using Key = Value;
using CountType = UInt32;
using KeyBits = UInt32;
@ -114,6 +115,7 @@ class QuantileTDigest
/// The function to get the key from an array element.
static Key & extractKey(Element & elem) { return elem.mean; }
static Result & extractResult(Element & elem) { return elem; }
};
/** Adds a centroid `c` to the digest

View File

@ -61,11 +61,11 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
if (connected)
disconnect();
LOG_TRACE(log_wrapper.get(), "Connecting. Database: "
<< (default_database.empty() ? "(not specified)" : default_database)
<< ". User: " << user
<< (static_cast<bool>(secure) ? ". Secure" : "")
<< (static_cast<bool>(compression) ? "" : ". Uncompressed"));
LOG_TRACE(log_wrapper.get(), "Connecting. Database: {}. User: {}{}{}",
default_database.empty() ? "(not specified)" : default_database,
user,
static_cast<bool>(secure) ? ". Secure" : "",
static_cast<bool>(compression) ? "" : ". Uncompressed");
if (static_cast<bool>(secure))
{
@ -107,11 +107,8 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
sendHello();
receiveHello();
LOG_TRACE(log_wrapper.get(), "Connected to " << server_name
<< " server version " << server_version_major
<< "." << server_version_minor
<< "." << server_version_patch
<< ".");
LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.",
server_name, server_version_major, server_version_minor, server_version_patch);
}
catch (Poco::Net::NetException & e)
{
@ -132,8 +129,6 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
void Connection::disconnect()
{
//LOG_TRACE(log_wrapper.get(), "Disconnecting");
in = nullptr;
last_input_packet_type.reset();
out = nullptr; // can write to socket
@ -186,8 +181,6 @@ void Connection::sendHello()
void Connection::receiveHello()
{
//LOG_TRACE(log_wrapper.get(), "Receiving hello");
/// Receive hello packet.
UInt64 packet_type = 0;
@ -391,8 +384,6 @@ void Connection::sendQuery(
query_id = query_id_;
//LOG_TRACE(log_wrapper.get(), "Sending query");
writeVarUInt(Protocol::Client::Query, *out);
writeStringBinary(query_id, *out);
@ -441,8 +432,6 @@ void Connection::sendCancel()
if (!out)
return;
//LOG_TRACE(log_wrapper.get(), "Sending cancel");
writeVarUInt(Protocol::Client::Cancel, *out);
out->next();
}
@ -450,8 +439,6 @@ void Connection::sendCancel()
void Connection::sendData(const Block & block, const String & name, bool scalar)
{
//LOG_TRACE(log_wrapper.get(), "Sending data");
if (!block_out)
{
if (compression == Protocol::Compression::Enable)
@ -516,19 +503,23 @@ void Connection::sendScalarsData(Scalars & data)
maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes;
double elapsed = watch.elapsedSeconds();
std::stringstream msg;
msg << std::fixed << std::setprecision(3);
msg << "Sent data for " << data.size() << " scalars, total " << rows << " rows in " << elapsed << " sec., "
<< static_cast<size_t>(rows / watch.elapsedSeconds()) << " rows/sec., "
<< maybe_compressed_out_bytes / 1048576.0 << " MiB (" << maybe_compressed_out_bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)";
if (compression == Protocol::Compression::Enable)
msg << ", compressed " << static_cast<double>(maybe_compressed_out_bytes) / out_bytes << " times to "
<< out_bytes / 1048576.0 << " MiB (" << out_bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)";
LOG_DEBUG(log_wrapper.get(),
"Sent data for {} scalars, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), compressed {} times to {} ({}/sec.)",
data.size(), rows, elapsed,
static_cast<size_t>(rows / watch.elapsedSeconds()),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()),
static_cast<double>(maybe_compressed_out_bytes) / out_bytes,
formatReadableSizeWithBinarySuffix(out_bytes),
formatReadableSizeWithBinarySuffix(out_bytes / watch.elapsedSeconds()));
else
msg << ", no compression.";
LOG_DEBUG(log_wrapper.get(), msg.rdbuf());
LOG_DEBUG(log_wrapper.get(),
"Sent data for {} scalars, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), no compression.",
data.size(), rows, elapsed,
static_cast<size_t>(rows / watch.elapsedSeconds()),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()));
}
namespace
@ -616,19 +607,23 @@ void Connection::sendExternalTablesData(ExternalTablesData & data)
maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes;
double elapsed = watch.elapsedSeconds();
std::stringstream msg;
msg << std::fixed << std::setprecision(3);
msg << "Sent data for " << data.size() << " external tables, total " << rows << " rows in " << elapsed << " sec., "
<< static_cast<size_t>(rows / watch.elapsedSeconds()) << " rows/sec., "
<< maybe_compressed_out_bytes / 1048576.0 << " MiB (" << maybe_compressed_out_bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)";
if (compression == Protocol::Compression::Enable)
msg << ", compressed " << static_cast<double>(maybe_compressed_out_bytes) / out_bytes << " times to "
<< out_bytes / 1048576.0 << " MiB (" << out_bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)";
LOG_DEBUG(log_wrapper.get(),
"Sent data for {} external tables, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), compressed {} times to {} ({}/sec.)",
data.size(), rows, elapsed,
static_cast<size_t>(rows / watch.elapsedSeconds()),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()),
static_cast<double>(maybe_compressed_out_bytes) / out_bytes,
formatReadableSizeWithBinarySuffix(out_bytes),
formatReadableSizeWithBinarySuffix(out_bytes / watch.elapsedSeconds()));
else
msg << ", no compression.";
LOG_DEBUG(log_wrapper.get(), msg.rdbuf());
LOG_DEBUG(log_wrapper.get(),
"Sent data for {} external tables, total {} rows in {} sec., {} rows/sec., {} ({}/sec.), no compression.",
data.size(), rows, elapsed,
static_cast<size_t>(rows / watch.elapsedSeconds()),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes),
formatReadableSizeWithBinarySuffix(maybe_compressed_out_bytes / watch.elapsedSeconds()));
}
std::optional<Poco::Net::SocketAddress> Connection::getResolvedAddress() const
@ -682,12 +677,9 @@ Packet Connection::receivePacket()
}
else
{
//LOG_TRACE(log_wrapper.get(), "Receiving packet type");
readVarUInt(res.type, *in);
}
//LOG_TRACE(log_wrapper.get(), "Receiving packet " << res.type << " " << Protocol::Server::toString(res.type));
//std::cerr << "Client got packet: " << Protocol::Server::toString(res.type) << "\n";
switch (res.type)
{
case Protocol::Server::Data: [[fallthrough]];
@ -740,8 +732,6 @@ Packet Connection::receivePacket()
Block Connection::receiveData()
{
//LOG_TRACE(log_wrapper.get(), "Receiving data");
initBlockInput();
return receiveDataImpl(block_in);
}
@ -820,8 +810,6 @@ void Connection::setDescription()
std::unique_ptr<Exception> Connection::receiveException()
{
//LOG_TRACE(log_wrapper.get(), "Receiving exception");
return std::make_unique<Exception>(readException(*in, "Received from " + getDescription()));
}
@ -838,8 +826,6 @@ std::vector<String> Connection::receiveMultistringMessage(UInt64 msg_type)
Progress Connection::receiveProgress()
{
//LOG_TRACE(log_wrapper.get(), "Receiving progress");
Progress progress;
progress.read(*in, server_revision);
return progress;
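
Both throughput messages above derive the same statistics (rows/sec, human-readable sizes, compression ratio) before formatting. A standalone sketch of that arithmetic, with a simplified helper standing in for formatReadableSizeWithBinarySuffix:

```cpp
#include <fmt/format.h>
#include <iostream>
#include <string>

/// Simplified stand-in for formatReadableSizeWithBinarySuffix.
std::string readableSize(double bytes)
{
    const char * units[] = {"B", "KiB", "MiB", "GiB"};
    size_t i = 0;
    while (bytes >= 1024 && i < 3)
    {
        bytes /= 1024;
        ++i;
    }
    return fmt::format("{:.2f} {}", bytes, units[i]);
}

int main()
{
    size_t rows = 1'000'000;
    double elapsed = 0.5;                       /// seconds, e.g. from a stopwatch
    size_t uncompressed_bytes = 64 << 20;       /// bytes before compression
    size_t compressed_bytes = 16 << 20;         /// bytes actually sent

    std::cout << fmt::format(
        "Sent {} rows in {} sec., {} rows/sec., {} ({}/sec.), compressed {:.2f} times to {} ({}/sec.)",
        rows, elapsed,
        static_cast<size_t>(rows / elapsed),
        readableSize(uncompressed_bytes),
        readableSize(uncompressed_bytes / elapsed),
        static_cast<double>(uncompressed_bytes) / compressed_bytes,
        readableSize(compressed_bytes),
        readableSize(compressed_bytes / elapsed)) << '\n';
}
```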

View File

@ -222,8 +222,8 @@ ConnectionPoolWithFailover::tryGetEntry(
auto table_status_it = status_response.table_states_by_id.find(*table_to_check);
if (table_status_it == status_response.table_states_by_id.end())
{
fail_message = "There is no table " + table_to_check->database + "." + table_to_check->table
+ " on server: " + result.entry->getDescription();
const char * message_pattern = "There is no table {}.{} on server: {}";
fail_message = fmt::format(message_pattern, backQuote(table_to_check->database), backQuote(table_to_check->table), result.entry->getDescription());
LOG_WARNING(log, fail_message);
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
@ -248,10 +248,7 @@ ConnectionPoolWithFailover::tryGetEntry(
result.is_up_to_date = false;
result.staleness = delay;
LOG_TRACE(
log, "Server " << result.entry->getDescription() << " has unacceptable replica delay "
<< "for table " << table_to_check->database << "." << table_to_check->table
<< ": " << delay);
LOG_TRACE(log, "Server {} has unacceptable replica delay for table {}.{}: {}", result.entry->getDescription(), table_to_check->database, table_to_check->table, delay);
ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica);
}
}

View File

@ -35,7 +35,7 @@ TimeoutSetter::~TimeoutSetter()
catch (std::exception & e)
{
// Sometimes catched on macos
LOG_ERROR(&Logger::get("Client"), std::string{"TimeoutSetter: Can't reset timeouts: "} + e.what());
LOG_ERROR(&Logger::get("Client"), "TimeoutSetter: Can't reset timeouts: {}", e.what());
}
}
}

View File

@ -549,6 +549,8 @@ void ColumnAggregateFunction::getPermutation(bool /*reverse*/, size_t /*limit*/,
res[i] = i;
}
void ColumnAggregateFunction::updatePermutation(bool, size_t, int, Permutation &, EqualRanges&) const {}
void ColumnAggregateFunction::gather(ColumnGathererStream & gatherer)
{
gatherer.gather(*this);

View File

@ -193,6 +193,7 @@ public:
}
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const override;
/** More efficient manipulation methods */
Container & getData()

View File

@ -737,6 +737,76 @@ void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_h
}
}
void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const
{
if (limit >= size() || limit >= equal_range.back().second)
limit = 0;
size_t n = equal_range.size();
if (limit)
--n;
EqualRanges new_ranges;
for (size_t i = 0; i < n; ++i)
{
const auto& [first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, Less<false>(*this, nan_direction_hint));
else
std::sort(res.begin() + first, res.begin() + last, Less<true>(*this, nan_direction_hint));
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto& [first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<false>(*this, nan_direction_hint));
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less<true>(*this, nan_direction_hint));
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
{
new_ranges.emplace_back(new_first, new_last);
}
}
equal_range = std::move(new_ranges);
}
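
Every updatePermutation implementation added in this commit follows the same scheme: within each equal range left over from the previous sort key, sort the permutation by the current column, then split the range wherever adjacent values differ, yielding finer ranges for the next key. A condensed sketch of that refinement step over a plain integer column, omitting the limit/partial_sort branch:

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

using Permutation = std::vector<size_t>;
using EqualRanges = std::vector<std::pair<size_t, size_t>>;

/// Refine `ranges` by the next sort key in `data`: sort res[first, last)
/// within each range, then keep only the sub-ranges that are still tied.
void updatePermutation(const std::vector<int> & data, Permutation & res, EqualRanges & ranges)
{
    EqualRanges new_ranges;
    for (const auto & [first, last] : ranges)
    {
        std::sort(res.begin() + first, res.begin() + last,
            [&](size_t a, size_t b) { return data[a] < data[b]; });

        size_t new_first = first;
        for (size_t j = first + 1; j < last; ++j)
        {
            if (data[res[new_first]] != data[res[j]])
            {
                if (j - new_first > 1)
                    new_ranges.emplace_back(new_first, j);
                new_first = j;
            }
        }
        if (last - new_first > 1)
            new_ranges.emplace_back(new_first, last);
    }
    ranges = std::move(new_ranges);
}

int main()
{
    std::vector<int> data = {3, 1, 3, 1};
    Permutation res = {0, 1, 2, 3};
    EqualRanges ranges = {{0, 4}};            /// everything tied before the first key
    updatePermutation(data, res, ranges);     /// res: {1, 3, 0, 2}, ranges: {{0, 2}, {2, 4}}
}
```

Repeated over the columns of a multi-column sort key, this progressively refines the permutation while only re-sorting the stretches that are still tied.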
ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const
{

View File

@ -73,6 +73,7 @@ public:
template <typename Type> ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
void reserve(size_t n) override;
size_t byteSize() const override;
size_t allocatedBytes() const override;

View File

@ -120,6 +120,8 @@ void ColumnConst::getPermutation(bool /*reverse*/, size_t /*limit*/, int /*nan_d
res[i] = i;
}
void ColumnConst::updatePermutation(bool, size_t, int, Permutation &, EqualRanges &) const {}
void ColumnConst::updateWeakHash32(WeakHash32 & hash) const
{
if (hash.getData().size() != s)

View File

@ -170,6 +170,7 @@ public:
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
size_t byteSize() const override
{

View File

@ -108,6 +108,76 @@ void ColumnDecimal<T>::getPermutation(bool reverse, size_t limit, int , IColumn:
permutation(reverse, limit, res);
}
template <typename T>
void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const
{
if (limit >= data.size() || limit >= equal_range.back().second)
limit = 0;
size_t n = equal_range.size();
if (limit)
--n;
EqualRanges new_ranges;
for (size_t i = 0; i < n; ++i)
{
const auto& [first, last] = equal_range[i];
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
[this](size_t a, size_t b) { return data[a] > data[b]; });
else
std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
[this](size_t a, size_t b) { return data[a] < data[b]; });
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (data[res[new_first]] != data[res[j]])
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto& [first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] > data[b]; });
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] < data[b]; });
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (data[res[new_first]] != data[res[j]])
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (data[res[new_first]] == data[res[j]])
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
equal_range = std::move(new_ranges);
}
template <typename T>
ColumnPtr ColumnDecimal<T>::permute(const IColumn::Permutation & perm, size_t limit) const
{

View File

@ -108,6 +108,7 @@ public:
void updateWeakHash32(WeakHash32 & hash) const override;
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges& equal_range) const override;
MutableColumnPtr cloneResized(size_t size) const override;
@ -152,6 +153,8 @@ public:
const T & getElement(size_t n) const { return data[n]; }
T & getElement(size_t n) { return data[n]; }
UInt32 getScale() const {return scale;}
protected:
Container data;
UInt32 scale;

View File

@ -162,6 +162,71 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir
}
}
void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const
{
if (limit >= size() || limit >= equal_range.back().second)
limit = 0;
size_t k = equal_range.size();
if (limit)
--k;
EqualRanges new_ranges;
for (size_t i = 0; i < k; ++i)
{
const auto& [first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, less<false>(*this));
else
std::sort(res.begin() + first, res.begin() + last, less<true>(*this));
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto& [first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (memcmpSmallAllowOverflow15(chars.data() + res[j] * n, chars.data() + res[new_first] * n, n) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
equal_range = std::move(new_ranges);
}
void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);

View File

@ -118,6 +118,8 @@ public:
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;

View File

@ -121,6 +121,11 @@ public:
throw Exception("getPermutation is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void updatePermutation(bool, size_t, int, Permutation &, EqualRanges &) const override
{
throw Exception("updatePermutation is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void gather(ColumnGathererStream &) override
{
throw Exception("Method gather is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);

View File

@ -314,6 +314,76 @@ void ColumnLowCardinality::getPermutation(bool reverse, size_t limit, int nan_di
}
}
void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const
{
if (limit >= size() || limit >= equal_range.back().second)
limit = 0;
size_t n = equal_range.size();
if (limit)
--n;
EqualRanges new_ranges;
for (size_t i = 0; i < n; ++i)
{
const auto& [first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; });
else
std::sort(res.begin() + first, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) < 0; });
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (compareAt(res[new_first], res[j], *this, nan_direction_hint) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto& [first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; });
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b)
{return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) < 0; });
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (getDictionary().compareAt(getIndexes().getUInt(res[new_first]), getIndexes().getUInt(res[j]), getDictionary(), nan_direction_hint) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (getDictionary().compareAt(getIndexes().getUInt(res[new_first]), getIndexes().getUInt(res[j]), getDictionary(), nan_direction_hint) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
equal_range = std::move(new_ranges);
}
std::vector<MutableColumnPtr> ColumnLowCardinality::scatter(ColumnIndex num_columns, const Selector & selector) const
{
auto columns = getIndexes().scatter(num_columns, selector);

View File

@ -111,6 +111,8 @@ public:
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const override;
ColumnPtr replicate(const Offsets & offsets) const override
{
return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().replicate(offsets));

View File

@ -321,6 +321,75 @@ void ColumnNullable::getPermutation(bool reverse, size_t limit, int null_directi
}
}
void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const
{
if (limit >= equal_range.back().second || limit >= size())
limit = 0;
EqualRanges new_ranges, temp_ranges;
for (const auto &[first, last] : equal_range)
{
bool direction = ((null_direction_hint > 0) != reverse);
/// Shift all NULL values to the end.
size_t read_idx = first;
size_t write_idx = first;
while (read_idx < last && (isNullAt(res[read_idx])^direction))
{
++read_idx;
++write_idx;
}
++read_idx;
/// Invariants:
/// write_idx < read_idx
/// write_idx points to NULL
/// read_idx will be incremented to position of next not-NULL
/// there are range of NULLs between write_idx and read_idx - 1,
/// We are moving elements from end to begin of this range,
/// so range will "bubble" towards the end.
/// Relative order of NULL elements could be changed,
/// but relative order of non-NULLs is preserved.
while (read_idx < last && write_idx < last)
{
if (isNullAt(res[read_idx])^direction)
{
std::swap(res[read_idx], res[write_idx]);
++write_idx;
}
++read_idx;
}
if (write_idx - first > 1)
{
if (direction)
temp_ranges.emplace_back(first, write_idx);
else
new_ranges.emplace_back(first, write_idx);
}
if (last - write_idx > 1)
{
if (direction)
new_ranges.emplace_back(write_idx, last);
else
temp_ranges.emplace_back(write_idx, last);
}
}
while (!new_ranges.empty() && limit && limit <= new_ranges.back().first)
new_ranges.pop_back();
if (!temp_ranges.empty())
getNestedColumn().updatePermutation(reverse, limit, null_direction_hint, res, temp_ranges);
equal_range.resize(temp_ranges.size() + new_ranges.size());
std::merge(temp_ranges.begin(), temp_ranges.end(), new_ranges.begin(), new_ranges.end(), equal_range.begin());
}
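
The read_idx/write_idx loop above is a partition that preserves the relative order of the kept side while allowing the shifted side to be reordered. A compact sketch of the same two-pointer pass, assuming NULLs (std::nullopt here) move to the end:

```cpp
#include <cstddef>
#include <optional>
#include <utility>
#include <vector>

/// Move all std::nullopt entries of `v` to the end; the relative order of
/// engaged values is preserved (mirrors the read_idx/write_idx loop above).
size_t partitionNullsToEnd(std::vector<std::optional<int>> & v)
{
    size_t write_idx = 0;
    for (size_t read_idx = 0; read_idx < v.size(); ++read_idx)
    {
        if (v[read_idx].has_value())   /// non-NULL: compact it to the front
        {
            std::swap(v[write_idx], v[read_idx]);
            ++write_idx;
        }
    }
    return write_idx;   /// boundary: [0, write_idx) non-NULL, [write_idx, size) NULL
}

int main()
{
    std::vector<std::optional<int>> v = {std::nullopt, 1, std::nullopt, 2, 3};
    size_t boundary = partitionNullsToEnd(v);   /// v: {1, 2, 3, nullopt, nullopt}, boundary == 3
    (void)boundary;
}
```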
void ColumnNullable::gather(ColumnGathererStream & gatherer)
{
gatherer.gather(*this);

View File

@ -78,6 +78,7 @@ public:
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
void getPermutation(bool reverse, size_t limit, int null_direction_hint, Permutation & res) const override;
void updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const override;
void reserve(size_t n) override;
size_t byteSize() const override;
size_t allocatedBytes() const override;

View File

@ -302,6 +302,77 @@ void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_directio
}
}
void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_range) const
{
if (limit >= size() || limit > equal_range.back().second)
limit = 0;
EqualRanges new_ranges;
auto less_true = less<true>(*this);
auto less_false = less<false>(*this);
size_t n = equal_range.size();
if (limit)
--n;
for (size_t i = 0; i < n; ++i)
{
const auto &[first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, less_false);
else
std::sort(res.begin() + first, res.begin() + last, less_true);
size_t new_first = first;
for (size_t j = first + 1; j < last; ++j)
{
if (memcmpSmallAllowOverflow15(
chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto &[first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less_false);
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less_true);
size_t new_first = first;
for (size_t j = first + 1; j < limit; ++j)
{
if (memcmpSmallAllowOverflow15(
chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
size_t new_last = limit;
for (size_t j = limit; j < last; ++j)
{
if (memcmpSmallAllowOverflow15(
chars.data() + offsetAt(res[j]), sizeAt(res[j]) - 1,
chars.data() + offsetAt(res[new_first]), sizeAt(res[new_first]) - 1) == 0)
{
std::swap(res[j], res[new_last]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
equal_range = std::move(new_ranges);
}
ColumnPtr ColumnString::replicate(const Offsets & replicate_offsets) const
{
@ -440,6 +511,77 @@ void ColumnString::getPermutationWithCollation(const Collator & collator, bool r
}
}
void ColumnString::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation &res, EqualRanges &equal_range) const
{
if (limit >= size() || limit >= equal_range.back().second)
limit = 0;
size_t n = equal_range.size();
if (limit)
--n;
EqualRanges new_ranges;
for (size_t i = 0; i < n; ++i)
{
const auto& [first, last] = equal_range[i];
if (reverse)
std::sort(res.begin() + first, res.begin() + last, lessWithCollation<false>(*this, collator));
else
std::sort(res.begin() + first, res.begin() + last, lessWithCollation<true>(*this, collator));
auto new_first = first;
for (auto j = first + 1; j < last; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
if (last - new_first > 1)
new_ranges.emplace_back(new_first, last);
}
if (limit)
{
const auto& [first, last] = equal_range.back();
if (reverse)
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation<false>(*this, collator));
else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation<true>(*this, collator));
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) != 0)
{
if (j - new_first > 1)
new_ranges.emplace_back(new_first, j);
new_first = j;
}
}
auto new_last = limit;
for (auto j = limit; j < last; ++j)
{
if (collator.compare(
reinterpret_cast<const char *>(&chars[offsetAt(res[new_first])]), sizeAt(res[new_first]),
reinterpret_cast<const char *>(&chars[offsetAt(res[j])]), sizeAt(res[j])) == 0)
{
std::swap(res[new_last], res[j]);
++new_last;
}
}
if (new_last - new_first > 1)
new_ranges.emplace_back(new_first, new_last);
}
equal_range = std::move(new_ranges);
}
void ColumnString::protect()
{

Some files were not shown because too many files have changed in this diff.