mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)
Merge remote-tracking branch 'ck/master' into mysql_global_variables
commit 53997f23e6
@@ -289,8 +289,9 @@ set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

 if (MAKE_STATIC_LIBRARIES)
     set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
-    if (OS_LINUX)
+    if (OS_LINUX AND NOT ARCH_ARM)
         # Slightly more efficient code can be generated
+        # It's disabled for ARM because otherwise ClickHouse cannot run on Android.
         set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
         set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no-pie")
@@ -13,8 +13,3 @@ ClickHouse is an open-source column-oriented database management system that all
 * [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
-
-## Upcoming Events
-
-* [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date.
-* [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date.
@@ -51,6 +51,7 @@
 #include <Common/getMultipleKeysFromConfig.h>
 #include <Common/ClickHouseRevision.h>
 #include <Common/Config/ConfigProcessor.h>
+#include <Common/SymbolIndex.h>

 #if !defined(ARCADIA_BUILD)
 #   include <Common/config_version.h>
@@ -84,7 +85,8 @@ static const size_t signal_pipe_buf_size =
     + sizeof(ucontext_t)
     + sizeof(StackTrace)
     + sizeof(UInt32)
-    + max_query_id_size + 1; /// query_id + varint encoded length
+    + max_query_id_size + 1 /// query_id + varint encoded length
+    + sizeof(void*);

 using signal_function = void(int, siginfo_t*, void*);
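
The buffer is sized as a worst-case sum of every field the handler may write, and the extra `sizeof(void*)` term accounts for the thread-status pointer added below; the whole report goes out in one write() to a pipe, and POSIX guarantees that writes of at most PIPE_BUF bytes to a pipe are atomic, so reports from concurrently crashing threads cannot interleave. A minimal sketch of the same pattern (hypothetical names and fields, not the actual ClickHouse code):

```cpp
#include <csignal>
#include <cstddef>
#include <limits.h>
#include <unistd.h>

/// Worst-case size of one crash report. Keeping it <= PIPE_BUF makes the
/// single write() below atomic, so reports from two threads never interleave.
static constexpr size_t max_query_id_size = 127;
static constexpr size_t report_buf_size =
    sizeof(int)             /// signal number
    + sizeof(siginfo_t)
    + max_query_id_size + 1
    + sizeof(void *);       /// e.g. a thread-status pointer, as in the diff above

static_assert(report_buf_size <= PIPE_BUF, "report must fit one atomic pipe write");

static int signal_pipe_write_fd = -1;   /// write end of a self-pipe, set up at startup

extern "C" void faultHandler(int sig, siginfo_t * info, void *)
{
    char buf[report_buf_size];
    size_t pos = 0;
    __builtin_memcpy(buf + pos, &sig, sizeof(sig));
    pos += sizeof(sig);
    __builtin_memcpy(buf + pos, info, sizeof(*info));
    pos += sizeof(*info);
    /// ... append thread id, query id, etc., then one async-signal-safe write:
    (void)write(signal_pipe_write_fd, buf, pos);
}
```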
@@ -134,6 +136,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
     DB::writePODBinary(stack_trace, out);
     DB::writeBinary(UInt32(getThreadId()), out);
     DB::writeStringBinary(query_id, out);
+    DB::writePODBinary(DB::current_thread, out);

     out.next();
@@ -217,16 +220,18 @@ public:
                 StackTrace stack_trace(NoCapture{});
                 UInt32 thread_num;
                 std::string query_id;
+                DB::ThreadStatus * thread_ptr{};

                 DB::readPODBinary(info, in);
                 DB::readPODBinary(context, in);
                 DB::readPODBinary(stack_trace, in);
                 DB::readBinary(thread_num, in);
                 DB::readBinary(query_id, in);
+                DB::readPODBinary(thread_ptr, in);

                 /// This allows to receive more signals if failure happens inside onFault function.
                 /// Example: segfault while symbolizing stack trace.
-                std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id); }).detach();
+                std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id, thread_ptr); }).detach();
             }
         }
     }
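
Dispatching onFault() to a detached thread means the listener immediately returns to reading the pipe, so if fault handling itself crashes (the comment's example: a segfault while symbolizing the stack trace), the next signal can still be received and logged. A self-contained sketch of that listener pattern (stand-in types, not the real SignalListener):

```cpp
#include <thread>
#include <unistd.h>
#include <cstdio>

struct CrashReport { int sig; };        /// stand-in for the full report read above

static void onFaultLike(CrashReport r)  /// stand-in for onFault(): may itself crash
{
    std::fprintf(stderr, "Received signal %d\n", r.sig);
}

void listenerLoop(int pipe_read_fd)
{
    CrashReport r;
    while (read(pipe_read_fd, &r, sizeof(r)) == sizeof(r))
        /// Detach so the listener keeps reading while the report is processed.
        std::thread([r] { onFaultLike(r); }).detach();
}
```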
@@ -237,7 +242,8 @@ private:

     void onTerminate(const std::string & message, UInt32 thread_num) const
     {
-        LOG_FATAL(log, "(version {}{}) (from thread {}) {}", VERSION_STRING, VERSION_OFFICIAL, thread_num, message);
+        LOG_FATAL(log, "(version {}{}, {}) (from thread {}) {}",
+            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message);
     }

     void onFault(
@@ -246,21 +252,30 @@ private:
         const ucontext_t & context,
         const StackTrace & stack_trace,
         UInt32 thread_num,
-        const std::string & query_id) const
+        const std::string & query_id,
+        DB::ThreadStatus * thread_ptr) const
     {
+        DB::ThreadStatus thread_status;
+
+        /// Send logs from this thread to client if possible.
+        /// It will allow client to see failure messages directly.
+        if (thread_ptr)
+        {
+            if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
+                DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
+        }
+
         LOG_FATAL(log, "########################################");

-        std::stringstream message;
-        message << "(version " << VERSION_STRING << VERSION_OFFICIAL << ")";
-        message << " (from thread " << thread_num << ")";
-        if (query_id.empty())
-            message << " (no query)";
-        else
-            message << " (query_id: " << query_id << ")";
-        message << " Received signal " << strsignal(sig) << " (" << sig << ").";
-
-        LOG_FATAL(log, message.str());
+        if (query_id.empty())
+        {
+            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})",
+                VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, strsignal(sig), sig);
+        }
+        else
+        {
+            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
+                VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, query_id, strsignal(sig), sig);
+        }

         LOG_FATAL(log, signalToErrorMessage(sig, info, context));
@@ -280,6 +295,10 @@ private:

         /// Write symbolized stack trace line by line for better grep-ability.
         stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+
+        /// When everything is done, we will try to send these error messages to client.
+        if (thread_ptr)
+            thread_ptr->onFatalError();
     }
 };
@@ -293,17 +312,15 @@ static void sanitizerDeathCallback()

     StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe.

-    std::stringstream message;
-    message << "(version " << VERSION_STRING << VERSION_OFFICIAL << ")";
-    message << " (from thread " << getThreadId() << ")";
-    if (query_id.size == 0)
-        message << " (no query)";
-    else
-        message << " (query_id: " << query_id << ")";
-    message << " Sanitizer trap.";
-
-    LOG_FATAL(log, message.str());
+    if (query_id.size == 0)
+    {
+        LOG_FATAL(log, "(version {}{}) (from thread {}) (no query) Sanitizer trap.",
+            VERSION_STRING, VERSION_OFFICIAL, getThreadId());
+    }
+    else
+    {
+        LOG_FATAL(log, "(version {}{}) (from thread {}) (query_id: {}) Sanitizer trap.",
+            VERSION_STRING, VERSION_OFFICIAL, getThreadId(), query_id);
+    }

     /// Just in case print our own stack trace. In case when llvm-symbolizer does not work.
@@ -712,12 +729,23 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()

     signal_listener = std::make_unique<SignalListener>(*this);
     signal_listener_thread.start(*signal_listener);
+
+#if defined(__ELF__) && !defined(__FreeBSD__)
+    String build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
+    if (build_id_hex.empty())
+        build_id_info = "no build id";
+    else
+        build_id_info = "build id: " + build_id_hex;
+#else
+    build_id_info = "no build id";
+#endif
 }

 void BaseDaemon::logRevision() const
 {
     Poco::Logger::root().information("Starting " + std::string{VERSION_FULL}
         + " with revision " + std::to_string(ClickHouseRevision::get())
+        + ", " + build_id_info
         + ", PID " + std::to_string(getpid()));
 }
@@ -198,6 +198,8 @@ protected:
     std::string config_path;
     DB::ConfigProcessor::LoadedConfig loaded_config;
     Poco::Util::AbstractConfiguration * last_configuration = nullptr;
+
+    String build_id_info;
 };
@@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54435)
+SET(VERSION_REVISION 54436)
 SET(VERSION_MAJOR 20)
-SET(VERSION_MINOR 5)
+SET(VERSION_MINOR 6)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
-SET(VERSION_DESCRIBE v20.5.1.1-prestable)
-SET(VERSION_STRING 20.5.1.1)
+SET(VERSION_GITHASH efc57fb063b3fb4df968d916720ec4d4ced4642e)
+SET(VERSION_DESCRIBE v20.6.1.1-prestable)
+SET(VERSION_STRING 20.6.1.1)
 # end of autochange
@@ -18,7 +18,7 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})

-# glibc-compatibility library relies to fixed version of libc headers
+# glibc-compatibility library relies to constant version of libc headers
 # (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
 # This is for x86_64. For other architectures we have separate toolchains.
 if (ARCH_AMD64 AND NOT_UNBUNDLED)
@@ -1,23 +1,31 @@
 option (ENABLE_JEMALLOC "Enable jemalloc allocator" ${ENABLE_LIBRARIES})

-if (SANITIZE OR NOT OS_LINUX OR NOT (ARCH_AMD64 OR ARCH_ARM))
+if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN))
     set (ENABLE_JEMALLOC OFF)
-    message (STATUS "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used on Linux with x86_64 or aarch64.")
+    message (STATUS "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64 or aarch64 on linux or freebsd.")
 endif ()

 if (ENABLE_JEMALLOC)
+    if (NOT OS_LINUX)
+        message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
+    endif()
+
     option (USE_INTERNAL_JEMALLOC "Use internal jemalloc library" ${NOT_UNBUNDLED})

     if (USE_INTERNAL_JEMALLOC)
-        # ThreadPool select job randomly, and there can be some threads that had been
-        # performed some memory heavy task before and will be inactive for some time,
-        # but until it will became active again, the memory will not be freed since by
-        # default each thread has it's own arena, but there should be not more then
-        # 4*CPU arenas (see opt.nareans description).
-        #
-        # By enabling percpu_arena number of arenas limited to number of CPUs and hence
-        # this problem should go away.
-        set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0")
+        if (OS_LINUX)
+            # ThreadPool select job randomly, and there can be some threads that had been
+            # performed some memory heavy task before and will be inactive for some time,
+            # but until it will became active again, the memory will not be freed since by
+            # default each thread has it's own arena, but there should be not more then
+            # 4*CPU arenas (see opt.nareans description).
+            #
+            # By enabling percpu_arena number of arenas limited to number of CPUs and hence
+            # this problem should go away.
+            set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0")
+        else()
+            set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0")
+        endif()
         # CACHE variable is empty, to allow changing defaults without necessity
         # to purge cache
         set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
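
The string configured here becomes jemalloc's built-in default option string. On Linux, percpu_arena:percpu bounds the number of arenas by the number of CPUs, so memory freed by a worker thread that then goes idle is not stranded in its private arena. For reference, jemalloc also lets an application supply the same defaults itself through its documented malloc_conf symbol; a hedged sketch, assuming an unprefixed jemalloc build linked into the binary:

```cpp
/// jemalloc reads this global before the first allocation; it has the same
/// effect as baking the string in at configure time or setting MALLOC_CONF.
extern "C" const char * malloc_conf = "percpu_arena:percpu,oversize_threshold:0";

int main()
{
    char * p = new char[1 << 20];   /// first allocation: options already applied
    delete[] p;
}
```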
@@ -71,14 +79,26 @@ if (ENABLE_JEMALLOC)
         target_include_directories(jemalloc PRIVATE ${LIBRARY_DIR}/include)
         target_include_directories(jemalloc SYSTEM PUBLIC include)

-        set(JEMALLOC_INCLUDE)
-        if (ARCH_AMD64)
-            set(JEMALLOC_INCLUDE_PREFIX include_linux_x86_64)
-        elseif (ARCH_ARM)
-            set(JEMALLOC_INCLUDE_PREFIX include_linux_aarch64)
-        endif ()
-        target_include_directories(jemalloc SYSTEM PUBLIC
-            ${JEMALLOC_INCLUDE_PREFIX})
+        set (JEMALLOC_INCLUDE_PREFIX)
+        # OS_
+        if (OS_LINUX)
+            set (JEMALLOC_INCLUDE_PREFIX "include_linux")
+        elseif (OS_FREEBSD)
+            set (JEMALLOC_INCLUDE_PREFIX "include_freebsd")
+        elseif (OS_DARWIN)
+            set (JEMALLOC_INCLUDE_PREFIX "include_darwin")
+        else ()
+            message (FATAL_ERROR "This OS is not supported")
+        endif ()
+        # ARCH_
+        if (ARCH_AMD64)
+            set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_x86_64")
+        elseif (ARCH_ARM)
+            set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64")
+        else ()
+            message (FATAL_ERROR "This arch is not supported")
+        endif ()
+
+        configure_file(${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h.in
+            ${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h)
         target_include_directories(jemalloc SYSTEM PRIVATE
@@ -128,6 +148,10 @@ if (ENABLE_JEMALLOC)
     endif ()

     set_property(TARGET jemalloc APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS USE_JEMALLOC=1)
+    if (MAKE_STATIC_LIBRARIES)
+        # To detect whether we need to register jemalloc for osx as default zone.
+        set_property(TARGET jemalloc APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS BUNDLED_STATIC_JEMALLOC=1)
+    endif()

     message (STATUS "Using jemalloc")
 else ()
@@ -1,3 +1,13 @@
+// OSX does not have this for system alloc functions, so you will get
+// "exception specification in declaration" error.
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#  undef JEMALLOC_NOTHROW
+#  define JEMALLOC_NOTHROW
+
+#  undef JEMALLOC_CXX_THROW
+#  define JEMALLOC_CXX_THROW
+#endif
+
 /*
  * The je_ prefix on the following public symbol declarations is an artifact
  * of namespace management, and should be omitted in application code unless
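
The reason for this override: when jemalloc takes over the system allocator's symbols, its declarations of malloc/free must match the ones in the system headers, and on macOS/FreeBSD those carry no exception specification, so the throw()-style annotations have to be blanked out. A minimal model of the fix (hypothetical macro and function names, not jemalloc's real header):

```cpp
/// On macOS/FreeBSD the annotation macro expands to nothing so the signature
/// matches the system header's unannotated declaration; elsewhere it can keep
/// the throw() specification. Redeclaring a function with a mismatched
/// (dynamic) exception specification is exactly the compile error quoted above.
#if defined(__APPLE__) || defined(__FreeBSD__)
#    define MY_NOTHROW
#else
#    define MY_NOTHROW throw()
#endif

extern "C" void * my_malloc(unsigned long size) MY_NOTHROW;
```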
@@ -0,0 +1,372 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed.  This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
#define JEMALLOC_PREFIX "je_"
#define JEMALLOC_CPREFIX "JE_"

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses.  This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
#define JEMALLOC_OS_UNFAIR_LOCK

/* Defined if syscall(2) is usable. */
/* #undef JEMALLOC_USE_SYSCALL */

/*
 * Defined if secure_getenv(3) is available.
 */
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
 * Defined if issetugid(2) is available.
 */
#define JEMALLOC_HAVE_ISSETUGID

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */

/*
 * Defined if mach_absolute_time() is available.
 */
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME 1

/*
 * Defined if _malloc_thread_cleanup() exists.  At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library.  Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
/* #undef JEMALLOC_THREADED_INIT */

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
/* #undef JEMALLOC_MUTEX_INIT_CB */

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
/* #undef JEMALLOC_DSS */

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/*
 * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 29

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
/* #undef JEMALLOC_RETAIN */

/* TLS is used to map arenas and magazine caches to threads. */
/* #undef JEMALLOC_TLS */

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities.  We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
#define JEMALLOC_ZONE

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */

/* GNU specific sched_getcpu support */
/* #undef JEMALLOC_HAVE_SCHED_GETCPU */

/* GNU specific sched_setaffinity support */
/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */

/*
 * If defined, all the features necessary for background threads are present.
 */
/* #undef JEMALLOC_BACKGROUND_THREAD */

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
/* #undef JEMALLOC_IS_MALLOC */

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
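
A note on this generated config versus the x86_64 variant that follows: LG_PAGE 16 encodes Apple Silicon's 64 KiB pages (2^16) and LG_HUGEPAGE 29 a 512 MiB huge page, while the darwin_x86_64 header below uses LG_PAGE 12 (4 KiB) and LG_HUGEPAGE 21 (2 MiB). jemalloc refuses to start if the runtime page size exceeds the compiled-in one, which a quick check makes visible (hedged sketch, not part of the commit):

```cpp
#include <unistd.h>
#include <cstdio>

int main()
{
    /// jemalloc's compile-time LG_PAGE must cover the runtime page size:
    /// 1 << 16 == 65536 on Apple Silicon, 1 << 12 == 4096 on typical x86_64.
    long page = sysconf(_SC_PAGESIZE);
    std::printf("page size: %ld bytes (log2 = %d)\n", page, __builtin_ctzl(page));
    return 0;
}
```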
@@ -0,0 +1,372 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed.  This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
#define JEMALLOC_PREFIX "je_"
#define JEMALLOC_CPREFIX "JE_"

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT __asm__ volatile("pause")
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 1

/*
 * Number of significant bits in virtual addresses.  This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
#define JEMALLOC_OS_UNFAIR_LOCK

/* Defined if syscall(2) is usable. */
/* #undef JEMALLOC_USE_SYSCALL */

/*
 * Defined if secure_getenv(3) is available.
 */
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
 * Defined if issetugid(2) is available.
 */
#define JEMALLOC_HAVE_ISSETUGID

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */

/*
 * Defined if mach_absolute_time() is available.
 */
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME 1

/*
 * Defined if _malloc_thread_cleanup() exists.  At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library.  Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
/* #undef JEMALLOC_THREADED_INIT */

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
/* #undef JEMALLOC_MUTEX_INIT_CB */

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
/* #undef JEMALLOC_DSS */

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/*
 * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 21

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
/* #undef JEMALLOC_RETAIN */

/* TLS is used to map arenas and magazine caches to threads. */
/* #undef JEMALLOC_TLS */

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities.  We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
#define JEMALLOC_ZONE

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */

/* GNU specific sched_getcpu support */
/* #undef JEMALLOC_HAVE_SCHED_GETCPU */

/* GNU specific sched_setaffinity support */
/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */

/*
 * If defined, all the features necessary for background threads are present.
 */
/* #undef JEMALLOC_BACKGROUND_THREAD */

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
/* #undef JEMALLOC_IS_MALLOC */

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@@ -0,0 +1,373 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed.  This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses.  This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
 * Defined if issetugid(2) is available.
 */
#define JEMALLOC_HAVE_ISSETUGID

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
// Only since 12.1-STABLE
// #define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1

/*
 * Defined if mach_absolute_time() is available.
 */
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME 1

/*
 * Defined if _malloc_thread_cleanup() exists.  At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library.  Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
#define JEMALLOC_MALLOC_THREAD_CLEANUP

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
/* #undef JEMALLOC_THREADED_INIT */

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
#define JEMALLOC_MUTEX_INIT_CB 1

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/*
 * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 29

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
/* #undef JEMALLOC_RETAIN */

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities.  We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
/* #undef JEMALLOC_ZONE */

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
#define JEMALLOC_SYSCTL_VM_OVERCOMMIT
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/* GNU specific sched_getcpu support */
/* #undef JEMALLOC_HAVE_SCHED_GETCPU */

/* GNU specific sched_setaffinity support */
/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */

/*
 * If defined, all the features necessary for background threads are present.
 */
#define JEMALLOC_BACKGROUND_THREAD 1

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -0,0 +1,373 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT __asm__ volatile("pause")
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 1

/*
 * Number of significant bits in virtual addresses. This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
 * Defined if issetugid(2) is available.
 */
#define JEMALLOC_HAVE_ISSETUGID

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
// Only since 12.1-STABLE
// #define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1

/*
 * Defined if mach_absolute_time() is available.
 */
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME 1

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library. Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
#define JEMALLOC_MALLOC_THREAD_CLEANUP

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
/* #undef JEMALLOC_THREADED_INIT */

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
#define JEMALLOC_MUTEX_INIT_CB 1

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/*
 * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 21

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
/* #undef JEMALLOC_RETAIN */

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping. Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities. We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
/* #undef JEMALLOC_ZONE */

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
#define JEMALLOC_SYSCTL_VM_OVERCOMMIT
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/* GNU specific sched_getcpu support */
/* #undef JEMALLOC_HAVE_SCHED_GETCPU */

/* GNU specific sched_setaffinity support */
/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */

/*
 * If defined, all the features necessary for background threads are present.
 */
#define JEMALLOC_BACKGROUND_THREAD 1

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -35,7 +35,7 @@
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 9
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses. This may be less than the
@ -1,213 +0,0 @@
#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "jemalloc/jemalloc.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) je_##n
# include "jemalloc/jemalloc.h"
#endif

#if defined(JEMALLOC_OSATOMIC)
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif

#include "jemalloc/internal/jemalloc_internal_macros.h"

/*
 * Note that the ordering matters here; the hook itself is name-mangled. We
 * want the inclusion of hooks to happen early, so that we hook as much as
 * possible.
 */
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/test_hooks.h"

#ifdef JEMALLOC_DEFINE_MADVISE_FREE
# define JEMALLOC_MADV_FREE 8
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
/*
 * Undocumented, for jemalloc development use only at the moment. See the note
 * in jemalloc/internal/log.h.
 */
static const bool config_log =
#ifdef JEMALLOC_LOG
    true
#else
    false
#endif
    ;
/*
 * Are extra safety checks enabled; things like checking the size of sized
 * deallocations, double-frees, etc.
 */
static const bool config_opt_safety_checks =
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
    true
#elif defined(JEMALLOC_DEBUG)
    /*
     * This lets us only guard safety checks by one flag instead of two; fast
     * checks can guard solely by config_opt_safety_checks and run in debug mode
     * too.
     */
    true
#else
    false
#endif
    ;

#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
    true
#else
    false
#endif
    ;
/*
 * Undocumented, and not recommended; the application should take full
 * responsibility for tracking provenance.
 */
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
    true
#else
    false
#endif
    ;

#endif /* JEMALLOC_PREAMBLE_H */
debian/changelog
@ -1,5 +1,5 @@
clickhouse (20.5.1.1) unstable; urgency=low
clickhouse (20.6.1.1) unstable; urgency=low

  * Modified source code

 -- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 28 Apr 2020 20:12:13 +0300
 -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 22 Jun 2020 20:40:23 +0300
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*

RUN apt-get update \
    && apt-get install --yes --no-install-recommends \
@ -6,7 +6,6 @@
    "docker/test/compatibility/ubuntu": "yandex/clickhouse-test-old-ubuntu",
    "docker/test/integration/base": "yandex/clickhouse-integration-test",
    "docker/test/performance-comparison": "yandex/clickhouse-performance-comparison",
    "docker/test/pvs": "yandex/clickhouse-pvs-test",
    "docker/test/stateful": "yandex/clickhouse-stateful-test",
    "docker/test/stateful_with_coverage": "yandex/clickhouse-stateful-test-with-coverage",
    "docker/test/stateless": "yandex/clickhouse-stateless-test",
@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-binary-builder .
FROM ubuntu:19.10

RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg
@ -18,7 +18,7 @@ ccache --zero-stats ||:
ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||:
rm -f CMakeCache.txt
cmake .. -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS
ninja clickhouse-bundle
ninja -v clickhouse-bundle
mv ./programs/clickhouse* /output
mv ./src/unit_tests_dbms /output
find . -name '*.so' -print -exec mv '{}' /output \;
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*
ARG gosu_ver=1.10

RUN apt-get update \
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*

RUN apt-get update && \
    apt-get install -y apt-transport-https dirmngr && \
@ -7,3 +7,7 @@ services:
            POSTGRES_PASSWORD: mysecretpassword
        ports:
            - 5432:5432
        networks:
            default:
                aliases:
                    - postgre-sql.local
@ -20,7 +20,7 @@ RUN apt-get --allow-unauthenticated update -y \
#    apt-get --allow-unauthenticated install --yes --no-install-recommends \
#        pvs-studio

ENV PKG_VERSION="pvs-studio-7.07.38234.48-amd64.deb"
ENV PKG_VERSION="pvs-studio-7.08.39365.50-amd64.deb"

RUN wget "https://files.viva64.com/$PKG_VERSION"
RUN sudo dpkg -i "$PKG_VERSION"
@ -5,4 +5,11 @@ toc_priority: 25
toc_title: hidden
---

# ClickHouse Engines

There are two key engine kinds in ClickHouse:

- [Table engines](table-engines/index.md)
- [Database engines](database-engines/index.md)

{## [Original article](https://clickhouse.tech/docs/en/engines/) ##}
@ -19,7 +19,7 @@ The table engine (type of table) determines:

### MergeTree {#mergetree}

The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, and other features not supported in other engines.
The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, secondary data-skipping indexes, and other features not supported in other engines.

Engines in the family:

@ -80,4 +80,4 @@ To select data from a virtual column, you must specify its name in the `SELECT`

If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don’t recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/) <!--hide-->
@ -1,58 +0,0 @@
---
toc_priority: 78
toc_title: General Questions
---

# General Questions {#general-questions}

## Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}

We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.

These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.

Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface.

## What If I Have a Problem with Encodings When Using Oracle Through ODBC? {#oracle-odbc-encodings}

If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).

**Example**

``` sql
NLS_LANG=RUSSIAN_RUSSIA.UTF8
```

## How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}

### Using INTO OUTFILE Clause {#using-into-outfile-clause}

Add an [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file'
```

By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../sql-reference/statements/select/format.md#format-clause).

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
```

### Using a File-Engine Table {#using-a-file-engine-table}

See [File](../engines/table-engines/special/file.md).

### Using Command-Line Redirection {#using-command-line-redirection}

``` sql
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

See [clickhouse-client](../interfaces/cli.md).

{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/columnar-database.md
@ -0,0 +1,25 @@
---
title: What is a columnar database?
toc_hidden: true
toc_priority: 101
---

# What Is a Columnar Database? {#what-is-a-columnar-database}

A columnar database stores the data of each column independently. This allows reading data from disk only for those columns that are used in any given query. The cost is that operations that affect whole rows become proportionally more expensive. The synonym for a columnar database is a column-oriented database management system. ClickHouse is a typical example of such a system.

A columnar database is especially advantageous for:

- Queries that use only a few columns out of many.
- Aggregating queries against large volumes of data.
- Column-wise data compression.

Here is an illustration of the difference between traditional row-oriented systems and columnar databases when building reports:

**Traditional row-oriented**
![Traditional row-oriented](https://clickhouse.tech/docs/en/images/row-oriented.gif#)

**Columnar**
![Columnar](https://clickhouse.tech/docs/en/images/column-oriented.gif#)

A columnar database is a preferred choice for analytical applications because it allows having many columns in a table just in case, without paying the cost for unused columns at query execution time. Column-oriented databases are designed for big data processing and data warehousing; they often natively scale using distributed clusters of low-cost hardware to increase throughput. ClickHouse does it with a combination of [distributed](../../engines/table-engines/special/distributed.md) and [replicated](../../engines/table-engines/mergetree-family/replication.md) tables.
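To make this concrete, here is a hedged sketch of the access pattern that columnar storage rewards. The table and column names are hypothetical:

``` sql
-- A wide table; real analytics tables often have hundreds of columns.
CREATE TABLE hits
(
    EventDate Date,
    UserID UInt64,
    URL String,
    Duration UInt32
)
ENGINE = MergeTree
ORDER BY (EventDate, UserID);

-- Only the EventDate and Duration column files are read from disk;
-- URL and all other columns are never touched.
SELECT EventDate, avg(Duration)
FROM hits
GROUP BY EventDate;
```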
docs/en/faq/general/dbms-naming.md
@ -0,0 +1,17 @@
---
title: "What does \u201CClickHouse\u201D mean?"
toc_hidden: true
toc_priority: 10
---

# What Does “ClickHouse” Mean? {#what-does-clickhouse-mean}

It’s a combination of “**Click**stream” and “Data ware**House**”. It comes from the original use case at Yandex.Metrica, where ClickHouse was supposed to keep records of all clicks by people from all over the Internet, and it still does the job. You can read more about this use case on the [ClickHouse history](../../introduction/history.md) page.

This two-part meaning has two consequences:

- The only correct way to write Click**H**ouse is with a capital H.
- If you need to abbreviate it, use **CH**. For some historical reasons, abbreviating as CK is also popular in China, mostly because one of the first talks about ClickHouse in Chinese used this form.

!!! info "Fun fact"
    Many years after ClickHouse got its name, this approach of combining two words that are meaningful on their own has been highlighted as the best way to name a database in [research by Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), an Associate Professor of Databases at Carnegie Mellon University. ClickHouse shared his “best database name of all time” award with Postgres.
docs/en/faq/general/index.md
@ -0,0 +1,24 @@
---
title: General questions about ClickHouse
toc_hidden_folder: true
toc_priority: 1
toc_title: General
---

# General Questions About ClickHouse {#general-questions}

Questions:

- [What is ClickHouse?](../../index.md#what-is-clickhouse)
- [Why is ClickHouse so fast?](../../faq/general/why-clickhouse-is-so-fast.md)
- [Who is using ClickHouse?](../../faq/general/who-is-using-clickhouse.md)
- [What does “ClickHouse” mean?](../../faq/general/dbms-naming.md)
- [What does “Не тормозит” mean?](../../faq/general/ne-tormozit.md)
- [What is OLAP?](../../faq/general/olap.md)
- [What is a columnar database?](../../faq/general/columnar-database.md)
- [Why not use something like MapReduce?](../../faq/general/mapreduce.md)

!!! info "Don’t see what you were looking for?"
    Check out [other F.A.Q. categories](../../faq/index.md) or browse around the main documentation articles found in the left sidebar.

{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/mapreduce.md
@ -0,0 +1,13 @@
---
title: Why not use something like MapReduce?
toc_hidden: true
toc_priority: 110
---

# Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}

We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.

These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.

Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP because it allows running experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but their relatively high latency makes it unrealistic to use these systems as the backend for a web interface.
docs/en/faq/general/ne-tormozit.md
@ -0,0 +1,26 @@
---
title: "What does \u201C\u043D\u0435 \u0442\u043E\u0440\u043C\u043E\u0437\u0438\u0442\
  \u201D mean?"
toc_hidden: true
toc_priority: 11
---

# What Does “Не тормозит” Mean? {#what-does-ne-tormozit-mean}

This question usually arises when people see official ClickHouse t-shirts. They have the large words **“ClickHouse не тормозит”** on the front.

Before ClickHouse became open-source, it was developed as an in-house storage system by the largest Russian IT company, [Yandex](https://yandex.com/company/). That’s why it initially got its slogan in Russian, which is “не тормозит” (pronounced as “ne tormozit”). After the open-source release we first produced some of those t-shirts for events in Russia and it was a no-brainer to use the slogan as-is.

One of the following batches of those t-shirts was supposed to be given away at events outside of Russia, and we tried to make an English version of the slogan. Unfortunately, Russian is rather elegant at expressing things concisely and space on a t-shirt is limited, so we failed to come up with a good enough translation (most options appeared to be either long or inaccurate) and decided to keep the slogan in Russian even on t-shirts produced for international events. It appeared to be a great decision because people all over the world get positively surprised and curious when they see it.

So, what does it mean? Here are some ways to translate *“не тормозит”*:

- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*.
- If you want to express it as close to how it sounds to a Russian person with an IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*.

If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:

![iframe](https://www.youtube.com/embed/bSyQahMVZ7w)

P.S. These t-shirts are not for sale; they are given away for free at most [ClickHouse Meetups](https://clickhouse.tech/#meet), usually for the best questions or other forms of active participation.
docs/en/faq/general/olap.md
@ -0,0 +1,39 @@
---
title: What is OLAP?
toc_hidden: true
toc_priority: 100
---

# What Is OLAP? {#what-is-olap}

[OLAP](https://en.wikipedia.org/wiki/Online_analytical_processing) stands for Online Analytical Processing. It is a broad term that can be looked at from two perspectives: technical and business. But at the very high level, you can just read these words backward:

Processing
: Some source data is processed…

Analytical
: …to produce some analytical reports and insights…

Online
: …in real-time.

## OLAP from the Business Perspective {#olap-from-the-business-perspective}

In recent years, business people started to realize the value of data. Companies that make their decisions blindly more often than not fail to keep up with the competition. The data-driven approach of successful companies forces them to collect all data that might be remotely useful for making business decisions, and requires mechanisms to analyze it in a timely manner. Here’s where OLAP database management systems (DBMS) come in.

In a business sense, OLAP allows companies to continuously plan, analyze, and report operational activities, thus maximizing efficiency, reducing expenses, and ultimately conquering market share. It could be done either in an in-house system or outsourced to SaaS providers like web/mobile analytics services, CRM services, etc. OLAP is the technology behind many BI applications (Business Intelligence).

ClickHouse is an OLAP database management system that is pretty often used as a backend for those SaaS solutions for analyzing domain-specific data. However, some businesses are still reluctant to share their data with third-party providers, so an in-house data warehouse scenario is also viable.

## OLAP from the Technical Perspective {#olap-from-the-technical-perspective}

All database management systems could be classified into two groups: OLAP (Online **Analytical** Processing) and OLTP (Online **Transactional** Processing). The former focuses on building reports, each based on large volumes of historical data, but doing it not so frequently, while the latter usually handles a continuous stream of transactions, constantly modifying the current state of data.

In practice OLAP and OLTP are not strict categories; it’s more like a spectrum. Most real systems usually focus on one of them but provide some solutions or workarounds if the opposite kind of workload is also desired. This situation often forces businesses to operate multiple storage systems integrated with each other, which might not be a big deal by itself, but having more systems makes them more expensive to maintain. So the trend of recent years is HTAP (**Hybrid Transactional/Analytical Processing**), where both kinds of workload are handled equally well by a single database management system.

Even if a DBMS started as a pure OLAP or pure OLTP system, it is forced to move in the HTAP direction to keep up with the competition. ClickHouse is no exception: initially it was designed as a [fast-as-possible OLAP system](../../faq/general/why-clickhouse-is-so-fast.md) and it still doesn’t have full-fledged transaction support, but some features like consistent reads/writes and mutations for updating/deleting data had to be added.

The fundamental trade-off between OLAP and OLTP systems remains:

- To build analytical reports efficiently it’s crucial to be able to read columns separately, thus most OLAP databases are [columnar](../../faq/general/columnar-database.md).
- Storing columns separately increases the cost of operations on rows, like appends or in-place modifications, proportionally to the number of columns (which can be huge if the systems try to collect all details of an event just in case). Thus, most OLTP systems store data arranged by rows.
docs/en/faq/general/who-is-using-clickhouse.md
@ -0,0 +1,19 @@
---
title: Who is using ClickHouse?
toc_hidden: true
toc_priority: 9
---

# Who Is Using ClickHouse? {#who-is-using-clickhouse}

Being an open-source product makes this question not so straightforward to answer. You don’t have to tell anyone if you want to start using ClickHouse; you just go grab the source code or pre-compiled packages. There’s no contract to sign, and the [Apache 2.0 license](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) allows for unconstrained software distribution.

Also, the technology stack is often in a grey zone of what’s covered by an NDA. Some companies consider the technologies they use a competitive advantage even if they are open-source, and don’t allow employees to share any details publicly. Some see PR risks and allow employees to share implementation details only with their PR department’s approval.

So how can you tell who is using ClickHouse?

One way is to **ask around**. If it’s not in writing, people are much more willing to share what technologies are used in their companies, what the use cases are, what kind of hardware is used, data volumes, etc. We’re talking with users regularly at [ClickHouse Meetups](https://www.youtube.com/channel/UChtmrD-dsdpspr42P_PyRAw/playlists) all over the world and have heard stories about 1000+ companies that use ClickHouse. Unfortunately, that’s not reproducible and we try to treat such stories as if they were told under NDA to avoid any potential troubles. But you can come to any of our future meetups and talk with other users on your own. Meetups are announced in multiple ways; for example, you can subscribe to [our Twitter](http://twitter.com/ClickHouseDB/).

The second way is to look for companies **publicly saying** that they use ClickHouse. It’s more substantial because there’s usually some hard evidence like a blog post, talk video recording, slide deck, etc. We collect links to such evidence on our **[Adopters](../../introduction/adopters.md)** page. Feel free to contribute the story of your employer or just some links you’ve stumbled upon (but try not to violate your NDA in the process).

You can find names of very large companies in the adopters list, like Bloomberg, Cisco, China Telecom, Tencent, or Uber, but with the first approach we found that there are many more. For example, if you take [the list of largest IT companies by Forbes (2020)](https://www.forbes.com/sites/hanktucker/2020/05/13/worlds-largest-technology-companies-2020-apple-stays-on-top-zoom-and-uber-debut/), over half of them are using ClickHouse in some way. Also, it would be unfair not to mention [Yandex](../../introduction/history.md), the company which initially open-sourced ClickHouse in 2016 and happens to be one of the largest IT companies in Europe.
docs/en/faq/general/why-clickhouse-is-so-fast.md
@ -0,0 +1,63 @@
---
title: Why is ClickHouse so fast?
toc_hidden: true
toc_priority: 8
---

# Why Is ClickHouse So Fast? {#why-clickhouse-is-so-fast}

It was designed to be fast. Query execution performance has always been a top priority during the development process, but other important characteristics like user-friendliness, scalability, and security were also considered so ClickHouse could become a real production system.

ClickHouse was initially built as a prototype to do just a single task well: to filter and aggregate data as fast as possible. That’s what needs to be done to build a typical analytical report, and that’s what a typical [GROUP BY](../../sql-reference/statements/select/group-by.md) query does. The ClickHouse team made several high-level decisions that, combined, made achieving this task possible:

Column-oriented storage
: Source data often contain hundreds or even thousands of columns, while a report can use just a few of them. The system needs to avoid reading unnecessary columns; otherwise expensive disk read operations would be wasted.

Indexes
: ClickHouse keeps data structures in memory that allow reading only the necessary row ranges of only the columns used in a query.

Data compression
: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same, or few distinct, values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create.md#create-query-specialized-codecs) that can make data even more compact (see the sketch after this list).

Vectorized query execution
: ClickHouse not only stores data in columns but also processes data in columns. This leads to better CPU cache utilization and allows for [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instruction usage.

Scalability
: ClickHouse can leverage all available CPU cores and disks to execute even a single query, and not only on a single server but across all CPU cores and disks of a cluster as well.
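As a small illustration of the specialized codecs mentioned in the data compression point above, here is a hedged sketch. The table and column names are hypothetical; `Delta`, `ZSTD`, and `Gorilla` are codecs that ClickHouse provides:

``` sql
CREATE TABLE metrics
(
    -- Delta-encode mostly increasing timestamps, then compress with ZSTD.
    ts DateTime CODEC(Delta, ZSTD),
    -- Gorilla is specialized for slowly changing floating-point time series.
    value Float64 CODEC(Gorilla)
)
ENGINE = MergeTree
ORDER BY ts;
```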
But many other database management systems use similar techniques. What really makes ClickHouse stand out is **attention to low-level details**. Most programming languages provide implementations for most common algorithms and data structures, but they tend to be too generic to be effective. Every task can be considered as a landscape with its own characteristics, instead of a target for just throwing in a random implementation. For example, if you need a hash table, here are some key questions to consider:

- Which hash function to choose?
- Collision resolution algorithm: [open addressing](https://en.wikipedia.org/wiki/Open_addressing) vs [chaining](https://en.wikipedia.org/wiki/Hash_table#Separate_chaining)?
- Memory layout: one array for keys and values or separate arrays? Will it store small or large values?
- Fill factor: when and how to resize? How to move values around on resize?
- Will values be removed, and if so, which algorithm works better?
- Will we need fast probing with bitmaps, inline placement of string keys, support for non-movable values, prefetch, and batching?

The hash table is a key data structure for the `GROUP BY` implementation, and ClickHouse automatically chooses one of [30+ variations](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Aggregator.h) for each specific query.

The same goes for algorithms; for example, in sorting you might consider:

- What will be sorted: an array of numbers, tuples, strings, or structures?
- Is all data available completely in RAM?
- Do we need a stable sort?
- Do we need a full sort? Maybe a partial sort or n-th element will suffice?
- How to implement comparisons?
- Are we sorting data that has already been partially sorted?

Algorithms that rely on the characteristics of the data they are working with can often do better than their generic counterparts. If the characteristics are not really known in advance, the system can try various implementations and choose the one that works best at runtime. For example, see an [article on how LZ4 decompression is implemented in ClickHouse](https://habr.com/en/company/yandex/blog/457612/).

Last but not least, the ClickHouse team always monitors the Internet for people claiming that they came up with the best implementation, algorithm, or data structure to do something, and tries their ideas out. Those claims mostly appear to be false, but from time to time you’ll indeed find a gem.

!!! info "Tips for building your own high-performance software"

    - Keep in mind low-level details when designing your system.
    - Design based on hardware capabilities.
    - Choose data structures and abstractions based on the needs of the task.
    - Provide specializations for special cases.
    - Try new, “best” algorithms that you read about yesterday.
    - Choose an algorithm at runtime based on statistics.
    - Benchmark on real datasets.
    - Test for performance regressions in CI.
    - Measure and observe everything.
@ -2,7 +2,45 @@
toc_folder_title: F.A.Q.
toc_hidden: true
toc_priority: 76
toc_title: hidden
---

# ClickHouse F.A.Q. {#clickhouse-f-a-q}

This section of the documentation is a place to collect answers to ClickHouse-related questions that arise often.

Categories:

- **[General](../faq/general/index.md)**
    - [What is ClickHouse?](../index.md#what-is-clickhouse)
    - [Why is ClickHouse so fast?](../faq/general/why-clickhouse-is-so-fast.md)
    - [Who is using ClickHouse?](../faq/general/who-is-using-clickhouse.md)
    - [What does “ClickHouse” mean?](../faq/general/dbms-naming.md)
    - [What does “Не тормозит” mean?](../faq/general/ne-tormozit.md)
    - [What is OLAP?](../faq/general/olap.md)
    - [What is a columnar database?](../faq/general/columnar-database.md)
    - [Why not use something like MapReduce?](../faq/general/mapreduce.md)
- **[Use Cases](../faq/use-cases/index.md)**
    - [Can I use ClickHouse as a time-series database?](../faq/use-cases/time-series.md)
    - [Can I use ClickHouse as a key-value storage?](../faq/use-cases/key-value.md)
- **[Operations](../faq/operations/index.md)**
    - [Which ClickHouse version to use in production?](../faq/operations/production.md)
    - [Is it possible to delete old records from a ClickHouse table?](../faq/operations/delete-old-data.md)
- **[Integration](../faq/integration/index.md)**
    - [How do I export data from ClickHouse to a file?](../faq/integration/file-export.md)
    - [What if I have a problem with encodings when connecting to Oracle via ODBC?](../faq/integration/oracle-odbc.md)

{## TODO
Question candidates:
- How to choose a primary key?
- How to add a column in ClickHouse?
- Too many parts
- How to filter a ClickHouse table by an array column’s contents?
- How to insert all rows from one table to another of identical structure?
- How to kill a process (query) in ClickHouse?
- How to implement pivot (like in pandas)?
- How to remove the default ClickHouse user through users.d?
- Importing a MySQL dump to ClickHouse
- Window function workarounds (row\_number, lag/lead, running diff/sum/average)
##}

{## [Original article](https://clickhouse.tech/docs/en/faq) ##}
docs/en/faq/integration/file-export.md
@ -0,0 +1,37 @@
---
title: How do I export data from ClickHouse to a file?
toc_hidden: true
toc_priority: 10
---

# How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}

## Using INTO OUTFILE Clause {#using-into-outfile-clause}

Add an [INTO OUTFILE](../../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file'
```

By default, ClickHouse uses the [TabSeparated](../../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../../interfaces/formats.md), use the [FORMAT clause](../../sql-reference/statements/select/format.md#format-clause).

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
```

## Using a File-Engine Table {#using-a-file-engine-table}

See the [File](../../engines/table-engines/special/file.md) table engine.

## Using Command-Line Redirection {#using-command-line-redirection}

``` bash
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

See [clickhouse-client](../../interfaces/cli.md).
docs/en/faq/integration/index.md
@ -0,0 +1,19 @@
---
title: Questions about integrating ClickHouse and other systems
toc_hidden_folder: true
toc_priority: 4
toc_title: Integration
---

# Questions About Integrating ClickHouse and Other Systems {#question-about-integrating-clickhouse-and-other-systems}

Questions:

- [How do I export data from ClickHouse to a file?](../../faq/integration/file-export.md)
- [How to import JSON into ClickHouse?](../../faq/integration/json-import.md)
- [What if I have a problem with encodings when connecting to Oracle via ODBC?](../../faq/integration/oracle-odbc.md)

!!! info "Don’t see what you were looking for?"
    Check out [other F.A.Q. categories](../../faq/index.md) or browse around the main documentation articles found in the left sidebar.

{## [Original article](https://clickhouse.tech/docs/en/faq/integration/) ##}
docs/en/faq/integration/json-import.md
@ -0,0 +1,33 @@
---
title: How to import JSON into ClickHouse?
toc_hidden: true
toc_priority: 11
---

# How to Import JSON Into ClickHouse? {#how-to-import-json-into-clickhouse}

ClickHouse supports a wide range of [data formats for input and output](../../interfaces/formats.md). There are multiple JSON variations among them, but the most commonly used for data ingestion is [JSONEachRow](../../interfaces/formats.md#jsoneachrow). It expects one JSON object per row, with each object separated by a newline.

## Examples {#examples}

Using the [HTTP interface](../../interfaces/http.md):

``` bash
$ echo '{"foo":"bar"}' | curl 'http://localhost:8123/?query=INSERT%20INTO%20test%20FORMAT%20JSONEachRow' --data-binary @-
```

Using the [CLI interface](../../interfaces/cli.md):

``` bash
$ echo '{"foo":"bar"}' | clickhouse-client --query="INSERT INTO test FORMAT JSONEachRow"
```

Instead of inserting data manually, you might consider using one of the [client libraries](../../interfaces/index.md).

## Useful Settings {#useful-settings}

- `input_format_skip_unknown_fields` allows inserting JSON even if there are additional fields not present in the table schema (by discarding them).
- `input_format_import_nested_json` allows inserting nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type.

!!! note "Note"
    Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the CLI interface.
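For illustration, here is a minimal sketch of applying the first setting from inside a `clickhouse-client` session; the `test` table with a single `foo` column is hypothetical:

``` sql
SET input_format_skip_unknown_fields = 1;

-- The "extra" field is absent from the table schema and is silently discarded.
INSERT INTO test FORMAT JSONEachRow {"foo":"bar","extra":"ignored"}
```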
docs/en/faq/integration/oracle-odbc.md
@ -0,0 +1,15 @@
---
title: What if I have a problem with encodings when using Oracle via ODBC?
toc_hidden: true
toc_priority: 20
---

# What If I Have a Problem with Encodings When Using Oracle Via ODBC? {#oracle-odbc-encodings}

If you use Oracle as a source of ClickHouse external dictionaries via the Oracle ODBC driver, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).

**Example**

``` bash
NLS_LANG=RUSSIAN_RUSSIA.UTF8
```
docs/en/faq/operations/delete-old-data.md
@ -0,0 +1,42 @@
---
title: Is it possible to delete old records from a ClickHouse table?
toc_hidden: true
toc_priority: 20
---

# Is It Possible to Delete Old Records from a ClickHouse Table? {#is-it-possible-to-delete-old-records-from-a-clickhouse-table}

The short answer is “yes”. ClickHouse has multiple mechanisms that allow freeing up disk space by removing old data. Each mechanism is aimed at a different scenario.

## TTL {#ttl}

ClickHouse allows automatically dropping values when some condition is met. This condition is configured as an expression based on any columns, usually just a static offset relative to a timestamp column.

The key advantage of this approach is that it doesn’t need any external system to trigger it: once TTL is configured, data removal happens automatically in the background.

!!! note "Note"
    TTL can also be used to move data not only to [/dev/null](https://en.wikipedia.org/wiki/Null_device), but also between different storage systems, like from SSD to HDD.

More details on [configuring TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
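For example, here is a minimal sketch of a table whose rows expire a month after their timestamp; the table and column names are hypothetical:

``` sql
CREATE TABLE events
(
    timestamp DateTime,
    message String
)
ENGINE = MergeTree
ORDER BY timestamp
TTL timestamp + INTERVAL 1 MONTH; -- expired rows are removed in the background
```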
## ALTER DELETE {#alter-delete}
|
||||
|
||||
ClickHouse doesn’t have real-time point deletes like in [OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing) databases. The closest thing to them are mutations. They are issued as `ALTER ... DELETE` or `ALTER ... UPDATE` queries to distinguish from normal `DELETE` or `UPDATE` as they are asynchronous batch operations, not immediate modifications. The rest of syntax after `ALTER TABLE` prefix is similar.
|
||||
|
||||
`ALTER DELETE` can be issued to flexibly remove old data. If you need to do it regularly, the main downside will be the need to have an external system to submit the query. There are also some performance considerations since mutation rewrite complete parts even there’s only a single row to be deleted.
|
||||
|
||||
This is the most common approach to making a system based on ClickHouse [GDPR](https://gdpr-info.eu)-compliant.
|
||||
|
||||
More details on [mutations](../../sql-reference/statements/alter.md#alter-mutations).
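As an illustration, reusing the hypothetical `events` table from the TTL sketch above, a mutation removing rows older than 30 days could look like this:

``` sql
-- Asynchronous batch operation: affected parts are rewritten in the background
ALTER TABLE events DELETE WHERE event_date < today() - 30;
```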
|
||||
|
||||
## DROP PARTITION {#drop-partition}
|
||||
|
||||
`ALTER TABLE ... DROP PARTITION` provides a cost-efficient way to drop a whole partition. It’s not that flexible and needs a proper partitioning scheme configured on table creation, but it still covers most common cases. Like mutations, it needs to be executed from an external system for regular use.
|
||||
|
||||
More details on [manipulating partitions](../../sql-reference/statements/alter.md#alter_drop-partition).
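A minimal sketch, assuming the hypothetical `events` table was created with `PARTITION BY toYYYYMM(event_date)`:

``` sql
-- Drops all data for January 2020 by removing whole parts, no rewriting involved
ALTER TABLE events DROP PARTITION 202001;
```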
|
||||
|
||||
## TRUNCATE {#truncate}
|
||||
|
||||
It’s rather radical to drop all data from a table, but in some cases it might be exactly what you need.
|
||||
|
||||
More details on [table truncation](../../sql-reference/statements/misc.md#truncate-statement).
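For completeness, a sketch of this most radical option:

``` sql
-- Removes all data but keeps the table definition
TRUNCATE TABLE events;
```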
|
18
docs/en/faq/operations/index.md
Normal file
@ -0,0 +1,18 @@
|
||||
---
|
||||
title: Question about operating ClickHouse servers and clusters
|
||||
toc_hidden_folder: true
|
||||
toc_priority: 3
|
||||
toc_title: Operations
|
||||
---
|
||||
|
||||
# Question About Operating ClickHouse Servers and Clusters {#question-about-operating-clickhouse-servers-and-clusters}
|
||||
|
||||
Questions:
|
||||
|
||||
- [Which ClickHouse version to use in production?](../../faq/operations/production.md)
|
||||
- [Is it possible to delete old records from a ClickHouse table?](../../faq/operations/delete-old-data.md)
|
||||
|
||||
!!! info "Don’t see what you were looking for?"
|
||||
Check out [other F.A.Q. categories](../../faq/index.md) or browse around the main documentation articles found in the left sidebar.
|
||||
|
||||
{## [Original article](https://clickhouse.tech/docs/en/faq/production/) ##}
|
70
docs/en/faq/operations/production.md
Normal file
@ -0,0 +1,70 @@
|
||||
---
|
||||
title: Which ClickHouse version to use in production?
|
||||
toc_hidden: true
|
||||
toc_priority: 10
|
||||
---
|
||||
|
||||
# Which ClickHouse Version to Use in Production? {#which-clickhouse-version-to-use-in-production}
|
||||
|
||||
First of all, let’s discuss why people ask this question in the first place. There are two key reasons:
|
||||
|
||||
1. ClickHouse is developed with pretty high velocity, and usually there are 10+ stable releases per year. That makes for a wide range of releases to choose from, which is not a trivial choice.
|
||||
2. Some users want to avoid spending time figuring out which version works best for their use case and just follow someone else’s advice.
|
||||
|
||||
The second reason is more fundamental, so we’ll start with it and then get back to navigating through various ClickHouse releases.
|
||||
|
||||
## Which ClickHouse Version Do You Recommend? {#which-clickhouse-version-do-you-recommend}
|
||||
|
||||
It’s tempting to hire consultants or trust some known experts to get rid of responsibility for your production environment. You install some specific ClickHouse version that someone else recommended, and if there’s some issue with it, it’s not your fault, it’s someone else’s. This line of reasoning is a big trap. No external person knows better than you what’s going on in your company’s production environment.
|
||||
|
||||
So how do you properly choose which ClickHouse version to upgrade to? Or how do you choose your first ClickHouse version? First of all, you need to invest in setting up a **realistic pre-production environment**. In an ideal world, it could be a completely identical shadow copy, but that’s usually expensive.
|
||||
|
||||
Here are some key points for getting reasonable fidelity in a pre-production environment without very high costs:
|
||||
|
||||
- The pre-production environment needs to run a set of queries as close as possible to what you intend to run in production:
|
||||
- Don’t make it read-only with some frozen data.
|
||||
    - Don’t make it write-only by just copying data without building some typical reports.
|
||||
- Don’t wipe it clean instead of applying schema migrations.
|
||||
- Use a sample of real production data and queries. Try to choose a sample that’s still representative and makes `SELECT` queries return reasonable results. Use obfuscation if your data is sensitive and internal policies don’t allow it to leave the production environment.
|
||||
- Make sure that pre-production is covered by your monitoring and alerting software the same way your production environment is.
|
||||
- If your production spans multiple datacenters or regions, make sure your pre-production does the same.
|
||||
- If your production uses complex features like replication, distributed tables, or cascading materialized views, make sure they are configured similarly in pre-production.
|
||||
- There’s a trade-off between using roughly the same number of servers or VMs in pre-production as in production but of smaller size, or many fewer of them but of the same size. The first option might catch extra network-related issues, while the latter is easier to manage.
|
||||
|
||||
The second area to invest in is **automated testing infrastructure**. Don’t assume that if some kind of query has executed successfully once, it’ll continue to do so forever. It’s ok to have some unit tests where ClickHouse is mocked, but make sure your product has a reasonable set of automated tests that are run against real ClickHouse and check that all important use cases are still working as expected.
|
||||
|
||||
An extra step forward could be contributing those automated tests to [ClickHouse’s open-source test infrastructure](https://github.com/ClickHouse/ClickHouse/tree/master/tests) that’s continuously used in its day-to-day development. It will definitely take some additional time and effort to learn [how to run it](../../development/tests.md) and then how to adapt your tests to this framework, but it’ll pay off by ensuring that ClickHouse releases are already tested against them when they are announced stable, instead of repeatedly losing time on reporting an issue after the fact and then waiting for a bugfix to be implemented, backported and released. Some companies even have contributing such tests to the infrastructure they use as an internal policy; most notably, it’s called the [Beyoncé Rule](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well) at Google.
|
||||
|
||||
When you have your pre-production environment and testing infrastructure in place, choosing the best version is straightforward:
|
||||
|
||||
1. Routinely run your automated tests against new ClickHouse releases. You can do it even for ClickHouse releases that are marked as `testing`, but going forward to the next steps with them is not recommended.
|
||||
2. Deploy the ClickHouse release that passed the tests to pre-production and check that all processes are running as expected.
|
||||
3. Report any issues you discovered to [ClickHouse GitHub Issues](https://github.com/ClickHouse/ClickHouse/issues).
|
||||
4. If there were no major issues, it should be safe to start deploying the ClickHouse release to your production environment. Investing in gradual release automation that implements an approach similar to [canary releases](https://martinfowler.com/bliki/CanaryRelease.html) or [blue-green deployments](https://martinfowler.com/bliki/BlueGreenDeployment.html) might further reduce the risk of issues in production.
|
||||
|
||||
As you might have noticed, there’s nothing specific to ClickHouse in the approach described above; people do that for any piece of infrastructure they rely on if they take their production environment seriously.
|
||||
|
||||
## How to Choose Between ClickHouse Releases? {#how-to-choose-between-clickhouse-releases}
|
||||
|
||||
If you look into the contents of the ClickHouse package repository, you’ll see four kinds of packages:
|
||||
|
||||
1. `testing`
|
||||
2. `prestable`
|
||||
3. `stable`
|
||||
4. `lts` (long-term support)
|
||||
|
||||
As was mentioned earlier, `testing` releases are good mostly for noticing issues early; running them in production is not recommended because they are not tested as thoroughly as the other kinds of packages.
|
||||
|
||||
`prestable` is a release candidate that generally looks promising and is likely to be announced as `stable` soon. You can try them out in pre-production and report issues if you see any.
|
||||
|
||||
For production use, there are two key options: `stable` and `lts`. Here is some guidance on how to choose between them:
|
||||
|
||||
- `stable` is the kind of package we recommend by default. They are released roughly monthly (and thus provide new features with reasonable delay) and the three latest stable releases are supported in terms of diagnostics and backporting of bugfixes.
|
||||
- `lts` are released twice a year and are supported for a year after their initial release. You might prefer them over `stable` in the following cases:
|
||||
- Your company has some internal policies that don’t allow for frequent upgrades or using non-LTS software.
|
||||
- You are using ClickHouse in some secondary products that either don’t require any complex ClickHouse features or don’t have enough resources to keep them updated.
|
||||
|
||||
Many teams who initially thought that `lts` was the way to go often switch to `stable` anyway because of some recent feature that’s important for their product.
|
||||
|
||||
!!! warning "Important"
|
||||
One more thing to keep in mind when upgrading ClickHouse: we’re always keeping an eye on compatibility across releases, but sometimes it’s not reasonable to preserve it and some minor details might change. So make sure you check the [changelog](../../whats-new/changelog/index.md) before upgrading to see if there are any notes about backward-incompatible changes.
|
18
docs/en/faq/use-cases/index.md
Normal file
@ -0,0 +1,18 @@
|
||||
---
|
||||
title: Questions about ClickHouse use cases
|
||||
toc_hidden_folder: true
|
||||
toc_priority: 2
|
||||
toc_title: Use Cases
|
||||
---
|
||||
|
||||
# Questions About ClickHouse Use Cases {#questions-about-clickhouse-use-cases}
|
||||
|
||||
Questions:
|
||||
|
||||
- [Can I use ClickHouse as a time-series database?](../../faq/use-cases/time-series.md)
|
||||
- [Can I use ClickHouse as a key-value storage?](../../faq/use-cases/key-value.md)
|
||||
|
||||
!!! info "Don’t see what you were looking for?"
|
||||
Check out [other F.A.Q. categories](../../faq/index.md) or browse around the main documentation articles found in the left sidebar.
|
||||
|
||||
{## [Original article](https://clickhouse.tech/docs/en/faq/use-cases/) ##}
|
17
docs/en/faq/use-cases/key-value.md
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
title: Can I use ClickHouse as a key-value storage?
|
||||
toc_hidden: true
|
||||
toc_priority: 101
|
||||
---
|
||||
|
||||
# Can I Use ClickHouse As a Key-Value Storage? {#can-i-use-clickhouse-as-a-key-value-storage}
|
||||
|
||||
The short answer is **“no”**. The key-value workload is among the top positions in the list of cases when **NOT**{.text-danger} to use ClickHouse. It’s an [OLAP](../../faq/general/olap.md) system after all, while there are many excellent key-value storage systems out there.
|
||||
|
||||
However, there might be situations where it still makes sense to use ClickHouse for key-value-like queries. Usually, these are low-budget products where the main workload is analytical in nature and fits ClickHouse well, but there’s also some secondary process that needs a key-value pattern with not-so-high request throughput and without strict latency requirements. If you had an unlimited budget, you would have installed a secondary key-value database for this secondary workload, but in reality there’s an additional cost of maintaining one more storage system (monitoring, backups, etc.) which might be desirable to avoid.
|
||||
|
||||
If you decide to go against recommendations and run some key-value-like queries against ClickHouse, here are some tips:
|
||||
|
||||
- The key reason why point queries are expensive in ClickHouse is the sparse primary index of the main [MergeTree table engine family](../../engines/table-engines/mergetree-family/mergetree.md). This index can’t point to each specific row of data; instead, it points to each N-th row, and the system has to scan from the neighboring N-th row to the desired one, reading excess data along the way. In a key-value scenario, it might be useful to reduce the value of N with the `index_granularity` setting (see the sketch after this list).
|
||||
- ClickHouse keeps each column in a separate set of files, so to assemble one complete row it needs to go through each of those files. Their count increases linearly with the number of columns, so in the key-value scenario it might be worth avoiding many columns and putting all your payload in a single `String` column encoded in some serialization format like JSON, Protobuf or whatever makes sense.
|
||||
- There’s an alternative approach that uses the [Join](../../engines/table-engines/special/join.md) table engine instead of normal `MergeTree` tables and the [joinGet](../../sql-reference/functions/other-functions.md#joinget) function to retrieve the data. It can provide better query performance but might have some usability and reliability issues. Here’s a [usage example](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00800_versatile_storage_join.sql#L49-L51).
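Here is the sketch mentioned in the first tip: a hypothetical key-value-style table with a reduced index granularity (all names and the exact granularity value are illustrative assumptions, not recommendations):

``` sql
CREATE TABLE kv
(
    key String,
    value String  -- the whole payload serialized into a single column
)
ENGINE = MergeTree
ORDER BY key
SETTINGS index_granularity = 256;  -- default is 8192
```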
|
15
docs/en/faq/use-cases/time-series.md
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Can I use ClickHouse as a time-series database?
|
||||
toc_hidden: true
|
||||
toc_priority: 101
|
||||
---
|
||||
|
||||
# Can I Use ClickHouse As a Time-Series Database? {#can-i-use-clickhouse-as-a-time-series-database}
|
||||
|
||||
ClickHouse is a generic data storage solution for [OLAP](../../faq/general/olap.md) workloads, while there are many specialized time-series database management systems. Nevertheless, ClickHouse’s [focus on query execution speed](../../faq/general/why-clickhouse-is-so-fast.md) allows it to outperform specialized systems in many cases. There are many independent benchmarks on this topic out there ([example](https://medium.com/@AltinityDB/clickhouse-for-time-series-scalability-benchmarks-e181132a895b)), so we’re not going to conduct one here. Instead, let’s focus on ClickHouse features that are important to use if that’s your use case.
|
||||
|
||||
First of all, there are **[specialized codecs](../../sql-reference/statements/create.md#create-query-specialized-codecs)** which make typical time-series data much more compact: either common algorithms like `DoubleDelta` and `Gorilla`, or ones specific to ClickHouse like `T64`.
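As a minimal sketch (the table and column names are hypothetical), codecs are attached per column on table creation:

``` sql
CREATE TABLE ts_example
(
    dt DateTime CODEC(DoubleDelta),
    value Float64 CODEC(Gorilla)
)
ENGINE = MergeTree
ORDER BY dt;
```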
|
||||
|
||||
Second, time-series queries often hit only recent data, like one day or one week old. It makes sense to use servers that have both fast NVMe/SSD drives and high-capacity HDD drives. The ClickHouse [TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) feature allows configuring the system to keep fresh hot data on fast drives and gradually move it to slower drives as it ages. Rollup or removal of even older data is also possible if your requirements demand it.
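For example, assuming a storage policy with a volume named `slow` is already configured (the policy and all names here are assumptions), such a move can be expressed as a TTL rule on the hypothetical table above:

``` sql
-- Parts whose data is older than a week are moved to the slower volume
ALTER TABLE ts_example MODIFY TTL dt + INTERVAL 1 WEEK TO VOLUME 'slow';
```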
|
||||
|
||||
Even though it’s against the ClickHouse philosophy of storing and processing raw data, you can use [materialized views](../../sql-reference/statements/create.md#create-view) to fit into even tighter latency or cost requirements.
|
@ -94,6 +94,18 @@ For production environments, it’s recommended to use the latest `stable`-versi
|
||||
|
||||
To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use official `deb` packages inside.
|
||||
|
||||
### From Precompiled Binaries for Non-Standard Environments {#from-binaries-non-linux}
|
||||
|
||||
For non-Linux operating systems and for the AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours’ delay).
|
||||
|
||||
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
|
||||
|
||||
After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub.
|
||||
|
||||
These builds are not recommended for use in production environments because they are less thoroughly tested, but you can do so at your own risk. They also have only a subset of ClickHouse features available.
|
||||
|
||||
### From Sources {#from-sources}
|
||||
|
||||
To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build-osx.md).
|
||||
|
@ -426,6 +426,18 @@ The value 0 means that you can delete all tables without any restrictions.
|
||||
<max_table_size_to_drop>0</max_table_size_to_drop>
|
||||
```
|
||||
|
||||
## max\_thread\_pool\_size {#max-thread-pool-size}
|
||||
|
||||
The maximum number of threads in the Global Thread pool.
|
||||
|
||||
Default value: 10000.
|
||||
|
||||
**Example**
|
||||
|
||||
``` xml
|
||||
<max_thread_pool_size>12000</max_thread_pool_size>
|
||||
```
|
||||
|
||||
## merge\_tree {#server_configuration_parameters-merge_tree}
|
||||
|
||||
Fine tuning for tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).
|
||||
|
@ -821,6 +821,10 @@ ClickHouse supports the following algorithms of choosing replicas:
|
||||
- [First or random](#load_balancing-first_or_random)
|
||||
- [Round robin](#load_balancing-round_robin)
|
||||
|
||||
See also:
|
||||
|
||||
- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)
|
||||
|
||||
### Random (by Default) {#load_balancing-random}
|
||||
|
||||
``` sql
|
||||
@ -1125,6 +1129,18 @@ Possible values:
|
||||
|
||||
Default value: 0
|
||||
|
||||
## optimize\_skip\_unused\_shards\_nesting {#optimize-skip-unused-shards-nesting}
|
||||
|
||||
Controls [`optimize_skip_unused_shards`](#optimize-skip-unused-shards) (hence it still requires [`optimize_skip_unused_shards`](#optimize-skip-unused-shards)) depending on the nesting level of the distributed query (the case when you have a `Distributed` table that looks into another `Distributed` table).
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Disabled, `optimize_skip_unused_shards` always works.
|
||||
- 1 — Enables `optimize_skip_unused_shards` only for the first level.
|
||||
- 2 — Enables `optimize_skip_unused_shards` up to the second level.
|
||||
|
||||
Default value: 0
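For instance, a sketch of limiting shard skipping to the first nesting level:

``` sql
SET optimize_skip_unused_shards = 1;         -- required prerequisite
SET optimize_skip_unused_shards_nesting = 1; -- apply it to the first level only
```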
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards {#force-optimize-skip-unused-shards}
|
||||
|
||||
Enables or disables query execution if [optimize\_skip\_unused\_shards](#optimize-skip-unused-shards) is enabled and skipping of unused shards is not possible. If the skipping is not possible and the setting is enabled, an exception will be thrown.
|
||||
@ -1137,16 +1153,17 @@ Possible values:
|
||||
|
||||
Default value: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
## force\_optimize\_skip\_unused\_shards\_nesting {#settings-force_optimize_skip_unused_shards_nesting}
|
||||
|
||||
Reset [`optimize_skip_unused_shards`](#optimize-skip-unused-shards) for nested `Distributed` table
|
||||
Controls [`force_optimize_skip_unused_shards`](#force-optimize-skip-unused-shards) (hence it still requires [`force_optimize_skip_unused_shards`](#force-optimize-skip-unused-shards)) depending on the nesting level of the distributed query (the case when you have a `Distributed` table that looks into another `Distributed` table).
|
||||
|
||||
Possible values:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
- 0 — Disabled, `force_optimize_skip_unused_shards` always works.
|
||||
- 1 — Enables `force_optimize_skip_unused_shards` only for the first level.
|
||||
- 2 — Enables `force_optimize_skip_unused_shards` up to the second level.
|
||||
|
||||
Default value: 0.
|
||||
Default value: 0
|
||||
|
||||
## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
@ -1170,8 +1187,10 @@ Controls how fast errors in distributed tables are zeroed. If a replica is unava
|
||||
|
||||
See also:
|
||||
|
||||
- [load\_balancing](#load_balancing-round_robin)
|
||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
|
||||
- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)
|
||||
|
||||
## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}
|
||||
|
||||
@ -1182,8 +1201,24 @@ Error count of each replica is capped at this value, preventing a single replica
|
||||
|
||||
See also:
|
||||
|
||||
- [load\_balancing](#load_balancing-round_robin)
|
||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
|
||||
- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)
|
||||
|
||||
## distributed\_replica\_max\_ignored\_errors {#settings-distributed_replica_max_ignored_errors}
|
||||
|
||||
- Type: unsigned int
|
||||
- Default value: 0
|
||||
|
||||
The number of errors that will be ignored while choosing replicas (according to the `load_balancing` algorithm).
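A sketch of session-level usage (the value `2` is an arbitrary illustration):

``` sql
-- Up to 2 accumulated errors per replica are ignored when choosing a replica
SET distributed_replica_max_ignored_errors = 2;
```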
|
||||
|
||||
See also:
|
||||
|
||||
- [load\_balancing](#load_balancing-round_robin)
|
||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
|
||||
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
|
||||
|
||||
## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}
|
||||
|
||||
|
@ -9,6 +9,7 @@ The following aggregate functions are supported:
|
||||
- [`min`](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min)
|
||||
- [`max`](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max)
|
||||
- [`sum`](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum)
|
||||
- [`sumWithOverflow`](../../sql-reference/aggregate-functions/reference/sumwithoverflow.md#sumwithoverflowx)
|
||||
- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference/groupbitand.md#groupbitand)
|
||||
- [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor)
|
||||
- [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor)
|
||||
|
@ -206,7 +206,7 @@ Setting fields:
|
||||
|
||||
ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it’s necessary to set the table name according to the table name case in the database.
|
||||
|
||||
If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/general.md#oracle-odbc-encodings) article.
|
||||
If you have problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.
|
||||
|
||||
### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality}
|
||||
|
||||
|
@ -176,6 +176,54 @@ hasAny(array1, array2)
|
||||
|
||||
`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` returns `1`.
|
||||
|
||||
## hasSubstr {#hassubstr}
|
||||
|
||||
Checks whether all the elements of `array2` appear in `array1` in the same exact order. Therefore, the function will return `1` if and only if `array1 = prefix + array2 + suffix`.
|
||||
|
||||
``` sql
|
||||
hasSubstr(array1, array2)
|
||||
```
|
||||
|
||||
In other words, the function will check whether all the elements of `array2` are contained in `array1` like
|
||||
the `hasAll` function does. In addition, it will check that the elements are observed in the same order in both `array1` and `array2`.
|
||||
|
||||
For example:
|
||||
- `hasSubstr([1,2,3,4], [2,3])` returns 1. However, `hasSubstr([1,2,3,4], [3,2])` will return `0`.
|
||||
- `hasSubstr([1,2,3,4], [1,2,3])` returns 1. However, `hasSubstr([1,2,3,4], [1,2,4])` will return `0`.
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `array1` – Array of any type with a set of elements.
|
||||
- `array2` – Array of any type with a set of elements.
|
||||
|
||||
**Return values**
|
||||
|
||||
- `1`, if `array1` contains `array2`.
|
||||
- `0`, otherwise.
|
||||
|
||||
**Peculiar properties**
|
||||
|
||||
- The function will return `1` if `array2` is empty.
|
||||
- `Null` is processed as a value. In other words, `hasSubstr([1, 2, NULL, 3, 4], [2,3])` will return `0`. However, `hasSubstr([1, 2, NULL, 3, 4], [2,NULL,3])` will return `1`.
|
||||
- The order of values in both arrays matters.
|
||||
|
||||
**Examples**
|
||||
|
||||
`SELECT hasSubstr([], [])` returns 1.
|
||||
|
||||
`SELECT hasSubstr([1, Null], [Null])` returns 1.
|
||||
|
||||
`SELECT hasSubstr([1.0, 2, 3, 4], [1, 3])` returns 0.
|
||||
|
||||
`SELECT hasSubstr(['a', 'b'], ['a'])` returns 1.
|
||||
|
||||
`SELECT hasSubstr(['a', 'b' , 'c'], ['a', 'b'])` returns 1.
|
||||
|
||||
`SELECT hasSubstr(['a', 'b' , 'c'], ['a', 'c'])` returns 0.
|
||||
|
||||
`SELECT hasSubstr([[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4]])` returns 1.
|
||||
|
||||
|
||||
## indexOf(arr, x) {#indexofarr-x}
|
||||
|
||||
Returns the index of the first ‘x’ element (starting from 1) if it is in the array, or 0 if it is not.
|
||||
|
@ -17,7 +17,7 @@ SELECT [DISTINCT] expr_list
|
||||
[FROM [db.]table | (subquery) | table_function] [FINAL]
|
||||
[SAMPLE sample_coeff]
|
||||
[ARRAY JOIN ...]
|
||||
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
|
||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
|
||||
[PREWHERE expr]
|
||||
[WHERE expr]
|
||||
[GROUP BY expr_list] [WITH TOTALS]
|
||||
|
@ -12,6 +12,7 @@ toc_title: SYSTEM
|
||||
- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
|
||||
- [DROP UNCOMPRESSED CACHE](#query_language-system-drop-uncompressed-cache)
|
||||
- [DROP COMPILED EXPRESSION CACHE](#query_language-system-drop-compiled-expression-cache)
|
||||
- [DROP REPLICA](#query_language-system-drop-replica)
|
||||
- [FLUSH LOGS](#query_language-system-flush_logs)
|
||||
- [RELOAD CONFIG](#query_language-system-reload-config)
|
||||
- [SHUTDOWN](#query_language-system-shutdown)
|
||||
@ -67,6 +68,24 @@ For more convenient (automatic) cache management, see disable\_internal\_dns\_ca
|
||||
|
||||
Resets the mark cache. Used in development of ClickHouse and performance tests.
|
||||
|
||||
## DROP REPLICA {#query_language-system-drop-replica}
|
||||
|
||||
Dead replicas can be dropped using the following syntax:
|
||||
|
||||
```sql
|
||||
SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table;
|
||||
SYSTEM DROP REPLICA 'replica_name' FROM DATABASE database;
|
||||
SYSTEM DROP REPLICA 'replica_name';
|
||||
SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
|
||||
```
|
||||
|
||||
These queries remove the replica path in ZooKeeper. This is useful when a replica is dead and its metadata cannot be removed from ZooKeeper by `DROP TABLE` because there is no such table anymore. They will only drop the inactive/stale replica and can’t drop the local replica; please use `DROP TABLE` for that. `DROP REPLICA` does not drop any tables and does not remove any data or metadata from disk.
|
||||
|
||||
The first one removes metadata of `'replica_name'` replica of `database.table` table.
|
||||
The second one does the same for all replicated tables in the database.
|
||||
The third one does the same for all replicated tables on local server.
|
||||
The fourth one is useful to remove metadata of a dead replica when all other replicas of a table were dropped. It requires the table path to be specified explicitly. It must be the same path as was passed to the first argument of the `ReplicatedMergeTree` engine on table creation.
|
||||
|
||||
## DROP UNCOMPRESSED CACHE {#query_language-system-drop-uncompressed-cache}
|
||||
|
||||
Reset the uncompressed data cache. Used in development of ClickHouse and performance tests.
|
||||
|
@ -1,6 +1,10 @@
|
||||
---
|
||||
toc_folder_title: What's New
|
||||
toc_priority: 72
|
||||
toc_priority: 82
|
||||
---
|
||||
|
||||
# What's New In ClickHouse?
|
||||
|
||||
There's a short high-level [roadmap](roadmap.md) and a detailed [changelog](changelog/index.md) for releases that have already been published.
|
||||
|
||||
|
||||
|
@ -5,12 +5,14 @@ toc_title: Roadmap
|
||||
|
||||
# Roadmap {#roadmap}
|
||||
|
||||
## Q2 2020 {#q2-2020}
|
||||
|
||||
- Integration with external authentication services
|
||||
|
||||
## Q3 2020 {#q3-2020}
|
||||
|
||||
- High durability mode (`fsync` and WAL)
|
||||
- Support spilling data to disk in `GLOBAL JOIN`
|
||||
|
||||
## Q4 2020 {#q4-2020}
|
||||
|
||||
- Improved efficiency of distributed queries
|
||||
- Resource pools for more precise distribution of cluster capacity between users
|
||||
|
||||
{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
|
||||
|
@ -1048,17 +1048,6 @@ Valores posibles:
|
||||
|
||||
Valor predeterminado: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
Restablecer [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) para anidados `Distributed` tabla
|
||||
|
||||
Valores posibles:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
Valor predeterminado: 0.
|
||||
|
||||
## Optize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
Habilita o deshabilita el lanzamiento de una excepción [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) la consulta no realizó una fusión.
|
||||
|
@ -15,7 +15,7 @@ SELECT [DISTINCT] expr_list
|
||||
[FROM [db.]table | (subquery) | table_function] [FINAL]
|
||||
[SAMPLE sample_coeff]
|
||||
[ARRAY JOIN ...]
|
||||
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
|
||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
|
||||
[PREWHERE expr]
|
||||
[WHERE expr]
|
||||
[GROUP BY expr_list] [WITH TOTALS]
|
||||
|
@ -1048,17 +1048,6 @@ The results of the compilation are saved in the build directory in the form of .
|
||||
|
||||
مقدار پیشفرض: 0
|
||||
|
||||
## به زور \_بهتیتیتیتی\_سکیپ\_اس\_ش\_شارد\_مایش داده میشود {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
بازنشانی [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) برای تو در تو `Distributed` جدول
|
||||
|
||||
مقادیر ممکن:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
مقدار پیش فرض: 0.
|
||||
|
||||
## ا\_فزون\_ف\_کوپ {#setting-optimize_throw_if_noop}
|
||||
|
||||
را قادر می سازد و یا غیر فعال پرتاب یک استثنا اگر یک [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) پرس و جو یک ادغام انجام نمی.
|
||||
|
@ -15,7 +15,7 @@ SELECT [DISTINCT] expr_list
|
||||
[FROM [db.]table | (subquery) | table_function] [FINAL]
|
||||
[SAMPLE sample_coeff]
|
||||
[ARRAY JOIN ...]
|
||||
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
|
||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
|
||||
[PREWHERE expr]
|
||||
[WHERE expr]
|
||||
[GROUP BY expr_list] [WITH TOTALS]
|
||||
|
@ -1048,17 +1048,6 @@ Valeurs possibles:
|
||||
|
||||
Valeur par défaut: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
Réinitialiser [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) pour imbriquée `Distributed` table
|
||||
|
||||
Valeurs possibles:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
Valeur par défaut: 0.
|
||||
|
||||
## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
Active ou désactive le lancement d'une exception si [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) la requête n'a pas effectué de fusion.
|
||||
|
@ -15,7 +15,7 @@ SELECT [DISTINCT] expr_list
|
||||
[FROM [db.]table | (subquery) | table_function] [FINAL]
|
||||
[SAMPLE sample_coeff]
|
||||
[ARRAY JOIN ...]
|
||||
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
|
||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
|
||||
[PREWHERE expr]
|
||||
[WHERE expr]
|
||||
[GROUP BY expr_list] [WITH TOTALS]
|
||||
|
@ -1048,17 +1048,6 @@ PREWHERE/WHEREにシャーディングキー条件があるSELECTクエリの未
|
||||
|
||||
デフォルト値:0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
リセット [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) 入れ子の場合 `Distributed` テーブル
|
||||
|
||||
可能な値:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
デフォルト値は0です。
|
||||
|
||||
## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
例外のスローを有効または無効にします。 [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) クエリがマージを実行しませんでした。
|
||||
|
@ -82,6 +82,18 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
|
||||
|
||||
Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Внутри образов используются официальные `deb` пакеты.
|
||||
|
||||
### Из исполняемых файлов для нестандартных окружений {#from-binaries-non-linux}
|
||||
|
||||
Для других операционных систем и арихитектуры AArch64, сборки ClickHouse предоставляются в виде кросс-компилированного бинарника с последнего коммита ветки master (с задержкой в несколько часов).
|
||||
|
||||
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
|
||||
|
||||
После скачивания, можно воспользоваться `clickhouse client` для подключения к серверу, или `clickhouse local` для обработки локальных данных. Для запуска `clickhouse server` необходимо скачать конфигурационные файлы [сервера](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) и [пользователей](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) с GitHub.
|
||||
|
||||
Данные сборки не рекомендуются для использования в продакшене, так как они недостаточно тщательно протестированны. Также, в них присутствуют не все возможности ClickHouse.
|
||||
|
||||
### Из исходного кода {#from-sources}
|
||||
|
||||
Для компиляции ClickHouse вручную, используйте инструкцию для [Linux](../development/build.md) или [Mac OS X](../development/build-osx.md).
|
||||
|
@ -276,7 +276,7 @@ $ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&
|
||||
### Пример {#primer}
|
||||
|
||||
``` bash
|
||||
$ curl -sS "<address>?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
|
||||
$ curl -sS "http://localhost:8123/?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
|
||||
```
|
||||
|
||||
## Предопределенный HTTP интерфейс {#predefined_http_interface}
|
||||
|
@ -1025,7 +1025,7 @@ ClickHouse генерирует исключение
|
||||
|
||||
Значение по умолчанию: 0.
|
||||
|
||||
## optimize_skip_unused_shards {#optimize-skip-unused-shards}
|
||||
## optimize\_skip\_unused\_shards {#optimize-skip-unused-shards}
|
||||
|
||||
Включает или отключает пропуск неиспользуемых шардов для запросов [SELECT](../../sql-reference/statements/select/index.md) , в которых условие ключа шардирования задано в секции `WHERE/PREWHERE`. Предполагается, что данные распределены с помощью ключа шардирования, в противном случае настройка ничего не делает.
|
||||
|
||||
@ -1036,15 +1036,39 @@ ClickHouse генерирует исключение
|
||||
|
||||
Значение по умолчанию: 0
|
||||
|
||||
## force_optimize_skip_unused_shards {#force-optimize-skip-unused-shards}
|
||||
## optimize\_skip\_unused\_shards\_nesting {#optimize-skip-unused-shards-nesting}
|
||||
|
||||
Контролирует настройку [`optimize_skip_unused_shards`](#optimize-skip-unused-shards) (поэтому все еще требует `optimize_skip_unused_shards`) в зависимости от вложенности распределенного запроса (когда у вас есть `Distributed` таблица которая смотрит на другую `Distributed` таблицу).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- 0 — Выключена, `optimize_skip_unused_shards` работает всегда.
|
||||
- 1 — Включает `optimize_skip_unused_shards` только для 1-ого уровня вложенности.
|
||||
- 2 — Включает `optimize_skip_unused_shards` для 1-ого и 2-ого уровня вложенности.
|
||||
|
||||
Значение по умолчанию: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards {#force-optimize-skip-unused-shards}
|
||||
|
||||
Разрешает или запрещает выполнение запроса, если настройка [optimize_skip_unused_shards](#optimize-skip-unused-shards) включена, а пропуск неиспользуемых шардов невозможен. Если данная настройка включена и пропуск невозможен, ClickHouse генерирует исключение.
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- 0 — Выключена. ClickHouse не генерирует исключение.
|
||||
- 1 — Включена. Выполнение запроса запрещается, только если у таблицы есть ключ шардирования.
|
||||
- 2 — Включена. Выполнение запроса запрещается, даже если для таблицы не определен ключ шардирования.
|
||||
- 0 — Выключена, `force_optimize_skip_unused_shards` работает всегда.
|
||||
- 1 — Включает `force_optimize_skip_unused_shards` только для 1-ого уровня вложенности.
|
||||
- 2 — Включает `force_optimize_skip_unused_shards` для 1-ого и 2-ого уровня вложенности.
|
||||
|
||||
Значение по умолчанию: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_nesting {#settings-force_optimize_skip_unused_shards_nesting}
|
||||
|
||||
Контролирует настройку [`force_optimize_skip_unused_shards`](#force-optimize-skip-unused-shards) (поэтому все еще требует `optimize_skip_unused_shards`) в зависимости от вложенности распределенного запроса (когда у вас есть `Distributed` таблица которая смотрит на другую `Distributed` таблицу).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- 0 - Disabled, `force_optimize_skip_unused_shards` works on all levels.
|
||||
- 1 — Enables `force_optimize_skip_unused_shards` only for the first level.
|
||||
- 2 — Enables `force_optimize_skip_unused_shards` up to the second level.
|
||||
|
||||
Значение по умолчанию: 0
|
||||
|
||||
|
@ -13,7 +13,7 @@ SELECT [DISTINCT] expr_list
|
||||
[FROM [db.]table | (subquery) | table_function] [FINAL]
|
||||
[SAMPLE sample_coeff]
|
||||
[ARRAY JOIN ...]
|
||||
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
|
||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
|
||||
[PREWHERE expr]
|
||||
[WHERE expr]
|
||||
[GROUP BY expr_list] [WITH TOTALS]
|
||||
|
@ -20,7 +20,18 @@ Usually those also have some way to preview how Markdown will look like, which a
|
||||
|
||||
It’ll take some effort to go through, but the result will be very close to production documentation.
|
||||
|
||||
For the first time you’ll need to install [wkhtmltopdf](https://wkhtmltopdf.org/) and set up virtualenv:
|
||||
The first time, you’ll need to:
|
||||
|
||||
#### 1. Install [wkhtmltopdf](https://wkhtmltopdf.org/)
|
||||
|
||||
Follow the instructions on its official website: <https://wkhtmltopdf.org/downloads.html>
|
||||
|
||||
#### 2. Install CLI tools from npm
|
||||
|
||||
1. `apt-get install npm` for Debian/Ubuntu or `brew install npm` on Mac OS X.
|
||||
2. `npm install -g purifycss amphtml-validator`.
|
||||
|
||||
#### 3. Set up virtualenv
|
||||
|
||||
``` bash
|
||||
$ cd ClickHouse/docs/tools
|
||||
@ -30,7 +41,9 @@ $ source venv/bin/activate
|
||||
$ pip3 install -r requirements.txt
|
||||
```
|
||||
|
||||
Then running `build.py` without args (there are some, check `build.py --help`) will generate `ClickHouse/docs/build` folder with complete static html website.
|
||||
#### 4. Run build.py
|
||||
|
||||
When all prerequisites are installed, running `build.py` without args (there are some, check `build.py --help`) will generate the `ClickHouse/docs/build` folder with a complete static HTML website.
|
||||
|
||||
The easiest way to see the result is to use the `--livereload=8888` argument of `build.py`. Alternatively, you can manually launch an HTTP server to serve the docs, for example by running `cd ClickHouse/docs/build && python3 -m http.server 8888`. Then go to http://localhost:8888 in a browser. Feel free to use any other port instead of 8888.
|
||||
|
||||
|
@ -85,6 +85,15 @@ def html_to_amp(content):
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
if tag.name == 'iframe':
|
||||
tag.name = 'amp-iframe'
|
||||
tag.attrs['layout'] = 'responsive'
|
||||
del tag.attrs['alt']
|
||||
del tag.attrs['allowfullscreen']
|
||||
if not tag.attrs.get('width'):
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
elif tag.name == 'a':
|
||||
href = tag.attrs.get('href')
|
||||
if href:
|
||||
|
@ -26,6 +26,7 @@ MARKDOWN_EXTENSIONS = [
|
||||
'mdx_clickhouse',
|
||||
'admonition',
|
||||
'attr_list',
|
||||
'def_list',
|
||||
'codehilite',
|
||||
'nl2br',
|
||||
'sane_lists',
|
||||
|
@ -1,5 +1,6 @@
|
||||
import collections
|
||||
import datetime
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
|
||||
@ -39,13 +40,17 @@ def build_nav_entry(root, args):
|
||||
title = meta.get('toc_folder_title', 'hidden')
|
||||
prio = meta.get('toc_priority', 9999)
|
||||
logging.debug(f'Nav entry: {prio}, {title}, {path}')
|
||||
if not content.strip():
|
||||
if meta.get('toc_hidden') or not content.strip():
|
||||
title = 'hidden'
|
||||
if title == 'hidden':
|
||||
title = 'hidden-' + hashlib.sha1(content.encode('utf-8')).hexdigest()
|
||||
if args.nav_limit and len(result_items) >= args.nav_limit:
|
||||
break
|
||||
result_items.append((prio, title, path))
|
||||
result_items = sorted(result_items, key=lambda x: (x[0], x[1]))
|
||||
result = collections.OrderedDict([(item[1], item[2]) for item in result_items])
|
||||
if index_meta.get('toc_hidden_folder'):
|
||||
current_title += '|hidden-folder'
|
||||
return index_meta.get('toc_priority', 10000), current_title, result
|
||||
|
||||
|
||||
|
@ -117,6 +117,7 @@ def translate_filter(key, value, _format, _):
|
||||
admonition_value = []
|
||||
remaining_para_value = []
|
||||
in_admonition = True
|
||||
break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)]
|
||||
for item in value:
|
||||
if in_admonition:
|
||||
if item.get('t') == 'SoftBreak':
|
||||
@ -124,9 +125,11 @@ def translate_filter(key, value, _format, _):
|
||||
else:
|
||||
admonition_value.append(item)
|
||||
else:
|
||||
remaining_para_value.append(item)
|
||||
if item.get('t') == 'SoftBreak':
|
||||
remaining_para_value += break_value
|
||||
else:
|
||||
remaining_para_value.append(item)
|
||||
|
||||
break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)]
|
||||
if admonition_value[-1].get('t') == 'Quoted':
|
||||
text = process_sentence(admonition_value[-1]['c'][-1])
|
||||
text[0]['c'] = '"' + text[0]['c']
|
||||
@ -136,7 +139,7 @@ def translate_filter(key, value, _format, _):
|
||||
else:
|
||||
text = admonition_value[-1].get('c')
|
||||
if text:
|
||||
text = translate(text[0].upper() + text[1:])
|
||||
text = translate.translate(text[0].upper() + text[1:])
|
||||
admonition_value.append(pandocfilters.Space())
|
||||
admonition_value.append(pandocfilters.Str(f'"{text}"'))
|
||||
|
||||
|
@ -16,7 +16,7 @@ source "${BASE_DIR}/venv/bin/activate"
|
||||
${BASE_DIR}/split_meta.py "${INPUT_PATH}"
|
||||
|
||||
pandoc "${INPUT_CONTENT}" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \
|
||||
-f "markdown-space_in_atx_header" -t "markdown_strict+pipe_tables+markdown_attribute+all_symbols_escapable+backtick_code_blocks+autolink_bare_uris-link_attributes+markdown_attribute+mmd_link_attributes-raw_attribute+header_attributes-grid_tables" \
|
||||
-f "markdown-space_in_atx_header" -t "markdown_strict+pipe_tables+markdown_attribute+all_symbols_escapable+backtick_code_blocks+autolink_bare_uris-link_attributes+markdown_attribute+mmd_link_attributes-raw_attribute+header_attributes-grid_tables+definition_lists" \
|
||||
--atx-headers --wrap=none --columns=99999 --tab-stop=4
|
||||
perl -pi -e 's/{\\#\\#/{##/g' "${TEMP_FILE}"
|
||||
perl -pi -e 's/\\#\\#}/##}/g' "${TEMP_FILE}"
|
||||
|
@ -67,6 +67,13 @@ def adjust_markdown_html(content):
|
||||
summary.extract()
|
||||
details.insert(0, summary)
|
||||
|
||||
for dd in soup.find_all('dd'):
|
||||
dd_class = dd.attrs.get('class')
|
||||
if dd_class:
|
||||
dd.attrs['class'] = dd_class + ['pl-3']
|
||||
else:
|
||||
dd.attrs['class'] = 'pl-3'
|
||||
|
||||
for div in soup.find_all('div'):
|
||||
div_class = div.attrs.get('class')
|
||||
is_admonition = div_class and 'admonition' in div.attrs.get('class')
|
||||
|
@ -1048,17 +1048,6 @@ Olası değerler:
|
||||
|
||||
Varsayılan değer: 0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
Sıfırlamak [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) iç içe geçmiş için `Distributed` Tablo
|
||||
|
||||
Olası değerler:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
Varsayılan değer: 0.
|
||||
|
||||
## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
Bir özel durum atmayı etkinleştirir veya devre dışı bırakır. [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) sorgu birleştirme gerçekleştirmedi.
|
||||
|
@ -1 +0,0 @@
|
||||
../../en/commercial/support.md
|
23
docs/zh/commercial/support.md
Normal file
@ -0,0 +1,23 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
toc_priority: 3
|
||||
toc_title: "\u788C\u83BD\u7984Support:"
|
||||
---
|
||||
|
||||
# ClickHouse商业支持服务提供商 {#clickhouse-commercial-support-service-providers}
|
||||
|
||||
!!! info "信息"
|
||||
如果您已经推出ClickHouse商业支持服务,请随时 [打开拉取请求](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) 将其添加到以下列表。
|
||||
|
||||
## 敏锐性 {#altinity}
|
||||
|
||||
隆隆隆隆路虏脢..陇.貌.垄拢卢虏禄and陇.貌路.隆拢脳枚脢虏 隆隆隆隆路虏脢..陇.貌.垄拢卢虏禄.陇 访问 [www.altinity.com](https://www.altinity.com/) 欲了解更多信息.
|
||||
|
||||
## Mafiree {#mafiree}
|
||||
|
||||
[服务说明](http://mafiree.com/clickhouse-analytics-services.php)
|
||||
|
||||
## MinervaDB {#minervadb}
|
||||
|
||||
[服务说明](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/)
|
@ -33,7 +33,7 @@ ClickHouse 收集的指标项:
|
||||
- 服务用于计算的资源占用的各种指标。
|
||||
- 关于查询处理的常见统计信息。
|
||||
|
||||
可以在 [系统指标](system-tables.md#system_tables-metrics) ,[系统事件](system-tables.md#system_tables-events) 以及[系统异步指标](system-tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
|
||||
可以在 [系统指标](system-tables/metrics.md#system_tables-metrics) ,[系统事件](system-tables/events.md#system_tables-events) 以及[系统异步指标](system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
|
||||
|
||||
可以配置ClickHouse 往 [石墨](https://github.com/graphite-project)导入指标。 参考 [石墨部分](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) 配置文件。在配置指标导出之前,需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
|
||||
|
||||
|
@ -13,7 +13,7 @@ ClickHouse运行允许分析查询执行的采样探查器。 使用探查器,
|
||||
|
||||
- 设置 [trace\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) 服务器配置部分。
|
||||
|
||||
本节配置 [trace\_log](../../operations/system-tables.md#system_tables-trace_log) 系统表包含探查器运行的结果。 它是默认配置的。 请记住,此表中的数据仅对正在运行的服务器有效。 服务器重新启动后,ClickHouse不会清理表,所有存储的虚拟内存地址都可能无效。
|
||||
本节配置 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表包含探查器运行的结果。 它是默认配置的。 请记住,此表中的数据仅对正在运行的服务器有效。 服务器重新启动后,ClickHouse不会清理表,所有存储的虚拟内存地址都可能无效。
|
||||
|
||||
- 设置 [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) 或 [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) 设置。 这两种设置可以同时使用。
|
||||
|
||||
|
@ -145,10 +145,10 @@ ClickHouse每x秒重新加载内置字典。 这使得编辑字典 “on the fly
|
||||
- interval – The interval for sending, in seconds.
|
||||
- timeout – The timeout for sending data, in seconds.
|
||||
- root\_path – Prefix for keys.
|
||||
- metrics – Sending data from the [系统。指标](../../operations/system-tables.md#system_tables-metrics) 桌子
|
||||
- events – Sending deltas data accumulated for the time period from the [系统。活动](../../operations/system-tables.md#system_tables-events) 桌子
|
||||
- events\_cumulative – Sending cumulative data from the [系统。活动](../../operations/system-tables.md#system_tables-events) 桌子
|
||||
- asynchronous\_metrics – Sending data from the [系统。asynchronous\_metrics](../../operations/system-tables.md#system_tables-asynchronous_metrics) 桌子
|
||||
- metrics – Sending data from the [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) 桌子
|
||||
- events – Sending deltas data accumulated for the time period from the [系统。活动](../../operations/system-tables/events.md#system_tables-events) 桌子
|
||||
- events\_cumulative – Sending cumulative data from the [系统。活动](../../operations/system-tables/events.md#system_tables-events) 桌子
|
||||
- asynchronous\_metrics – Sending data from the [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) 桌子
|
||||
|
||||
您可以配置多个 `<graphite>` 条款 例如,您可以使用它以不同的时间间隔发送不同的数据。
|
||||
|
||||
@ -503,7 +503,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
记录与之关联的事件 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). 例如,添加或合并数据。 您可以使用日志来模拟合并算法并比较它们的特征。 您可以可视化合并过程。
|
||||
|
||||
查询记录在 [系统。part\_log](../../operations/system-tables.md#system_tables-part-log) 表,而不是在一个单独的文件。 您可以在以下命令中配置此表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。part\_log](../../operations/system-tables/part_log.md#system_tables-part-log) 表,而不是在一个单独的文件。 您可以在以下命令中配置此表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -540,7 +540,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
用于记录接收到的查询的设置 [log\_queries=1](../settings/settings.md) 设置。
|
||||
|
||||
查询记录在 [系统。query\_log](../../operations/system-tables.md#system_tables-query_log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -566,7 +566,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
设置用于记录接收到的查询的线程 [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) 设置。
|
||||
|
||||
查询记录在 [系统。query\_thread\_log](../../operations/system-tables.md#system_tables-query-thread-log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
查询记录在 [系统。query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query-thread-log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。
|
||||
|
||||
使用以下参数配置日志记录:
|
||||
|
||||
@ -590,7 +590,7 @@ SSL客户端/服务器配置。
|
||||
|
||||
## trace\_log {#server_configuration_parameters-trace_log}
|
||||
|
||||
设置为 [trace\_log](../../operations/system-tables.md#system_tables-trace_log) 系统表操作。
|
||||
设置为 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表操作。
|
||||
|
||||
参数:
|
||||
|
||||
|
@ -1048,17 +1048,6 @@ ClickHouse生成异常
|
||||
|
||||
默认值:0
|
||||
|
||||
## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
|
||||
|
||||
重置 [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) 对于嵌套 `Distributed` 表
|
||||
|
||||
可能的值:
|
||||
|
||||
- 1 — Enabled.
|
||||
- 0 — Disabled.
|
||||
|
||||
默认值:0。
|
||||
|
||||
## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
启用或禁用抛出异常,如果 [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) 查询未执行合并。
|
||||
@ -1165,7 +1154,7 @@ ClickHouse生成异常
|
||||
|
||||
另请参阅:
|
||||
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}
|
||||
|
||||
@ -1188,7 +1177,7 @@ ClickHouse生成异常
|
||||
|
||||
另请参阅:
|
||||
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## allow\_introspection\_functions {#settings-allow_introspection_functions}
|
||||
|
||||
@ -1204,7 +1193,7 @@ ClickHouse生成异常
|
||||
**另请参阅**
|
||||
|
||||
- [采样查询探查器](../optimizing-performance/sampling-query-profiler.md)
|
||||
- 系统表 [trace\_log](../../operations/system-tables.md#system_tables-trace_log)
|
||||
- 系统表 [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log)
|
||||
|
||||
## input\_format\_parallel\_parsing {#input-format-parallel-parsing}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,8 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
## 系统。asynchronous\_metric\_log {#system-tables-async-log}
|
||||
|
||||
包含以下内容的历史值 `system.asynchronous_log` (见 [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))
|
41
docs/zh/operations/system-tables/asynchronous_metrics.md
Normal file
@ -0,0 +1,41 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.asynchronous\_metrics {#system_tables-asynchronous_metrics}

Contains metrics that are calculated periodically in the background, for example, the amount of RAM in use.

Columns:

- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.

**Example**

``` sql
SELECT * FROM system.asynchronous_metrics LIMIT 10
```

``` text
┌─metric──────────────────────────────────┬──────value─┐
│ jemalloc.background_thread.run_interval │          0 │
│ jemalloc.background_thread.num_runs     │          0 │
│ jemalloc.background_thread.num_threads  │          0 │
│ jemalloc.retained                       │  422551552 │
│ jemalloc.mapped                         │ 1682989056 │
│ jemalloc.resident                       │ 1656446976 │
│ jemalloc.metadata_thp                   │          0 │
│ jemalloc.metadata                       │   10226856 │
│ UncompressedCacheCells                  │          0 │
│ MarkCacheFiles                          │          0 │
└─────────────────────────────────────────┴────────────┘
```

**See Also**

- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
29 docs/zh/operations/system-tables/clusters.md Normal file
@ -0,0 +1,29 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.clusters {#system-clusters}

Contains information about clusters available in the config file and the servers in them.

Columns:

- `cluster` (String) — The cluster name.
- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
- `host_name` (String) — The host name, as specified in the config.
- `host_address` (String) — The host IP address obtained from DNS.
- `port` (UInt16) — The port to use for connecting to the server.
- `user` (String) — The name of the user for connecting to the server.
- `errors_count` (UInt32) — The number of times this host failed to reach the replica.
- `estimated_recovery_time` (UInt32) — The number of seconds left until the replica error count is zeroed and the replica is considered to be back to normal.

Please note that `errors_count` is updated once per query to the cluster, while `estimated_recovery_time` is recalculated on demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`; the next query will zero `errors_count` and try to use the replica as if it had no errors.
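For example, a query along the following lines shows the shard and replica layout of every configured cluster (the column selection and ordering here are only illustrative; the output depends entirely on the server configuration):

``` sql
-- Show the shard/replica layout of every configured cluster.
SELECT cluster, shard_num, replica_num, host_name, port
FROM system.clusters
ORDER BY cluster, shard_num, replica_num;
```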
**See Also**

- [Distributed table engine](../../engines/table-engines/special/distributed.md)
- [distributed\_replica\_error\_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed\_replica\_error\_half\_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
27 docs/zh/operations/system-tables/columns.md Normal file
@ -0,0 +1,27 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.columns {#system-columns}

Contains information about columns in all the tables.

You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.

The `system.columns` table contains the following columns (the column type is shown in brackets):

- `database` (String) — Database name.
- `table` (String) — Table name.
- `name` (String) — Column name.
- `type` (String) — Column type.
- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
- `marks_bytes` (UInt64) — The size of marks, in bytes.
- `comment` (String) — Comment on the column, or an empty string if it is not defined.
- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
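For instance, a query of roughly this shape lists the heaviest columns in one database (the database name `mydb` is a placeholder):

``` sql
-- Largest columns by compressed size in a given database.
SELECT table, name, type, data_compressed_bytes
FROM system.columns
WHERE database = 'mydb'
ORDER BY data_compressed_bytes DESC
LIMIT 10;
```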
45 docs/zh/operations/system-tables/contributors.md Normal file
@ -0,0 +1,45 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.contributors {#system-contributors}

Contains information about contributors. The order is random at query execution time.

Columns:

- `name` (String) — Contributor (author) name from git log.

**Example**

``` sql
SELECT * FROM system.contributors LIMIT 10
```

``` text
┌─name─────────────┐
│ Olga Khvostikova │
│ Max Vetrov       │
│ LiuYangkuan      │
│ svladykin        │
│ zamulla          │
│ Šimon Podlipský  │
│ BayoNet          │
│ Ilya Khomutov    │
│ Amy Krishnevsky  │
│ Loud_Scream      │
└──────────────────┘
```

To find yourself in the table, use the query:

``` sql
SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova'
```

``` text
┌─name─────────────┐
│ Olga Khvostikova │
└──────────────────┘
```
39 docs/zh/operations/system-tables/data_type_families.md Normal file
@ -0,0 +1,39 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.data\_type\_families {#system_tables-data_type_families}

Contains information about supported [data types](../../sql-reference/data-types/).

Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — Data type name.
- `case_insensitive` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Property that shows whether you can use a data type name in a query in a case-insensitive manner or not. For example, `Date` and `date` are both valid.
- `alias_to` ([String](../../sql-reference/data-types/string.md)) — Data type name for which `name` is an alias.

**Example**

``` sql
SELECT * FROM system.data_type_families WHERE alias_to = 'String'
```

``` text
┌─name───────┬─case_insensitive─┬─alias_to─┐
│ LONGBLOB   │                1 │ String   │
│ LONGTEXT   │                1 │ String   │
│ TINYTEXT   │                1 │ String   │
│ TEXT       │                1 │ String   │
│ VARCHAR    │                1 │ String   │
│ MEDIUMBLOB │                1 │ String   │
│ BLOB       │                1 │ String   │
│ TINYBLOB   │                1 │ String   │
│ CHAR       │                1 │ String   │
│ MEDIUMTEXT │                1 │ String   │
└────────────┴──────────────────┴──────────┘
```

**See Also**

- [Syntax](../../sql-reference/syntax.md) — Information about supported syntax.
12 docs/zh/operations/system-tables/databases.md Normal file
@ -0,0 +1,12 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.databases {#system-databases}

This table contains a single String column called ‘name’ – the name of a database.

Each database that the server knows about has a corresponding entry in the table.

This system table is used for implementing the `SHOW DATABASES` query.
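For example, selecting from the table returns the same information as the query it backs:

``` sql
-- Equivalent to SHOW DATABASES.
SELECT name FROM system.databases;
```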
14 docs/zh/operations/system-tables/detached_parts.md Normal file
@ -0,0 +1,14 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.detached\_parts {#system_tables-detached_parts}

Contains information about detached parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached.

For user-detached parts, the reason is empty. Such parts can be attached with the [ALTER TABLE ATTACH PARTITION\|PART](../../sql-reference/statements/alter.md#alter_attach-partition) command.

For the description of other columns, see [system.parts](../../operations/system-tables/parts.md#system_tables-parts).

If a part name is invalid, the values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter.md#alter_drop-detached).
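As a small illustration, a query of roughly this form groups detached parts by table and by detach reason (the grouping is only one possible way to slice the data):

``` sql
-- Count detached parts per table and per detach reason.
SELECT database, table, reason, count() AS parts
FROM system.detached_parts
GROUP BY database, table, reason
ORDER BY parts DESC;
```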
66 docs/zh/operations/system-tables/dictionaries.md Normal file
@ -0,0 +1,66 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.dictionaries {#system_tables-dictionaries}

Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
  - `NOT_LOADED` — Dictionary was not loaded because it was not used.
  - `LOADED` — Dictionary loaded successfully.
  - `FAILED` — Unable to load the dictionary as a result of an error.
  - `LOADING` — Dictionary is loading now.
  - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: a [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, the dictionary config has changed).
  - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key` — [Key type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): a numeric key ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or a composite key ([String](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created.

**Example**

Configure the dictionary.

``` sql
CREATE DICTIONARY dictdb.dict
(
    `key` Int64 DEFAULT -1,
    `value_default` String DEFAULT 'world',
    `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
LIFETIME(MIN 0 MAX 1)
LAYOUT(FLAT())
```

Make sure that the dictionary is loaded.

``` sql
SELECT * FROM system.dictionaries
```

``` text
┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┬──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
│ dictdb   │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │           74032 │           0 │        1 │             1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │            0 │            1 │ 2020-03-04 04:17:34 │          2020-03-04 04:30:34 │                 0.002 │                │
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
```
31 docs/zh/operations/system-tables/disks.md Normal file
@ -0,0 +1,31 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.disks {#system_tables-disks}

Contains information about disks defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — Name of a disk in the server configuration.
- `path` ([String](../../sql-reference/data-types/string.md)) — Path to the mount point in the file system.
- `free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Free space on disk in bytes.
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of the disk configuration.
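For instance, a query like the following reports free space per disk; the unit conversion is only for readability and the output depends on the host:

``` sql
-- Free space per configured disk, in GiB.
SELECT name, path, round(free_space / 1024 / 1024 / 1024, 2) AS free_gib
FROM system.disks;
```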
## system.storage\_policies {#system_tables-storage_policies}

Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

Columns:

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.

If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
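As a quick sketch, the mapping from policies to volumes to disks can be listed like this (the ordering by `volume_priority` mirrors the order in which volumes are tried):

``` sql
-- Which disks back each volume of each storage policy.
SELECT policy_name, volume_name, disks
FROM system.storage_policies
ORDER BY policy_name, volume_priority;
```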
37 docs/zh/operations/system-tables/events.md Normal file
@ -0,0 +1,37 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.events {#system_tables-events}

Contains information about the number of events that have occurred in the system. For example, in the table you can find how many `SELECT` queries have been processed since the ClickHouse server started.

Columns:

- `event` ([String](../../sql-reference/data-types/string.md)) — Event name.
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
- `description` ([String](../../sql-reference/data-types/string.md)) — Event description.

**Example**

``` sql
SELECT * FROM system.events LIMIT 5
```

``` text
┌─event─────────────────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────┐
│ Query                                 │    12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │
│ SelectQuery                           │     8 │ Same as Query, but only for SELECT queries.                                                           │
│ FileOpen                              │    73 │ Number of files opened.                                                                               │
│ ReadBufferFromFileDescriptorRead      │   155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets.                        │
│ ReadBufferFromFileDescriptorReadBytes │  9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. │
└───────────────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**See Also**

- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
13 docs/zh/operations/system-tables/functions.md Normal file
@ -0,0 +1,13 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.functions {#system-functions}

Contains information about normal and aggregate functions.

Columns:

- `name` (`String`) – The name of the function.
- `is_aggregate` (`UInt8`) — Whether the function is aggregate.
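For example, a simple aggregation over the table counts how many regular and aggregate functions the server exposes:

``` sql
-- Count regular vs. aggregate functions.
SELECT is_aggregate, count() AS functions
FROM system.functions
GROUP BY is_aggregate;
```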
20 docs/zh/operations/system-tables/graphite_retentions.md Normal file
@ -0,0 +1,20 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.graphite\_retentions {#system-graphite-retentions}

Contains information about the parameters of [graphite\_rollup](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite), which are used in tables with the [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md) engines.

Columns:

- `config_name` (String) — `graphite_rollup` parameter name.
- `regexp` (String) — A pattern for the metric name.
- `function` (String) — The name of the aggregating function.
- `age` (UInt64) — The minimum age of the data, in seconds.
- `precision` (UInt64) — How precisely to define the age of the data, in seconds.
- `priority` (UInt16) — Pattern priority.
- `is_default` (UInt8) — Whether the pattern is the default.
- `Tables.database` (Array(String)) — Array of names of database tables that use the `config_name` parameter.
- `Tables.table` (Array(String)) — Array of table names that use the `config_name` parameter.
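As one possible way to inspect the configured rollup rules, a query of this shape lists them in priority order per configuration section:

``` sql
-- Rollup rules per graphite_rollup section, in priority order.
SELECT config_name, regexp, function, age, precision
FROM system.graphite_retentions
ORDER BY config_name, priority;
```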
50 docs/zh/operations/system-tables/index.md Normal file
@ -0,0 +1,50 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
toc_priority: 52
toc_title: "System Tables"
---

# System Tables {#system-tables}

## Introduction {#system-tables-introduction}

System tables provide information about:

- Server states, processes, and environment.
- The server's internal processes.

System tables:

- Are located in the `system` database.
- Are available only for reading data.
- Cannot be dropped or altered, but can be detached.

Most system tables store their data in RAM. The ClickHouse server creates such system tables at start-up.

Unlike other system tables, the system tables [metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log), [query\_log](../../operations/system-tables/query_log.md#system_tables-query_log), [query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) and [trace\_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) are served by the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine and store their data in the filesystem. If you remove a table from the filesystem, the ClickHouse server creates an empty one again at the time of the next write. If the system table schema changed in a new version, ClickHouse renames the current table and creates a new one.

By default, table growth is unlimited. To control the size of a table, you can use [TTL](../../sql-reference/statements/alter.md#manipulations-with-table-ttl) settings for removing outdated log records. Also, you can use the partitioning feature of `MergeTree`-engine tables.
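As a rough sketch only (the table and retention period are placeholders; whether you manage log retention this way depends on your setup), such a TTL can be applied like this:

``` sql
-- Keep about one month of query log history; expired records are removed in the background.
ALTER TABLE system.query_log MODIFY TTL event_date + INTERVAL 30 DAY;
```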
## Sources of System Metrics {#system-tables-sources-of-system-metrics}

For collecting system metrics, the ClickHouse server uses:

- the `CAP_NET_ADMIN` capability.
- [procfs](https://en.wikipedia.org/wiki/Procfs) (only in Linux).

**procfs**

If the ClickHouse server does not have the `CAP_NET_ADMIN` capability, it tries to fall back to `ProcfsMetricsProvider`. `ProcfsMetricsProvider` allows collecting per-query system metrics (for CPU and I/O).

If procfs is supported and enabled on the system, the ClickHouse server collects these metrics:

- `OSCPUVirtualTimeMicroseconds`
- `OSCPUWaitMicroseconds`
- `OSIOWaitMicroseconds`
- `OSReadChars`
- `OSWriteChars`
- `OSReadBytes`
- `OSWriteBytes`

[Original article](https://clickhouse.tech/docs/en/operations/system-tables/) <!--hide-->
16 docs/zh/operations/system-tables/merge_tree_settings.md Normal file
@ -0,0 +1,16 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.merge\_tree\_settings {#system-merge_tree_settings}

Contains information about settings for `MergeTree` tables.

Columns:

- `name` (String) — Setting name.
- `value` (String) — Setting value.
- `description` (String) — Setting description.
- `type` (String) — Setting type (implementation specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
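For example, filtering on the `changed` flag shows only the MergeTree settings that differ from their defaults:

``` sql
-- MergeTree settings that were explicitly changed.
SELECT name, value, description
FROM system.merge_tree_settings
WHERE changed;
```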
24 docs/zh/operations/system-tables/merges.md Normal file
@ -0,0 +1,24 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.merges {#system-merges}

Contains information about merges and part mutations currently in process for tables in the MergeTree family.

Columns:

- `database` (String) — The name of the database the table is in.
- `table` (String) — Table name.
- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started.
- `progress` (Float64) — The percentage of completed work from 0 to 1.
- `num_parts` (UInt64) — The number of pieces to be merged.
- `result_part_name` (String) — The name of the part that will be formed as the result of merging.
- `is_mutation` (UInt8) — 1 if this process is a part mutation.
- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks.
- `total_size_marks` (UInt64) — The total number of marks in the merged parts.
- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed.
- `rows_read` (UInt64) — Number of rows read.
- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
- `rows_written` (UInt64) — Number of rows written.
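For instance, a query along these lines watches the currently running merges and mutations, longest-running first (the column selection is illustrative):

``` sql
-- Currently running merges and mutations, longest-running first.
SELECT database, table, elapsed, progress, is_mutation, result_part_name
FROM system.merges
ORDER BY elapsed DESC;
```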
60 docs/zh/operations/system-tables/metric_log.md Normal file
@ -0,0 +1,60 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.metric\_log {#system_tables-metric_log}

Contains a history of metric values from the tables `system.metrics` and `system.events`, periodically flushed to disk.
To turn on metrics history collection in `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:

``` xml
<yandex>
    <metric_log>
        <database>system</database>
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
    </metric_log>
</yandex>
```

**Example**

``` sql
SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
event_date: 2020-02-18
event_time: 2020-02-18 07:15:33
milliseconds: 554
ProfileEvent_Query: 0
ProfileEvent_SelectQuery: 0
ProfileEvent_InsertQuery: 0
ProfileEvent_FileOpen: 0
ProfileEvent_Seek: 0
ProfileEvent_ReadBufferFromFileDescriptorRead: 1
ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0
ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0
ProfileEvent_WriteBufferFromFileDescriptorWrite: 1
ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0
ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56
...
CurrentMetric_Query: 0
CurrentMetric_Merge: 0
CurrentMetric_PartMutation: 0
CurrentMetric_ReplicatedFetch: 0
CurrentMetric_ReplicatedSend: 0
CurrentMetric_ReplicatedChecks: 0
...
```

**See Also**

- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
44 docs/zh/operations/system-tables/metrics.md Normal file
@ -0,0 +1,44 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.metrics {#system_tables-metrics}

Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.

Columns:

- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.

The list of supported metrics can be found in the [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) source file of ClickHouse.

**Example**

``` sql
SELECT * FROM system.metrics LIMIT 10
```

``` text
┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Query                      │     1 │ Number of executing queries                                                                                                                                                                      │
│ Merge                      │     0 │ Number of executing background merges                                                                                                                                                            │
│ PartMutation               │     0 │ Number of mutations (ALTER DELETE/UPDATE)                                                                                                                                                        │
│ ReplicatedFetch            │     0 │ Number of data parts being fetched from replicas                                                                                                                                                 │
│ ReplicatedSend             │     0 │ Number of data parts being sent to replicas                                                                                                                                                      │
│ ReplicatedChecks           │     0 │ Number of data parts checking for consistency                                                                                                                                                    │
│ BackgroundPoolTask         │     0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping)                                                                                │
│ BackgroundSchedulePoolTask │     0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.   │
│ DiskSpaceReservedForMerge  │     0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts.                                                                 │
│ DistributedSend            │     0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode.                                                          │
└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**See Also**

- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
Some files were not shown because too many files have changed in this diff